Bug Summary

File: build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1154, column 10
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name InstrEmitter.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm -resource-dir /usr/lib/llvm-15/lib/clang/15.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/CodeGen/SelectionDAG -I /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/CodeGen/SelectionDAG -I include -I /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-15/lib/clang/15.0.0/include -internal-isystem /usr/local/include -internal-isystem 
/usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-04-20-140412-16051-1 -x c++ /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp

/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp

1//==--- InstrEmitter.cpp - Emit MachineInstrs for the SelectionDAG class ---==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This implements the Emit routines for the SelectionDAG class, which creates
10// MachineInstrs based on the decisions of the SelectionDAG instruction
11// selection.
12//
13//===----------------------------------------------------------------------===//
14
15#include "InstrEmitter.h"
16#include "SDNodeDbgValue.h"
17#include "llvm/BinaryFormat/Dwarf.h"
18#include "llvm/CodeGen/MachineConstantPool.h"
19#include "llvm/CodeGen/MachineFunction.h"
20#include "llvm/CodeGen/MachineInstrBuilder.h"
21#include "llvm/CodeGen/MachineRegisterInfo.h"
22#include "llvm/CodeGen/StackMaps.h"
23#include "llvm/CodeGen/TargetInstrInfo.h"
24#include "llvm/CodeGen/TargetLowering.h"
25#include "llvm/CodeGen/TargetSubtargetInfo.h"
26#include "llvm/IR/DebugInfoMetadata.h"
27#include "llvm/IR/PseudoProbe.h"
28#include "llvm/Support/ErrorHandling.h"
29#include "llvm/Target/TargetMachine.h"
30using namespace llvm;
31
// Debug-output category used by LLVM_DEBUG/-debug-only for this file.
#define DEBUG_TYPE "instr-emitter"
33
/// MinRCSize - Smallest register class we allow when constraining virtual
/// registers. If satisfying all register class constraints would require
/// using a smaller register class, emit a COPY to a new virtual register
/// instead.
const unsigned MinRCSize = 4;
39
40/// CountResults - The results of target nodes have register or immediate
41/// operands first, then an optional chain, and optional glue operands (which do
42/// not go into the resulting MachineInstr).
43unsigned InstrEmitter::CountResults(SDNode *Node) {
44 unsigned N = Node->getNumValues();
45 while (N && Node->getValueType(N - 1) == MVT::Glue)
46 --N;
47 if (N && Node->getValueType(N - 1) == MVT::Other)
48 --N; // Skip over chain result.
49 return N;
50}
51
52/// countOperands - The inputs to target nodes have any actual inputs first,
53/// followed by an optional chain operand, then an optional glue operand.
54/// Compute the number of actual operands that will go into the resulting
55/// MachineInstr.
56///
57/// Also count physreg RegisterSDNode and RegisterMaskSDNode operands preceding
58/// the chain and glue. These operands may be implicit on the machine instr.
59static unsigned countOperands(SDNode *Node, unsigned NumExpUses,
60 unsigned &NumImpUses) {
61 unsigned N = Node->getNumOperands();
62 while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
63 --N;
64 if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
65 --N; // Ignore chain if it exists.
66
67 // Count RegisterSDNode and RegisterMaskSDNode operands for NumImpUses.
68 NumImpUses = N - NumExpUses;
69 for (unsigned I = N; I > NumExpUses; --I) {
70 if (isa<RegisterMaskSDNode>(Node->getOperand(I - 1)))
71 continue;
72 if (RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Node->getOperand(I - 1)))
73 if (Register::isPhysicalRegister(RN->getReg()))
74 continue;
75 NumImpUses = N - I;
76 break;
77 }
78
79 return N;
80}
81
82/// EmitCopyFromReg - Generate machine code for an CopyFromReg node or an
83/// implicit physical register output.
84void InstrEmitter::
85EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
86 Register SrcReg, DenseMap<SDValue, Register> &VRBaseMap) {
87 Register VRBase;
88 if (SrcReg.isVirtual()) {
89 // Just use the input register directly!
90 SDValue Op(Node, ResNo);
91 if (IsClone)
92 VRBaseMap.erase(Op);
93 bool isNew = VRBaseMap.insert(std::make_pair(Op, SrcReg)).second;
94 (void)isNew; // Silence compiler warning.
95 assert(isNew && "Node emitted out of order - early")(static_cast <bool> (isNew && "Node emitted out of order - early"
) ? void (0) : __assert_fail ("isNew && \"Node emitted out of order - early\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 95, __extension__
__PRETTY_FUNCTION__))
;
96 return;
97 }
98
99 // If the node is only used by a CopyToReg and the dest reg is a vreg, use
100 // the CopyToReg'd destination register instead of creating a new vreg.
101 bool MatchReg = true;
102 const TargetRegisterClass *UseRC = nullptr;
103 MVT VT = Node->getSimpleValueType(ResNo);
104
105 // Stick to the preferred register classes for legal types.
106 if (TLI->isTypeLegal(VT))
107 UseRC = TLI->getRegClassFor(VT, Node->isDivergent());
108
109 if (!IsClone && !IsCloned)
110 for (SDNode *User : Node->uses()) {
111 bool Match = true;
112 if (User->getOpcode() == ISD::CopyToReg &&
113 User->getOperand(2).getNode() == Node &&
114 User->getOperand(2).getResNo() == ResNo) {
115 Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
116 if (DestReg.isVirtual()) {
117 VRBase = DestReg;
118 Match = false;
119 } else if (DestReg != SrcReg)
120 Match = false;
121 } else {
122 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
123 SDValue Op = User->getOperand(i);
124 if (Op.getNode() != Node || Op.getResNo() != ResNo)
125 continue;
126 MVT VT = Node->getSimpleValueType(Op.getResNo());
127 if (VT == MVT::Other || VT == MVT::Glue)
128 continue;
129 Match = false;
130 if (User->isMachineOpcode()) {
131 const MCInstrDesc &II = TII->get(User->getMachineOpcode());
132 const TargetRegisterClass *RC = nullptr;
133 if (i+II.getNumDefs() < II.getNumOperands()) {
134 RC = TRI->getAllocatableClass(
135 TII->getRegClass(II, i+II.getNumDefs(), TRI, *MF));
136 }
137 if (!UseRC)
138 UseRC = RC;
139 else if (RC) {
140 const TargetRegisterClass *ComRC =
141 TRI->getCommonSubClass(UseRC, RC);
142 // If multiple uses expect disjoint register classes, we emit
143 // copies in AddRegisterOperand.
144 if (ComRC)
145 UseRC = ComRC;
146 }
147 }
148 }
149 }
150 MatchReg &= Match;
151 if (VRBase)
152 break;
153 }
154
155 const TargetRegisterClass *SrcRC = nullptr, *DstRC = nullptr;
156 SrcRC = TRI->getMinimalPhysRegClass(SrcReg, VT);
157
158 // Figure out the register class to create for the destreg.
159 if (VRBase) {
160 DstRC = MRI->getRegClass(VRBase);
161 } else if (UseRC) {
162 assert(TRI->isTypeLegalForClass(*UseRC, VT) &&(static_cast <bool> (TRI->isTypeLegalForClass(*UseRC
, VT) && "Incompatible phys register def and uses!") ?
void (0) : __assert_fail ("TRI->isTypeLegalForClass(*UseRC, VT) && \"Incompatible phys register def and uses!\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 163, __extension__
__PRETTY_FUNCTION__))
163 "Incompatible phys register def and uses!")(static_cast <bool> (TRI->isTypeLegalForClass(*UseRC
, VT) && "Incompatible phys register def and uses!") ?
void (0) : __assert_fail ("TRI->isTypeLegalForClass(*UseRC, VT) && \"Incompatible phys register def and uses!\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 163, __extension__
__PRETTY_FUNCTION__))
;
164 DstRC = UseRC;
165 } else
166 DstRC = SrcRC;
167
168 // If all uses are reading from the src physical register and copying the
169 // register is either impossible or very expensive, then don't create a copy.
170 if (MatchReg && SrcRC->getCopyCost() < 0) {
171 VRBase = SrcReg;
172 } else {
173 // Create the reg, emit the copy.
174 VRBase = MRI->createVirtualRegister(DstRC);
175 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
176 VRBase).addReg(SrcReg);
177 }
178
179 SDValue Op(Node, ResNo);
180 if (IsClone)
181 VRBaseMap.erase(Op);
182 bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
183 (void)isNew; // Silence compiler warning.
184 assert(isNew && "Node emitted out of order - early")(static_cast <bool> (isNew && "Node emitted out of order - early"
) ? void (0) : __assert_fail ("isNew && \"Node emitted out of order - early\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 184, __extension__
__PRETTY_FUNCTION__))
;
185}
186
187void InstrEmitter::CreateVirtualRegisters(SDNode *Node,
188 MachineInstrBuilder &MIB,
189 const MCInstrDesc &II,
190 bool IsClone, bool IsCloned,
191 DenseMap<SDValue, Register> &VRBaseMap) {
192 assert(Node->getMachineOpcode() != TargetOpcode::IMPLICIT_DEF &&(static_cast <bool> (Node->getMachineOpcode() != TargetOpcode
::IMPLICIT_DEF && "IMPLICIT_DEF should have been handled as a special case elsewhere!"
) ? void (0) : __assert_fail ("Node->getMachineOpcode() != TargetOpcode::IMPLICIT_DEF && \"IMPLICIT_DEF should have been handled as a special case elsewhere!\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 193, __extension__
__PRETTY_FUNCTION__))
193 "IMPLICIT_DEF should have been handled as a special case elsewhere!")(static_cast <bool> (Node->getMachineOpcode() != TargetOpcode
::IMPLICIT_DEF && "IMPLICIT_DEF should have been handled as a special case elsewhere!"
) ? void (0) : __assert_fail ("Node->getMachineOpcode() != TargetOpcode::IMPLICIT_DEF && \"IMPLICIT_DEF should have been handled as a special case elsewhere!\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 193, __extension__
__PRETTY_FUNCTION__))
;
194
195 unsigned NumResults = CountResults(Node);
196 bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() &&
197 II.isVariadic() && II.variadicOpsAreDefs();
198 unsigned NumVRegs = HasVRegVariadicDefs ? NumResults : II.getNumDefs();
199 if (Node->getMachineOpcode() == TargetOpcode::STATEPOINT)
200 NumVRegs = NumResults;
201 for (unsigned i = 0; i < NumVRegs; ++i) {
202 // If the specific node value is only used by a CopyToReg and the dest reg
203 // is a vreg in the same register class, use the CopyToReg'd destination
204 // register instead of creating a new vreg.
205 Register VRBase;
206 const TargetRegisterClass *RC =
207 TRI->getAllocatableClass(TII->getRegClass(II, i, TRI, *MF));
208 // Always let the value type influence the used register class. The
209 // constraints on the instruction may be too lax to represent the value
210 // type correctly. For example, a 64-bit float (X86::FR64) can't live in
211 // the 32-bit float super-class (X86::FR32).
212 if (i < NumResults && TLI->isTypeLegal(Node->getSimpleValueType(i))) {
213 const TargetRegisterClass *VTRC = TLI->getRegClassFor(
214 Node->getSimpleValueType(i),
215 (Node->isDivergent() || (RC && TRI->isDivergentRegClass(RC))));
216 if (RC)
217 VTRC = TRI->getCommonSubClass(RC, VTRC);
218 if (VTRC)
219 RC = VTRC;
220 }
221
222 if (II.OpInfo != nullptr && II.OpInfo[i].isOptionalDef()) {
223 // Optional def must be a physical register.
224 VRBase = cast<RegisterSDNode>(Node->getOperand(i-NumResults))->getReg();
225 assert(VRBase.isPhysical())(static_cast <bool> (VRBase.isPhysical()) ? void (0) : __assert_fail
("VRBase.isPhysical()", "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp"
, 225, __extension__ __PRETTY_FUNCTION__))
;
226 MIB.addReg(VRBase, RegState::Define);
227 }
228
229 if (!VRBase && !IsClone && !IsCloned)
230 for (SDNode *User : Node->uses()) {
231 if (User->getOpcode() == ISD::CopyToReg &&
232 User->getOperand(2).getNode() == Node &&
233 User->getOperand(2).getResNo() == i) {
234 unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
235 if (Register::isVirtualRegister(Reg)) {
236 const TargetRegisterClass *RegRC = MRI->getRegClass(Reg);
237 if (RegRC == RC) {
238 VRBase = Reg;
239 MIB.addReg(VRBase, RegState::Define);
240 break;
241 }
242 }
243 }
244 }
245
246 // Create the result registers for this node and add the result regs to
247 // the machine instruction.
248 if (VRBase == 0) {
249 assert(RC && "Isn't a register operand!")(static_cast <bool> (RC && "Isn't a register operand!"
) ? void (0) : __assert_fail ("RC && \"Isn't a register operand!\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 249, __extension__
__PRETTY_FUNCTION__))
;
250 VRBase = MRI->createVirtualRegister(RC);
251 MIB.addReg(VRBase, RegState::Define);
252 }
253
254 // If this def corresponds to a result of the SDNode insert the VRBase into
255 // the lookup map.
256 if (i < NumResults) {
257 SDValue Op(Node, i);
258 if (IsClone)
259 VRBaseMap.erase(Op);
260 bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
261 (void)isNew; // Silence compiler warning.
262 assert(isNew && "Node emitted out of order - early")(static_cast <bool> (isNew && "Node emitted out of order - early"
) ? void (0) : __assert_fail ("isNew && \"Node emitted out of order - early\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 262, __extension__
__PRETTY_FUNCTION__))
;
263 }
264 }
265}
266
267/// getVR - Return the virtual register corresponding to the specified result
268/// of the specified node.
269Register InstrEmitter::getVR(SDValue Op,
270 DenseMap<SDValue, Register> &VRBaseMap) {
271 if (Op.isMachineOpcode() &&
6
Calling 'SDValue::isMachineOpcode'
272 Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
273 // Add an IMPLICIT_DEF instruction before every use.
274 // IMPLICIT_DEF can produce any type of result so its MCInstrDesc
275 // does not include operand register class info.
276 const TargetRegisterClass *RC = TLI->getRegClassFor(
277 Op.getSimpleValueType(), Op.getNode()->isDivergent());
278 Register VReg = MRI->createVirtualRegister(RC);
279 BuildMI(*MBB, InsertPos, Op.getDebugLoc(),
280 TII->get(TargetOpcode::IMPLICIT_DEF), VReg);
281 return VReg;
282 }
283
284 DenseMap<SDValue, Register>::iterator I = VRBaseMap.find(Op);
285 assert(I != VRBaseMap.end() && "Node emitted out of order - late")(static_cast <bool> (I != VRBaseMap.end() && "Node emitted out of order - late"
) ? void (0) : __assert_fail ("I != VRBaseMap.end() && \"Node emitted out of order - late\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 285, __extension__
__PRETTY_FUNCTION__))
;
286 return I->second;
287}
288
289
290/// AddRegisterOperand - Add the specified register as an operand to the
291/// specified machine instr. Insert register copies if the register is
292/// not in the required register class.
293void
294InstrEmitter::AddRegisterOperand(MachineInstrBuilder &MIB,
295 SDValue Op,
296 unsigned IIOpNum,
297 const MCInstrDesc *II,
298 DenseMap<SDValue, Register> &VRBaseMap,
299 bool IsDebug, bool IsClone, bool IsCloned) {
300 assert(Op.getValueType() != MVT::Other &&(static_cast <bool> (Op.getValueType() != MVT::Other &&
Op.getValueType() != MVT::Glue && "Chain and glue operands should occur at end of operand list!"
) ? void (0) : __assert_fail ("Op.getValueType() != MVT::Other && Op.getValueType() != MVT::Glue && \"Chain and glue operands should occur at end of operand list!\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 302, __extension__
__PRETTY_FUNCTION__))
301 Op.getValueType() != MVT::Glue &&(static_cast <bool> (Op.getValueType() != MVT::Other &&
Op.getValueType() != MVT::Glue && "Chain and glue operands should occur at end of operand list!"
) ? void (0) : __assert_fail ("Op.getValueType() != MVT::Other && Op.getValueType() != MVT::Glue && \"Chain and glue operands should occur at end of operand list!\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 302, __extension__
__PRETTY_FUNCTION__))
302 "Chain and glue operands should occur at end of operand list!")(static_cast <bool> (Op.getValueType() != MVT::Other &&
Op.getValueType() != MVT::Glue && "Chain and glue operands should occur at end of operand list!"
) ? void (0) : __assert_fail ("Op.getValueType() != MVT::Other && Op.getValueType() != MVT::Glue && \"Chain and glue operands should occur at end of operand list!\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 302, __extension__
__PRETTY_FUNCTION__))
;
303 // Get/emit the operand.
304 Register VReg = getVR(Op, VRBaseMap);
305
306 const MCInstrDesc &MCID = MIB->getDesc();
307 bool isOptDef = IIOpNum < MCID.getNumOperands() &&
308 MCID.OpInfo[IIOpNum].isOptionalDef();
309
310 // If the instruction requires a register in a different class, create
311 // a new virtual register and copy the value into it, but first attempt to
312 // shrink VReg's register class within reason. For example, if VReg == GR32
313 // and II requires a GR32_NOSP, just constrain VReg to GR32_NOSP.
314 if (II) {
315 const TargetRegisterClass *OpRC = nullptr;
316 if (IIOpNum < II->getNumOperands())
317 OpRC = TII->getRegClass(*II, IIOpNum, TRI, *MF);
318
319 if (OpRC) {
320 const TargetRegisterClass *ConstrainedRC
321 = MRI->constrainRegClass(VReg, OpRC, MinRCSize);
322 if (!ConstrainedRC) {
323 OpRC = TRI->getAllocatableClass(OpRC);
324 assert(OpRC && "Constraints cannot be fulfilled for allocation")(static_cast <bool> (OpRC && "Constraints cannot be fulfilled for allocation"
) ? void (0) : __assert_fail ("OpRC && \"Constraints cannot be fulfilled for allocation\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 324, __extension__
__PRETTY_FUNCTION__))
;
325 Register NewVReg = MRI->createVirtualRegister(OpRC);
326 BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
327 TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
328 VReg = NewVReg;
329 } else {
330 assert(ConstrainedRC->isAllocatable() &&(static_cast <bool> (ConstrainedRC->isAllocatable() &&
"Constraining an allocatable VReg produced an unallocatable class?"
) ? void (0) : __assert_fail ("ConstrainedRC->isAllocatable() && \"Constraining an allocatable VReg produced an unallocatable class?\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 331, __extension__
__PRETTY_FUNCTION__))
331 "Constraining an allocatable VReg produced an unallocatable class?")(static_cast <bool> (ConstrainedRC->isAllocatable() &&
"Constraining an allocatable VReg produced an unallocatable class?"
) ? void (0) : __assert_fail ("ConstrainedRC->isAllocatable() && \"Constraining an allocatable VReg produced an unallocatable class?\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 331, __extension__
__PRETTY_FUNCTION__))
;
332 }
333 }
334 }
335
336 // If this value has only one use, that use is a kill. This is a
337 // conservative approximation. InstrEmitter does trivial coalescing
338 // with CopyFromReg nodes, so don't emit kill flags for them.
339 // Avoid kill flags on Schedule cloned nodes, since there will be
340 // multiple uses.
341 // Tied operands are never killed, so we need to check that. And that
342 // means we need to determine the index of the operand.
343 bool isKill = Op.hasOneUse() &&
344 Op.getNode()->getOpcode() != ISD::CopyFromReg &&
345 !IsDebug &&
346 !(IsClone || IsCloned);
347 if (isKill) {
348 unsigned Idx = MIB->getNumOperands();
349 while (Idx > 0 &&
350 MIB->getOperand(Idx-1).isReg() &&
351 MIB->getOperand(Idx-1).isImplicit())
352 --Idx;
353 bool isTied = MCID.getOperandConstraint(Idx, MCOI::TIED_TO) != -1;
354 if (isTied)
355 isKill = false;
356 }
357
358 MIB.addReg(VReg, getDefRegState(isOptDef) | getKillRegState(isKill) |
359 getDebugRegState(IsDebug));
360}
361
362/// AddOperand - Add the specified operand to the specified machine instr. II
363/// specifies the instruction information for the node, and IIOpNum is the
364/// operand number (in the II) that we are adding.
365void InstrEmitter::AddOperand(MachineInstrBuilder &MIB,
366 SDValue Op,
367 unsigned IIOpNum,
368 const MCInstrDesc *II,
369 DenseMap<SDValue, Register> &VRBaseMap,
370 bool IsDebug, bool IsClone, bool IsCloned) {
371 if (Op.isMachineOpcode()) {
372 AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
373 IsDebug, IsClone, IsCloned);
374 } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
375 MIB.addImm(C->getSExtValue());
376 } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
377 MIB.addFPImm(F->getConstantFPValue());
378 } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
379 Register VReg = R->getReg();
380 MVT OpVT = Op.getSimpleValueType();
381 const TargetRegisterClass *IIRC =
382 II ? TRI->getAllocatableClass(TII->getRegClass(*II, IIOpNum, TRI, *MF))
383 : nullptr;
384 const TargetRegisterClass *OpRC =
385 TLI->isTypeLegal(OpVT)
386 ? TLI->getRegClassFor(OpVT,
387 Op.getNode()->isDivergent() ||
388 (IIRC && TRI->isDivergentRegClass(IIRC)))
389 : nullptr;
390
391 if (OpRC && IIRC && OpRC != IIRC && Register::isVirtualRegister(VReg)) {
392 Register NewVReg = MRI->createVirtualRegister(IIRC);
393 BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
394 TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
395 VReg = NewVReg;
396 }
397 // Turn additional physreg operands into implicit uses on non-variadic
398 // instructions. This is used by call and return instructions passing
399 // arguments in registers.
400 bool Imp = II && (IIOpNum >= II->getNumOperands() && !II->isVariadic());
401 MIB.addReg(VReg, getImplRegState(Imp));
402 } else if (RegisterMaskSDNode *RM = dyn_cast<RegisterMaskSDNode>(Op)) {
403 MIB.addRegMask(RM->getRegMask());
404 } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
405 MIB.addGlobalAddress(TGA->getGlobal(), TGA->getOffset(),
406 TGA->getTargetFlags());
407 } else if (BasicBlockSDNode *BBNode = dyn_cast<BasicBlockSDNode>(Op)) {
408 MIB.addMBB(BBNode->getBasicBlock());
409 } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
410 MIB.addFrameIndex(FI->getIndex());
411 } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
412 MIB.addJumpTableIndex(JT->getIndex(), JT->getTargetFlags());
413 } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
414 int Offset = CP->getOffset();
415 Align Alignment = CP->getAlign();
416
417 unsigned Idx;
418 MachineConstantPool *MCP = MF->getConstantPool();
419 if (CP->isMachineConstantPoolEntry())
420 Idx = MCP->getConstantPoolIndex(CP->getMachineCPVal(), Alignment);
421 else
422 Idx = MCP->getConstantPoolIndex(CP->getConstVal(), Alignment);
423 MIB.addConstantPoolIndex(Idx, Offset, CP->getTargetFlags());
424 } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
425 MIB.addExternalSymbol(ES->getSymbol(), ES->getTargetFlags());
426 } else if (auto *SymNode = dyn_cast<MCSymbolSDNode>(Op)) {
427 MIB.addSym(SymNode->getMCSymbol());
428 } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
429 MIB.addBlockAddress(BA->getBlockAddress(),
430 BA->getOffset(),
431 BA->getTargetFlags());
432 } else if (TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(Op)) {
433 MIB.addTargetIndex(TI->getIndex(), TI->getOffset(), TI->getTargetFlags());
434 } else {
435 assert(Op.getValueType() != MVT::Other &&(static_cast <bool> (Op.getValueType() != MVT::Other &&
Op.getValueType() != MVT::Glue && "Chain and glue operands should occur at end of operand list!"
) ? void (0) : __assert_fail ("Op.getValueType() != MVT::Other && Op.getValueType() != MVT::Glue && \"Chain and glue operands should occur at end of operand list!\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 437, __extension__
__PRETTY_FUNCTION__))
436 Op.getValueType() != MVT::Glue &&(static_cast <bool> (Op.getValueType() != MVT::Other &&
Op.getValueType() != MVT::Glue && "Chain and glue operands should occur at end of operand list!"
) ? void (0) : __assert_fail ("Op.getValueType() != MVT::Other && Op.getValueType() != MVT::Glue && \"Chain and glue operands should occur at end of operand list!\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 437, __extension__
__PRETTY_FUNCTION__))
437 "Chain and glue operands should occur at end of operand list!")(static_cast <bool> (Op.getValueType() != MVT::Other &&
Op.getValueType() != MVT::Glue && "Chain and glue operands should occur at end of operand list!"
) ? void (0) : __assert_fail ("Op.getValueType() != MVT::Other && Op.getValueType() != MVT::Glue && \"Chain and glue operands should occur at end of operand list!\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 437, __extension__
__PRETTY_FUNCTION__))
;
438 AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
439 IsDebug, IsClone, IsCloned);
440 }
441}
442
443Register InstrEmitter::ConstrainForSubReg(Register VReg, unsigned SubIdx,
444 MVT VT, bool isDivergent, const DebugLoc &DL) {
445 const TargetRegisterClass *VRC = MRI->getRegClass(VReg);
446 const TargetRegisterClass *RC = TRI->getSubClassWithSubReg(VRC, SubIdx);
447
448 // RC is a sub-class of VRC that supports SubIdx. Try to constrain VReg
449 // within reason.
450 if (RC && RC != VRC)
451 RC = MRI->constrainRegClass(VReg, RC, MinRCSize);
452
453 // VReg has been adjusted. It can be used with SubIdx operands now.
454 if (RC)
455 return VReg;
456
457 // VReg couldn't be reasonably constrained. Emit a COPY to a new virtual
458 // register instead.
459 RC = TRI->getSubClassWithSubReg(TLI->getRegClassFor(VT, isDivergent), SubIdx);
460 assert(RC && "No legal register class for VT supports that SubIdx")(static_cast <bool> (RC && "No legal register class for VT supports that SubIdx"
) ? void (0) : __assert_fail ("RC && \"No legal register class for VT supports that SubIdx\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 460, __extension__
__PRETTY_FUNCTION__))
;
461 Register NewReg = MRI->createVirtualRegister(RC);
462 BuildMI(*MBB, InsertPos, DL, TII->get(TargetOpcode::COPY), NewReg)
463 .addReg(VReg);
464 return NewReg;
465}
466
467/// EmitSubregNode - Generate machine code for subreg nodes.
468///
469void InstrEmitter::EmitSubregNode(SDNode *Node,
470 DenseMap<SDValue, Register> &VRBaseMap,
471 bool IsClone, bool IsCloned) {
472 Register VRBase;
473 unsigned Opc = Node->getMachineOpcode();
474
475 // If the node is only used by a CopyToReg and the dest reg is a vreg, use
476 // the CopyToReg'd destination register instead of creating a new vreg.
477 for (SDNode *User : Node->uses()) {
478 if (User->getOpcode() == ISD::CopyToReg &&
479 User->getOperand(2).getNode() == Node) {
480 Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
481 if (DestReg.isVirtual()) {
482 VRBase = DestReg;
483 break;
484 }
485 }
486 }
487
488 if (Opc == TargetOpcode::EXTRACT_SUBREG) {
489 // EXTRACT_SUBREG is lowered as %dst = COPY %src:sub. There are no
490 // constraints on the %dst register, COPY can target all legal register
491 // classes.
492 unsigned SubIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
493 const TargetRegisterClass *TRC =
494 TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());
495
496 Register Reg;
497 MachineInstr *DefMI;
498 RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(0));
499 if (R && Register::isPhysicalRegister(R->getReg())) {
500 Reg = R->getReg();
501 DefMI = nullptr;
502 } else {
503 Reg = R ? R->getReg() : getVR(Node->getOperand(0), VRBaseMap);
504 DefMI = MRI->getVRegDef(Reg);
505 }
506
507 Register SrcReg, DstReg;
508 unsigned DefSubIdx;
509 if (DefMI &&
510 TII->isCoalescableExtInstr(*DefMI, SrcReg, DstReg, DefSubIdx) &&
511 SubIdx == DefSubIdx &&
512 TRC == MRI->getRegClass(SrcReg)) {
513 // Optimize these:
514 // r1025 = s/zext r1024, 4
515 // r1026 = extract_subreg r1025, 4
516 // to a copy
517 // r1026 = copy r1024
518 VRBase = MRI->createVirtualRegister(TRC);
519 BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
520 TII->get(TargetOpcode::COPY), VRBase).addReg(SrcReg);
521 MRI->clearKillFlags(SrcReg);
522 } else {
523 // Reg may not support a SubIdx sub-register, and we may need to
524 // constrain its register class or issue a COPY to a compatible register
525 // class.
526 if (Reg.isVirtual())
527 Reg = ConstrainForSubReg(Reg, SubIdx,
528 Node->getOperand(0).getSimpleValueType(),
529 Node->isDivergent(), Node->getDebugLoc());
530 // Create the destreg if it is missing.
531 if (!VRBase)
532 VRBase = MRI->createVirtualRegister(TRC);
533
534 // Create the extract_subreg machine instruction.
535 MachineInstrBuilder CopyMI =
536 BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
537 TII->get(TargetOpcode::COPY), VRBase);
538 if (Reg.isVirtual())
539 CopyMI.addReg(Reg, 0, SubIdx);
540 else
541 CopyMI.addReg(TRI->getSubReg(Reg, SubIdx));
542 }
543 } else if (Opc == TargetOpcode::INSERT_SUBREG ||
544 Opc == TargetOpcode::SUBREG_TO_REG) {
545 SDValue N0 = Node->getOperand(0);
546 SDValue N1 = Node->getOperand(1);
547 SDValue N2 = Node->getOperand(2);
548 unsigned SubIdx = cast<ConstantSDNode>(N2)->getZExtValue();
549
550 // Figure out the register class to create for the destreg. It should be
551 // the largest legal register class supporting SubIdx sub-registers.
552 // RegisterCoalescer will constrain it further if it decides to eliminate
553 // the INSERT_SUBREG instruction.
554 //
555 // %dst = INSERT_SUBREG %src, %sub, SubIdx
556 //
557 // is lowered by TwoAddressInstructionPass to:
558 //
559 // %dst = COPY %src
560 // %dst:SubIdx = COPY %sub
561 //
562 // There is no constraint on the %src register class.
563 //
564 const TargetRegisterClass *SRC =
565 TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());
566 SRC = TRI->getSubClassWithSubReg(SRC, SubIdx);
567 assert(SRC && "No register class supports VT and SubIdx for INSERT_SUBREG")(static_cast <bool> (SRC && "No register class supports VT and SubIdx for INSERT_SUBREG"
) ? void (0) : __assert_fail ("SRC && \"No register class supports VT and SubIdx for INSERT_SUBREG\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 567, __extension__
__PRETTY_FUNCTION__))
;
568
569 if (VRBase == 0 || !SRC->hasSubClassEq(MRI->getRegClass(VRBase)))
570 VRBase = MRI->createVirtualRegister(SRC);
571
572 // Create the insert_subreg or subreg_to_reg machine instruction.
573 MachineInstrBuilder MIB =
574 BuildMI(*MF, Node->getDebugLoc(), TII->get(Opc), VRBase);
575
576 // If creating a subreg_to_reg, then the first input operand
577 // is an implicit value immediate, otherwise it's a register
578 if (Opc == TargetOpcode::SUBREG_TO_REG) {
579 const ConstantSDNode *SD = cast<ConstantSDNode>(N0);
580 MIB.addImm(SD->getZExtValue());
581 } else
582 AddOperand(MIB, N0, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
583 IsClone, IsCloned);
584 // Add the subregister being inserted
585 AddOperand(MIB, N1, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
586 IsClone, IsCloned);
587 MIB.addImm(SubIdx);
588 MBB->insert(InsertPos, MIB);
589 } else
590 llvm_unreachable("Node is not insert_subreg, extract_subreg, or subreg_to_reg")::llvm::llvm_unreachable_internal("Node is not insert_subreg, extract_subreg, or subreg_to_reg"
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 590)
;
591
592 SDValue Op(Node, 0);
593 bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
594 (void)isNew; // Silence compiler warning.
595 assert(isNew && "Node emitted out of order - early")(static_cast <bool> (isNew && "Node emitted out of order - early"
) ? void (0) : __assert_fail ("isNew && \"Node emitted out of order - early\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 595, __extension__
__PRETTY_FUNCTION__))
;
596}
597
598/// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS nodes.
599/// COPY_TO_REGCLASS is just a normal copy, except that the destination
600/// register is constrained to be in a particular register class.
601///
602void
603InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
604 DenseMap<SDValue, Register> &VRBaseMap) {
605 unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
606
607 // Create the new VReg in the destination class and emit a copy.
608 unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
609 const TargetRegisterClass *DstRC =
610 TRI->getAllocatableClass(TRI->getRegClass(DstRCIdx));
611 Register NewVReg = MRI->createVirtualRegister(DstRC);
612 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
613 NewVReg).addReg(VReg);
614
615 SDValue Op(Node, 0);
616 bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
617 (void)isNew; // Silence compiler warning.
618 assert(isNew && "Node emitted out of order - early")(static_cast <bool> (isNew && "Node emitted out of order - early"
) ? void (0) : __assert_fail ("isNew && \"Node emitted out of order - early\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 618, __extension__
__PRETTY_FUNCTION__))
;
619}
620
/// EmitRegSequence - Generate machine code for REG_SEQUENCE nodes.
///
/// Operand 0 of the node is the destination register-class ID; the remaining
/// operands come in (value, sub-register-index) pairs — the loop below reads
/// operand i-1 as the value and operand i as the index constant. A trailing
/// chain operand (MVT::Other), if present, is dropped before processing.
void InstrEmitter::EmitRegSequence(SDNode *Node,
                                   DenseMap<SDValue, Register> &VRBaseMap,
                                   bool IsClone, bool IsCloned) {
  unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
  const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
  // The result of the REG_SEQUENCE is a fresh virtual register in the
  // allocatable subclass of the requested class.
  Register NewVReg = MRI->createVirtualRegister(TRI->getAllocatableClass(RC));
  const MCInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
  MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II, NewVReg);
  unsigned NumOps = Node->getNumOperands();
  // If the input pattern has a chain, then the root of the corresponding
  // output pattern will get a chain as well. This can happen to be a
  // REG_SEQUENCE (which is not "guarded" by countOperands/CountResults).
  if (NumOps && Node->getOperand(NumOps-1).getValueType() == MVT::Other)
    --NumOps; // Ignore chain if it exists.

  assert((NumOps & 1) == 1 &&
         "REG_SEQUENCE must have an odd number of operands!");
  for (unsigned i = 1; i != NumOps; ++i) {
    SDValue Op = Node->getOperand(i);
    // Even positions hold the sub-register index constant for the value at
    // the preceding (odd) position.
    if ((i & 1) == 0) {
      RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(i-1));
      // Skip physical registers as they don't have a vreg to get and we'll
      // insert copies for them in TwoAddressInstructionPass anyway.
      if (!R || !Register::isPhysicalRegister(R->getReg())) {
        unsigned SubIdx = cast<ConstantSDNode>(Op)->getZExtValue();
        unsigned SubReg = getVR(Node->getOperand(i-1), VRBaseMap);
        const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
        const TargetRegisterClass *SRC =
        TRI->getMatchingSuperRegClass(RC, TRC, SubIdx);
        // If supporting this (value class, sub-index) pair requires a
        // different super-register class, retarget NewVReg to it.
        if (SRC && SRC != RC) {
          MRI->setRegClass(NewVReg, SRC);
          RC = SRC;
        }
      }
    }
    AddOperand(MIB, Op, i+1, &II, VRBaseMap, /*IsDebug=*/false,
               IsClone, IsCloned);
  }

  MBB->insert(InsertPos, MIB);
  SDValue Op(Node, 0);
  bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
  (void)isNew; // Silence compiler warning.
  assert(isNew && "Node emitted out of order - early");
}
668
/// EmitDbgValue - Generate machine instruction for a dbg_value node.
///
/// Dispatches on the shape of the SDDbgValue, in this order:
///   - invalidated                -> undef DBG_VALUE via EmitDbgNoLocation
///   - variadic                   -> DBG_VALUE_LIST
///   - instruction refs requested -> DBG_INSTR_REF via EmitDbgInstrRef,
///                                   falling through on failure to
///   - plain DBG_VALUE via EmitDbgValueFromSingleOp
/// Marks SD as emitted as a side effect.
MachineInstr *
InstrEmitter::EmitDbgValue(SDDbgValue *SD,
                           DenseMap<SDValue, Register> &VRBaseMap) {
  MDNode *Var = SD->getVariable();
  MDNode *Expr = SD->getExpression();
  DebugLoc DL = SD->getDebugLoc();
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");

  SD->setIsEmitted();

  ArrayRef<SDDbgOperand> LocationOps = SD->getLocationOps();
  assert(!LocationOps.empty() && "dbg_value with no location operands?");

  // An invalidated value must still terminate earlier live ranges; emit an
  // undef DBG_VALUE for it.
  if (SD->isInvalidated())
    return EmitDbgNoLocation(SD);

  // Emit variadic dbg_value nodes as DBG_VALUE_LIST.
  if (SD->isVariadic()) {
    // DBG_VALUE_LIST := "DBG_VALUE_LIST" var, expression, loc (, loc)*
    const MCInstrDesc &DbgValDesc = TII->get(TargetOpcode::DBG_VALUE_LIST);
    // Build the DBG_VALUE_LIST instruction base.
    auto MIB = BuildMI(*MF, DL, DbgValDesc);
    MIB.addMetadata(Var);
    MIB.addMetadata(Expr);
    AddDbgValueLocationOps(MIB, DbgValDesc, LocationOps, VRBaseMap);
    return &*MIB;
  }

  // Attempt to produce a DBG_INSTR_REF if we've been asked to.
  // We currently exclude the possibility of instruction references for
  // variadic nodes; if at some point we enable them, this should be moved
  // above the variadic block.
  if (EmitDebugInstrRefs)
    if (auto *InstrRef = EmitDbgInstrRef(SD, VRBaseMap))
      return InstrRef;

  return EmitDbgValueFromSingleOp(SD, VRBaseMap);
}
710
/// Append the debug-location operands in LocationOps to MIB.
///
/// Each SDDbgOperand is translated to the corresponding machine operand:
///   FRAMEIX -> frame-index operand
///   VREG    -> register operand
///   SDNODE  -> the vreg recorded for that SDValue in VRBaseMap, or $noreg
///              when no code was generated for the node
///   CONST   -> CImm/Imm/FPImm, or $noreg for values with no immediate form
void InstrEmitter::AddDbgValueLocationOps(
    MachineInstrBuilder &MIB, const MCInstrDesc &DbgValDesc,
    ArrayRef<SDDbgOperand> LocationOps,
    DenseMap<SDValue, Register> &VRBaseMap) {
  for (const SDDbgOperand &Op : LocationOps) {
    switch (Op.getKind()) {
    case SDDbgOperand::FRAMEIX:
      MIB.addFrameIndex(Op.getFrameIx());
      break;
    case SDDbgOperand::VREG:
      MIB.addReg(Op.getVReg());
      break;
    case SDDbgOperand::SDNODE: {
      SDValue V = SDValue(Op.getSDNode(), Op.getResNo());
      // It's possible we replaced this SDNode with other(s) and therefore
      // didn't generate code for it. It's better to catch these cases where
      // they happen and transfer the debug info, but trying to guarantee that
      // in all cases would be very fragile; this is a safeguard for any
      // that were missed.
      if (VRBaseMap.count(V) == 0)
        MIB.addReg(0U); // undef
      else
        AddOperand(MIB, V, (*MIB).getNumOperands(), &DbgValDesc, VRBaseMap,
                   /*IsDebug=*/true, /*IsClone=*/false, /*IsCloned=*/false);
    } break;
    case SDDbgOperand::CONST: {
      const Value *V = Op.getConst();
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
        // Integers wider than 64 bits do not fit a plain immediate; carry
        // them as a ConstantInt operand instead.
        if (CI->getBitWidth() > 64)
          MIB.addCImm(CI);
        else
          MIB.addImm(CI->getSExtValue());
      } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
        MIB.addFPImm(CF);
      } else if (isa<ConstantPointerNull>(V)) {
        // Note: This assumes that all nullptr constants are zero-valued.
        MIB.addImm(0);
      } else {
        // Could be an Undef. In any case insert an Undef so we can see what we
        // dropped.
        MIB.addReg(0U);
      }
    } break;
    }
  }
}
757
758MachineInstr *
759InstrEmitter::EmitDbgInstrRef(SDDbgValue *SD,
760 DenseMap<SDValue, Register> &VRBaseMap) {
761 assert(!SD->isVariadic())(static_cast <bool> (!SD->isVariadic()) ? void (0) :
__assert_fail ("!SD->isVariadic()", "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp"
, 761, __extension__ __PRETTY_FUNCTION__))
;
762 SDDbgOperand DbgOperand = SD->getLocationOps()[0];
763 MDNode *Var = SD->getVariable();
764 DIExpression *Expr = (DIExpression*)SD->getExpression();
765 DebugLoc DL = SD->getDebugLoc();
766 const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_INSTR_REF);
767
768 // Handle variable locations that don't actually depend on the instructions
769 // in the program: constants and stack locations.
770 if (DbgOperand.getKind() == SDDbgOperand::FRAMEIX ||
771 DbgOperand.getKind() == SDDbgOperand::CONST)
772 return EmitDbgValueFromSingleOp(SD, VRBaseMap);
773
774 // Immediately fold any indirectness from the LLVM-IR intrinsic into the
775 // expression:
776 if (SD->isIndirect()) {
777 std::vector<uint64_t> Elts = {dwarf::DW_OP_deref};
778 Expr = DIExpression::append(Expr, Elts);
779 }
780
781 // It may not be immediately possible to identify the MachineInstr that
782 // defines a VReg, it can depend for example on the order blocks are
783 // emitted in. When this happens, or when further analysis is needed later,
784 // produce an instruction like this:
785 //
786 // DBG_INSTR_REF %0:gr64, 0, !123, !456
787 //
788 // i.e., point the instruction at the vreg, and patch it up later in
789 // MachineFunction::finalizeDebugInstrRefs.
790 auto EmitHalfDoneInstrRef = [&](unsigned VReg) -> MachineInstr * {
791 auto MIB = BuildMI(*MF, DL, RefII);
792 MIB.addReg(VReg);
793 MIB.addImm(0);
794 MIB.addMetadata(Var);
795 MIB.addMetadata(Expr);
796 return MIB;
797 };
798
799 // Try to find both the defined register and the instruction defining it.
800 MachineInstr *DefMI = nullptr;
801 unsigned VReg;
802
803 if (DbgOperand.getKind() == SDDbgOperand::VREG) {
804 VReg = DbgOperand.getVReg();
805
806 // No definition means that block hasn't been emitted yet. Leave a vreg
807 // reference to be fixed later.
808 if (!MRI->hasOneDef(VReg))
809 return EmitHalfDoneInstrRef(VReg);
810
811 DefMI = &*MRI->def_instr_begin(VReg);
812 } else {
813 assert(DbgOperand.getKind() == SDDbgOperand::SDNODE)(static_cast <bool> (DbgOperand.getKind() == SDDbgOperand
::SDNODE) ? void (0) : __assert_fail ("DbgOperand.getKind() == SDDbgOperand::SDNODE"
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 813, __extension__
__PRETTY_FUNCTION__))
;
814 // Look up the corresponding VReg for the given SDNode, if any.
815 SDNode *Node = DbgOperand.getSDNode();
816 SDValue Op = SDValue(Node, DbgOperand.getResNo());
817 DenseMap<SDValue, Register>::iterator I = VRBaseMap.find(Op);
818 // No VReg -> produce a DBG_VALUE $noreg instead.
819 if (I==VRBaseMap.end())
820 return EmitDbgNoLocation(SD);
821
822 // Try to pick out a defining instruction at this point.
823 VReg = getVR(Op, VRBaseMap);
824
825 // Again, if there's no instruction defining the VReg right now, fix it up
826 // later.
827 if (!MRI->hasOneDef(VReg))
828 return EmitHalfDoneInstrRef(VReg);
829
830 DefMI = &*MRI->def_instr_begin(VReg);
831 }
832
833 // Avoid copy like instructions: they don't define values, only move them.
834 // Leave a virtual-register reference until it can be fixed up later, to find
835 // the underlying value definition.
836 if (DefMI->isCopyLike() || TII->isCopyInstr(*DefMI))
837 return EmitHalfDoneInstrRef(VReg);
838
839 auto MIB = BuildMI(*MF, DL, RefII);
840
841 // Find the operand number which defines the specified VReg.
842 unsigned OperandIdx = 0;
843 for (const auto &MO : DefMI->operands()) {
844 if (MO.isReg() && MO.isDef() && MO.getReg() == VReg)
845 break;
846 ++OperandIdx;
847 }
848 assert(OperandIdx < DefMI->getNumOperands())(static_cast <bool> (OperandIdx < DefMI->getNumOperands
()) ? void (0) : __assert_fail ("OperandIdx < DefMI->getNumOperands()"
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 848, __extension__
__PRETTY_FUNCTION__))
;
849
850 // Make the DBG_INSTR_REF refer to that instruction, and that operand.
851 unsigned InstrNum = DefMI->getDebugInstrNum();
852 MIB.addImm(InstrNum);
853 MIB.addImm(OperandIdx);
854 MIB.addMetadata(Var);
855 MIB.addMetadata(Expr);
856 return &*MIB;
857}
858
859MachineInstr *InstrEmitter::EmitDbgNoLocation(SDDbgValue *SD) {
860 // An invalidated SDNode must generate an undef DBG_VALUE: although the
861 // original value is no longer computed, earlier DBG_VALUEs live ranges
862 // must not leak into later code.
863 MDNode *Var = SD->getVariable();
864 MDNode *Expr = SD->getExpression();
865 DebugLoc DL = SD->getDebugLoc();
866 auto MIB = BuildMI(*MF, DL, TII->get(TargetOpcode::DBG_VALUE));
867 MIB.addReg(0U);
868 MIB.addReg(0U);
869 MIB.addMetadata(Var);
870 MIB.addMetadata(Expr);
871 return &*MIB;
872}
873
874MachineInstr *
875InstrEmitter::EmitDbgValueFromSingleOp(SDDbgValue *SD,
876 DenseMap<SDValue, Register> &VRBaseMap) {
877 MDNode *Var = SD->getVariable();
878 DIExpression *Expr = SD->getExpression();
879 DebugLoc DL = SD->getDebugLoc();
880 const MCInstrDesc &II = TII->get(TargetOpcode::DBG_VALUE);
881
882 assert(SD->getLocationOps().size() == 1 &&(static_cast <bool> (SD->getLocationOps().size() == 1
&& "Non variadic dbg_value should have only one location op"
) ? void (0) : __assert_fail ("SD->getLocationOps().size() == 1 && \"Non variadic dbg_value should have only one location op\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 883, __extension__
__PRETTY_FUNCTION__))
883 "Non variadic dbg_value should have only one location op")(static_cast <bool> (SD->getLocationOps().size() == 1
&& "Non variadic dbg_value should have only one location op"
) ? void (0) : __assert_fail ("SD->getLocationOps().size() == 1 && \"Non variadic dbg_value should have only one location op\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 883, __extension__
__PRETTY_FUNCTION__))
;
884
885 // See about constant-folding the expression.
886 // Copy the location operand in case we replace it.
887 SmallVector<SDDbgOperand, 1> LocationOps(1, SD->getLocationOps()[0]);
888 if (Expr && LocationOps[0].getKind() == SDDbgOperand::CONST) {
889 const Value *V = LocationOps[0].getConst();
890 if (auto *C = dyn_cast<ConstantInt>(V)) {
891 std::tie(Expr, C) = Expr->constantFold(C);
892 LocationOps[0] = SDDbgOperand::fromConst(C);
893 }
894 }
895
896 // Emit non-variadic dbg_value nodes as DBG_VALUE.
897 // DBG_VALUE := "DBG_VALUE" loc, isIndirect, var, expr
898 auto MIB = BuildMI(*MF, DL, II);
899 AddDbgValueLocationOps(MIB, II, LocationOps, VRBaseMap);
900
901 if (SD->isIndirect())
902 MIB.addImm(0U);
903 else
904 MIB.addReg(0U);
905
906 return MIB.addMetadata(Var).addMetadata(Expr);
907}
908
909MachineInstr *
910InstrEmitter::EmitDbgLabel(SDDbgLabel *SD) {
911 MDNode *Label = SD->getLabel();
912 DebugLoc DL = SD->getDebugLoc();
913 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&(static_cast <bool> (cast<DILabel>(Label)->isValidLocationForIntrinsic
(DL) && "Expected inlined-at fields to agree") ? void
(0) : __assert_fail ("cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) && \"Expected inlined-at fields to agree\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 914, __extension__
__PRETTY_FUNCTION__))
914 "Expected inlined-at fields to agree")(static_cast <bool> (cast<DILabel>(Label)->isValidLocationForIntrinsic
(DL) && "Expected inlined-at fields to agree") ? void
(0) : __assert_fail ("cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) && \"Expected inlined-at fields to agree\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 914, __extension__
__PRETTY_FUNCTION__))
;
915
916 const MCInstrDesc &II = TII->get(TargetOpcode::DBG_LABEL);
917 MachineInstrBuilder MIB = BuildMI(*MF, DL, II);
918 MIB.addMetadata(Label);
919
920 return &*MIB;
921}
922
/// EmitMachineNode - Generate machine code for a target-specific node and
/// needed dependencies.
///
/// Pseudo opcodes with dedicated emitters (subreg ops, COPY_TO_REGCLASS,
/// REG_SEQUENCE, IMPLICIT_DEF) are dispatched first; everything else goes
/// through the generic MCInstrDesc-driven path below, with special handling
/// for STACKMAP/PATCHPOINT scratch registers and STATEPOINT operand tying.
void InstrEmitter::
EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
                DenseMap<SDValue, Register> &VRBaseMap) {
  unsigned Opc = Node->getMachineOpcode();

  // Handle subreg insert/extract specially
  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::INSERT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG) {
    EmitSubregNode(Node, VRBaseMap, IsClone, IsCloned);
    return;
  }

  // Handle COPY_TO_REGCLASS specially.
  if (Opc == TargetOpcode::COPY_TO_REGCLASS) {
    EmitCopyToRegClassNode(Node, VRBaseMap);
    return;
  }

  // Handle REG_SEQUENCE specially.
  if (Opc == TargetOpcode::REG_SEQUENCE) {
    EmitRegSequence(Node, VRBaseMap, IsClone, IsCloned);
    return;
  }

  if (Opc == TargetOpcode::IMPLICIT_DEF)
    // We want a unique VR for each IMPLICIT_DEF use.
    return;

  const MCInstrDesc &II = TII->get(Opc);
  unsigned NumResults = CountResults(Node);
  unsigned NumDefs = II.getNumDefs();
  const MCPhysReg *ScratchRegs = nullptr;

  // Handle STACKMAP and PATCHPOINT specially and then use the generic code.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    // Stackmaps do not have arguments and do not preserve their calling
    // convention. However, to simplify runtime support, they clobber the same
    // scratch registers as AnyRegCC.
    unsigned CC = CallingConv::AnyReg;
    if (Opc == TargetOpcode::PATCHPOINT) {
      CC = Node->getConstantOperandVal(PatchPointOpers::CCPos);
      NumDefs = NumResults;
    }
    ScratchRegs = TLI->getScratchRegisters((CallingConv::ID) CC);
  } else if (Opc == TargetOpcode::STATEPOINT) {
    NumDefs = NumResults;
  }

  unsigned NumImpUses = 0;
  unsigned NodeOperands =
    countOperands(Node, II.getNumOperands() - NumDefs, NumImpUses);
  bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() &&
                             II.isVariadic() && II.variadicOpsAreDefs();
  // Results beyond the declared defs must come from implicit physreg defs
  // (handled after the instruction is built), unless variadic defs are
  // modeled as vregs.
  bool HasPhysRegOuts = NumResults > NumDefs &&
                        II.getImplicitDefs() != nullptr && !HasVRegVariadicDefs;
#ifndef NDEBUG
  unsigned NumMIOperands = NodeOperands + NumResults;
  if (II.isVariadic())
    assert(NumMIOperands >= II.getNumOperands() &&
           "Too few operands for a variadic node!");
  else
    assert(NumMIOperands >= II.getNumOperands() &&
           NumMIOperands <= II.getNumOperands() + II.getNumImplicitDefs() +
                            NumImpUses &&
           "#operands for dag node doesn't match .td file!");
#endif

  // Create the new machine instruction.
  MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II);

  // Add result register values for things that are defined by this
  // instruction.
  if (NumResults) {
    CreateVirtualRegisters(Node, MIB, II, IsClone, IsCloned, VRBaseMap);

    // Transfer any IR flags from the SDNode to the MachineInstr
    MachineInstr *MI = MIB.getInstr();
    const SDNodeFlags Flags = Node->getFlags();
    if (Flags.hasNoSignedZeros())
      MI->setFlag(MachineInstr::MIFlag::FmNsz);

    if (Flags.hasAllowReciprocal())
      MI->setFlag(MachineInstr::MIFlag::FmArcp);

    if (Flags.hasNoNaNs())
      MI->setFlag(MachineInstr::MIFlag::FmNoNans);

    if (Flags.hasNoInfs())
      MI->setFlag(MachineInstr::MIFlag::FmNoInfs);

    if (Flags.hasAllowContract())
      MI->setFlag(MachineInstr::MIFlag::FmContract);

    if (Flags.hasApproximateFuncs())
      MI->setFlag(MachineInstr::MIFlag::FmAfn);

    if (Flags.hasAllowReassociation())
      MI->setFlag(MachineInstr::MIFlag::FmReassoc);

    if (Flags.hasNoUnsignedWrap())
      MI->setFlag(MachineInstr::MIFlag::NoUWrap);

    if (Flags.hasNoSignedWrap())
      MI->setFlag(MachineInstr::MIFlag::NoSWrap);

    if (Flags.hasExact())
      MI->setFlag(MachineInstr::MIFlag::IsExact);

    if (Flags.hasNoFPExcept())
      MI->setFlag(MachineInstr::MIFlag::NoFPExcept);
  }

  // Emit all of the actual operands of this instruction, adding them to the
  // instruction as appropriate.
  bool HasOptPRefs = NumDefs > NumResults;
  assert((!HasOptPRefs || !HasPhysRegOuts) &&
         "Unable to cope with optional defs and phys regs defs!");
  // NumSkip: number of leading node operands to skip when the descriptor
  // declares more defs than the node produces results (optional defs);
  // the remaining node operands map to MI operands starting at NumDefs.
  unsigned NumSkip = HasOptPRefs ? NumDefs - NumResults : 0;
  for (unsigned i = NumSkip; i != NodeOperands; ++i)
    AddOperand(MIB, Node->getOperand(i), i-NumSkip+NumDefs, &II,
               VRBaseMap, /*IsDebug=*/false, IsClone, IsCloned);

  // Add scratch registers as implicit def and early clobber
  if (ScratchRegs)
    for (unsigned i = 0; ScratchRegs[i]; ++i)
      MIB.addReg(ScratchRegs[i], RegState::ImplicitDefine |
                                 RegState::EarlyClobber);

  // Set the memory reference descriptions of this instruction now that it is
  // part of the function.
  MIB.setMemRefs(cast<MachineSDNode>(Node)->memoperands());

  // Insert the instruction into position in the block. This needs to
  // happen before any custom inserter hook is called so that the
  // hook knows where in the block to insert the replacement code.
  MBB->insert(InsertPos, MIB);

  // The MachineInstr may also define physregs instead of virtregs.  These
  // physreg values can reach other instructions in different ways:
  //
  // 1. When there is a use of a Node value beyond the explicitly defined
  //    virtual registers, we emit a CopyFromReg for one of the implicitly
  //    defined physregs. This only happens when HasPhysRegOuts is true.
  //
  // 2. A CopyFromReg reading a physreg may be glued to this instruction.
  //
  // 3. A glued instruction may implicitly use a physreg.
  //
  // 4. A glued instruction may use a RegisterSDNode operand.
  //
  // Collect all the used physreg defs, and make sure that any unused physreg
  // defs are marked as dead.
  SmallVector<Register, 8> UsedRegs;

  // Additional results must be physical register defs.
  if (HasPhysRegOuts) {
    for (unsigned i = NumDefs; i < NumResults; ++i) {
      Register Reg = II.getImplicitDefs()[i - NumDefs];
      if (!Node->hasAnyUseOfValue(i))
        continue;
      // This implicitly defined physreg has a use.
      UsedRegs.push_back(Reg);
      EmitCopyFromReg(Node, i, IsClone, IsCloned, Reg, VRBaseMap);
    }
  }

  // Scan the glue chain for any used physregs.
  if (Node->getValueType(Node->getNumValues()-1) == MVT::Glue) {
    for (SDNode *F = Node->getGluedUser(); F; F = F->getGluedUser()) {
      if (F->getOpcode() == ISD::CopyFromReg) {
        UsedRegs.push_back(cast<RegisterSDNode>(F->getOperand(1))->getReg());
        continue;
      } else if (F->getOpcode() == ISD::CopyToReg) {
        // Skip CopyToReg nodes that are internal to the glue chain.
        continue;
      }
      // Collect declared implicit uses.
      const MCInstrDesc &MCID = TII->get(F->getMachineOpcode());
      UsedRegs.append(MCID.getImplicitUses(),
                      MCID.getImplicitUses() + MCID.getNumImplicitUses());
      // In addition to declared implicit uses, we must also check for
      // direct RegisterSDNode operands.
      for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)
        if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(F->getOperand(i))) {
          Register Reg = R->getReg();
          if (Reg.isPhysical())
            UsedRegs.push_back(Reg);
        }
    }
  }

  // Finally mark unused registers as dead.
  if (!UsedRegs.empty() || II.getImplicitDefs() || II.hasOptionalDef())
    MIB->setPhysRegsDeadExcept(UsedRegs, *TRI);

  // STATEPOINT is too 'dynamic' to have meaningful machine description.
  // We have to manually tie operands.
  if (Opc == TargetOpcode::STATEPOINT && NumDefs > 0) {
    assert(!HasPhysRegOuts && "STATEPOINT mishandled");
    MachineInstr *MI = MIB;
    unsigned Def = 0;
    int First = StatepointOpers(MI).getFirstGCPtrIdx();
    assert(First > 0 && "Statepoint has Defs but no GC ptr list");
    unsigned Use = (unsigned)First;
    // Walk the GC pointer list, tying each def to the next register
    // operand found among the meta arguments.
    while (Def < NumDefs) {
      if (MI->getOperand(Use).isReg())
        MI->tieOperands(Def++, Use);
      Use = StackMaps::getNextMetaArgIdx(MI, Use);
    }
  }

  // Run post-isel target hook to adjust this instruction if needed.
  if (II.hasPostISelHook())
    TLI->AdjustInstrPostInstrSelection(*MIB, Node);
}
1142
1143/// EmitSpecialNode - Generate machine code for a target-independent node and
1144/// needed dependencies.
1145void InstrEmitter::
1146EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
1147 DenseMap<SDValue, Register> &VRBaseMap) {
1148 switch (Node->getOpcode()) {
1
Control jumps to 'case CopyToReg:' at line 1159
1149 default:
1150#ifndef NDEBUG
1151 Node->dump();
1152#endif
1153 llvm_unreachable("This target-independent node should have been selected!")::llvm::llvm_unreachable_internal("This target-independent node should have been selected!"
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 1153)
;
1154 case ISD::EntryToken:
1155 llvm_unreachable("EntryToken should have been excluded from the schedule!")::llvm::llvm_unreachable_internal("EntryToken should have been excluded from the schedule!"
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 1155)
;
1156 case ISD::MERGE_VALUES:
1157 case ISD::TokenFactor: // fall thru
1158 break;
1159 case ISD::CopyToReg: {
1160 Register DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
1161 SDValue SrcVal = Node->getOperand(2);
1162 if (Register::isVirtualRegister(DestReg) && SrcVal.isMachineOpcode() &&
1163 SrcVal.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
1164 // Instead building a COPY to that vreg destination, build an
1165 // IMPLICIT_DEF instruction instead.
1166 BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
1167 TII->get(TargetOpcode::IMPLICIT_DEF), DestReg);
1168 break;
1169 }
1170 Register SrcReg;
1171 if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
2
Assuming 'R' is null
3
Taking false branch
1172 SrcReg = R->getReg();
1173 else
1174 SrcReg = getVR(SrcVal, VRBaseMap);
4
The value of 'SrcVal' is assigned to 'Op.Node'
5
Calling 'InstrEmitter::getVR'
1175
1176 if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
1177 break;
1178
1179 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
1180 DestReg).addReg(SrcReg);
1181 break;
1182 }
1183 case ISD::CopyFromReg: {
1184 unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
1185 EmitCopyFromReg(Node, 0, IsClone, IsCloned, SrcReg, VRBaseMap);
1186 break;
1187 }
1188 case ISD::EH_LABEL:
1189 case ISD::ANNOTATION_LABEL: {
1190 unsigned Opc = (Node->getOpcode() == ISD::EH_LABEL)
1191 ? TargetOpcode::EH_LABEL
1192 : TargetOpcode::ANNOTATION_LABEL;
1193 MCSymbol *S = cast<LabelSDNode>(Node)->getLabel();
1194 BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
1195 TII->get(Opc)).addSym(S);
1196 break;
1197 }
1198
1199 case ISD::LIFETIME_START:
1200 case ISD::LIFETIME_END: {
1201 unsigned TarOp = (Node->getOpcode() == ISD::LIFETIME_START)
1202 ? TargetOpcode::LIFETIME_START
1203 : TargetOpcode::LIFETIME_END;
1204 auto *FI = cast<FrameIndexSDNode>(Node->getOperand(1));
1205 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
1206 .addFrameIndex(FI->getIndex());
1207 break;
1208 }
1209
1210 case ISD::PSEUDO_PROBE: {
1211 unsigned TarOp = TargetOpcode::PSEUDO_PROBE;
1212 auto Guid = cast<PseudoProbeSDNode>(Node)->getGuid();
1213 auto Index = cast<PseudoProbeSDNode>(Node)->getIndex();
1214 auto Attr = cast<PseudoProbeSDNode>(Node)->getAttributes();
1215
1216 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
1217 .addImm(Guid)
1218 .addImm(Index)
1219 .addImm((uint8_t)PseudoProbeType::Block)
1220 .addImm(Attr);
1221 break;
1222 }
1223
1224 case ISD::INLINEASM:
1225 case ISD::INLINEASM_BR: {
1226 unsigned NumOps = Node->getNumOperands();
1227 if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
1228 --NumOps; // Ignore the glue operand.
1229
1230 // Create the inline asm machine instruction.
1231 unsigned TgtOpc = Node->getOpcode() == ISD::INLINEASM_BR
1232 ? TargetOpcode::INLINEASM_BR
1233 : TargetOpcode::INLINEASM;
1234 MachineInstrBuilder MIB =
1235 BuildMI(*MF, Node->getDebugLoc(), TII->get(TgtOpc));
1236
1237 // Add the asm string as an external symbol operand.
1238 SDValue AsmStrV = Node->getOperand(InlineAsm::Op_AsmString);
1239 const char *AsmStr = cast<ExternalSymbolSDNode>(AsmStrV)->getSymbol();
1240 MIB.addExternalSymbol(AsmStr);
1241
1242 // Add the HasSideEffect, isAlignStack, AsmDialect, MayLoad and MayStore
1243 // bits.
1244 int64_t ExtraInfo =
1245 cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_ExtraInfo))->
1246 getZExtValue();
1247 MIB.addImm(ExtraInfo);
1248
1249 // Remember to operand index of the group flags.
1250 SmallVector<unsigned, 8> GroupIdx;
1251
1252 // Remember registers that are part of early-clobber defs.
1253 SmallVector<unsigned, 8> ECRegs;
1254
1255 // Add all of the operand registers to the instruction.
1256 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
1257 unsigned Flags =
1258 cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
1259 const unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
1260
1261 GroupIdx.push_back(MIB->getNumOperands());
1262 MIB.addImm(Flags);
1263 ++i; // Skip the ID value.
1264
1265 switch (InlineAsm::getKind(Flags)) {
1266 default: llvm_unreachable("Bad flags!")::llvm::llvm_unreachable_internal("Bad flags!", "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp"
, 1266)
;
1267 case InlineAsm::Kind_RegDef:
1268 for (unsigned j = 0; j != NumVals; ++j, ++i) {
1269 unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
1270 // FIXME: Add dead flags for physical and virtual registers defined.
1271 // For now, mark physical register defs as implicit to help fast
1272 // regalloc. This makes inline asm look a lot like calls.
1273 MIB.addReg(Reg,
1274 RegState::Define |
1275 getImplRegState(Register::isPhysicalRegister(Reg)));
1276 }
1277 break;
1278 case InlineAsm::Kind_RegDefEarlyClobber:
1279 case InlineAsm::Kind_Clobber:
1280 for (unsigned j = 0; j != NumVals; ++j, ++i) {
1281 unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
1282 MIB.addReg(Reg,
1283 RegState::Define | RegState::EarlyClobber |
1284 getImplRegState(Register::isPhysicalRegister(Reg)));
1285 ECRegs.push_back(Reg);
1286 }
1287 break;
1288 case InlineAsm::Kind_RegUse: // Use of register.
1289 case InlineAsm::Kind_Imm: // Immediate.
1290 case InlineAsm::Kind_Mem: // Addressing mode.
1291 // The addressing mode has been selected, just add all of the
1292 // operands to the machine instruction.
1293 for (unsigned j = 0; j != NumVals; ++j, ++i)
1294 AddOperand(MIB, Node->getOperand(i), 0, nullptr, VRBaseMap,
1295 /*IsDebug=*/false, IsClone, IsCloned);
1296
1297 // Manually set isTied bits.
1298 if (InlineAsm::getKind(Flags) == InlineAsm::Kind_RegUse) {
1299 unsigned DefGroup = 0;
1300 if (InlineAsm::isUseOperandTiedToDef(Flags, DefGroup)) {
1301 unsigned DefIdx = GroupIdx[DefGroup] + 1;
1302 unsigned UseIdx = GroupIdx.back() + 1;
1303 for (unsigned j = 0; j != NumVals; ++j)
1304 MIB->tieOperands(DefIdx + j, UseIdx + j);
1305 }
1306 }
1307 break;
1308 }
1309 }
1310
1311 // GCC inline assembly allows input operands to also be early-clobber
1312 // output operands (so long as the operand is written only after it's
1313 // used), but this does not match the semantics of our early-clobber flag.
1314 // If an early-clobber operand register is also an input operand register,
1315 // then remove the early-clobber flag.
1316 for (unsigned Reg : ECRegs) {
1317 if (MIB->readsRegister(Reg, TRI)) {
1318 MachineOperand *MO =
1319 MIB->findRegisterDefOperand(Reg, false, false, TRI);
1320 assert(MO && "No def operand for clobbered register?")(static_cast <bool> (MO && "No def operand for clobbered register?"
) ? void (0) : __assert_fail ("MO && \"No def operand for clobbered register?\""
, "llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp", 1320, __extension__
__PRETTY_FUNCTION__))
;
1321 MO->setIsEarlyClobber(false);
1322 }
1323 }
1324
1325 // Get the mdnode from the asm if it exists and add it to the instruction.
1326 SDValue MDV = Node->getOperand(InlineAsm::Op_MDNode);
1327 const MDNode *MD = cast<MDNodeSDNode>(MDV)->getMD();
1328 if (MD)
1329 MIB.addMetadata(MD);
1330
1331 MBB->insert(InsertPos, MIB);
1332 break;
1333 }
1334 }
1335}
1336
1337/// InstrEmitter - Construct an InstrEmitter and set it to start inserting
1338/// at the given position in the given block.
1339InstrEmitter::InstrEmitter(const TargetMachine &TM, MachineBasicBlock *mbb,
1340 MachineBasicBlock::iterator insertpos,
1341 bool UseInstrRefDebugInfo)
1342 : MF(mbb->getParent()), MRI(&MF->getRegInfo()),
1343 TII(MF->getSubtarget().getInstrInfo()),
1344 TRI(MF->getSubtarget().getRegisterInfo()),
1345 TLI(MF->getSubtarget().getTargetLowering()), MBB(mbb),
1346 InsertPos(insertpos) {
1347 EmitDebugInstrRefs = UseInstrRefDebugInfo;
1348}

/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include/llvm/CodeGen/SelectionDAGNodes.h

1//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SDNode class and derived classes, which are used to
10// represent the nodes and operations present in a SelectionDAG. These nodes
11// and operations are machine code level operations, with some similarities to
12// the GCC RTL representation.
13//
14// Clients should include the SelectionDAG.h file instead of this file directly.
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19#define LLVM_CODEGEN_SELECTIONDAGNODES_H
20
21#include "llvm/ADT/APFloat.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/FoldingSet.h"
25#include "llvm/ADT/GraphTraits.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/ilist_node.h"
29#include "llvm/ADT/iterator.h"
30#include "llvm/ADT/iterator_range.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/Register.h"
34#include "llvm/CodeGen/ValueTypes.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DebugLoc.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/Metadata.h"
40#include "llvm/IR/Operator.h"
41#include "llvm/Support/AlignOf.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/MachineValueType.h"
46#include "llvm/Support/TypeSize.h"
47#include <algorithm>
48#include <cassert>
49#include <climits>
50#include <cstddef>
51#include <cstdint>
52#include <cstring>
53#include <iterator>
54#include <string>
55#include <tuple>
56
57namespace llvm {
58
59class APInt;
60class Constant;
61class GlobalValue;
62class MachineBasicBlock;
63class MachineConstantPoolValue;
64class MCSymbol;
65class raw_ostream;
66class SDNode;
67class SelectionDAG;
68class Type;
69class Value;
70
71void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
72 bool force = false);
73
74/// This represents a list of ValueType's that has been intern'd by
75/// a SelectionDAG. Instances of this simple value class are returned by
76/// SelectionDAG::getVTList(...).
77///
78struct SDVTList {
79 const EVT *VTs;
80 unsigned int NumVTs;
81};
82
83namespace ISD {
84
85 /// Node predicates
86
87/// If N is a BUILD_VECTOR or SPLAT_VECTOR node whose elements are all the
88/// same constant or undefined, return true and return the constant value in
89/// \p SplatValue.
90bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
91
92/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
93/// all of the elements are ~0 or undef. If \p BuildVectorOnly is set to
94/// true, it only checks BUILD_VECTOR.
95bool isConstantSplatVectorAllOnes(const SDNode *N,
96 bool BuildVectorOnly = false);
97
98/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
99/// all of the elements are 0 or undef. If \p BuildVectorOnly is set to true, it
100/// only checks BUILD_VECTOR.
101bool isConstantSplatVectorAllZeros(const SDNode *N,
102 bool BuildVectorOnly = false);
103
104/// Return true if the specified node is a BUILD_VECTOR where all of the
105/// elements are ~0 or undef.
106bool isBuildVectorAllOnes(const SDNode *N);
107
108/// Return true if the specified node is a BUILD_VECTOR where all of the
109/// elements are 0 or undef.
110bool isBuildVectorAllZeros(const SDNode *N);
111
112/// Return true if the specified node is a BUILD_VECTOR node of all
113/// ConstantSDNode or undef.
114bool isBuildVectorOfConstantSDNodes(const SDNode *N);
115
116/// Return true if the specified node is a BUILD_VECTOR node of all
117/// ConstantFPSDNode or undef.
118bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
119
120/// Return true if the node has at least one operand and all operands of the
121/// specified node are ISD::UNDEF.
122bool allOperandsUndef(const SDNode *N);
123
124} // end namespace ISD
125
126//===----------------------------------------------------------------------===//
127/// Unlike LLVM values, Selection DAG nodes may return multiple
128/// values as the result of a computation. Many nodes return multiple values,
129/// from loads (which define a token and a return value) to ADDC (which returns
130/// a result and a carry value), to calls (which may return an arbitrary number
131/// of values).
132///
133/// As such, each use of a SelectionDAG computation must indicate the node that
134/// computes it as well as which return value to use from that node. This pair
135/// of information is represented with the SDValue value type.
136///
137class SDValue {
138 friend struct DenseMapInfo<SDValue>;
139
140 SDNode *Node = nullptr; // The node defining the value we are using.
141 unsigned ResNo = 0; // Which return value of the node we are using.
142
143public:
144 SDValue() = default;
145 SDValue(SDNode *node, unsigned resno);
146
147 /// get the index which selects a specific result in the SDNode
148 unsigned getResNo() const { return ResNo; }
149
150 /// get the SDNode which holds the desired result
151 SDNode *getNode() const { return Node; }
152
153 /// set the SDNode
154 void setNode(SDNode *N) { Node = N; }
155
156 inline SDNode *operator->() const { return Node; }
157
158 bool operator==(const SDValue &O) const {
159 return Node == O.Node && ResNo == O.ResNo;
160 }
161 bool operator!=(const SDValue &O) const {
162 return !operator==(O);
163 }
164 bool operator<(const SDValue &O) const {
165 return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
166 }
167 explicit operator bool() const {
168 return Node != nullptr;
169 }
170
171 SDValue getValue(unsigned R) const {
172 return SDValue(Node, R);
173 }
174
175 /// Return true if this node is an operand of N.
176 bool isOperandOf(const SDNode *N) const;
177
178 /// Return the ValueType of the referenced return value.
179 inline EVT getValueType() const;
180
181 /// Return the simple ValueType of the referenced return value.
182 MVT getSimpleValueType() const {
183 return getValueType().getSimpleVT();
184 }
185
186 /// Returns the size of the value in bits.
187 ///
188 /// If the value type is a scalable vector type, the scalable property will
189 /// be set and the runtime size will be a positive integer multiple of the
190 /// base size.
191 TypeSize getValueSizeInBits() const {
192 return getValueType().getSizeInBits();
193 }
194
195 uint64_t getScalarValueSizeInBits() const {
196 return getValueType().getScalarType().getFixedSizeInBits();
197 }
198
199 // Forwarding methods - These forward to the corresponding methods in SDNode.
200 inline unsigned getOpcode() const;
201 inline unsigned getNumOperands() const;
202 inline const SDValue &getOperand(unsigned i) const;
203 inline uint64_t getConstantOperandVal(unsigned i) const;
204 inline const APInt &getConstantOperandAPInt(unsigned i) const;
205 inline bool isTargetMemoryOpcode() const;
206 inline bool isTargetOpcode() const;
207 inline bool isMachineOpcode() const;
208 inline bool isUndef() const;
209 inline unsigned getMachineOpcode() const;
210 inline const DebugLoc &getDebugLoc() const;
211 inline void dump() const;
212 inline void dump(const SelectionDAG *G) const;
213 inline void dumpr() const;
214 inline void dumpr(const SelectionDAG *G) const;
215
216 /// Return true if this operand (which must be a chain) reaches the
217 /// specified operand without crossing any side-effecting instructions.
218 /// In practice, this looks through token factors and non-volatile loads.
219 /// In order to remain efficient, this only
220 /// looks a couple of nodes in, it does not do an exhaustive search.
221 bool reachesChainWithoutSideEffects(SDValue Dest,
222 unsigned Depth = 2) const;
223
224 /// Return true if there are no nodes using value ResNo of Node.
225 inline bool use_empty() const;
226
227 /// Return true if there is exactly one node using value ResNo of Node.
228 inline bool hasOneUse() const;
229};
230
231template<> struct DenseMapInfo<SDValue> {
232 static inline SDValue getEmptyKey() {
233 SDValue V;
234 V.ResNo = -1U;
235 return V;
236 }
237
238 static inline SDValue getTombstoneKey() {
239 SDValue V;
240 V.ResNo = -2U;
241 return V;
242 }
243
244 static unsigned getHashValue(const SDValue &Val) {
245 return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
246 (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
247 }
248
249 static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
250 return LHS == RHS;
251 }
252};
253
254/// Allow casting operators to work directly on
255/// SDValues as if they were SDNode*'s.
256template<> struct simplify_type<SDValue> {
257 using SimpleType = SDNode *;
258
259 static SimpleType getSimplifiedValue(SDValue &Val) {
260 return Val.getNode();
261 }
262};
263template<> struct simplify_type<const SDValue> {
264 using SimpleType = /*const*/ SDNode *;
265
266 static SimpleType getSimplifiedValue(const SDValue &Val) {
267 return Val.getNode();
268 }
269};
270
271/// Represents a use of a SDNode. This class holds an SDValue,
272/// which records the SDNode being used and the result number, a
273/// pointer to the SDNode using the value, and Next and Prev pointers,
274/// which link together all the uses of an SDNode.
275///
276class SDUse {
277 /// Val - The value being used.
278 SDValue Val;
279 /// User - The user of this value.
280 SDNode *User = nullptr;
281 /// Prev, Next - Pointers to the uses list of the SDNode referred by
282 /// this operand.
283 SDUse **Prev = nullptr;
284 SDUse *Next = nullptr;
285
286public:
287 SDUse() = default;
288 SDUse(const SDUse &U) = delete;
289 SDUse &operator=(const SDUse &) = delete;
290
291 /// Normally SDUse will just implicitly convert to an SDValue that it holds.
292 operator const SDValue&() const { return Val; }
293
294 /// If implicit conversion to SDValue doesn't work, the get() method returns
295 /// the SDValue.
296 const SDValue &get() const { return Val; }
297
298 /// This returns the SDNode that contains this Use.
299 SDNode *getUser() { return User; }
300
301 /// Get the next SDUse in the use list.
302 SDUse *getNext() const { return Next; }
303
304 /// Convenience function for get().getNode().
305 SDNode *getNode() const { return Val.getNode(); }
306 /// Convenience function for get().getResNo().
307 unsigned getResNo() const { return Val.getResNo(); }
308 /// Convenience function for get().getValueType().
309 EVT getValueType() const { return Val.getValueType(); }
310
311 /// Convenience function for get().operator==
312 bool operator==(const SDValue &V) const {
313 return Val == V;
314 }
315
316 /// Convenience function for get().operator!=
317 bool operator!=(const SDValue &V) const {
318 return Val != V;
319 }
320
321 /// Convenience function for get().operator<
322 bool operator<(const SDValue &V) const {
323 return Val < V;
324 }
325
326private:
327 friend class SelectionDAG;
328 friend class SDNode;
329 // TODO: unfriend HandleSDNode once we fix its operand handling.
330 friend class HandleSDNode;
331
332 void setUser(SDNode *p) { User = p; }
333
334 /// Remove this use from its existing use list, assign it the
335 /// given value, and add it to the new value's node's use list.
336 inline void set(const SDValue &V);
337 /// Like set, but only supports initializing a newly-allocated
338 /// SDUse with a non-null value.
339 inline void setInitial(const SDValue &V);
340 /// Like set, but only sets the Node portion of the value,
341 /// leaving the ResNo portion unmodified.
342 inline void setNode(SDNode *N);
343
344 void addToList(SDUse **List) {
345 Next = *List;
346 if (Next) Next->Prev = &Next;
347 Prev = List;
348 *List = this;
349 }
350
351 void removeFromList() {
352 *Prev = Next;
353 if (Next) Next->Prev = Prev;
354 }
355};
356
357/// simplify_type specializations - Allow casting operators to work directly on
358/// SDValues as if they were SDNode*'s.
359template<> struct simplify_type<SDUse> {
360 using SimpleType = SDNode *;
361
362 static SimpleType getSimplifiedValue(SDUse &Val) {
363 return Val.getNode();
364 }
365};
366
367/// These are IR-level optimization flags that may be propagated to SDNodes.
368/// TODO: This data structure should be shared by the IR optimizer and the
369/// the backend.
370struct SDNodeFlags {
371private:
372 bool NoUnsignedWrap : 1;
373 bool NoSignedWrap : 1;
374 bool Exact : 1;
375 bool NoNaNs : 1;
376 bool NoInfs : 1;
377 bool NoSignedZeros : 1;
378 bool AllowReciprocal : 1;
379 bool AllowContract : 1;
380 bool ApproximateFuncs : 1;
381 bool AllowReassociation : 1;
382
383 // We assume instructions do not raise floating-point exceptions by default,
384 // and only those marked explicitly may do so. We could choose to represent
385 // this via a positive "FPExcept" flags like on the MI level, but having a
386 // negative "NoFPExcept" flag here (that defaults to true) makes the flag
387 // intersection logic more straightforward.
388 bool NoFPExcept : 1;
389
390public:
391 /// Default constructor turns off all optimization flags.
392 SDNodeFlags()
393 : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false),
394 NoInfs(false), NoSignedZeros(false), AllowReciprocal(false),
395 AllowContract(false), ApproximateFuncs(false),
396 AllowReassociation(false), NoFPExcept(false) {}
397
398 /// Propagate the fast-math-flags from an IR FPMathOperator.
399 void copyFMF(const FPMathOperator &FPMO) {
400 setNoNaNs(FPMO.hasNoNaNs());
401 setNoInfs(FPMO.hasNoInfs());
402 setNoSignedZeros(FPMO.hasNoSignedZeros());
403 setAllowReciprocal(FPMO.hasAllowReciprocal());
404 setAllowContract(FPMO.hasAllowContract());
405 setApproximateFuncs(FPMO.hasApproxFunc());
406 setAllowReassociation(FPMO.hasAllowReassoc());
407 }
408
409 // These are mutators for each flag.
410 void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
411 void setNoSignedWrap(bool b) { NoSignedWrap = b; }
412 void setExact(bool b) { Exact = b; }
413 void setNoNaNs(bool b) { NoNaNs = b; }
414 void setNoInfs(bool b) { NoInfs = b; }
415 void setNoSignedZeros(bool b) { NoSignedZeros = b; }
416 void setAllowReciprocal(bool b) { AllowReciprocal = b; }
417 void setAllowContract(bool b) { AllowContract = b; }
418 void setApproximateFuncs(bool b) { ApproximateFuncs = b; }
419 void setAllowReassociation(bool b) { AllowReassociation = b; }
420 void setNoFPExcept(bool b) { NoFPExcept = b; }
421
422 // These are accessors for each flag.
423 bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
424 bool hasNoSignedWrap() const { return NoSignedWrap; }
425 bool hasExact() const { return Exact; }
426 bool hasNoNaNs() const { return NoNaNs; }
427 bool hasNoInfs() const { return NoInfs; }
428 bool hasNoSignedZeros() const { return NoSignedZeros; }
429 bool hasAllowReciprocal() const { return AllowReciprocal; }
430 bool hasAllowContract() const { return AllowContract; }
431 bool hasApproximateFuncs() const { return ApproximateFuncs; }
432 bool hasAllowReassociation() const { return AllowReassociation; }
433 bool hasNoFPExcept() const { return NoFPExcept; }
434
435 /// Clear any flags in this flag set that aren't also set in Flags. All
436 /// flags will be cleared if Flags are undefined.
437 void intersectWith(const SDNodeFlags Flags) {
438 NoUnsignedWrap &= Flags.NoUnsignedWrap;
439 NoSignedWrap &= Flags.NoSignedWrap;
440 Exact &= Flags.Exact;
441 NoNaNs &= Flags.NoNaNs;
442 NoInfs &= Flags.NoInfs;
443 NoSignedZeros &= Flags.NoSignedZeros;
444 AllowReciprocal &= Flags.AllowReciprocal;
445 AllowContract &= Flags.AllowContract;
446 ApproximateFuncs &= Flags.ApproximateFuncs;
447 AllowReassociation &= Flags.AllowReassociation;
448 NoFPExcept &= Flags.NoFPExcept;
449 }
450};
451
452/// Represents one node in the SelectionDAG.
453///
454class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
455private:
456 /// The operation that this node performs.
457 int16_t NodeType;
458
459protected:
460 // We define a set of mini-helper classes to help us interpret the bits in our
461 // SubclassData. These are designed to fit within a uint16_t so they pack
462 // with NodeType.
463
464#if defined(_AIX) && (!defined(__GNUC__4) || defined(__clang__1))
465// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
466// and give the `pack` pragma push semantics.
467#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")pack(2)
468#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")pack(pop)
469#else
470#define BEGIN_TWO_BYTE_PACK()
471#define END_TWO_BYTE_PACK()
472#endif
473
474BEGIN_TWO_BYTE_PACK()
475 class SDNodeBitfields {
476 friend class SDNode;
477 friend class MemIntrinsicSDNode;
478 friend class MemSDNode;
479 friend class SelectionDAG;
480
481 uint16_t HasDebugValue : 1;
482 uint16_t IsMemIntrinsic : 1;
483 uint16_t IsDivergent : 1;
484 };
485 enum { NumSDNodeBits = 3 };
486
487 class ConstantSDNodeBitfields {
488 friend class ConstantSDNode;
489
490 uint16_t : NumSDNodeBits;
491
492 uint16_t IsOpaque : 1;
493 };
494
495 class MemSDNodeBitfields {
496 friend class MemSDNode;
497 friend class MemIntrinsicSDNode;
498 friend class AtomicSDNode;
499
500 uint16_t : NumSDNodeBits;
501
502 uint16_t IsVolatile : 1;
503 uint16_t IsNonTemporal : 1;
504 uint16_t IsDereferenceable : 1;
505 uint16_t IsInvariant : 1;
506 };
507 enum { NumMemSDNodeBits = NumSDNodeBits + 4 };
508
509 class LSBaseSDNodeBitfields {
510 friend class LSBaseSDNode;
511 friend class VPBaseLoadStoreSDNode;
512 friend class MaskedLoadStoreSDNode;
513 friend class MaskedGatherScatterSDNode;
514 friend class VPGatherScatterSDNode;
515
516 uint16_t : NumMemSDNodeBits;
517
518 // This storage is shared between disparate class hierarchies to hold an
519 // enumeration specific to the class hierarchy in use.
520 // LSBaseSDNode => enum ISD::MemIndexedMode
521 // VPLoadStoreBaseSDNode => enum ISD::MemIndexedMode
522 // MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode
523 // VPGatherScatterSDNode => enum ISD::MemIndexType
524 // MaskedGatherScatterSDNode => enum ISD::MemIndexType
525 uint16_t AddressingMode : 3;
526 };
527 enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };
528
529 class LoadSDNodeBitfields {
530 friend class LoadSDNode;
531 friend class VPLoadSDNode;
532 friend class VPStridedLoadSDNode;
533 friend class MaskedLoadSDNode;
534 friend class MaskedGatherSDNode;
535 friend class VPGatherSDNode;
536
537 uint16_t : NumLSBaseSDNodeBits;
538
539 uint16_t ExtTy : 2; // enum ISD::LoadExtType
540 uint16_t IsExpanding : 1;
541 };
542
543 class StoreSDNodeBitfields {
544 friend class StoreSDNode;
545 friend class VPStoreSDNode;
546 friend class VPStridedStoreSDNode;
547 friend class MaskedStoreSDNode;
548 friend class MaskedScatterSDNode;
549 friend class VPScatterSDNode;
550
551 uint16_t : NumLSBaseSDNodeBits;
552
553 uint16_t IsTruncating : 1;
554 uint16_t IsCompressing : 1;
555 };
556
557 union {
558 char RawSDNodeBits[sizeof(uint16_t)];
559 SDNodeBitfields SDNodeBits;
560 ConstantSDNodeBitfields ConstantSDNodeBits;
561 MemSDNodeBitfields MemSDNodeBits;
562 LSBaseSDNodeBitfields LSBaseSDNodeBits;
563 LoadSDNodeBitfields LoadSDNodeBits;
564 StoreSDNodeBitfields StoreSDNodeBits;
565 };
566END_TWO_BYTE_PACK()
567#undef BEGIN_TWO_BYTE_PACK
568#undef END_TWO_BYTE_PACK
569
570 // RawSDNodeBits must cover the entirety of the union. This means that all of
571 // the union's members must have size <= RawSDNodeBits. We write the RHS as
572 // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
573 static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
574 static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
575 static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
576 static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
577 static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
578 static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
579
580private:
581 friend class SelectionDAG;
582 // TODO: unfriend HandleSDNode once we fix its operand handling.
583 friend class HandleSDNode;
584
585 /// Unique id per SDNode in the DAG.
586 int NodeId = -1;
587
588 /// The values that are used by this operation.
589 SDUse *OperandList = nullptr;
590
591 /// The types of the values this node defines. SDNode's may
592 /// define multiple values simultaneously.
593 const EVT *ValueList;
594
595 /// List of uses for this SDNode.
596 SDUse *UseList = nullptr;
597
598 /// The number of entries in the Operand/Value list.
599 unsigned short NumOperands = 0;
600 unsigned short NumValues;
601
602 // The ordering of the SDNodes. It roughly corresponds to the ordering of the
603 // original LLVM instructions.
604 // This is used for turning off scheduling, because we'll forgo
605 // the normal scheduling algorithms and output the instructions according to
606 // this ordering.
607 unsigned IROrder;
608
609 /// Source line information.
610 DebugLoc debugLoc;
611
612 /// Return a pointer to the specified value type.
613 static const EVT *getValueTypeList(EVT VT);
614
615 SDNodeFlags Flags;
616
617public:
618 /// Unique and persistent id per SDNode in the DAG. Used for debug printing.
619 /// We do not place that under `#if LLVM_ENABLE_ABI_BREAKING_CHECKS`
620 /// intentionally because it adds unneeded complexity without noticeable
621 /// benefits (see discussion with @thakis in D120714).
622 uint16_t PersistentId;
623
624 //===--------------------------------------------------------------------===//
625 // Accessors
626 //
627
628 /// Return the SelectionDAG opcode value for this node. For
629 /// pre-isel nodes (those for which isMachineOpcode returns false), these
630 /// are the opcode values in the ISD and <target>ISD namespaces. For
631 /// post-isel opcodes, see getMachineOpcode.
632 unsigned getOpcode() const { return (unsigned short)NodeType; }
633
634 /// Test if this node has a target-specific opcode (in the
635 /// \<target\>ISD namespace).
636 bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
637
638 /// Test if this node has a target-specific opcode that may raise
639 /// FP exceptions (in the \<target\>ISD namespace and greater than
640 /// FIRST_TARGET_STRICTFP_OPCODE). Note that all target memory
641 /// opcode are currently automatically considered to possibly raise
642 /// FP exceptions as well.
643 bool isTargetStrictFPOpcode() const {
644 return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE;
645 }
646
647 /// Test if this node has a target-specific
648 /// memory-referencing opcode (in the \<target\>ISD namespace and
649 /// greater than FIRST_TARGET_MEMORY_OPCODE).
650 bool isTargetMemoryOpcode() const {
651 return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
652 }
653
654 /// Return true if the type of the node type undefined.
655 bool isUndef() const { return NodeType == ISD::UNDEF; }
656
657 /// Test if this node is a memory intrinsic (with valid pointer information).
658 /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
659 /// non-memory intrinsics (with chains) that are not really instances of
660 /// MemSDNode. For such nodes, we need some extra state to determine the
661 /// proper classof relationship.
662 bool isMemIntrinsic() const {
663 return (NodeType == ISD::INTRINSIC_W_CHAIN ||
664 NodeType == ISD::INTRINSIC_VOID) &&
665 SDNodeBits.IsMemIntrinsic;
666 }
667
668 /// Test if this node is a strict floating point pseudo-op.
669 bool isStrictFPOpcode() {
670 switch (NodeType) {
671 default:
672 return false;
673 case ISD::STRICT_FP16_TO_FP:
674 case ISD::STRICT_FP_TO_FP16:
675#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
676 case ISD::STRICT_##DAGN:
677#include "llvm/IR/ConstrainedOps.def"
678 return true;
679 }
680 }
681
682 /// Test if this node is a vector predication operation.
683 bool isVPOpcode() const { return ISD::isVPOpcode(getOpcode()); }
684
685 /// Test if this node has a post-isel opcode, directly
686 /// corresponding to a MachineInstr opcode.
687 bool isMachineOpcode() const { return NodeType < 0; }
688
689 /// This may only be called if isMachineOpcode returns
690 /// true. It returns the MachineInstr opcode value that the node's opcode
691 /// corresponds to.
692 unsigned getMachineOpcode() const {
693 assert(isMachineOpcode() && "Not a MachineInstr opcode!")(static_cast <bool> (isMachineOpcode() && "Not a MachineInstr opcode!"
) ? void (0) : __assert_fail ("isMachineOpcode() && \"Not a MachineInstr opcode!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 693, __extension__
__PRETTY_FUNCTION__))
;
694 return ~NodeType;
695 }
696
 // Accessors for the HasDebugValue bit (presumably set when a debug value is
 // attached to this node — confirm against SelectionDAG's debug-value code).
 697 bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
 698 void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }
 699
 // Read-only: the divergence bit is computed elsewhere in the DAG.
 700 bool isDivergent() const { return SDNodeBits.IsDivergent; }
 701
 702 /// Return true if there are no uses of this node.
 703 bool use_empty() const { return UseList == nullptr; }
 704
 705 /// Return true if there is exactly one use of this node.
 706 bool hasOneUse() const { return hasSingleElement(uses()); }
 707
 708 /// Return the number of uses of this node. This method takes
 709 /// time proportional to the number of uses.
 710 size_t use_size() const { return std::distance(use_begin(), use_end()); }
 711
 712 /// Return the unique node id.
 713 int getNodeId() const { return NodeId; }
 714
 715 /// Set unique node id.
 716 void setNodeId(int Id) { NodeId = Id; }
 717
 718 /// Return the node ordering.
 719 unsigned getIROrder() const { return IROrder; }
 720
 721 /// Set the node ordering.
 722 void setIROrder(unsigned Order) { IROrder = Order; }
 723
 724 /// Return the source location info.
 725 const DebugLoc &getDebugLoc() const { return debugLoc; }
 726
 727 /// Set source location info. Try to avoid this, putting
 728 /// it in the constructor is preferable.
 729 void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
730
731 /// This class provides iterator support for SDUse
732 /// operands that use a specific SDNode.
733 class use_iterator {
734 friend class SDNode;
735
736 SDUse *Op = nullptr;
737
738 explicit use_iterator(SDUse *op) : Op(op) {}
739
740 public:
741 using iterator_category = std::forward_iterator_tag;
742 using value_type = SDUse;
743 using difference_type = std::ptrdiff_t;
744 using pointer = value_type *;
745 using reference = value_type &;
746
747 use_iterator() = default;
748 use_iterator(const use_iterator &I) = default;
749
750 bool operator==(const use_iterator &x) const { return Op == x.Op; }
751 bool operator!=(const use_iterator &x) const {
752 return !operator==(x);
753 }
754
755 /// Return true if this iterator is at the end of uses list.
756 bool atEnd() const { return Op == nullptr; }
757
758 // Iterator traversal: forward iteration only.
759 use_iterator &operator++() { // Preincrement
760 assert(Op && "Cannot increment end iterator!")(static_cast <bool> (Op && "Cannot increment end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot increment end iterator!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 760, __extension__
__PRETTY_FUNCTION__))
;
761 Op = Op->getNext();
762 return *this;
763 }
764
765 use_iterator operator++(int) { // Postincrement
766 use_iterator tmp = *this; ++*this; return tmp;
767 }
768
769 /// Retrieve a pointer to the current user node.
770 SDNode *operator*() const {
771 assert(Op && "Cannot dereference end iterator!")(static_cast <bool> (Op && "Cannot dereference end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 771, __extension__
__PRETTY_FUNCTION__))
;
772 return Op->getUser();
773 }
774
775 SDNode *operator->() const { return operator*(); }
776
777 SDUse &getUse() const { return *Op; }
778
779 /// Retrieve the operand # of this use in its user.
780 unsigned getOperandNo() const {
781 assert(Op && "Cannot dereference end iterator!")(static_cast <bool> (Op && "Cannot dereference end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 781, __extension__
__PRETTY_FUNCTION__))
;
782 return (unsigned)(Op - Op->getUser()->OperandList);
783 }
784 };
785
 786 /// Provide iteration support to walk over all uses of an SDNode.
 787 use_iterator use_begin() const {
 788 return use_iterator(UseList)
 789 }
 790
 // The end iterator is just a null SDUse pointer.
 791 static use_iterator use_end() { return use_iterator(nullptr); }
 792
 // Both overloads yield the same (non-const) iterator type, so uses() is
 // available on const and non-const nodes alike.
 793 inline iterator_range<use_iterator> uses() {
 794 return make_range(use_begin(), use_end());
 795 }
 796 inline iterator_range<use_iterator> uses() const {
 797 return make_range(use_begin(), use_end());
 798 }
799
 // Use-count and reachability queries; the undefined members below are
 // declared here and defined out of line.
 800 /// Return true if there are exactly NUSES uses of the indicated value.
 801 /// This method ignores uses of other values defined by this operation.
 802 bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;
 803
 804 /// Return true if there are any use of the indicated value.
 805 /// This method ignores uses of other values defined by this operation.
 806 bool hasAnyUseOfValue(unsigned Value) const;
 807
 808 /// Return true if this node is the only use of N.
 809 bool isOnlyUserOf(const SDNode *N) const;
 810
 811 /// Return true if this node is an operand of N.
 812 bool isOperandOf(const SDNode *N) const;
 813
 814 /// Return true if this node is a predecessor of N.
 815 /// NOTE: Implemented on top of hasPredecessor and every bit as
 816 /// expensive. Use carefully.
 817 bool isPredecessorOf(const SDNode *N) const {
 // Predecessor-of is simply the flipped hasPredecessor query.
 818 return N->hasPredecessor(this);
 819 }
 820
 821 /// Return true if N is a predecessor of this node.
 822 /// N is either an operand of this node, or can be reached by recursively
 823 /// traversing up the operands.
 824 /// NOTE: This is an expensive method. Use it carefully.
 825 bool hasPredecessor(const SDNode *N) const;
826
827 /// Returns true if N is a predecessor of any node in Worklist. This
828 /// helper keeps Visited and Worklist sets externally to allow unions
829 /// searches to be performed in parallel, caching of results across
830 /// queries and incremental addition to Worklist. Stops early if N is
831 /// found but will resume. Remember to clear Visited and Worklists
832 /// if DAG changes. MaxSteps gives a maximum number of nodes to visit before
833 /// giving up. The TopologicalPrune flag signals that positive NodeIds are
834 /// topologically ordered (Operands have strictly smaller node id) and search
835 /// can be pruned leveraging this.
836 static bool hasPredecessorHelper(const SDNode *N,
837 SmallPtrSetImpl<const SDNode *> &Visited,
838 SmallVectorImpl<const SDNode *> &Worklist,
839 unsigned int MaxSteps = 0,
840 bool TopologicalPrune = false) {
841 SmallVector<const SDNode *, 8> DeferredNodes;
842 if (Visited.count(N))
843 return true;
844
845 // Node Id's are assigned in three places: As a topological
846 // ordering (> 0), during legalization (results in values set to
847 // 0), new nodes (set to -1). If N has a topolgical id then we
848 // know that all nodes with ids smaller than it cannot be
849 // successors and we need not check them. Filter out all node
850 // that can't be matches. We add them to the worklist before exit
851 // in case of multiple calls. Note that during selection the topological id
852 // may be violated if a node's predecessor is selected before it. We mark
853 // this at selection negating the id of unselected successors and
854 // restricting topological pruning to positive ids.
855
856 int NId = N->getNodeId();
857 // If we Invalidated the Id, reconstruct original NId.
858 if (NId < -1)
859 NId = -(NId + 1);
860
861 bool Found = false;
862 while (!Worklist.empty()) {
863 const SDNode *M = Worklist.pop_back_val();
864 int MId = M->getNodeId();
865 if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
866 (MId > 0) && (MId < NId)) {
867 DeferredNodes.push_back(M);
868 continue;
869 }
870 for (const SDValue &OpV : M->op_values()) {
871 SDNode *Op = OpV.getNode();
872 if (Visited.insert(Op).second)
873 Worklist.push_back(Op);
874 if (Op == N)
875 Found = true;
876 }
877 if (Found)
878 break;
879 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
880 break;
881 }
882 // Push deferred nodes back on worklist.
883 Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
884 // If we bailed early, conservatively return found.
885 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
886 return true;
887 return Found;
888 }
889
890 /// Return true if all the users of N are contained in Nodes.
891 /// NOTE: Requires at least one match, but doesn't require them all.
892 static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);
893
894 /// Return the number of values used by this operation.
895 unsigned getNumOperands() const { return NumOperands; }
896
897 /// Return the maximum number of operands that a SDNode can hold.
898 static constexpr size_t getMaxNumOperands() {
899 return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
900 }
901
902 /// Helper method returns the integer value of a ConstantSDNode operand.
903 inline uint64_t getConstantOperandVal(unsigned Num) const;
904
905 /// Helper method returns the APInt of a ConstantSDNode operand.
906 inline const APInt &getConstantOperandAPInt(unsigned Num) const;
907
908 const SDValue &getOperand(unsigned Num) const {
909 assert(Num < NumOperands && "Invalid child # of SDNode!")(static_cast <bool> (Num < NumOperands && "Invalid child # of SDNode!"
) ? void (0) : __assert_fail ("Num < NumOperands && \"Invalid child # of SDNode!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 909, __extension__
__PRETTY_FUNCTION__))
;
910 return OperandList[Num];
911 }
912
 // Operands are stored contiguously as SDUse objects, so a raw SDUse pointer
 // serves as a random-access operand iterator.
 913 using op_iterator = SDUse *;
 914
 915 op_iterator op_begin() const { return OperandList; }
 916 op_iterator op_end() const { return OperandList+NumOperands; }
 917 ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
 918
 919 /// Iterator for directly iterating over the operand SDValue's.
 920 struct value_op_iterator
 921 : iterator_adaptor_base<value_op_iterator, op_iterator,
 922 std::random_access_iterator_tag, SDValue,
 923 ptrdiff_t, value_op_iterator *,
 924 value_op_iterator *> {
 925 explicit value_op_iterator(SDUse *U = nullptr)
 926 : iterator_adaptor_base(U) {}
 927
 // Dereference yields the SDValue held by the wrapped SDUse (I is the
 // underlying iterator provided by iterator_adaptor_base).
 928 const SDValue &operator*() const { return I->get(); }
 929 };
 930
 931 iterator_range<value_op_iterator> op_values() const {
 932 return make_range(value_op_iterator(op_begin()),
 933 value_op_iterator(op_end()));
 934 }
935
936 SDVTList getVTList() const {
937 SDVTList X = { ValueList, NumValues };
938 return X;
939 }
940
941 /// If this node has a glue operand, return the node
942 /// to which the glue operand points. Otherwise return NULL.
943 SDNode *getGluedNode() const {
944 if (getNumOperands() != 0 &&
945 getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
946 return getOperand(getNumOperands()-1).getNode();
947 return nullptr;
948 }
949
950 /// If this node has a glue value with a user, return
951 /// the user (there is at most one). Otherwise return NULL.
952 SDNode *getGluedUser() const {
953 for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
954 if (UI.getUse().get().getValueType() == MVT::Glue)
955 return *UI;
956 return nullptr;
957 }
958
959 SDNodeFlags getFlags() const { return Flags; }
960 void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }
961
962 /// Clear any flags in this node that aren't also set in Flags.
963 /// If Flags is not in a defined state then this has no effect.
964 void intersectFlagsWith(const SDNodeFlags Flags);
965
966 /// Return the number of values defined/returned by this operator.
967 unsigned getNumValues() const { return NumValues; }
968
969 /// Return the type of a specified result.
970 EVT getValueType(unsigned ResNo) const {
971 assert(ResNo < NumValues && "Illegal result number!")(static_cast <bool> (ResNo < NumValues && "Illegal result number!"
) ? void (0) : __assert_fail ("ResNo < NumValues && \"Illegal result number!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 971, __extension__
__PRETTY_FUNCTION__))
;
972 return ValueList[ResNo];
973 }
974
975 /// Return the type of a specified result as a simple type.
976 MVT getSimpleValueType(unsigned ResNo) const {
977 return getValueType(ResNo).getSimpleVT();
978 }
979
980 /// Returns MVT::getSizeInBits(getValueType(ResNo)).
981 ///
982 /// If the value type is a scalable vector type, the scalable property will
983 /// be set and the runtime size will be a positive integer multiple of the
984 /// base size.
985 TypeSize getValueSizeInBits(unsigned ResNo) const {
986 return getValueType(ResNo).getSizeInBits();
987 }
988
989 using value_iterator = const EVT *;
990
991 value_iterator value_begin() const { return ValueList; }
992 value_iterator value_end() const { return ValueList+NumValues; }
993 iterator_range<value_iterator> values() const {
994 return llvm::make_range(value_begin(), value_end());
995 }
996
 // Printing and debugging helpers; all declared here, defined out of line.
 997 /// Return the opcode of this operation for printing.
 998 std::string getOperationName(const SelectionDAG *G = nullptr) const;
 // Textual name for an indexed addressing mode (used by the printers).
 999 static const char* getIndexedModeName(ISD::MemIndexedMode AM);
 1000 void print_types(raw_ostream &OS, const SelectionDAG *G) const;
 1001 void print_details(raw_ostream &OS, const SelectionDAG *G) const;
 1002 void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
 1003 void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
 1004
 1005 /// Print a SelectionDAG node and all children down to
 1006 /// the leaves. The given SelectionDAG allows target-specific nodes
 1007 /// to be printed in human-readable form. Unlike printr, this will
 1008 /// print the whole DAG, including children that appear multiple
 1009 /// times.
 1010 ///
 1011 void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
 1012
 1013 /// Print a SelectionDAG node and children up to
 1014 /// depth "depth." The given SelectionDAG allows target-specific
 1015 /// nodes to be printed in human-readable form. Unlike printr, this
 1016 /// will print children that appear multiple times wherever they are
 1017 /// used.
 1018 ///
 1019 void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
 1020 unsigned depth = 100) const;
 1021
 1022 /// Dump this node, for debugging.
 1023 void dump() const;
 1024
 1025 /// Dump (recursively) this node and its use-def subgraph.
 1026 void dumpr() const;
 1027
 1028 /// Dump this node, for debugging.
 1029 /// The given SelectionDAG allows target-specific nodes to be printed
 1030 /// in human-readable form.
 1031 void dump(const SelectionDAG *G) const;
 1032
 1033 /// Dump (recursively) this node and its use-def subgraph.
 1034 /// The given SelectionDAG allows target-specific nodes to be printed
 1035 /// in human-readable form.
 1036 void dumpr(const SelectionDAG *G) const;
 1037
 1038 /// printrFull to dbgs(). The given SelectionDAG allows
 1039 /// target-specific nodes to be printed in human-readable form.
 1040 /// Unlike dumpr, this will print the whole DAG, including children
 1041 /// that appear multiple times.
 1042 void dumprFull(const SelectionDAG *G = nullptr) const;
 1043
 1044 /// printrWithDepth to dbgs(). The given
 1045 /// SelectionDAG allows target-specific nodes to be printed in
 1046 /// human-readable form. Unlike dumpr, this will print children
 1047 /// that appear multiple times wherever they are used.
 1048 ///
 1049 void dumprWithDepth(const SelectionDAG *G = nullptr,
 1050 unsigned depth = 100) const;
 1051
 1052 /// Gather unique data for the node.
 1053 void Profile(FoldingSetNodeID &ID) const;
 1054
 1055 /// This method should only be used by the SDUse class.
 1056 void addUse(SDUse &U) { U.addToList(&UseList); }
1057
1058protected:
1059 static SDVTList getSDVTList(EVT VT) {
1060 SDVTList Ret = { getValueTypeList(VT), 1 };
1061 return Ret;
1062 }
1063
1064 /// Create an SDNode.
1065 ///
1066 /// SDNodes are created without any operands, and never own the operand
1067 /// storage. To add operands, see SelectionDAG::createOperands.
1068 SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
1069 : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
1070 IROrder(Order), debugLoc(std::move(dl)) {
1071 memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
1072 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor")(static_cast <bool> (debugLoc.hasTrivialDestructor() &&
"Expected trivial destructor") ? void (0) : __assert_fail ("debugLoc.hasTrivialDestructor() && \"Expected trivial destructor\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1072, __extension__
__PRETTY_FUNCTION__))
;
1073 assert(NumValues == VTs.NumVTs &&(static_cast <bool> (NumValues == VTs.NumVTs &&
"NumValues wasn't wide enough for its operands!") ? void (0)
: __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1074, __extension__
__PRETTY_FUNCTION__))
1074 "NumValues wasn't wide enough for its operands!")(static_cast <bool> (NumValues == VTs.NumVTs &&
"NumValues wasn't wide enough for its operands!") ? void (0)
: __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1074, __extension__
__PRETTY_FUNCTION__))
;
1075 }
1076
1077 /// Release the operands and set this node to have zero operands.
1078 void DropOperands();
1079};
1080
1081/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
1082/// into SDNode creation functions.
1083/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
1084/// from the original Instruction, and IROrder is the ordinal position of
1085/// the instruction.
1086/// When an SDNode is created after the DAG is being built, both DebugLoc and
1087/// the IROrder are propagated from the original SDNode.
1088/// So SDLoc class provides two constructors besides the default one, one to
1089/// be used by the DAGBuilder, the other to be used by others.
1090class SDLoc {
1091private:
1092 DebugLoc DL;
1093 int IROrder = 0;
1094
1095public:
1096 SDLoc() = default;
1097 SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
1098 SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
1099 SDLoc(const Instruction *I, int Order) : IROrder(Order) {
1100 assert(Order >= 0 && "bad IROrder")(static_cast <bool> (Order >= 0 && "bad IROrder"
) ? void (0) : __assert_fail ("Order >= 0 && \"bad IROrder\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1100, __extension__
__PRETTY_FUNCTION__))
;
1101 if (I)
1102 DL = I->getDebugLoc();
1103 }
1104
1105 unsigned getIROrder() const { return IROrder; }
1106 const DebugLoc &getDebugLoc() const { return DL; }
1107};
1108
1109// Define inline functions from the SDValue class.
1110
1111inline SDValue::SDValue(SDNode *node, unsigned resno)
1112 : Node(node), ResNo(resno) {
1113 // Explicitly check for !ResNo to avoid use-after-free, because there are
1114 // callers that use SDValue(N, 0) with a deleted N to indicate successful
1115 // combines.
1116 assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&(static_cast <bool> ((!Node || !ResNo || ResNo < Node
->getNumValues()) && "Invalid result number for the given node!"
) ? void (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1117, __extension__
__PRETTY_FUNCTION__))
1117 "Invalid result number for the given node!")(static_cast <bool> ((!Node || !ResNo || ResNo < Node
->getNumValues()) && "Invalid result number for the given node!"
) ? void (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1117, __extension__
__PRETTY_FUNCTION__))
;
1118 assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.")(static_cast <bool> (ResNo < -2U && "Cannot use result numbers reserved for DenseMaps."
) ? void (0) : __assert_fail ("ResNo < -2U && \"Cannot use result numbers reserved for DenseMaps.\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1118, __extension__
__PRETTY_FUNCTION__))
;
1119}
1120
 // The SDValue accessors below forward to the underlying SDNode. Each one
 // dereferences Node unconditionally, so calling them on an SDValue whose
 // Node is null is undefined behavior — callers must guarantee non-null.
 1121inline unsigned SDValue::getOpcode() const {
 1122 return Node->getOpcode();
 1123}
 1124
 1125inline EVT SDValue::getValueType() const {
 1126 return Node->getValueType(ResNo);
 1127}
 1128
 1129inline unsigned SDValue::getNumOperands() const {
 1130 return Node->getNumOperands();
 1131}
 1132
 1133inline const SDValue &SDValue::getOperand(unsigned i) const {
 1134 return Node->getOperand(i);
 1135}
 1136
 1137inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
 1138 return Node->getConstantOperandVal(i);
 1139}
 1140
 1141inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
 1142 return Node->getConstantOperandAPInt(i);
 1143}
 1144
 1145inline bool SDValue::isTargetOpcode() const {
 1146 return Node->isTargetOpcode();
 1147}
 1148
 1149inline bool SDValue::isTargetMemoryOpcode() const {
 1150 return Node->isTargetMemoryOpcode();
 1151}
1152
1153inline bool SDValue::isMachineOpcode() const {
1154 return Node->isMachineOpcode();
7
Called C++ object pointer is null
1155}
1156
 // More SDValue forwarders; like the accessors above, each dereferences Node
 // unconditionally and therefore requires a non-null SDValue.
 1157inline unsigned SDValue::getMachineOpcode() const {
 1158 return Node->getMachineOpcode();
 1159}
 1160
 1161inline bool SDValue::isUndef() const {
 1162 return Node->isUndef();
 1163}
 1164
 // Note: use_empty/hasOneUse are per-result queries (they pass ResNo), unlike
 // SDNode::use_empty/hasOneUse which consider all results.
 1165inline bool SDValue::use_empty() const {
 1166 return !Node->hasAnyUseOfValue(ResNo);
 1167}
 1168
 1169inline bool SDValue::hasOneUse() const {
 1170 return Node->hasNUsesOfValue(1, ResNo);
 1171}
 1172
 1173inline const DebugLoc &SDValue::getDebugLoc() const {
 1174 return Node->getDebugLoc();
 1175}
 1176
 1177inline void SDValue::dump() const {
 1178 return Node->dump();
 1179}
 1180
 1181inline void SDValue::dump(const SelectionDAG *G) const {
 1182 return Node->dump(G);
 1183}
 1184
 1185inline void SDValue::dumpr() const {
 1186 return Node->dumpr();
 1187}
 1188
 1189inline void SDValue::dumpr(const SelectionDAG *G) const {
 1190 return Node->dumpr(G);
 1191}
1192
 1193// Define inline functions from the SDUse class.
 1194
 // Re-target this use: unlink from the old node's use list (if any), record
 // the new value, then link onto the new node's use list.
 1195inline void SDUse::set(const SDValue &V) {
 1196 if (Val.getNode()) removeFromList();
 1197 Val = V;
 1198 if (V.getNode())
 1199 V->addUse(*this);
 1200}
 1201
 // First-time initialization: no unlink step, and V must be non-null (it is
 // dereferenced unconditionally to add the use).
 1202inline void SDUse::setInitial(const SDValue &V) {
 1203 Val = V;
 1204 V->addUse(*this);
 1205}
 1206
 // Like set(), but replaces only the node pointer; the existing result
 // number in Val is left as-is.
 1207inline void SDUse::setNode(SDNode *N) {
 1208 if (Val.getNode()) removeFromList();
 1209 Val.setNode(N);
 1210 if (N) N->addUse(*this);
 1211}
1212
 1213/// This class is used to form a handle around another node that
 1214/// is persistent and is updated across invocations of replaceAllUsesWith on its
 1215/// operand. This node should be directly created by end-users and not added to
 1216/// the AllNodes list.
 1217class HandleSDNode : public SDNode {
 // The single, inline-stored operand this handle tracks.
 1218 SDUse Op;
 1219
 1220public:
 1221 explicit HandleSDNode(SDValue X)
 1222 : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
 1223 // HandleSDNodes are never inserted into the DAG, so they won't be
 1224 // auto-numbered. Use ID 65535 as a sentinel.
 1225 PersistentId = 0xffff;
 1226
 1227 // Manually set up the operand list. This node type is special in that it's
 1228 // always stack allocated and SelectionDAG does not manage its operands.
 1229 // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
 1230 // be so special.
 1231 Op.setUser(this);
 1232 Op.setInitial(X);
 1233 NumOperands = 1;
 1234 OperandList = &Op;
 1235 }
 // Defined out of line (presumably to unlink Op from its use list — confirm
 // against the definition in SelectionDAG.cpp).
 1236 ~HandleSDNode();
 1237
 // The possibly-updated value the handle currently refers to.
 1238 const SDValue &getValue() const { return Op; }
 1239};
1240
 /// SDNode for ISD::ADDRSPACECAST: records the source and destination address
 /// spaces of the pointer cast.
 1241class AddrSpaceCastSDNode : public SDNode {
 1242private:
 1243 unsigned SrcAddrSpace;
 1244 unsigned DestAddrSpace;
 1245
 1246public:
 // Defined out of line.
 1247 AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
 1248 unsigned SrcAS, unsigned DestAS);
 1249
 1250 unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
 1251 unsigned getDestAddressSpace() const { return DestAddrSpace; }
 1252
 // Support for isa<>/dyn_cast<>.
 1253 static bool classof(const SDNode *N) {
 1254 return N->getOpcode() == ISD::ADDRSPACECAST;
 1255 }
 1256};
1257
 1258/// This is an abstract virtual class for memory operations.
 1259class MemSDNode : public SDNode {
 1260private:
 1261 // VT of in-memory value.
 1262 EVT MemoryVT;
 1263
 1264protected:
 1265 /// Memory reference information.
 1266 MachineMemOperand *MMO;
 1267
 1268public:
 // Defined out of line.
 1269 MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
 1270 EVT memvt, MachineMemOperand *MMO);
 1271
 // Most predicates below simply forward to the attached MachineMemOperand.
 1272 bool readMem() const { return MMO->isLoad(); }
 1273 bool writeMem() const { return MMO->isStore(); }
 1274
 1275 /// Returns alignment and volatility of the memory access
 1276 Align getOriginalAlign() const { return MMO->getBaseAlign(); }
 1277 Align getAlign() const { return MMO->getAlign(); }
 1278 // FIXME: Remove once transition to getAlign is over.
 1279 unsigned getAlignment() const { return MMO->getAlign().value(); }
 1280
 1281 /// Return the SubclassData value, without HasDebugValue. This contains an
 1282 /// encoding of the volatile flag, as well as bits used by subclasses. This
 1283 /// function should only be used to compute a FoldingSetNodeID value.
 1284 /// The HasDebugValue bit is masked out because CSE map needs to match
 1285 /// nodes with debug info with nodes without debug info. Same is about
 1286 /// isDivergent bit.
 1287 unsigned getRawSubclassData() const {
 1288 uint16_t Data;
 // Copy the raw bits into a local union, clear the CSE-irrelevant bits
 // through the bitfield view, then read the cleaned value back out.
 1289 union {
 1290 char RawSDNodeBits[sizeof(uint16_t)];
 1291 SDNodeBitfields SDNodeBits;
 1292 };
 1293 memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
 1294 SDNodeBits.HasDebugValue = 0;
 1295 SDNodeBits.IsDivergent = false;
 1296 memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
 1297 return Data;
 1298 }
 1299
 // These flags live in the node's MemSDNodeBits, not in the MMO.
 1300 bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
 1301 bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
 1302 bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
 1303 bool isInvariant() const { return MemSDNodeBits.IsInvariant; }
 1304
 1305 // Returns the offset from the location of the access.
 1306 int64_t getSrcValueOffset() const { return MMO->getOffset(); }
 1307
 1308 /// Returns the AA info that describes the dereference.
 1309 AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }
 1310
 1311 /// Returns the Ranges that describes the dereference.
 1312 const MDNode *getRanges() const { return MMO->getRanges(); }
 1313
 1314 /// Returns the synchronization scope ID for this memory operation.
 1315 SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }
 1316
 1317 /// Return the atomic ordering requirements for this memory operation. For
 1318 /// cmpxchg atomic operations, return the atomic ordering requirements when
 1319 /// store occurs.
 1320 AtomicOrdering getSuccessOrdering() const {
 1321 return MMO->getSuccessOrdering();
 1322 }
 1323
 1324 /// Return a single atomic ordering that is at least as strong as both the
 1325 /// success and failure orderings for an atomic operation. (For operations
 1326 /// other than cmpxchg, this is equivalent to getSuccessOrdering().)
 1327 AtomicOrdering getMergedOrdering() const { return MMO->getMergedOrdering(); }
 1328
 1329 /// Return true if the memory operation ordering is Unordered or higher.
 1330 bool isAtomic() const { return MMO->isAtomic(); }
 1331
 1332 /// Returns true if the memory operation doesn't imply any ordering
 1333 /// constraints on surrounding memory operations beyond the normal memory
 1334 /// aliasing rules.
 1335 bool isUnordered() const { return MMO->isUnordered(); }
 1336
 1337 /// Returns true if the memory operation is neither atomic or volatile.
 1338 bool isSimple() const { return !isAtomic() && !isVolatile(); }
 1339
 1340 /// Return the type of the in-memory value.
 1341 EVT getMemoryVT() const { return MemoryVT; }
 1342
 1343 /// Return a MachineMemOperand object describing the memory
 1344 /// reference performed by operation.
 1345 MachineMemOperand *getMemOperand() const { return MMO; }
 1346
 1347 const MachinePointerInfo &getPointerInfo() const {
 1348 return MMO->getPointerInfo();
 1349 }
 1350
 1351 /// Return the address space for the associated pointer
 1352 unsigned getAddressSpace() const {
 1353 return getPointerInfo().getAddrSpace();
 1354 }
 1355
 1356 /// Update this MemSDNode's MachineMemOperand information
 1357 /// to reflect the alignment of NewMMO, if it has a greater alignment.
 1358 /// This must only be used when the new alignment applies to all users of
 1359 /// this MachineMemOperand.
 1360 void refineAlignment(const MachineMemOperand *NewMMO) {
 1361 MMO->refineAlignment(NewMMO);
 1362 }
 1363
 // Operand 0 is always the chain.
 1364 const SDValue &getChain() const { return getOperand(0); }
 1365
 // The operand index of the base pointer depends on the opcode's operand
 // layout.
 1366 const SDValue &getBasePtr() const {
 1367 switch (getOpcode()) {
 1368 case ISD::STORE:
 1369 case ISD::VP_STORE:
 1370 case ISD::MSTORE:
 1371 case ISD::VP_SCATTER:
 1372 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
 1373 return getOperand(2);
 1374 case ISD::MGATHER:
 1375 case ISD::MSCATTER:
 1376 return getOperand(3);
 1377 default:
 1378 return getOperand(1);
 1379 }
 1380 }
 1381
 1382 // Methods to support isa and dyn_cast
 1383 static bool classof(const SDNode *N) {
 1384 // For some targets, we lower some target intrinsics to a MemIntrinsicNode
 1385 // with either an intrinsic or a target opcode.
 1386 switch (N->getOpcode()) {
 1387 case ISD::LOAD:
 1388 case ISD::STORE:
 1389 case ISD::PREFETCH:
 1390 case ISD::ATOMIC_CMP_SWAP:
 1391 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
 1392 case ISD::ATOMIC_SWAP:
 1393 case ISD::ATOMIC_LOAD_ADD:
 1394 case ISD::ATOMIC_LOAD_SUB:
 1395 case ISD::ATOMIC_LOAD_AND:
 1396 case ISD::ATOMIC_LOAD_CLR:
 1397 case ISD::ATOMIC_LOAD_OR:
 1398 case ISD::ATOMIC_LOAD_XOR:
 1399 case ISD::ATOMIC_LOAD_NAND:
 1400 case ISD::ATOMIC_LOAD_MIN:
 1401 case ISD::ATOMIC_LOAD_MAX:
 1402 case ISD::ATOMIC_LOAD_UMIN:
 1403 case ISD::ATOMIC_LOAD_UMAX:
 1404 case ISD::ATOMIC_LOAD_FADD:
 1405 case ISD::ATOMIC_LOAD_FSUB:
 1406 case ISD::ATOMIC_LOAD:
 1407 case ISD::ATOMIC_STORE:
 1408 case ISD::MLOAD:
 1409 case ISD::MSTORE:
 1410 case ISD::MGATHER:
 1411 case ISD::MSCATTER:
 1412 case ISD::VP_LOAD:
 1413 case ISD::VP_STORE:
 1414 case ISD::VP_GATHER:
 1415 case ISD::VP_SCATTER:
 1416 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
 1417 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
 1418 return true;
 1419 default:
 1420 return N->isMemIntrinsic() || N->isTargetMemoryOpcode();
 1421 }
 1422 }
 1423};
1424
1425/// This is an SDNode representing atomic operations.
1426class AtomicSDNode : public MemSDNode {
1427public:
1428 AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
1429 EVT MemVT, MachineMemOperand *MMO)
1430 : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
1431 assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) ||(static_cast <bool> (((Opc != ISD::ATOMIC_LOAD &&
Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && "then why are we using an AtomicSDNode?"
) ? void (0) : __assert_fail ("((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && \"then why are we using an AtomicSDNode?\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1432, __extension__
__PRETTY_FUNCTION__))
1432 MMO->isAtomic()) && "then why are we using an AtomicSDNode?")(static_cast <bool> (((Opc != ISD::ATOMIC_LOAD &&
Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && "then why are we using an AtomicSDNode?"
) ? void (0) : __assert_fail ("((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && \"then why are we using an AtomicSDNode?\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1432, __extension__
__PRETTY_FUNCTION__))
;
1433 }
1434
1435 const SDValue &getBasePtr() const { return getOperand(1); }
1436 const SDValue &getVal() const { return getOperand(2); }
1437
1438 /// Returns true if this SDNode represents cmpxchg atomic operation, false
1439 /// otherwise.
1440 bool isCompareAndSwap() const {
1441 unsigned Op = getOpcode();
1442 return Op == ISD::ATOMIC_CMP_SWAP ||
1443 Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
1444 }
1445
1446 /// For cmpxchg atomic operations, return the atomic ordering requirements
1447 /// when store does not occur.
1448 AtomicOrdering getFailureOrdering() const {
1449 assert(isCompareAndSwap() && "Must be cmpxchg operation")(static_cast <bool> (isCompareAndSwap() && "Must be cmpxchg operation"
) ? void (0) : __assert_fail ("isCompareAndSwap() && \"Must be cmpxchg operation\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1449, __extension__
__PRETTY_FUNCTION__))
;
1450 return MMO->getFailureOrdering();
1451 }
1452
1453 // Methods to support isa and dyn_cast
1454 static bool classof(const SDNode *N) {
1455 return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1456 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1457 N->getOpcode() == ISD::ATOMIC_SWAP ||
1458 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1459 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1460 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1461 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1462 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1463 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1464 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1465 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1466 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1467 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1468 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1469 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1470 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1471 N->getOpcode() == ISD::ATOMIC_LOAD ||
1472 N->getOpcode() == ISD::ATOMIC_STORE;
1473 }
1474};
1475
1476/// This SDNode is used for target intrinsics that touch
1477/// memory and need an associated MachineMemOperand. Its opcode may be
1478/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
1479/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
1480class MemIntrinsicSDNode : public MemSDNode {
1481public:
1482 MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
1483 SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
1484 : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
1485 SDNodeBits.IsMemIntrinsic = true;
1486 }
1487
1488 // Methods to support isa and dyn_cast
1489 static bool classof(const SDNode *N) {
1490 // We lower some target intrinsics to their target opcode
1491 // early a node with a target opcode can be of this class
1492 return N->isMemIntrinsic() ||
1493 N->getOpcode() == ISD::PREFETCH ||
1494 N->isTargetMemoryOpcode();
1495 }
1496};
1497
1498/// This SDNode is used to implement the code generator
1499/// support for the llvm IR shufflevector instruction. It combines elements
1500/// from two input vectors into a new input vector, with the selection and
1501/// ordering of elements determined by an array of integers, referred to as
1502/// the shuffle mask. For input vectors of width N, mask indices of 0..N-1
1503/// refer to elements from the LHS input, and indices from N to 2N-1 the RHS.
1504/// An index of -1 is treated as undef, such that the code generator may put
1505/// any value in the corresponding element of the result.
1506class ShuffleVectorSDNode : public SDNode {
1507 // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
1508 // is freed when the SelectionDAG object is destroyed.
1509 const int *Mask;
1510
1511protected:
1512 friend class SelectionDAG;
1513
1514 ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
1515 : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}
1516
1517public:
1518 ArrayRef<int> getMask() const {
1519 EVT VT = getValueType(0);
1520 return makeArrayRef(Mask, VT.getVectorNumElements());
1521 }
1522
1523 int getMaskElt(unsigned Idx) const {
1524 assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!")(static_cast <bool> (Idx < getValueType(0).getVectorNumElements
() && "Idx out of range!") ? void (0) : __assert_fail
("Idx < getValueType(0).getVectorNumElements() && \"Idx out of range!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1524, __extension__
__PRETTY_FUNCTION__))
;
1525 return Mask[Idx];
1526 }
1527
1528 bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
1529
1530 int getSplatIndex() const {
1531 assert(isSplat() && "Cannot get splat index for non-splat!")(static_cast <bool> (isSplat() && "Cannot get splat index for non-splat!"
) ? void (0) : __assert_fail ("isSplat() && \"Cannot get splat index for non-splat!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1531, __extension__
__PRETTY_FUNCTION__))
;
1532 EVT VT = getValueType(0);
1533 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1534 if (Mask[i] >= 0)
1535 return Mask[i];
1536
1537 // We can choose any index value here and be correct because all elements
1538 // are undefined. Return 0 for better potential for callers to simplify.
1539 return 0;
1540 }
1541
1542 static bool isSplatMask(const int *Mask, EVT VT);
1543
1544 /// Change values in a shuffle permute mask assuming
1545 /// the two vector operands have swapped position.
1546 static void commuteMask(MutableArrayRef<int> Mask) {
1547 unsigned NumElems = Mask.size();
1548 for (unsigned i = 0; i != NumElems; ++i) {
1549 int idx = Mask[i];
1550 if (idx < 0)
1551 continue;
1552 else if (idx < (int)NumElems)
1553 Mask[i] = idx + NumElems;
1554 else
1555 Mask[i] = idx - NumElems;
1556 }
1557 }
1558
1559 static bool classof(const SDNode *N) {
1560 return N->getOpcode() == ISD::VECTOR_SHUFFLE;
1561 }
1562};
1563
1564class ConstantSDNode : public SDNode {
1565 friend class SelectionDAG;
1566
1567 const ConstantInt *Value;
1568
1569 ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT)
1570 : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(),
1571 getSDVTList(VT)),
1572 Value(val) {
1573 ConstantSDNodeBits.IsOpaque = isOpaque;
1574 }
1575
1576public:
1577 const ConstantInt *getConstantIntValue() const { return Value; }
1578 const APInt &getAPIntValue() const { return Value->getValue(); }
1579 uint64_t getZExtValue() const { return Value->getZExtValue(); }
1580 int64_t getSExtValue() const { return Value->getSExtValue(); }
1581 uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX(18446744073709551615UL)) {
1582 return Value->getLimitedValue(Limit);
1583 }
1584 MaybeAlign getMaybeAlignValue() const { return Value->getMaybeAlignValue(); }
1585 Align getAlignValue() const { return Value->getAlignValue(); }
1586
1587 bool isOne() const { return Value->isOne(); }
1588 bool isZero() const { return Value->isZero(); }
1589 // NOTE: This is soft-deprecated. Please use `isZero()` instead.
1590 bool isNullValue() const { return isZero(); }
1591 bool isAllOnes() const { return Value->isMinusOne(); }
1592 // NOTE: This is soft-deprecated. Please use `isAllOnes()` instead.
1593 bool isAllOnesValue() const { return isAllOnes(); }
1594 bool isMaxSignedValue() const { return Value->isMaxValue(true); }
1595 bool isMinSignedValue() const { return Value->isMinValue(true); }
1596
1597 bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }
1598
1599 static bool classof(const SDNode *N) {
1600 return N->getOpcode() == ISD::Constant ||
1601 N->getOpcode() == ISD::TargetConstant;
1602 }
1603};
1604
1605uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
1606 return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
1607}
1608
1609const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const {
1610 return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
1611}
1612
1613class ConstantFPSDNode : public SDNode {
1614 friend class SelectionDAG;
1615
1616 const ConstantFP *Value;
1617
1618 ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT)
1619 : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0,
1620 DebugLoc(), getSDVTList(VT)),
1621 Value(val) {}
1622
1623public:
1624 const APFloat& getValueAPF() const { return Value->getValueAPF(); }
1625 const ConstantFP *getConstantFPValue() const { return Value; }
1626
1627 /// Return true if the value is positive or negative zero.
1628 bool isZero() const { return Value->isZero(); }
1629
1630 /// Return true if the value is a NaN.
1631 bool isNaN() const { return Value->isNaN(); }
1632
1633 /// Return true if the value is an infinity
1634 bool isInfinity() const { return Value->isInfinity(); }
1635
1636 /// Return true if the value is negative.
1637 bool isNegative() const { return Value->isNegative(); }
1638
1639 /// We don't rely on operator== working on double values, as
1640 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
1641 /// As such, this method can be used to do an exact bit-for-bit comparison of
1642 /// two floating point values.
1643
1644 /// We leave the version with the double argument here because it's just so
1645 /// convenient to write "2.0" and the like. Without this function we'd
1646 /// have to duplicate its logic everywhere it's called.
1647 bool isExactlyValue(double V) const {
1648 return Value->getValueAPF().isExactlyValue(V);
1649 }
1650 bool isExactlyValue(const APFloat& V) const;
1651
1652 static bool isValueValidForType(EVT VT, const APFloat& Val);
1653
1654 static bool classof(const SDNode *N) {
1655 return N->getOpcode() == ISD::ConstantFP ||
1656 N->getOpcode() == ISD::TargetConstantFP;
1657 }
1658};
1659
1660/// Returns true if \p V is a constant integer zero.
1661bool isNullConstant(SDValue V);
1662
1663/// Returns true if \p V is an FP constant with a value of positive zero.
1664bool isNullFPConstant(SDValue V);
1665
1666/// Returns true if \p V is an integer constant with all bits set.
1667bool isAllOnesConstant(SDValue V);
1668
1669/// Returns true if \p V is a constant integer one.
1670bool isOneConstant(SDValue V);
1671
1672/// Returns true if \p V is a constant min signed integer value.
1673bool isMinSignedConstant(SDValue V);
1674
1675/// Return the non-bitcasted source operand of \p V if it exists.
1676/// If \p V is not a bitcasted value, it is returned as-is.
1677SDValue peekThroughBitcasts(SDValue V);
1678
1679/// Return the non-bitcasted and one-use source operand of \p V if it exists.
1680/// If \p V is not a bitcasted one-use value, it is returned as-is.
1681SDValue peekThroughOneUseBitcasts(SDValue V);
1682
1683/// Return the non-extracted vector source operand of \p V if it exists.
1684/// If \p V is not an extracted subvector, it is returned as-is.
1685SDValue peekThroughExtractSubvectors(SDValue V);
1686
1687/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
1688/// constant is canonicalized to be operand 1.
1689bool isBitwiseNot(SDValue V, bool AllowUndefs = false);
1690
1691/// Returns the SDNode if it is a constant splat BuildVector or constant int.
1692ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false,
1693 bool AllowTruncation = false);
1694
1695/// Returns the SDNode if it is a demanded constant splat BuildVector or
1696/// constant int.
1697ConstantSDNode *isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
1698 bool AllowUndefs = false,
1699 bool AllowTruncation = false);
1700
1701/// Returns the SDNode if it is a constant splat BuildVector or constant float.
1702ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false);
1703
1704/// Returns the SDNode if it is a demanded constant splat BuildVector or
1705/// constant float.
1706ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, const APInt &DemandedElts,
1707 bool AllowUndefs = false);
1708
1709/// Return true if the value is a constant 0 integer or a splatted vector of
1710/// a constant 0 integer (with no undefs by default).
1711/// Build vector implicit truncation is not an issue for null values.
1712bool isNullOrNullSplat(SDValue V, bool AllowUndefs = false);
1713
1714/// Return true if the value is a constant 1 integer or a splatted vector of a
1715/// constant 1 integer (with no undefs).
1716/// Does not permit build vector implicit truncation.
1717bool isOneOrOneSplat(SDValue V, bool AllowUndefs = false);
1718
1719/// Return true if the value is a constant -1 integer or a splatted vector of a
1720/// constant -1 integer (with no undefs).
1721/// Does not permit build vector implicit truncation.
1722bool isAllOnesOrAllOnesSplat(SDValue V, bool AllowUndefs = false);
1723
1724/// Return true if \p V is either a integer or FP constant.
1725inline bool isIntOrFPConstant(SDValue V) {
1726 return isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V);
1727}
1728
1729class GlobalAddressSDNode : public SDNode {
1730 friend class SelectionDAG;
1731
1732 const GlobalValue *TheGlobal;
1733 int64_t Offset;
1734 unsigned TargetFlags;
1735
1736 GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
1737 const GlobalValue *GA, EVT VT, int64_t o,
1738 unsigned TF);
1739
1740public:
1741 const GlobalValue *getGlobal() const { return TheGlobal; }
1742 int64_t getOffset() const { return Offset; }
1743 unsigned getTargetFlags() const { return TargetFlags; }
1744 // Return the address space this GlobalAddress belongs to.
1745 unsigned getAddressSpace() const;
1746
1747 static bool classof(const SDNode *N) {
1748 return N->getOpcode() == ISD::GlobalAddress ||
1749 N->getOpcode() == ISD::TargetGlobalAddress ||
1750 N->getOpcode() == ISD::GlobalTLSAddress ||
1751 N->getOpcode() == ISD::TargetGlobalTLSAddress;
1752 }
1753};
1754
1755class FrameIndexSDNode : public SDNode {
1756 friend class SelectionDAG;
1757
1758 int FI;
1759
1760 FrameIndexSDNode(int fi, EVT VT, bool isTarg)
1761 : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
1762 0, DebugLoc(), getSDVTList(VT)), FI(fi) {
1763 }
1764
1765public:
1766 int getIndex() const { return FI; }
1767
1768 static bool classof(const SDNode *N) {
1769 return N->getOpcode() == ISD::FrameIndex ||
1770 N->getOpcode() == ISD::TargetFrameIndex;
1771 }
1772};
1773
1774/// This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate
1775/// the offet and size that are started/ended in the underlying FrameIndex.
1776class LifetimeSDNode : public SDNode {
1777 friend class SelectionDAG;
1778 int64_t Size;
1779 int64_t Offset; // -1 if offset is unknown.
1780
1781 LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
1782 SDVTList VTs, int64_t Size, int64_t Offset)
1783 : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {}
1784public:
1785 int64_t getFrameIndex() const {
1786 return cast<FrameIndexSDNode>(getOperand(1))->getIndex();
1787 }
1788
1789 bool hasOffset() const { return Offset >= 0; }
1790 int64_t getOffset() const {
1791 assert(hasOffset() && "offset is unknown")(static_cast <bool> (hasOffset() && "offset is unknown"
) ? void (0) : __assert_fail ("hasOffset() && \"offset is unknown\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1791, __extension__
__PRETTY_FUNCTION__))
;
1792 return Offset;
1793 }
1794 int64_t getSize() const {
1795 assert(hasOffset() && "offset is unknown")(static_cast <bool> (hasOffset() && "offset is unknown"
) ? void (0) : __assert_fail ("hasOffset() && \"offset is unknown\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1795, __extension__
__PRETTY_FUNCTION__))
;
1796 return Size;
1797 }
1798
1799 // Methods to support isa and dyn_cast
1800 static bool classof(const SDNode *N) {
1801 return N->getOpcode() == ISD::LIFETIME_START ||
1802 N->getOpcode() == ISD::LIFETIME_END;
1803 }
1804};
1805
1806/// This SDNode is used for PSEUDO_PROBE values, which are the function guid and
1807/// the index of the basic block being probed. A pseudo probe serves as a place
1808/// holder and will be removed at the end of compilation. It does not have any
1809/// operand because we do not want the instruction selection to deal with any.
1810class PseudoProbeSDNode : public SDNode {
1811 friend class SelectionDAG;
1812 uint64_t Guid;
1813 uint64_t Index;
1814 uint32_t Attributes;
1815
1816 PseudoProbeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &Dl,
1817 SDVTList VTs, uint64_t Guid, uint64_t Index, uint32_t Attr)
1818 : SDNode(Opcode, Order, Dl, VTs), Guid(Guid), Index(Index),
1819 Attributes(Attr) {}
1820
1821public:
1822 uint64_t getGuid() const { return Guid; }
1823 uint64_t getIndex() const { return Index; }
1824 uint32_t getAttributes() const { return Attributes; }
1825
1826 // Methods to support isa and dyn_cast
1827 static bool classof(const SDNode *N) {
1828 return N->getOpcode() == ISD::PSEUDO_PROBE;
1829 }
1830};
1831
1832class JumpTableSDNode : public SDNode {
1833 friend class SelectionDAG;
1834
1835 int JTI;
1836 unsigned TargetFlags;
1837
1838 JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned TF)
1839 : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
1840 0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
1841 }
1842
1843public:
1844 int getIndex() const { return JTI; }
1845 unsigned getTargetFlags() const { return TargetFlags; }
1846
1847 static bool classof(const SDNode *N) {
1848 return N->getOpcode() == ISD::JumpTable ||
1849 N->getOpcode() == ISD::TargetJumpTable;
1850 }
1851};
1852
1853class ConstantPoolSDNode : public SDNode {
1854 friend class SelectionDAG;
1855
1856 union {
1857 const Constant *ConstVal;
1858 MachineConstantPoolValue *MachineCPVal;
1859 } Val;
1860 int Offset; // It's a MachineConstantPoolValue if top bit is set.
1861 Align Alignment; // Minimum alignment requirement of CP.
1862 unsigned TargetFlags;
1863
1864 ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
1865 Align Alignment, unsigned TF)
1866 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1867 DebugLoc(), getSDVTList(VT)),
1868 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1869 assert(Offset >= 0 && "Offset is too large")(static_cast <bool> (Offset >= 0 && "Offset is too large"
) ? void (0) : __assert_fail ("Offset >= 0 && \"Offset is too large\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1869, __extension__
__PRETTY_FUNCTION__))
;
1870 Val.ConstVal = c;
1871 }
1872
1873 ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v, EVT VT, int o,
1874 Align Alignment, unsigned TF)
1875 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1876 DebugLoc(), getSDVTList(VT)),
1877 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1878 assert(Offset >= 0 && "Offset is too large")(static_cast <bool> (Offset >= 0 && "Offset is too large"
) ? void (0) : __assert_fail ("Offset >= 0 && \"Offset is too large\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1878, __extension__
__PRETTY_FUNCTION__))
;
1879 Val.MachineCPVal = v;
1880 Offset |= 1 << (sizeof(unsigned)*CHAR_BIT8-1);
1881 }
1882
1883public:
1884 bool isMachineConstantPoolEntry() const {
1885 return Offset < 0;
1886 }
1887
1888 const Constant *getConstVal() const {
1889 assert(!isMachineConstantPoolEntry() && "Wrong constantpool type")(static_cast <bool> (!isMachineConstantPoolEntry() &&
"Wrong constantpool type") ? void (0) : __assert_fail ("!isMachineConstantPoolEntry() && \"Wrong constantpool type\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1889, __extension__
__PRETTY_FUNCTION__))
;
1890 return Val.ConstVal;
1891 }
1892
1893 MachineConstantPoolValue *getMachineCPVal() const {
1894 assert(isMachineConstantPoolEntry() && "Wrong constantpool type")(static_cast <bool> (isMachineConstantPoolEntry() &&
"Wrong constantpool type") ? void (0) : __assert_fail ("isMachineConstantPoolEntry() && \"Wrong constantpool type\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1894, __extension__
__PRETTY_FUNCTION__))
;
1895 return Val.MachineCPVal;
1896 }
1897
1898 int getOffset() const {
1899 return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT8-1));
1900 }
1901
1902 // Return the alignment of this constant pool object, which is either 0 (for
1903 // default alignment) or the desired value.
1904 Align getAlign() const { return Alignment; }
1905 unsigned getTargetFlags() const { return TargetFlags; }
1906
1907 Type *getType() const;
1908
1909 static bool classof(const SDNode *N) {
1910 return N->getOpcode() == ISD::ConstantPool ||
1911 N->getOpcode() == ISD::TargetConstantPool;
1912 }
1913};
1914
1915/// Completely target-dependent object reference.
1916class TargetIndexSDNode : public SDNode {
1917 friend class SelectionDAG;
1918
1919 unsigned TargetFlags;
1920 int Index;
1921 int64_t Offset;
1922
1923public:
1924 TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned TF)
1925 : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
1926 TargetFlags(TF), Index(Idx), Offset(Ofs) {}
1927
1928 unsigned getTargetFlags() const { return TargetFlags; }
1929 int getIndex() const { return Index; }
1930 int64_t getOffset() const { return Offset; }
1931
1932 static bool classof(const SDNode *N) {
1933 return N->getOpcode() == ISD::TargetIndex;
1934 }
1935};
1936
1937class BasicBlockSDNode : public SDNode {
1938 friend class SelectionDAG;
1939
1940 MachineBasicBlock *MBB;
1941
1942 /// Debug info is meaningful and potentially useful here, but we create
1943 /// blocks out of order when they're jumped to, which makes it a bit
1944 /// harder. Let's see if we need it first.
1945 explicit BasicBlockSDNode(MachineBasicBlock *mbb)
1946 : SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb)
1947 {}
1948
1949public:
1950 MachineBasicBlock *getBasicBlock() const { return MBB; }
1951
1952 static bool classof(const SDNode *N) {
1953 return N->getOpcode() == ISD::BasicBlock;
1954 }
1955};
1956
1957/// A "pseudo-class" with methods for operating on BUILD_VECTORs.
1958class BuildVectorSDNode : public SDNode {
1959public:
1960 // These are constructed as SDNodes and then cast to BuildVectorSDNodes.
1961 explicit BuildVectorSDNode() = delete;
1962
1963 /// Check if this is a constant splat, and if so, find the
1964 /// smallest element size that splats the vector. If MinSplatBits is
1965 /// nonzero, the element size must be at least that large. Note that the
1966 /// splat element may be the entire vector (i.e., a one element vector).
1967 /// Returns the splat element value in SplatValue. Any undefined bits in
1968 /// that value are zero, and the corresponding bits in the SplatUndef mask
1969 /// are set. The SplatBitSize value is set to the splat element size in
1970 /// bits. HasAnyUndefs is set to true if any bits in the vector are
1971 /// undefined. isBigEndian describes the endianness of the target.
1972 bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
1973 unsigned &SplatBitSize, bool &HasAnyUndefs,
1974 unsigned MinSplatBits = 0,
1975 bool isBigEndian = false) const;
1976
1977 /// Returns the demanded splatted value or a null value if this is not a
1978 /// splat.
1979 ///
1980 /// The DemandedElts mask indicates the elements that must be in the splat.
1981 /// If passed a non-null UndefElements bitvector, it will resize it to match
1982 /// the vector width and set the bits where elements are undef.
1983 SDValue getSplatValue(const APInt &DemandedElts,
1984 BitVector *UndefElements = nullptr) const;
1985
1986 /// Returns the splatted value or a null value if this is not a splat.
1987 ///
1988 /// If passed a non-null UndefElements bitvector, it will resize it to match
1989 /// the vector width and set the bits where elements are undef.
1990 SDValue getSplatValue(BitVector *UndefElements = nullptr) const;
1991
1992 /// Find the shortest repeating sequence of values in the build vector.
1993 ///
1994 /// e.g. { u, X, u, X, u, u, X, u } -> { X }
1995 /// { X, Y, u, Y, u, u, X, u } -> { X, Y }
1996 ///
1997 /// Currently this must be a power-of-2 build vector.
1998 /// The DemandedElts mask indicates the elements that must be present,
1999 /// undemanded elements in Sequence may be null (SDValue()). If passed a
2000 /// non-null UndefElements bitvector, it will resize it to match the original
2001 /// vector width and set the bits where elements are undef. If result is
2002 /// false, Sequence will be empty.
2003 bool getRepeatedSequence(const APInt &DemandedElts,
2004 SmallVectorImpl<SDValue> &Sequence,
2005 BitVector *UndefElements = nullptr) const;
2006
2007 /// Find the shortest repeating sequence of values in the build vector.
2008 ///
2009 /// e.g. { u, X, u, X, u, u, X, u } -> { X }
2010 /// { X, Y, u, Y, u, u, X, u } -> { X, Y }
2011 ///
2012 /// Currently this must be a power-of-2 build vector.
2013 /// If passed a non-null UndefElements bitvector, it will resize it to match
2014 /// the original vector width and set the bits where elements are undef.
2015 /// If result is false, Sequence will be empty.
2016 bool getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
2017 BitVector *UndefElements = nullptr) const;
2018
2019 /// Returns the demanded splatted constant or null if this is not a constant
2020 /// splat.
2021 ///
2022 /// The DemandedElts mask indicates the elements that must be in the splat.
2023 /// If passed a non-null UndefElements bitvector, it will resize it to match
2024 /// the vector width and set the bits where elements are undef.
2025 ConstantSDNode *
2026 getConstantSplatNode(const APInt &DemandedElts,
2027 BitVector *UndefElements = nullptr) const;
2028
2029 /// Returns the splatted constant or null if this is not a constant
2030 /// splat.
2031 ///
2032 /// If passed a non-null UndefElements bitvector, it will resize it to match
2033 /// the vector width and set the bits where elements are undef.
2034 ConstantSDNode *
2035 getConstantSplatNode(BitVector *UndefElements = nullptr) const;
2036
2037 /// Returns the demanded splatted constant FP or null if this is not a
2038 /// constant FP splat.
2039 ///
2040 /// The DemandedElts mask indicates the elements that must be in the splat.
2041 /// If passed a non-null UndefElements bitvector, it will resize it to match
2042 /// the vector width and set the bits where elements are undef.
2043 ConstantFPSDNode *
2044 getConstantFPSplatNode(const APInt &DemandedElts,
2045 BitVector *UndefElements = nullptr) const;
2046
2047 /// Returns the splatted constant FP or null if this is not a constant
2048 /// FP splat.
2049 ///
2050 /// If passed a non-null UndefElements bitvector, it will resize it to match
2051 /// the vector width and set the bits where elements are undef.
2052 ConstantFPSDNode *
2053 getConstantFPSplatNode(BitVector *UndefElements = nullptr) const;
2054
2055 /// If this is a constant FP splat and the splatted constant FP is an
2056 /// exact power or 2, return the log base 2 integer value. Otherwise,
2057 /// return -1.
2058 ///
2059 /// The BitWidth specifies the necessary bit precision.
2060 int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
2061 uint32_t BitWidth) const;
2062
2063 /// Extract the raw bit data from a build vector of Undef, Constant or
2064 /// ConstantFP node elements. Each raw bit element will be \p
2065 /// DstEltSizeInBits wide, undef elements are treated as zero, and entirely
2066 /// undefined elements are flagged in \p UndefElements.
2067 bool getConstantRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits,
2068 SmallVectorImpl<APInt> &RawBitElements,
2069 BitVector &UndefElements) const;
2070
2071 bool isConstant() const;
2072
2073 /// Recast bit data \p SrcBitElements to \p DstEltSizeInBits wide elements.
2074 /// Undef elements are treated as zero, and entirely undefined elements are
2075 /// flagged in \p DstUndefElements.
2076 static void recastRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits,
2077 SmallVectorImpl<APInt> &DstBitElements,
2078 ArrayRef<APInt> SrcBitElements,
2079 BitVector &DstUndefElements,
2080 const BitVector &SrcUndefElements);
2081
2082 static bool classof(const SDNode *N) {
2083 return N->getOpcode() == ISD::BUILD_VECTOR;
2084 }
2085};
2086
2087/// An SDNode that holds an arbitrary LLVM IR Value. This is
2088/// used when the SelectionDAG needs to make a simple reference to something
2089/// in the LLVM IR representation.
2090///
2091class SrcValueSDNode : public SDNode {
2092 friend class SelectionDAG;
2093
2094 const Value *V;
2095
2096 /// Create a SrcValue for a general value.
2097 explicit SrcValueSDNode(const Value *v)
2098 : SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}
2099
2100public:
2101 /// Return the contained Value.
2102 const Value *getValue() const { return V; }
2103
2104 static bool classof(const SDNode *N) {
2105 return N->getOpcode() == ISD::SRCVALUE;
2106 }
2107};
2108
2109class MDNodeSDNode : public SDNode {
2110 friend class SelectionDAG;
2111
2112 const MDNode *MD;
2113
2114 explicit MDNodeSDNode(const MDNode *md)
2115 : SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md)
2116 {}
2117
2118public:
2119 const MDNode *getMD() const { return MD; }
2120
2121 static bool classof(const SDNode *N) {
2122 return N->getOpcode() == ISD::MDNODE_SDNODE;
2123 }
2124};
2125
2126class RegisterSDNode : public SDNode {
2127 friend class SelectionDAG;
2128
2129 Register Reg;
2130
2131 RegisterSDNode(Register reg, EVT VT)
2132 : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {}
2133
2134public:
2135 Register getReg() const { return Reg; }
2136
2137 static bool classof(const SDNode *N) {
2138 return N->getOpcode() == ISD::Register;
2139 }
2140};
2141
2142class RegisterMaskSDNode : public SDNode {
2143 friend class SelectionDAG;
2144
2145 // The memory for RegMask is not owned by the node.
2146 const uint32_t *RegMask;
2147
2148 RegisterMaskSDNode(const uint32_t *mask)
2149 : SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)),
2150 RegMask(mask) {}
2151
2152public:
2153 const uint32_t *getRegMask() const { return RegMask; }
2154
2155 static bool classof(const SDNode *N) {
2156 return N->getOpcode() == ISD::RegisterMask;
2157 }
2158};
2159
2160class BlockAddressSDNode : public SDNode {
2161 friend class SelectionDAG;
2162
2163 const BlockAddress *BA;
2164 int64_t Offset;
2165 unsigned TargetFlags;
2166
2167 BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
2168 int64_t o, unsigned Flags)
2169 : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
2170 BA(ba), Offset(o), TargetFlags(Flags) {}
2171
2172public:
2173 const BlockAddress *getBlockAddress() const { return BA; }
2174 int64_t getOffset() const { return Offset; }
2175 unsigned getTargetFlags() const { return TargetFlags; }
2176
2177 static bool classof(const SDNode *N) {
2178 return N->getOpcode() == ISD::BlockAddress ||
2179 N->getOpcode() == ISD::TargetBlockAddress;
2180 }
2181};
2182
2183class LabelSDNode : public SDNode {
2184 friend class SelectionDAG;
2185
2186 MCSymbol *Label;
2187
2188 LabelSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, MCSymbol *L)
2189 : SDNode(Opcode, Order, dl, getSDVTList(MVT::Other)), Label(L) {
2190 assert(LabelSDNode::classof(this) && "not a label opcode")(static_cast <bool> (LabelSDNode::classof(this) &&
"not a label opcode") ? void (0) : __assert_fail ("LabelSDNode::classof(this) && \"not a label opcode\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2190, __extension__
__PRETTY_FUNCTION__))
;
2191 }
2192
2193public:
2194 MCSymbol *getLabel() const { return Label; }
2195
2196 static bool classof(const SDNode *N) {
2197 return N->getOpcode() == ISD::EH_LABEL ||
2198 N->getOpcode() == ISD::ANNOTATION_LABEL;
2199 }
2200};
2201
2202class ExternalSymbolSDNode : public SDNode {
2203 friend class SelectionDAG;
2204
2205 const char *Symbol;
2206 unsigned TargetFlags;
2207
2208 ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned TF, EVT VT)
2209 : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol, 0,
2210 DebugLoc(), getSDVTList(VT)),
2211 Symbol(Sym), TargetFlags(TF) {}
2212
2213public:
2214 const char *getSymbol() const { return Symbol; }
2215 unsigned getTargetFlags() const { return TargetFlags; }
2216
2217 static bool classof(const SDNode *N) {
2218 return N->getOpcode() == ISD::ExternalSymbol ||
2219 N->getOpcode() == ISD::TargetExternalSymbol;
2220 }
2221};
2222
2223class MCSymbolSDNode : public SDNode {
2224 friend class SelectionDAG;
2225
2226 MCSymbol *Symbol;
2227
2228 MCSymbolSDNode(MCSymbol *Symbol, EVT VT)
2229 : SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {}
2230
2231public:
2232 MCSymbol *getMCSymbol() const { return Symbol; }
2233
2234 static bool classof(const SDNode *N) {
2235 return N->getOpcode() == ISD::MCSymbol;
2236 }
2237};
2238
2239class CondCodeSDNode : public SDNode {
2240 friend class SelectionDAG;
2241
2242 ISD::CondCode Condition;
2243
2244 explicit CondCodeSDNode(ISD::CondCode Cond)
2245 : SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2246 Condition(Cond) {}
2247
2248public:
2249 ISD::CondCode get() const { return Condition; }
2250
2251 static bool classof(const SDNode *N) {
2252 return N->getOpcode() == ISD::CONDCODE;
2253 }
2254};
2255
2256/// This class is used to represent EVT's, which are used
2257/// to parameterize some operations.
2258class VTSDNode : public SDNode {
2259 friend class SelectionDAG;
2260
2261 EVT ValueType;
2262
2263 explicit VTSDNode(EVT VT)
2264 : SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2265 ValueType(VT) {}
2266
2267public:
2268 EVT getVT() const { return ValueType; }
2269
2270 static bool classof(const SDNode *N) {
2271 return N->getOpcode() == ISD::VALUETYPE;
2272 }
2273};
2274
2275/// Base class for LoadSDNode and StoreSDNode
2276class LSBaseSDNode : public MemSDNode {
2277public:
2278 LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
2279 SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
2280 MachineMemOperand *MMO)
2281 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2282 LSBaseSDNodeBits.AddressingMode = AM;
2283 assert(getAddressingMode() == AM && "Value truncated")(static_cast <bool> (getAddressingMode() == AM &&
"Value truncated") ? void (0) : __assert_fail ("getAddressingMode() == AM && \"Value truncated\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2283, __extension__
__PRETTY_FUNCTION__))
;
2284 }
2285
2286 const SDValue &getOffset() const {
2287 return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
2288 }
2289
2290 /// Return the addressing mode for this load or store:
2291 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2292 ISD::MemIndexedMode getAddressingMode() const {
2293 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2294 }
2295
2296 /// Return true if this is a pre/post inc/dec load/store.
2297 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2298
2299 /// Return true if this is NOT a pre/post inc/dec load/store.
2300 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2301
2302 static bool classof(const SDNode *N) {
2303 return N->getOpcode() == ISD::LOAD ||
2304 N->getOpcode() == ISD::STORE;
2305 }
2306};
2307
2308/// This class is used to represent ISD::LOAD nodes.
2309class LoadSDNode : public LSBaseSDNode {
2310 friend class SelectionDAG;
2311
2312 LoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2313 ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
2314 MachineMemOperand *MMO)
2315 : LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) {
2316 LoadSDNodeBits.ExtTy = ETy;
2317 assert(readMem() && "Load MachineMemOperand is not a load!")(static_cast <bool> (readMem() && "Load MachineMemOperand is not a load!"
) ? void (0) : __assert_fail ("readMem() && \"Load MachineMemOperand is not a load!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2317, __extension__
__PRETTY_FUNCTION__))
;
2318 assert(!writeMem() && "Load MachineMemOperand is a store!")(static_cast <bool> (!writeMem() && "Load MachineMemOperand is a store!"
) ? void (0) : __assert_fail ("!writeMem() && \"Load MachineMemOperand is a store!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2318, __extension__
__PRETTY_FUNCTION__))
;
2319 }
2320
2321public:
2322 /// Return whether this is a plain node,
2323 /// or one of the varieties of value-extending loads.
2324 ISD::LoadExtType getExtensionType() const {
2325 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2326 }
2327
2328 const SDValue &getBasePtr() const { return getOperand(1); }
2329 const SDValue &getOffset() const { return getOperand(2); }
2330
2331 static bool classof(const SDNode *N) {
2332 return N->getOpcode() == ISD::LOAD;
2333 }
2334};
2335
2336/// This class is used to represent ISD::STORE nodes.
2337class StoreSDNode : public LSBaseSDNode {
2338 friend class SelectionDAG;
2339
2340 StoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2341 ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
2342 MachineMemOperand *MMO)
2343 : LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) {
2344 StoreSDNodeBits.IsTruncating = isTrunc;
2345 assert(!readMem() && "Store MachineMemOperand is a load!")(static_cast <bool> (!readMem() && "Store MachineMemOperand is a load!"
) ? void (0) : __assert_fail ("!readMem() && \"Store MachineMemOperand is a load!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2345, __extension__
__PRETTY_FUNCTION__))
;
2346 assert(writeMem() && "Store MachineMemOperand is not a store!")(static_cast <bool> (writeMem() && "Store MachineMemOperand is not a store!"
) ? void (0) : __assert_fail ("writeMem() && \"Store MachineMemOperand is not a store!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2346, __extension__
__PRETTY_FUNCTION__))
;
2347 }
2348
2349public:
2350 /// Return true if the op does a truncation before store.
2351 /// For integers this is the same as doing a TRUNCATE and storing the result.
2352 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2353 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2354 void setTruncatingStore(bool Truncating) {
2355 StoreSDNodeBits.IsTruncating = Truncating;
2356 }
2357
2358 const SDValue &getValue() const { return getOperand(1); }
2359 const SDValue &getBasePtr() const { return getOperand(2); }
2360 const SDValue &getOffset() const { return getOperand(3); }
2361
2362 static bool classof(const SDNode *N) {
2363 return N->getOpcode() == ISD::STORE;
2364 }
2365};
2366
2367/// This base class is used to represent VP_LOAD, VP_STORE,
2368/// EXPERIMENTAL_VP_STRIDED_LOAD and EXPERIMENTAL_VP_STRIDED_STORE nodes
2369class VPBaseLoadStoreSDNode : public MemSDNode {
2370public:
2371 friend class SelectionDAG;
2372
2373 VPBaseLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order,
2374 const DebugLoc &DL, SDVTList VTs,
2375 ISD::MemIndexedMode AM, EVT MemVT,
2376 MachineMemOperand *MMO)
2377 : MemSDNode(NodeTy, Order, DL, VTs, MemVT, MMO) {
2378 LSBaseSDNodeBits.AddressingMode = AM;
2379 assert(getAddressingMode() == AM && "Value truncated")(static_cast <bool> (getAddressingMode() == AM &&
"Value truncated") ? void (0) : __assert_fail ("getAddressingMode() == AM && \"Value truncated\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2379, __extension__
__PRETTY_FUNCTION__))
;
2380 }
2381
2382 // VPStridedStoreSDNode (Chain, Data, Ptr, Offset, Stride, Mask, EVL)
2383 // VPStoreSDNode (Chain, Data, Ptr, Offset, Mask, EVL)
2384 // VPStridedLoadSDNode (Chain, Ptr, Offset, Stride, Mask, EVL)
2385 // VPLoadSDNode (Chain, Ptr, Offset, Mask, EVL)
2386 // Mask is a vector of i1 elements;
2387 // the type of EVL is TLI.getVPExplicitVectorLengthTy().
2388 const SDValue &getOffset() const {
2389 return getOperand((getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_LOAD ||
2390 getOpcode() == ISD::VP_LOAD)
2391 ? 2
2392 : 3);
2393 }
2394 const SDValue &getBasePtr() const {
2395 return getOperand((getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_LOAD ||
2396 getOpcode() == ISD::VP_LOAD)
2397 ? 1
2398 : 2);
2399 }
2400 const SDValue &getMask() const {
2401 switch (getOpcode()) {
2402 default:
2403 llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2403)
;
2404 case ISD::VP_LOAD:
2405 return getOperand(3);
2406 case ISD::VP_STORE:
2407 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
2408 return getOperand(4);
2409 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
2410 return getOperand(5);
2411 }
2412 }
2413 const SDValue &getVectorLength() const {
2414 switch (getOpcode()) {
2415 default:
2416 llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2416)
;
2417 case ISD::VP_LOAD:
2418 return getOperand(4);
2419 case ISD::VP_STORE:
2420 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
2421 return getOperand(5);
2422 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
2423 return getOperand(6);
2424 }
2425 }
2426
2427 /// Return the addressing mode for this load or store:
2428 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2429 ISD::MemIndexedMode getAddressingMode() const {
2430 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2431 }
2432
2433 /// Return true if this is a pre/post inc/dec load/store.
2434 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2435
2436 /// Return true if this is NOT a pre/post inc/dec load/store.
2437 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2438
2439 static bool classof(const SDNode *N) {
2440 return N->getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_LOAD ||
2441 N->getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_STORE ||
2442 N->getOpcode() == ISD::VP_LOAD || N->getOpcode() == ISD::VP_STORE;
2443 }
2444};
2445
2446/// This class is used to represent a VP_LOAD node
2447class VPLoadSDNode : public VPBaseLoadStoreSDNode {
2448public:
2449 friend class SelectionDAG;
2450
2451 VPLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2452 ISD::MemIndexedMode AM, ISD::LoadExtType ETy, bool isExpanding,
2453 EVT MemVT, MachineMemOperand *MMO)
2454 : VPBaseLoadStoreSDNode(ISD::VP_LOAD, Order, dl, VTs, AM, MemVT, MMO) {
2455 LoadSDNodeBits.ExtTy = ETy;
2456 LoadSDNodeBits.IsExpanding = isExpanding;
2457 }
2458
2459 ISD::LoadExtType getExtensionType() const {
2460 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2461 }
2462
2463 const SDValue &getBasePtr() const { return getOperand(1); }
2464 const SDValue &getOffset() const { return getOperand(2); }
2465 const SDValue &getMask() const { return getOperand(3); }
2466 const SDValue &getVectorLength() const { return getOperand(4); }
2467
2468 static bool classof(const SDNode *N) {
2469 return N->getOpcode() == ISD::VP_LOAD;
2470 }
2471 bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
2472};
2473
2474/// This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
2475class VPStridedLoadSDNode : public VPBaseLoadStoreSDNode {
2476public:
2477 friend class SelectionDAG;
2478
2479 VPStridedLoadSDNode(unsigned Order, const DebugLoc &DL, SDVTList VTs,
2480 ISD::MemIndexedMode AM, ISD::LoadExtType ETy,
2481 bool IsExpanding, EVT MemVT, MachineMemOperand *MMO)
2482 : VPBaseLoadStoreSDNode(ISD::EXPERIMENTAL_VP_STRIDED_LOAD, Order, DL, VTs,
2483 AM, MemVT, MMO) {
2484 LoadSDNodeBits.ExtTy = ETy;
2485 LoadSDNodeBits.IsExpanding = IsExpanding;
2486 }
2487
2488 ISD::LoadExtType getExtensionType() const {
2489 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2490 }
2491
2492 const SDValue &getBasePtr() const { return getOperand(1); }
2493 const SDValue &getOffset() const { return getOperand(2); }
2494 const SDValue &getStride() const { return getOperand(3); }
2495 const SDValue &getMask() const { return getOperand(4); }
2496 const SDValue &getVectorLength() const { return getOperand(5); }
2497
2498 static bool classof(const SDNode *N) {
2499 return N->getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_LOAD;
2500 }
2501 bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
2502};
2503
2504/// This class is used to represent a VP_STORE node
2505class VPStoreSDNode : public VPBaseLoadStoreSDNode {
2506public:
2507 friend class SelectionDAG;
2508
2509 VPStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2510 ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing,
2511 EVT MemVT, MachineMemOperand *MMO)
2512 : VPBaseLoadStoreSDNode(ISD::VP_STORE, Order, dl, VTs, AM, MemVT, MMO) {
2513 StoreSDNodeBits.IsTruncating = isTrunc;
2514 StoreSDNodeBits.IsCompressing = isCompressing;
2515 }
2516
2517 /// Return true if this is a truncating store.
2518 /// For integers this is the same as doing a TRUNCATE and storing the result.
2519 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2520 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2521
2522 /// Returns true if the op does a compression to the vector before storing.
2523 /// The node contiguously stores the active elements (integers or floats)
2524 /// in src (those with their respective bit set in writemask k) to unaligned
2525 /// memory at base_addr.
2526 bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
2527
2528 const SDValue &getValue() const { return getOperand(1); }
2529 const SDValue &getBasePtr() const { return getOperand(2); }
2530 const SDValue &getOffset() const { return getOperand(3); }
2531 const SDValue &getMask() const { return getOperand(4); }
2532 const SDValue &getVectorLength() const { return getOperand(5); }
2533
2534 static bool classof(const SDNode *N) {
2535 return N->getOpcode() == ISD::VP_STORE;
2536 }
2537};
2538
2539/// This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
2540class VPStridedStoreSDNode : public VPBaseLoadStoreSDNode {
2541public:
2542 friend class SelectionDAG;
2543
2544 VPStridedStoreSDNode(unsigned Order, const DebugLoc &DL, SDVTList VTs,
2545 ISD::MemIndexedMode AM, bool IsTrunc, bool IsCompressing,
2546 EVT MemVT, MachineMemOperand *MMO)
2547 : VPBaseLoadStoreSDNode(ISD::EXPERIMENTAL_VP_STRIDED_STORE, Order, DL,
2548 VTs, AM, MemVT, MMO) {
2549 StoreSDNodeBits.IsTruncating = IsTrunc;
2550 StoreSDNodeBits.IsCompressing = IsCompressing;
2551 }
2552
2553 /// Return true if this is a truncating store.
2554 /// For integers this is the same as doing a TRUNCATE and storing the result.
2555 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2556 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2557
2558 /// Returns true if the op does a compression to the vector before storing.
2559 /// The node contiguously stores the active elements (integers or floats)
2560 /// in src (those with their respective bit set in writemask k) to unaligned
2561 /// memory at base_addr.
2562 bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
2563
2564 const SDValue &getValue() const { return getOperand(1); }
2565 const SDValue &getBasePtr() const { return getOperand(2); }
2566 const SDValue &getOffset() const { return getOperand(3); }
2567 const SDValue &getStride() const { return getOperand(4); }
2568 const SDValue &getMask() const { return getOperand(5); }
2569 const SDValue &getVectorLength() const { return getOperand(6); }
2570
2571 static bool classof(const SDNode *N) {
2572 return N->getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_STORE;
2573 }
2574};
2575
2576/// This base class is used to represent MLOAD and MSTORE nodes
2577class MaskedLoadStoreSDNode : public MemSDNode {
2578public:
2579 friend class SelectionDAG;
2580
2581 MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order,
2582 const DebugLoc &dl, SDVTList VTs,
2583 ISD::MemIndexedMode AM, EVT MemVT,
2584 MachineMemOperand *MMO)
2585 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2586 LSBaseSDNodeBits.AddressingMode = AM;
2587 assert(getAddressingMode() == AM && "Value truncated")(static_cast <bool> (getAddressingMode() == AM &&
"Value truncated") ? void (0) : __assert_fail ("getAddressingMode() == AM && \"Value truncated\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2587, __extension__
__PRETTY_FUNCTION__))
;
2588 }
2589
2590 // MaskedLoadSDNode (Chain, ptr, offset, mask, passthru)
2591 // MaskedStoreSDNode (Chain, data, ptr, offset, mask)
2592 // Mask is a vector of i1 elements
2593 const SDValue &getOffset() const {
2594 return getOperand(getOpcode() == ISD::MLOAD ? 2 : 3);
2595 }
2596 const SDValue &getMask() const {
2597 return getOperand(getOpcode() == ISD::MLOAD ? 3 : 4);
2598 }
2599
2600 /// Return the addressing mode for this load or store:
2601 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2602 ISD::MemIndexedMode getAddressingMode() const {
2603 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2604 }
2605
2606 /// Return true if this is a pre/post inc/dec load/store.
2607 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2608
2609 /// Return true if this is NOT a pre/post inc/dec load/store.
2610 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2611
2612 static bool classof(const SDNode *N) {
2613 return N->getOpcode() == ISD::MLOAD ||
2614 N->getOpcode() == ISD::MSTORE;
2615 }
2616};
2617
2618/// This class is used to represent an MLOAD node
2619class MaskedLoadSDNode : public MaskedLoadStoreSDNode {
2620public:
2621 friend class SelectionDAG;
2622
2623 MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2624 ISD::MemIndexedMode AM, ISD::LoadExtType ETy,
2625 bool IsExpanding, EVT MemVT, MachineMemOperand *MMO)
2626 : MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, VTs, AM, MemVT, MMO) {
2627 LoadSDNodeBits.ExtTy = ETy;
2628 LoadSDNodeBits.IsExpanding = IsExpanding;
2629 }
2630
2631 ISD::LoadExtType getExtensionType() const {
2632 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2633 }
2634
2635 const SDValue &getBasePtr() const { return getOperand(1); }
2636 const SDValue &getOffset() const { return getOperand(2); }
2637 const SDValue &getMask() const { return getOperand(3); }
2638 const SDValue &getPassThru() const { return getOperand(4); }
2639
2640 static bool classof(const SDNode *N) {
2641 return N->getOpcode() == ISD::MLOAD;
2642 }
2643
2644 bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
2645};
2646
2647/// This class is used to represent an MSTORE node
2648class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
2649public:
2650 friend class SelectionDAG;
2651
2652 MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2653 ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing,
2654 EVT MemVT, MachineMemOperand *MMO)
2655 : MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, AM, MemVT, MMO) {
2656 StoreSDNodeBits.IsTruncating = isTrunc;
2657 StoreSDNodeBits.IsCompressing = isCompressing;
2658 }
2659
2660 /// Return true if the op does a truncation before store.
2661 /// For integers this is the same as doing a TRUNCATE and storing the result.
2662 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2663 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2664
2665 /// Returns true if the op does a compression to the vector before storing.
2666 /// The node contiguously stores the active elements (integers or floats)
2667 /// in src (those with their respective bit set in writemask k) to unaligned
2668 /// memory at base_addr.
2669 bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
2670
2671 const SDValue &getValue() const { return getOperand(1); }
2672 const SDValue &getBasePtr() const { return getOperand(2); }
2673 const SDValue &getOffset() const { return getOperand(3); }
2674 const SDValue &getMask() const { return getOperand(4); }
2675
2676 static bool classof(const SDNode *N) {
2677 return N->getOpcode() == ISD::MSTORE;
2678 }
2679};
2680
2681/// This is a base class used to represent
2682/// VP_GATHER and VP_SCATTER nodes
2683///
2684class VPGatherScatterSDNode : public MemSDNode {
2685public:
2686 friend class SelectionDAG;
2687
2688 VPGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
2689 const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2690 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2691 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2692 LSBaseSDNodeBits.AddressingMode = IndexType;
2693 assert(getIndexType() == IndexType && "Value truncated")(static_cast <bool> (getIndexType() == IndexType &&
"Value truncated") ? void (0) : __assert_fail ("getIndexType() == IndexType && \"Value truncated\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2693, __extension__
__PRETTY_FUNCTION__))
;
2694 }
2695
2696 /// How is Index applied to BasePtr when computing addresses.
2697 ISD::MemIndexType getIndexType() const {
2698 return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
2699 }
2700 bool isIndexScaled() const {
2701 return (getIndexType() == ISD::SIGNED_SCALED) ||
2702 (getIndexType() == ISD::UNSIGNED_SCALED);
2703 }
2704 bool isIndexSigned() const {
2705 return (getIndexType() == ISD::SIGNED_SCALED) ||
2706 (getIndexType() == ISD::SIGNED_UNSCALED);
2707 }
2708
2709 // In the both nodes address is Op1, mask is Op2:
2710 // VPGatherSDNode (Chain, base, index, scale, mask, vlen)
2711 // VPScatterSDNode (Chain, value, base, index, scale, mask, vlen)
2712 // Mask is a vector of i1 elements
2713 const SDValue &getBasePtr() const {
2714 return getOperand((getOpcode() == ISD::VP_GATHER) ? 1 : 2);
2715 }
2716 const SDValue &getIndex() const {
2717 return getOperand((getOpcode() == ISD::VP_GATHER) ? 2 : 3);
2718 }
2719 const SDValue &getScale() const {
2720 return getOperand((getOpcode() == ISD::VP_GATHER) ? 3 : 4);
2721 }
2722 const SDValue &getMask() const {
2723 return getOperand((getOpcode() == ISD::VP_GATHER) ? 4 : 5);
2724 }
2725 const SDValue &getVectorLength() const {
2726 return getOperand((getOpcode() == ISD::VP_GATHER) ? 5 : 6);
2727 }
2728
2729 static bool classof(const SDNode *N) {
2730 return N->getOpcode() == ISD::VP_GATHER ||
2731 N->getOpcode() == ISD::VP_SCATTER;
2732 }
2733};
2734
2735/// This class is used to represent an VP_GATHER node
2736///
2737class VPGatherSDNode : public VPGatherScatterSDNode {
2738public:
2739 friend class SelectionDAG;
2740
2741 VPGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2742 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2743 : VPGatherScatterSDNode(ISD::VP_GATHER, Order, dl, VTs, MemVT, MMO,
2744 IndexType) {}
2745
2746 static bool classof(const SDNode *N) {
2747 return N->getOpcode() == ISD::VP_GATHER;
2748 }
2749};
2750
2751/// This class is used to represent an VP_SCATTER node
2752///
2753class VPScatterSDNode : public VPGatherScatterSDNode {
2754public:
2755 friend class SelectionDAG;
2756
2757 VPScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2758 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2759 : VPGatherScatterSDNode(ISD::VP_SCATTER, Order, dl, VTs, MemVT, MMO,
2760 IndexType) {}
2761
2762 const SDValue &getValue() const { return getOperand(1); }
2763
2764 static bool classof(const SDNode *N) {
2765 return N->getOpcode() == ISD::VP_SCATTER;
2766 }
2767};
2768
2769/// This is a base class used to represent
2770/// MGATHER and MSCATTER nodes
2771///
2772class MaskedGatherScatterSDNode : public MemSDNode {
2773public:
2774 friend class SelectionDAG;
2775
2776 MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
2777 const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2778 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2779 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2780 LSBaseSDNodeBits.AddressingMode = IndexType;
2781 assert(getIndexType() == IndexType && "Value truncated")(static_cast <bool> (getIndexType() == IndexType &&
"Value truncated") ? void (0) : __assert_fail ("getIndexType() == IndexType && \"Value truncated\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2781, __extension__
__PRETTY_FUNCTION__))
;
2782 }
2783
2784 /// How is Index applied to BasePtr when computing addresses.
2785 ISD::MemIndexType getIndexType() const {
2786 return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
2787 }
2788 void setIndexType(ISD::MemIndexType IndexType) {
2789 LSBaseSDNodeBits.AddressingMode = IndexType;
2790 }
2791 bool isIndexScaled() const {
2792 return (getIndexType() == ISD::SIGNED_SCALED) ||
2793 (getIndexType() == ISD::UNSIGNED_SCALED);
2794 }
2795 bool isIndexSigned() const {
2796 return (getIndexType() == ISD::SIGNED_SCALED) ||
2797 (getIndexType() == ISD::SIGNED_UNSCALED);
2798 }
2799
2800 // In the both nodes address is Op1, mask is Op2:
2801 // MaskedGatherSDNode (Chain, passthru, mask, base, index, scale)
2802 // MaskedScatterSDNode (Chain, value, mask, base, index, scale)
2803 // Mask is a vector of i1 elements
2804 const SDValue &getBasePtr() const { return getOperand(3); }
2805 const SDValue &getIndex() const { return getOperand(4); }
2806 const SDValue &getMask() const { return getOperand(2); }
2807 const SDValue &getScale() const { return getOperand(5); }
2808
2809 static bool classof(const SDNode *N) {
2810 return N->getOpcode() == ISD::MGATHER ||
2811 N->getOpcode() == ISD::MSCATTER;
2812 }
2813};
2814
2815/// This class is used to represent an MGATHER node
2816///
2817class MaskedGatherSDNode : public MaskedGatherScatterSDNode {
2818public:
2819 friend class SelectionDAG;
2820
2821 MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2822 EVT MemVT, MachineMemOperand *MMO,
2823 ISD::MemIndexType IndexType, ISD::LoadExtType ETy)
2824 : MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO,
2825 IndexType) {
2826 LoadSDNodeBits.ExtTy = ETy;
2827 }
2828
2829 const SDValue &getPassThru() const { return getOperand(1); }
2830
2831 ISD::LoadExtType getExtensionType() const {
2832 return ISD::LoadExtType(LoadSDNodeBits.ExtTy);
2833 }
2834
2835 static bool classof(const SDNode *N) {
2836 return N->getOpcode() == ISD::MGATHER;
2837 }
2838};
2839
2840/// This class is used to represent an MSCATTER node
2841///
2842class MaskedScatterSDNode : public MaskedGatherScatterSDNode {
2843public:
2844 friend class SelectionDAG;
2845
2846 MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2847 EVT MemVT, MachineMemOperand *MMO,
2848 ISD::MemIndexType IndexType, bool IsTrunc)
2849 : MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO,
2850 IndexType) {
2851 StoreSDNodeBits.IsTruncating = IsTrunc;
2852 }
2853
2854 /// Return true if the op does a truncation before store.
2855 /// For integers this is the same as doing a TRUNCATE and storing the result.
2856 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2857 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2858
2859 const SDValue &getValue() const { return getOperand(1); }
2860
2861 static bool classof(const SDNode *N) {
2862 return N->getOpcode() == ISD::MSCATTER;
2863 }
2864};
2865
2866/// An SDNode that represents everything that will be needed
2867/// to construct a MachineInstr. These nodes are created during the
2868/// instruction selection proper phase.
2869///
2870/// Note that the only supported way to set the `memoperands` is by calling the
2871/// `SelectionDAG::setNodeMemRefs` function as the memory management happens
2872/// inside the DAG rather than in the node.
2873class MachineSDNode : public SDNode {
2874private:
2875 friend class SelectionDAG;
2876
2877 MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs)
2878 : SDNode(Opc, Order, DL, VTs) {}
2879
2880 // We use a pointer union between a single `MachineMemOperand` pointer and
2881 // a pointer to an array of `MachineMemOperand` pointers. This is null when
2882 // the number of these is zero, the single pointer variant used when the
2883 // number is one, and the array is used for larger numbers.
2884 //
2885 // The array is allocated via the `SelectionDAG`'s allocator and so will
2886 // always live until the DAG is cleaned up and doesn't require ownership here.
2887 //
2888 // We can't use something simpler like `TinyPtrVector` here because `SDNode`
2889 // subclasses aren't managed in a conforming C++ manner. See the comments on
2890 // `SelectionDAG::MorphNodeTo` which details what all goes on, but the
2891 // constraint here is that these don't manage memory with their constructor or
2892 // destructor and can be initialized to a good state even if they start off
2893 // uninitialized.
2894 PointerUnion<MachineMemOperand *, MachineMemOperand **> MemRefs = {};
2895
2896 // Note that this could be folded into the above `MemRefs` member if doing so
2897 // is advantageous at some point. We don't need to store this in most cases.
2898 // However, at the moment this doesn't appear to make the allocation any
2899 // smaller and makes the code somewhat simpler to read.
2900 int NumMemRefs = 0;
2901
2902public:
2903 using mmo_iterator = ArrayRef<MachineMemOperand *>::const_iterator;
2904
2905 ArrayRef<MachineMemOperand *> memoperands() const {
2906 // Special case the common cases.
2907 if (NumMemRefs == 0)
2908 return {};
2909 if (NumMemRefs == 1)
2910 return makeArrayRef(MemRefs.getAddrOfPtr1(), 1);
2911
2912 // Otherwise we have an actual array.
2913 return makeArrayRef(MemRefs.get<MachineMemOperand **>(), NumMemRefs);
2914 }
2915 mmo_iterator memoperands_begin() const { return memoperands().begin(); }
2916 mmo_iterator memoperands_end() const { return memoperands().end(); }
2917 bool memoperands_empty() const { return memoperands().empty(); }
2918
2919 /// Clear out the memory reference descriptor list.
2920 void clearMemRefs() {
2921 MemRefs = nullptr;
2922 NumMemRefs = 0;
2923 }
2924
2925 static bool classof(const SDNode *N) {
2926 return N->isMachineOpcode();
2927 }
2928};
2929
2930/// An SDNode that records if a register contains a value that is guaranteed to
2931/// be aligned accordingly.
2932class AssertAlignSDNode : public SDNode {
2933 Align Alignment;
2934
2935public:
2936 AssertAlignSDNode(unsigned Order, const DebugLoc &DL, EVT VT, Align A)
2937 : SDNode(ISD::AssertAlign, Order, DL, getSDVTList(VT)), Alignment(A) {}
2938
2939 Align getAlign() const { return Alignment; }
2940
2941 static bool classof(const SDNode *N) {
2942 return N->getOpcode() == ISD::AssertAlign;
2943 }
2944};
2945
2946class SDNodeIterator {
2947 const SDNode *Node;
2948 unsigned Operand;
2949
2950 SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
2951
2952public:
2953 using iterator_category = std::forward_iterator_tag;
2954 using value_type = SDNode;
2955 using difference_type = std::ptrdiff_t;
2956 using pointer = value_type *;
2957 using reference = value_type &;
2958
2959 bool operator==(const SDNodeIterator& x) const {
2960 return Operand == x.Operand;
2961 }
2962 bool operator!=(const SDNodeIterator& x) const { return !operator==(x); }
2963
2964 pointer operator*() const {
2965 return Node->getOperand(Operand).getNode();
2966 }
2967 pointer operator->() const { return operator*(); }
2968
2969 SDNodeIterator& operator++() { // Preincrement
2970 ++Operand;
2971 return *this;
2972 }
2973 SDNodeIterator operator++(int) { // Postincrement
2974 SDNodeIterator tmp = *this; ++*this; return tmp;
2975 }
2976 size_t operator-(SDNodeIterator Other) const {
2977 assert(Node == Other.Node &&(static_cast <bool> (Node == Other.Node && "Cannot compare iterators of two different nodes!"
) ? void (0) : __assert_fail ("Node == Other.Node && \"Cannot compare iterators of two different nodes!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2978, __extension__
__PRETTY_FUNCTION__))
2978 "Cannot compare iterators of two different nodes!")(static_cast <bool> (Node == Other.Node && "Cannot compare iterators of two different nodes!"
) ? void (0) : __assert_fail ("Node == Other.Node && \"Cannot compare iterators of two different nodes!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2978, __extension__
__PRETTY_FUNCTION__))
;
2979 return Operand - Other.Operand;
2980 }
2981
2982 static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); }
2983 static SDNodeIterator end (const SDNode *N) {
2984 return SDNodeIterator(N, N->getNumOperands());
2985 }
2986
2987 unsigned getOperand() const { return Operand; }
2988 const SDNode *getNode() const { return Node; }
2989};
2990
2991template <> struct GraphTraits<SDNode*> {
2992 using NodeRef = SDNode *;
2993 using ChildIteratorType = SDNodeIterator;
2994
2995 static NodeRef getEntryNode(SDNode *N) { return N; }
2996
2997 static ChildIteratorType child_begin(NodeRef N) {
2998 return SDNodeIterator::begin(N);
2999 }
3000
3001 static ChildIteratorType child_end(NodeRef N) {
3002 return SDNodeIterator::end(N);
3003 }
3004};
3005
3006/// A representation of the largest SDNode, for use in sizeof().
3007///
3008/// This needs to be a union because the largest node differs on 32 bit systems
3009/// with 4 and 8 byte pointer alignment, respectively.
3010using LargestSDNode = AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
3011 BlockAddressSDNode,
3012 GlobalAddressSDNode,
3013 PseudoProbeSDNode>;
3014
3015/// The SDNode class with the greatest alignment requirement.
3016using MostAlignedSDNode = GlobalAddressSDNode;
3017
3018namespace ISD {
3019
3020 /// Returns true if the specified node is a non-extending and unindexed load.
3021 inline bool isNormalLoad(const SDNode *N) {
3022 const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N);
3023 return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD &&
3024 Ld->getAddressingMode() == ISD::UNINDEXED;
3025 }
3026
3027 /// Returns true if the specified node is a non-extending load.
3028 inline bool isNON_EXTLoad(const SDNode *N) {
3029 return isa<LoadSDNode>(N) &&
3030 cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
3031 }
3032
3033 /// Returns true if the specified node is a EXTLOAD.
3034 inline bool isEXTLoad(const SDNode *N) {
3035 return isa<LoadSDNode>(N) &&
3036 cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
3037 }
3038
3039 /// Returns true if the specified node is a SEXTLOAD.
3040 inline bool isSEXTLoad(const SDNode *N) {
3041 return isa<LoadSDNode>(N) &&
3042 cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
3043 }
3044
3045 /// Returns true if the specified node is a ZEXTLOAD.
3046 inline bool isZEXTLoad(const SDNode *N) {
3047 return isa<LoadSDNode>(N) &&
3048 cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
3049 }
3050
3051 /// Returns true if the specified node is an unindexed load.
3052 inline bool isUNINDEXEDLoad(const SDNode *N) {
3053 return isa<LoadSDNode>(N) &&
3054 cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
3055 }
3056
3057 /// Returns true if the specified node is a non-truncating
3058 /// and unindexed store.
3059 inline bool isNormalStore(const SDNode *N) {
3060 const StoreSDNode *St = dyn_cast<StoreSDNode>(N);
3061 return St && !St->isTruncatingStore() &&
3062 St->getAddressingMode() == ISD::UNINDEXED;
3063 }
3064
3065 /// Returns true if the specified node is an unindexed store.
3066 inline bool isUNINDEXEDStore(const SDNode *N) {
3067 return isa<StoreSDNode>(N) &&
3068 cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
3069 }
3070
3071 /// Attempt to match a unary predicate against a scalar/splat constant or
3072 /// every element of a constant BUILD_VECTOR.
3073 /// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
3074 bool matchUnaryPredicate(SDValue Op,
3075 std::function<bool(ConstantSDNode *)> Match,
3076 bool AllowUndefs = false);
3077
3078 /// Attempt to match a binary predicate against a pair of scalar/splat
3079 /// constants or every element of a pair of constant BUILD_VECTORs.
3080 /// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
3081 /// If AllowTypeMismatch is true then RetType + ArgTypes don't need to match.
3082 bool matchBinaryPredicate(
3083 SDValue LHS, SDValue RHS,
3084 std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
3085 bool AllowUndefs = false, bool AllowTypeMismatch = false);
3086
3087 /// Returns true if the specified value is the overflow result from one
3088 /// of the overflow intrinsic nodes.
3089 inline bool isOverflowIntrOpRes(SDValue Op) {
3090 unsigned Opc = Op.getOpcode();
3091 return (Op.getResNo() == 1 &&
3092 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
3093 Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO));
3094 }
3095
3096} // end namespace ISD
3097
3098} // end namespace llvm
3099
3100#endif // LLVM_CODEGEN_SELECTIONDAGNODES_H