Bug Summary

File: llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
Warning: line 299, column 9
Assigned value is garbage or undefined

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name BPFISelDAGToDAG.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116111435+edbb8a843c13/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/BPF -I /build/llvm-toolchain-snapshot-14~++20220116111435+edbb8a843c13/llvm/lib/Target/BPF -I include -I /build/llvm-toolchain-snapshot-14~++20220116111435+edbb8a843c13/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem 
/usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116111435+edbb8a843c13/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116111435+edbb8a843c13/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116111435+edbb8a843c13/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116111435+edbb8a843c13/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116111435+edbb8a843c13/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116111435+edbb8a843c13/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116111435+edbb8a843c13/= -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-01-16-123900-34731-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220116111435+edbb8a843c13/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp

/build/llvm-toolchain-snapshot-14~++20220116111435+edbb8a843c13/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp

1//===-- BPFISelDAGToDAG.cpp - A dag to dag inst selector for BPF ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines a DAG pattern matching instruction selector for BPF,
10// converting from a legalized dag to a BPF dag.
11//
12//===----------------------------------------------------------------------===//
13
14#include "BPF.h"
15#include "BPFRegisterInfo.h"
16#include "BPFSubtarget.h"
17#include "BPFTargetMachine.h"
18#include "llvm/CodeGen/FunctionLoweringInfo.h"
19#include "llvm/CodeGen/MachineConstantPool.h"
20#include "llvm/CodeGen/MachineFrameInfo.h"
21#include "llvm/CodeGen/MachineFunction.h"
22#include "llvm/CodeGen/MachineInstrBuilder.h"
23#include "llvm/CodeGen/MachineRegisterInfo.h"
24#include "llvm/CodeGen/SelectionDAGISel.h"
25#include "llvm/IR/Constants.h"
26#include "llvm/IR/IntrinsicInst.h"
27#include "llvm/IR/IntrinsicsBPF.h"
28#include "llvm/Support/Debug.h"
29#include "llvm/Support/Endian.h"
30#include "llvm/Support/ErrorHandling.h"
31#include "llvm/Support/raw_ostream.h"
32#include "llvm/Target/TargetMachine.h"
33
34using namespace llvm;
35
36#define DEBUG_TYPE"bpf-isel" "bpf-isel"
37
38// Instruction Selector Implementation
39namespace {
40
41class BPFDAGToDAGISel : public SelectionDAGISel {
42
43 /// Subtarget - Keep a pointer to the BPFSubtarget around so that we can
44 /// make the right decision when generating code for different subtargets.
45 const BPFSubtarget *Subtarget;
46
47public:
48 explicit BPFDAGToDAGISel(BPFTargetMachine &TM)
49 : SelectionDAGISel(TM), Subtarget(nullptr) {}
50
51 StringRef getPassName() const override {
52 return "BPF DAG->DAG Pattern Instruction Selection";
53 }
54
55 bool runOnMachineFunction(MachineFunction &MF) override {
56 // Reset the subtarget each time through.
57 Subtarget = &MF.getSubtarget<BPFSubtarget>();
58 return SelectionDAGISel::runOnMachineFunction(MF);
59 }
60
61 void PreprocessISelDAG() override;
62
63 bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintCode,
64 std::vector<SDValue> &OutOps) override;
65
66
67private:
68// Include the pieces autogenerated from the target description.
69#include "BPFGenDAGISel.inc"
70
71 void Select(SDNode *N) override;
72
73 // Complex Pattern for address selection.
74 bool SelectAddr(SDValue Addr, SDValue &Base, SDValue &Offset);
75 bool SelectFIAddr(SDValue Addr, SDValue &Base, SDValue &Offset);
76
77 // Node preprocessing cases
78 void PreprocessLoad(SDNode *Node, SelectionDAG::allnodes_iterator &I);
79 void PreprocessCopyToReg(SDNode *Node);
80 void PreprocessTrunc(SDNode *Node, SelectionDAG::allnodes_iterator &I);
81
82 // Find constants from a constant structure
83 typedef std::vector<unsigned char> val_vec_type;
84 bool fillGenericConstant(const DataLayout &DL, const Constant *CV,
85 val_vec_type &Vals, uint64_t Offset);
86 bool fillConstantDataArray(const DataLayout &DL, const ConstantDataArray *CDA,
87 val_vec_type &Vals, int Offset);
88 bool fillConstantArray(const DataLayout &DL, const ConstantArray *CA,
89 val_vec_type &Vals, int Offset);
90 bool fillConstantStruct(const DataLayout &DL, const ConstantStruct *CS,
91 val_vec_type &Vals, int Offset);
92 bool getConstantFieldValue(const GlobalAddressSDNode *Node, uint64_t Offset,
93 uint64_t Size, unsigned char *ByteSeq);
94 // Mapping from ConstantStruct global value to corresponding byte-list values
95 std::map<const void *, val_vec_type> cs_vals_;
96};
97} // namespace
98
99// ComplexPattern used on BPF Load/Store instructions
100bool BPFDAGToDAGISel::SelectAddr(SDValue Addr, SDValue &Base, SDValue &Offset) {
101 // if Address is FI, get the TargetFrameIndex.
102 SDLoc DL(Addr);
103 if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
104 Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
105 Offset = CurDAG->getTargetConstant(0, DL, MVT::i64);
106 return true;
107 }
108
109 if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
110 Addr.getOpcode() == ISD::TargetGlobalAddress)
111 return false;
112
113 // Addresses of the form Addr+const or Addr|const
114 if (CurDAG->isBaseWithConstantOffset(Addr)) {
115 auto *CN = cast<ConstantSDNode>(Addr.getOperand(1));
116 if (isInt<16>(CN->getSExtValue())) {
117 // If the first operand is a FI, get the TargetFI Node
118 if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0)))
119 Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
120 else
121 Base = Addr.getOperand(0);
122
123 Offset = CurDAG->getTargetConstant(CN->getSExtValue(), DL, MVT::i64);
124 return true;
125 }
126 }
127
128 Base = Addr;
129 Offset = CurDAG->getTargetConstant(0, DL, MVT::i64);
130 return true;
131}
132
133// ComplexPattern used on BPF FI instruction
134bool BPFDAGToDAGISel::SelectFIAddr(SDValue Addr, SDValue &Base,
135 SDValue &Offset) {
136 SDLoc DL(Addr);
137
138 if (!CurDAG->isBaseWithConstantOffset(Addr))
139 return false;
140
141 // Addresses of the form Addr+const or Addr|const
142 auto *CN = cast<ConstantSDNode>(Addr.getOperand(1));
143 if (isInt<16>(CN->getSExtValue())) {
144 // If the first operand is a FI, get the TargetFI Node
145 if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0)))
146 Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
147 else
148 return false;
149
150 Offset = CurDAG->getTargetConstant(CN->getSExtValue(), DL, MVT::i64);
151 return true;
152 }
153
154 return false;
155}
156
157bool BPFDAGToDAGISel::SelectInlineAsmMemoryOperand(
158 const SDValue &Op, unsigned ConstraintCode, std::vector<SDValue> &OutOps) {
159 SDValue Op0, Op1;
160 switch (ConstraintCode) {
161 default:
162 return true;
163 case InlineAsm::Constraint_m: // memory
164 if (!SelectAddr(Op, Op0, Op1))
165 return true;
166 break;
167 }
168
169 SDLoc DL(Op);
170 SDValue AluOp = CurDAG->getTargetConstant(ISD::ADD, DL, MVT::i32);;
171 OutOps.push_back(Op0);
172 OutOps.push_back(Op1);
173 OutOps.push_back(AluOp);
174 return false;
175}
176
177void BPFDAGToDAGISel::Select(SDNode *Node) {
178 unsigned Opcode = Node->getOpcode();
179
180 // If we have a custom node, we already have selected!
181 if (Node->isMachineOpcode()) {
182 LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("bpf-isel")) { dbgs() << "== "; Node->dump(CurDAG);
dbgs() << '\n'; } } while (false)
;
183 return;
184 }
185
186 // tablegen selection should be handled here.
187 switch (Opcode) {
188 default:
189 break;
190 case ISD::SDIV: {
191 DebugLoc Empty;
192 const DebugLoc &DL = Node->getDebugLoc();
193 if (DL != Empty)
194 errs() << "Error at line " << DL.getLine() << ": ";
195 else
196 errs() << "Error: ";
197 errs() << "Unsupport signed division for DAG: ";
198 Node->print(errs(), CurDAG);
199 errs() << "Please convert to unsigned div/mod.\n";
200 break;
201 }
202 case ISD::INTRINSIC_W_CHAIN: {
203 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
204 switch (IntNo) {
205 case Intrinsic::bpf_load_byte:
206 case Intrinsic::bpf_load_half:
207 case Intrinsic::bpf_load_word: {
208 SDLoc DL(Node);
209 SDValue Chain = Node->getOperand(0);
210 SDValue N1 = Node->getOperand(1);
211 SDValue Skb = Node->getOperand(2);
212 SDValue N3 = Node->getOperand(3);
213
214 SDValue R6Reg = CurDAG->getRegister(BPF::R6, MVT::i64);
215 Chain = CurDAG->getCopyToReg(Chain, DL, R6Reg, Skb, SDValue());
216 Node = CurDAG->UpdateNodeOperands(Node, Chain, N1, R6Reg, N3);
217 break;
218 }
219 }
220 break;
221 }
222
223 case ISD::FrameIndex: {
224 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
225 EVT VT = Node->getValueType(0);
226 SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
227 unsigned Opc = BPF::MOV_rr;
228 if (Node->hasOneUse()) {
229 CurDAG->SelectNodeTo(Node, Opc, VT, TFI);
230 return;
231 }
232 ReplaceNode(Node, CurDAG->getMachineNode(Opc, SDLoc(Node), VT, TFI));
233 return;
234 }
235 }
236
237 // Select the default instruction
238 SelectCode(Node);
239}
240
241void BPFDAGToDAGISel::PreprocessLoad(SDNode *Node,
242 SelectionDAG::allnodes_iterator &I) {
243 union {
244 uint8_t c[8];
245 uint16_t s;
246 uint32_t i;
247 uint64_t d;
248 } new_val; // hold up the constant values replacing loads.
249 bool to_replace = false;
250 SDLoc DL(Node);
251 const LoadSDNode *LD = cast<LoadSDNode>(Node);
5
'Node' is a 'LoadSDNode'
252 uint64_t size = LD->getMemOperand()->getSize();
6
Calling 'MachineMemOperand::getSize'
9
Returning from 'MachineMemOperand::getSize'
253
254 if (!size || size > 8 || (size & (size - 1)) || !LD->isSimple())
10
Assuming 'size' is not equal to 0
11
Assuming 'size' is <= 8
12
Assuming the condition is false
13
Calling 'MemSDNode::isSimple'
16
Returning from 'MemSDNode::isSimple'
17
Taking false branch
255 return;
256
257 SDNode *LDAddrNode = LD->getOperand(1).getNode();
258 // Match LDAddr against either global_addr or (global_addr + offset)
259 unsigned opcode = LDAddrNode->getOpcode();
260 if (opcode == ISD::ADD) {
18
Assuming 'opcode' is equal to ADD
19
Taking true branch
261 SDValue OP1 = LDAddrNode->getOperand(0);
262 SDValue OP2 = LDAddrNode->getOperand(1);
263
264 // We want to find the pattern global_addr + offset
265 SDNode *OP1N = OP1.getNode();
266 if (OP1N->getOpcode() <= ISD::BUILTIN_OP_END || OP1N->getNumOperands() == 0)
20
Assuming the condition is false
21
Assuming the condition is false
267 return;
268
269 LLVM_DEBUG(dbgs() << "Check candidate load: "; LD->dump(); dbgs() << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("bpf-isel")) { dbgs() << "Check candidate load: "; LD->
dump(); dbgs() << '\n'; } } while (false)
;
22
Taking false branch
23
Assuming 'DebugFlag' is false
24
Loop condition is false. Exiting loop
270
271 const GlobalAddressSDNode *GADN =
272 dyn_cast<GlobalAddressSDNode>(OP1N->getOperand(0).getNode());
25
Assuming the object is a 'GlobalAddressSDNode'
273 const ConstantSDNode *CDN = dyn_cast<ConstantSDNode>(OP2.getNode());
26
Assuming the object is a 'ConstantSDNode'
274 if (GADN
26.1
'GADN' is non-null
26.1
'GADN' is non-null
26.1
'GADN' is non-null
26.1
'GADN' is non-null
26.1
'GADN' is non-null
&& CDN
26.2
'CDN' is non-null
26.2
'CDN' is non-null
26.2
'CDN' is non-null
26.2
'CDN' is non-null
26.2
'CDN' is non-null
)
27
Taking true branch
275 to_replace =
276 getConstantFieldValue(GADN, CDN->getZExtValue(), size, new_val.c);
28
Calling 'BPFDAGToDAGISel::getConstantFieldValue'
46
Returning from 'BPFDAGToDAGISel::getConstantFieldValue'
277 } else if (LDAddrNode->getOpcode() > ISD::BUILTIN_OP_END &&
278 LDAddrNode->getNumOperands() > 0) {
279 LLVM_DEBUG(dbgs() << "Check candidate load: "; LD->dump(); dbgs() << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("bpf-isel")) { dbgs() << "Check candidate load: "; LD->
dump(); dbgs() << '\n'; } } while (false)
;
280
281 SDValue OP1 = LDAddrNode->getOperand(0);
282 if (const GlobalAddressSDNode *GADN =
283 dyn_cast<GlobalAddressSDNode>(OP1.getNode()))
284 to_replace = getConstantFieldValue(GADN, 0, size, new_val.c);
285 }
286
287 if (!to_replace
46.1
'to_replace' is true
46.1
'to_replace' is true
46.1
'to_replace' is true
46.1
'to_replace' is true
46.1
'to_replace' is true
)
47
Taking false branch
288 return;
289
290 // replacing the old with a new value
291 uint64_t val;
292 if (size == 1)
48
Assuming 'size' is not equal to 1
49
Taking false branch
293 val = new_val.c[0];
294 else if (size == 2)
50
Assuming 'size' is not equal to 2
51
Taking false branch
295 val = new_val.s;
296 else if (size == 4)
52
Assuming 'size' is not equal to 4
53
Taking false branch
297 val = new_val.i;
298 else {
299 val = new_val.d;
54
Assigned value is garbage or undefined
300 }
301
302 LLVM_DEBUG(dbgs() << "Replacing load of size " << size << " with constant "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("bpf-isel")) { dbgs() << "Replacing load of size " <<
size << " with constant " << val << '\n'; }
} while (false)
303 << val << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("bpf-isel")) { dbgs() << "Replacing load of size " <<
size << " with constant " << val << '\n'; }
} while (false)
;
304 SDValue NVal = CurDAG->getConstant(val, DL, LD->getValueType(0));
305
306 // After replacement, the current node is dead, we need to
307 // go backward one step to make iterator still work
308 I--;
309 SDValue From[] = {SDValue(Node, 0), SDValue(Node, 1)};
310 SDValue To[] = {NVal, NVal};
311 CurDAG->ReplaceAllUsesOfValuesWith(From, To, 2);
312 I++;
313 // It is safe to delete node now
314 CurDAG->DeleteNode(Node);
315}
316
317void BPFDAGToDAGISel::PreprocessISelDAG() {
318 // Iterate through all nodes, interested in the following case:
319 //
320 // . loads from ConstantStruct or ConstantArray of constructs
321 // which can be turns into constant itself, with this we can
322 // avoid reading from read-only section at runtime.
323 //
324 // . Removing redundant AND for intrinsic narrow loads.
325 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
1
Loop condition is true. Entering loop body
326 E = CurDAG->allnodes_end();
327 I != E;) {
328 SDNode *Node = &*I++;
329 unsigned Opcode = Node->getOpcode();
330 if (Opcode == ISD::LOAD)
2
Assuming 'Opcode' is equal to LOAD
3
Taking true branch
331 PreprocessLoad(Node, I);
4
Calling 'BPFDAGToDAGISel::PreprocessLoad'
332 else if (Opcode == ISD::AND)
333 PreprocessTrunc(Node, I);
334 }
335}
336
337bool BPFDAGToDAGISel::getConstantFieldValue(const GlobalAddressSDNode *Node,
338 uint64_t Offset, uint64_t Size,
339 unsigned char *ByteSeq) {
340 const GlobalVariable *V = dyn_cast<GlobalVariable>(Node->getGlobal());
29
Assuming the object is a 'GlobalVariable'
341
342 if (!V
29.1
'V' is non-null, which participates in a condition later
29.1
'V' is non-null, which participates in a condition later
29.1
'V' is non-null, which participates in a condition later
29.1
'V' is non-null, which participates in a condition later
29.1
'V' is non-null, which participates in a condition later
|| !V->hasInitializer() || !V->isConstant())
30
Calling 'GlobalVariable::hasInitializer'
33
Returning from 'GlobalVariable::hasInitializer'
34
Assuming the condition is false
35
Taking false branch
343 return false;
344
345 const Constant *Init = V->getInitializer();
346 const DataLayout &DL = CurDAG->getDataLayout();
347 val_vec_type TmpVal;
348
349 auto it = cs_vals_.find(static_cast<const void *>(Init));
350 if (it != cs_vals_.end()) {
36
Calling 'operator!='
39
Returning from 'operator!='
40
Taking true branch
351 TmpVal = it->second;
352 } else {
353 uint64_t total_size = 0;
354 if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(Init))
355 total_size =
356 DL.getStructLayout(cast<StructType>(CS->getType()))->getSizeInBytes();
357 else if (const ConstantArray *CA = dyn_cast<ConstantArray>(Init))
358 total_size = DL.getTypeAllocSize(CA->getType()->getElementType()) *
359 CA->getNumOperands();
360 else
361 return false;
362
363 val_vec_type Vals(total_size, 0);
364 if (fillGenericConstant(DL, Init, Vals, 0) == false)
365 return false;
366 cs_vals_[static_cast<const void *>(Init)] = Vals;
367 TmpVal = std::move(Vals);
368 }
369
370 // test whether host endianness matches target
371 union {
372 uint8_t c[2];
373 uint16_t s;
374 } test_buf;
375 uint16_t test_val = 0x2345;
376 if (DL.isLittleEndian())
41
Taking false branch
377 support::endian::write16le(test_buf.c, test_val);
378 else
379 support::endian::write16be(test_buf.c, test_val);
380
381 bool endian_match = test_buf.s == test_val;
42
Assuming 'test_val' is not equal to field 's'
382 for (uint64_t i = Offset, j = 0; i < Offset + Size; i++, j++)
43
Assuming the condition is false
44
Loop condition is false. Execution continues on line 385
383 ByteSeq[j] = endian_match ? TmpVal[i] : TmpVal[Offset + Size - 1 - j];
384
385 return true;
45
Returning the value 1, which participates in a condition later
386}
387
388bool BPFDAGToDAGISel::fillGenericConstant(const DataLayout &DL,
389 const Constant *CV,
390 val_vec_type &Vals, uint64_t Offset) {
391 uint64_t Size = DL.getTypeAllocSize(CV->getType());
392
393 if (isa<ConstantAggregateZero>(CV) || isa<UndefValue>(CV))
394 return true; // already done
395
396 if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
397 uint64_t val = CI->getZExtValue();
398 LLVM_DEBUG(dbgs() << "Byte array at offset " << Offset << " with value "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("bpf-isel")) { dbgs() << "Byte array at offset " <<
Offset << " with value " << val << '\n'; }
} while (false)
399 << val << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("bpf-isel")) { dbgs() << "Byte array at offset " <<
Offset << " with value " << val << '\n'; }
} while (false)
;
400
401 if (Size > 8 || (Size & (Size - 1)))
402 return false;
403
404 // Store based on target endian
405 for (uint64_t i = 0; i < Size; ++i) {
406 Vals[Offset + i] = DL.isLittleEndian()
407 ? ((val >> (i * 8)) & 0xFF)
408 : ((val >> ((Size - i - 1) * 8)) & 0xFF);
409 }
410 return true;
411 }
412
413 if (const ConstantDataArray *CDA = dyn_cast<ConstantDataArray>(CV))
414 return fillConstantDataArray(DL, CDA, Vals, Offset);
415
416 if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV))
417 return fillConstantArray(DL, CA, Vals, Offset);
418
419 if (const ConstantStruct *CVS = dyn_cast<ConstantStruct>(CV))
420 return fillConstantStruct(DL, CVS, Vals, Offset);
421
422 return false;
423}
424
425bool BPFDAGToDAGISel::fillConstantDataArray(const DataLayout &DL,
426 const ConstantDataArray *CDA,
427 val_vec_type &Vals, int Offset) {
428 for (unsigned i = 0, e = CDA->getNumElements(); i != e; ++i) {
429 if (fillGenericConstant(DL, CDA->getElementAsConstant(i), Vals, Offset) ==
430 false)
431 return false;
432 Offset += DL.getTypeAllocSize(CDA->getElementAsConstant(i)->getType());
433 }
434
435 return true;
436}
437
438bool BPFDAGToDAGISel::fillConstantArray(const DataLayout &DL,
439 const ConstantArray *CA,
440 val_vec_type &Vals, int Offset) {
441 for (unsigned i = 0, e = CA->getNumOperands(); i != e; ++i) {
442 if (fillGenericConstant(DL, CA->getOperand(i), Vals, Offset) == false)
443 return false;
444 Offset += DL.getTypeAllocSize(CA->getOperand(i)->getType());
445 }
446
447 return true;
448}
449
450bool BPFDAGToDAGISel::fillConstantStruct(const DataLayout &DL,
451 const ConstantStruct *CS,
452 val_vec_type &Vals, int Offset) {
453 const StructLayout *Layout = DL.getStructLayout(CS->getType());
454 for (unsigned i = 0, e = CS->getNumOperands(); i != e; ++i) {
455 const Constant *Field = CS->getOperand(i);
456 uint64_t SizeSoFar = Layout->getElementOffset(i);
457 if (fillGenericConstant(DL, Field, Vals, Offset + SizeSoFar) == false)
458 return false;
459 }
460 return true;
461}
462
463void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node,
464 SelectionDAG::allnodes_iterator &I) {
465 ConstantSDNode *MaskN = dyn_cast<ConstantSDNode>(Node->getOperand(1));
466 if (!MaskN)
467 return;
468
469 // The Reg operand should be a virtual register, which is defined
470 // outside the current basic block. DAG combiner has done a pretty
471 // good job in removing truncating inside a single basic block except
472 // when the Reg operand comes from bpf_load_[byte | half | word] for
473 // which the generic optimizer doesn't understand their results are
474 // zero extended.
475 SDValue BaseV = Node->getOperand(0);
476 if (BaseV.getOpcode() != ISD::INTRINSIC_W_CHAIN)
477 return;
478
479 unsigned IntNo = cast<ConstantSDNode>(BaseV->getOperand(1))->getZExtValue();
480 uint64_t MaskV = MaskN->getZExtValue();
481
482 if (!((IntNo == Intrinsic::bpf_load_byte && MaskV == 0xFF) ||
483 (IntNo == Intrinsic::bpf_load_half && MaskV == 0xFFFF) ||
484 (IntNo == Intrinsic::bpf_load_word && MaskV == 0xFFFFFFFF)))
485 return;
486
487 LLVM_DEBUG(dbgs() << "Remove the redundant AND operation in: ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("bpf-isel")) { dbgs() << "Remove the redundant AND operation in: "
; Node->dump(); dbgs() << '\n'; } } while (false)
488 Node->dump(); dbgs() << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("bpf-isel")) { dbgs() << "Remove the redundant AND operation in: "
; Node->dump(); dbgs() << '\n'; } } while (false)
;
489
490 I--;
491 CurDAG->ReplaceAllUsesWith(SDValue(Node, 0), BaseV);
492 I++;
493 CurDAG->DeleteNode(Node);
494}
495
496FunctionPass *llvm::createBPFISelDag(BPFTargetMachine &TM) {
497 return new BPFDAGToDAGISel(TM);
498}

/build/llvm-toolchain-snapshot-14~++20220116111435+edbb8a843c13/llvm/include/llvm/CodeGen/MachineMemOperand.h

1//==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the declaration of the MachineMemOperand class, which is a
10// description of a memory reference. It is used to help track dependencies
11// in the backend.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
16#define LLVM_CODEGEN_MACHINEMEMOPERAND_H
17
18#include "llvm/ADT/BitmaskEnum.h"
19#include "llvm/ADT/PointerUnion.h"
20#include "llvm/CodeGen/PseudoSourceValue.h"
21#include "llvm/IR/DerivedTypes.h"
22#include "llvm/IR/Value.h" // PointerLikeTypeTraits<Value*>
23#include "llvm/Support/AtomicOrdering.h"
24#include "llvm/Support/DataTypes.h"
25#include "llvm/Support/LowLevelTypeImpl.h"
26
27namespace llvm {
28
29class FoldingSetNodeID;
30class MDNode;
31class raw_ostream;
32class MachineFunction;
33class ModuleSlotTracker;
34
35/// This class contains a discriminated union of information about pointers in
36/// memory operands, relating them back to LLVM IR or to virtual locations (such
37/// as frame indices) that are exposed during codegen.
38struct MachinePointerInfo {
39 /// This is the IR pointer value for the access, or it is null if unknown.
40 /// If this is null, then the access is to a pointer in the default address
41 /// space.
42 PointerUnion<const Value *, const PseudoSourceValue *> V;
43
44 /// Offset - This is an offset from the base Value*.
45 int64_t Offset;
46
47 unsigned AddrSpace = 0;
48
49 uint8_t StackID;
50
51 explicit MachinePointerInfo(const Value *v, int64_t offset = 0,
52 uint8_t ID = 0)
53 : V(v), Offset(offset), StackID(ID) {
54 AddrSpace = v ? v->getType()->getPointerAddressSpace() : 0;
55 }
56
57 explicit MachinePointerInfo(const PseudoSourceValue *v, int64_t offset = 0,
58 uint8_t ID = 0)
59 : V(v), Offset(offset), StackID(ID) {
60 AddrSpace = v ? v->getAddressSpace() : 0;
61 }
62
63 explicit MachinePointerInfo(unsigned AddressSpace = 0, int64_t offset = 0)
64 : V((const Value *)nullptr), Offset(offset), AddrSpace(AddressSpace),
65 StackID(0) {}
66
67 explicit MachinePointerInfo(
68 PointerUnion<const Value *, const PseudoSourceValue *> v,
69 int64_t offset = 0,
70 uint8_t ID = 0)
71 : V(v), Offset(offset), StackID(ID) {
72 if (V) {
73 if (const auto *ValPtr = V.dyn_cast<const Value*>())
74 AddrSpace = ValPtr->getType()->getPointerAddressSpace();
75 else
76 AddrSpace = V.get<const PseudoSourceValue*>()->getAddressSpace();
77 }
78 }
79
80 MachinePointerInfo getWithOffset(int64_t O) const {
81 if (V.isNull())
82 return MachinePointerInfo(AddrSpace, Offset + O);
83 if (V.is<const Value*>())
84 return MachinePointerInfo(V.get<const Value*>(), Offset + O, StackID);
85 return MachinePointerInfo(V.get<const PseudoSourceValue*>(), Offset + O,
86 StackID);
87 }
88
89 /// Return true if memory region [V, V+Offset+Size) is known to be
90 /// dereferenceable.
91 bool isDereferenceable(unsigned Size, LLVMContext &C,
92 const DataLayout &DL) const;
93
94 /// Return the LLVM IR address space number that this pointer points into.
95 unsigned getAddrSpace() const;
96
97 /// Return a MachinePointerInfo record that refers to the constant pool.
98 static MachinePointerInfo getConstantPool(MachineFunction &MF);
99
100 /// Return a MachinePointerInfo record that refers to the specified
101 /// FrameIndex.
102 static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI,
103 int64_t Offset = 0);
104
105 /// Return a MachinePointerInfo record that refers to a jump table entry.
106 static MachinePointerInfo getJumpTable(MachineFunction &MF);
107
108 /// Return a MachinePointerInfo record that refers to a GOT entry.
109 static MachinePointerInfo getGOT(MachineFunction &MF);
110
111 /// Stack pointer relative access.
112 static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset,
113 uint8_t ID = 0);
114
115 /// Stack memory without other information.
116 static MachinePointerInfo getUnknownStack(MachineFunction &MF);
117};
118
119
120//===----------------------------------------------------------------------===//
121/// A description of a memory reference used in the backend.
122/// Instead of holding a StoreInst or LoadInst, this class holds the address
123/// Value of the reference along with a byte size and offset. This allows it
124/// to describe lowered loads and stores. Also, the special PseudoSourceValue
125/// objects can be used to represent loads and stores to memory locations
126/// that aren't explicit in the regular LLVM IR.
127///
128class MachineMemOperand {
129public:
130 /// Flags values. These may be or'd together.
131 enum Flags : uint16_t {
132 // No flags set.
133 MONone = 0,
134 /// The memory access reads data.
135 MOLoad = 1u << 0,
136 /// The memory access writes data.
137 MOStore = 1u << 1,
138 /// The memory access is volatile.
139 MOVolatile = 1u << 2,
140 /// The memory access is non-temporal.
141 MONonTemporal = 1u << 3,
142 /// The memory access is dereferenceable (i.e., doesn't trap).
143 MODereferenceable = 1u << 4,
144 /// The memory access always returns the same value (or traps).
145 MOInvariant = 1u << 5,
146
147 // Reserved for use by target-specific passes.
148 // Targets may override getSerializableMachineMemOperandTargetFlags() to
149 // enable MIR serialization/parsing of these flags. If more of these flags
150 // are added, the MIR printing/parsing code will need to be updated as well.
151 MOTargetFlag1 = 1u << 6,
152 MOTargetFlag2 = 1u << 7,
153 MOTargetFlag3 = 1u << 8,
154
155 LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ MOTargetFlag3)LLVM_BITMASK_LARGEST_ENUMERATOR = MOTargetFlag3
156 };
157
158private:
159 /// Atomic information for this memory operation.
160 struct MachineAtomicInfo {
161 /// Synchronization scope ID for this memory operation.
162 unsigned SSID : 8; // SyncScope::ID
163 /// Atomic ordering requirements for this memory operation. For cmpxchg
164 /// atomic operations, atomic ordering requirements when store occurs.
165 unsigned Ordering : 4; // enum AtomicOrdering
166 /// For cmpxchg atomic operations, atomic ordering requirements when store
167 /// does not occur.
168 unsigned FailureOrdering : 4; // enum AtomicOrdering
169 };
170
171 MachinePointerInfo PtrInfo;
172
173 /// Track the memory type of the access. An access size which is unknown or
174 /// too large to be represented by LLT should use the invalid LLT.
175 LLT MemoryType;
176
177 Flags FlagVals;
178 Align BaseAlign;
179 MachineAtomicInfo AtomicInfo;
180 AAMDNodes AAInfo;
181 const MDNode *Ranges;
182
183public:
184 /// Construct a MachineMemOperand object with the specified PtrInfo, flags,
185 /// size, and base alignment. For atomic operations the synchronization scope
186 /// and atomic ordering requirements must also be specified. For cmpxchg
187 /// atomic operations the atomic ordering requirements when store does not
188 /// occur must also be specified.
189 MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, uint64_t s,
190 Align a, const AAMDNodes &AAInfo = AAMDNodes(),
191 const MDNode *Ranges = nullptr,
192 SyncScope::ID SSID = SyncScope::System,
193 AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
194 AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
195 MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, LLT type, Align a,
196 const AAMDNodes &AAInfo = AAMDNodes(),
197 const MDNode *Ranges = nullptr,
198 SyncScope::ID SSID = SyncScope::System,
199 AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
200 AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
201
202 const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }
203
204 /// Return the base address of the memory access. This may either be a normal
205 /// LLVM IR Value, or one of the special values used in CodeGen.
206 /// Special values are those obtained via
207 /// PseudoSourceValue::getFixedStack(int), PseudoSourceValue::getStack, and
208 /// other PseudoSourceValue member functions which return objects which stand
209 /// for frame/stack pointer relative references and other special references
210 /// which are not representable in the high-level IR.
211 const Value *getValue() const { return PtrInfo.V.dyn_cast<const Value*>(); }
212
213 const PseudoSourceValue *getPseudoValue() const {
214 return PtrInfo.V.dyn_cast<const PseudoSourceValue*>();
215 }
216
217 const void *getOpaqueValue() const { return PtrInfo.V.getOpaqueValue(); }
218
219 /// Return the raw flags of the source value, \see Flags.
220 Flags getFlags() const { return FlagVals; }
221
222 /// Bitwise OR the current flags with the given flags.
223 void setFlags(Flags f) { FlagVals |= f; }
224
225 /// For normal values, this is a byte offset added to the base address.
226 /// For PseudoSourceValue::FPRel values, this is the FrameIndex number.
227 int64_t getOffset() const { return PtrInfo.Offset; }
228
229 unsigned getAddrSpace() const { return PtrInfo.getAddrSpace(); }
230
231 /// Return the memory type of the memory reference. This should only be relied
232 /// on for GlobalISel G_* operation legalization.
233 LLT getMemoryType() const { return MemoryType; }
234
235 /// Return the size in bytes of the memory reference.
236 uint64_t getSize() const {
237 return MemoryType.isValid() ? MemoryType.getSizeInBytes() : ~UINT64_C(0)0UL;
7
'?' condition is true
8
Returning value, which participates in a condition later
238 }
239
240 /// Return the size in bits of the memory reference.
241 uint64_t getSizeInBits() const {
242 return MemoryType.isValid() ? MemoryType.getSizeInBits() : ~UINT64_C(0)0UL;
243 }
244
245 LLT getType() const {
246 return MemoryType;
247 }
248
249 /// Return the minimum known alignment in bytes of the actual memory
250 /// reference.
251 Align getAlign() const;
252
253 /// Return the minimum known alignment in bytes of the base address, without
254 /// the offset.
255 Align getBaseAlign() const { return BaseAlign; }
256
257 /// Return the AA tags for the memory reference.
258 AAMDNodes getAAInfo() const { return AAInfo; }
259
260 /// Return the range tag for the memory reference.
261 const MDNode *getRanges() const { return Ranges; }
262
263 /// Returns the synchronization scope ID for this memory operation.
264 SyncScope::ID getSyncScopeID() const {
265 return static_cast<SyncScope::ID>(AtomicInfo.SSID);
266 }
267
268 /// Return the atomic ordering requirements for this memory operation. For
269 /// cmpxchg atomic operations, return the atomic ordering requirements when
270 /// store occurs.
271 AtomicOrdering getSuccessOrdering() const {
272 return static_cast<AtomicOrdering>(AtomicInfo.Ordering);
273 }
274
275 /// For cmpxchg atomic operations, return the atomic ordering requirements
276 /// when store does not occur.
277 AtomicOrdering getFailureOrdering() const {
278 return static_cast<AtomicOrdering>(AtomicInfo.FailureOrdering);
279 }
280
281 /// Return a single atomic ordering that is at least as strong as both the
282 /// success and failure orderings for an atomic operation. (For operations
283 /// other than cmpxchg, this is equivalent to getSuccessOrdering().)
284 AtomicOrdering getMergedOrdering() const {
285 return getMergedAtomicOrdering(getSuccessOrdering(), getFailureOrdering());
286 }
287
288 bool isLoad() const { return FlagVals & MOLoad; }
289 bool isStore() const { return FlagVals & MOStore; }
290 bool isVolatile() const { return FlagVals & MOVolatile; }
291 bool isNonTemporal() const { return FlagVals & MONonTemporal; }
292 bool isDereferenceable() const { return FlagVals & MODereferenceable; }
293 bool isInvariant() const { return FlagVals & MOInvariant; }
294
295 /// Returns true if this operation has an atomic ordering requirement of
296 /// unordered or higher, false otherwise.
297 bool isAtomic() const {
298 return getSuccessOrdering() != AtomicOrdering::NotAtomic;
299 }
300
301 /// Returns true if this memory operation doesn't have any ordering
302 /// constraints other than normal aliasing. Volatile and (ordered) atomic
303 /// memory operations can't be reordered.
304 bool isUnordered() const {
305 return (getSuccessOrdering() == AtomicOrdering::NotAtomic ||
306 getSuccessOrdering() == AtomicOrdering::Unordered) &&
307 !isVolatile();
308 }
309
310 /// Update this MachineMemOperand to reflect the alignment of MMO, if it has a
311 /// greater alignment. This must only be used when the new alignment applies
312 /// to all users of this MachineMemOperand.
313 void refineAlignment(const MachineMemOperand *MMO);
314
315 /// Change the SourceValue for this MachineMemOperand. This should only be
316 /// used when an object is being relocated and all references to it are being
317 /// updated.
318 void setValue(const Value *NewSV) { PtrInfo.V = NewSV; }
319 void setValue(const PseudoSourceValue *NewSV) { PtrInfo.V = NewSV; }
320 void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; }
321
322 /// Reset the tracked memory type.
323 void setType(LLT NewTy) {
324 MemoryType = NewTy;
325 }
326
327 /// Profile - Gather unique data for the object.
328 ///
329 void Profile(FoldingSetNodeID &ID) const;
330
331 /// Support for operator<<.
332 /// @{
333 void print(raw_ostream &OS, ModuleSlotTracker &MST,
334 SmallVectorImpl<StringRef> &SSNs, const LLVMContext &Context,
335 const MachineFrameInfo *MFI, const TargetInstrInfo *TII) const;
336 /// @}
337
338 friend bool operator==(const MachineMemOperand &LHS,
339 const MachineMemOperand &RHS) {
340 return LHS.getValue() == RHS.getValue() &&
341 LHS.getPseudoValue() == RHS.getPseudoValue() &&
342 LHS.getSize() == RHS.getSize() &&
343 LHS.getOffset() == RHS.getOffset() &&
344 LHS.getFlags() == RHS.getFlags() &&
345 LHS.getAAInfo() == RHS.getAAInfo() &&
346 LHS.getRanges() == RHS.getRanges() &&
347 LHS.getAlign() == RHS.getAlign() &&
348 LHS.getAddrSpace() == RHS.getAddrSpace();
349 }
350
351 friend bool operator!=(const MachineMemOperand &LHS,
352 const MachineMemOperand &RHS) {
353 return !(LHS == RHS);
354 }
355};
356
357} // End llvm namespace
358
359#endif

/build/llvm-toolchain-snapshot-14~++20220116111435+edbb8a843c13/llvm/include/llvm/CodeGen/SelectionDAGNodes.h

1//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SDNode class and derived classes, which are used to
10// represent the nodes and operations present in a SelectionDAG. These nodes
11// and operations are machine code level operations, with some similarities to
12// the GCC RTL representation.
13//
14// Clients should include the SelectionDAG.h file instead of this file directly.
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19#define LLVM_CODEGEN_SELECTIONDAGNODES_H
20
21#include "llvm/ADT/APFloat.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/FoldingSet.h"
25#include "llvm/ADT/GraphTraits.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/ilist_node.h"
29#include "llvm/ADT/iterator.h"
30#include "llvm/ADT/iterator_range.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/Register.h"
34#include "llvm/CodeGen/ValueTypes.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DebugLoc.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/Metadata.h"
40#include "llvm/IR/Operator.h"
41#include "llvm/Support/AlignOf.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/MachineValueType.h"
46#include "llvm/Support/TypeSize.h"
47#include <algorithm>
48#include <cassert>
49#include <climits>
50#include <cstddef>
51#include <cstdint>
52#include <cstring>
53#include <iterator>
54#include <string>
55#include <tuple>
56
57namespace llvm {
58
59class APInt;
60class Constant;
61class GlobalValue;
62class MachineBasicBlock;
63class MachineConstantPoolValue;
64class MCSymbol;
65class raw_ostream;
66class SDNode;
67class SelectionDAG;
68class Type;
69class Value;
70
71void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
72 bool force = false);
73
74/// This represents a list of ValueType's that has been intern'd by
75/// a SelectionDAG. Instances of this simple value class are returned by
76/// SelectionDAG::getVTList(...).
77///
78struct SDVTList {
79 const EVT *VTs;
80 unsigned int NumVTs;
81};
82
83namespace ISD {
84
85 /// Node predicates
86
87/// If N is a BUILD_VECTOR or SPLAT_VECTOR node whose elements are all the
88/// same constant or undefined, return true and return the constant value in
89/// \p SplatValue.
90bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
91
92/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
93/// all of the elements are ~0 or undef. If \p BuildVectorOnly is set to
94/// true, it only checks BUILD_VECTOR.
95bool isConstantSplatVectorAllOnes(const SDNode *N,
96 bool BuildVectorOnly = false);
97
98/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
99/// all of the elements are 0 or undef. If \p BuildVectorOnly is set to true, it
100/// only checks BUILD_VECTOR.
101bool isConstantSplatVectorAllZeros(const SDNode *N,
102 bool BuildVectorOnly = false);
103
104/// Return true if the specified node is a BUILD_VECTOR where all of the
105/// elements are ~0 or undef.
106bool isBuildVectorAllOnes(const SDNode *N);
107
108/// Return true if the specified node is a BUILD_VECTOR where all of the
109/// elements are 0 or undef.
110bool isBuildVectorAllZeros(const SDNode *N);
111
112/// Return true if the specified node is a BUILD_VECTOR node of all
113/// ConstantSDNode or undef.
114bool isBuildVectorOfConstantSDNodes(const SDNode *N);
115
116/// Return true if the specified node is a BUILD_VECTOR node of all
117/// ConstantFPSDNode or undef.
118bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
119
120/// Return true if the node has at least one operand and all operands of the
121/// specified node are ISD::UNDEF.
122bool allOperandsUndef(const SDNode *N);
123
124} // end namespace ISD
125
126//===----------------------------------------------------------------------===//
127/// Unlike LLVM values, Selection DAG nodes may return multiple
128/// values as the result of a computation. Many nodes return multiple values,
129/// from loads (which define a token and a return value) to ADDC (which returns
130/// a result and a carry value), to calls (which may return an arbitrary number
131/// of values).
132///
133/// As such, each use of a SelectionDAG computation must indicate the node that
134/// computes it as well as which return value to use from that node. This pair
135/// of information is represented with the SDValue value type.
136///
137class SDValue {
138 friend struct DenseMapInfo<SDValue>;
139
140 SDNode *Node = nullptr; // The node defining the value we are using.
141 unsigned ResNo = 0; // Which return value of the node we are using.
142
143public:
144 SDValue() = default;
145 SDValue(SDNode *node, unsigned resno);
146
147 /// get the index which selects a specific result in the SDNode
148 unsigned getResNo() const { return ResNo; }
149
150 /// get the SDNode which holds the desired result
151 SDNode *getNode() const { return Node; }
152
153 /// set the SDNode
154 void setNode(SDNode *N) { Node = N; }
155
156 inline SDNode *operator->() const { return Node; }
157
158 bool operator==(const SDValue &O) const {
159 return Node == O.Node && ResNo == O.ResNo;
160 }
161 bool operator!=(const SDValue &O) const {
162 return !operator==(O);
163 }
164 bool operator<(const SDValue &O) const {
165 return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
166 }
167 explicit operator bool() const {
168 return Node != nullptr;
169 }
170
171 SDValue getValue(unsigned R) const {
172 return SDValue(Node, R);
173 }
174
175 /// Return true if this node is an operand of N.
176 bool isOperandOf(const SDNode *N) const;
177
178 /// Return the ValueType of the referenced return value.
179 inline EVT getValueType() const;
180
181 /// Return the simple ValueType of the referenced return value.
182 MVT getSimpleValueType() const {
183 return getValueType().getSimpleVT();
184 }
185
186 /// Returns the size of the value in bits.
187 ///
188 /// If the value type is a scalable vector type, the scalable property will
189 /// be set and the runtime size will be a positive integer multiple of the
190 /// base size.
191 TypeSize getValueSizeInBits() const {
192 return getValueType().getSizeInBits();
193 }
194
195 uint64_t getScalarValueSizeInBits() const {
196 return getValueType().getScalarType().getFixedSizeInBits();
197 }
198
199 // Forwarding methods - These forward to the corresponding methods in SDNode.
200 inline unsigned getOpcode() const;
201 inline unsigned getNumOperands() const;
202 inline const SDValue &getOperand(unsigned i) const;
203 inline uint64_t getConstantOperandVal(unsigned i) const;
204 inline const APInt &getConstantOperandAPInt(unsigned i) const;
205 inline bool isTargetMemoryOpcode() const;
206 inline bool isTargetOpcode() const;
207 inline bool isMachineOpcode() const;
208 inline bool isUndef() const;
209 inline unsigned getMachineOpcode() const;
210 inline const DebugLoc &getDebugLoc() const;
211 inline void dump() const;
212 inline void dump(const SelectionDAG *G) const;
213 inline void dumpr() const;
214 inline void dumpr(const SelectionDAG *G) const;
215
216 /// Return true if this operand (which must be a chain) reaches the
217 /// specified operand without crossing any side-effecting instructions.
218 /// In practice, this looks through token factors and non-volatile loads.
219 /// In order to remain efficient, this only
220 /// looks a couple of nodes in, it does not do an exhaustive search.
221 bool reachesChainWithoutSideEffects(SDValue Dest,
222 unsigned Depth = 2) const;
223
224 /// Return true if there are no nodes using value ResNo of Node.
225 inline bool use_empty() const;
226
227 /// Return true if there is exactly one node using value ResNo of Node.
228 inline bool hasOneUse() const;
229};
230
231template<> struct DenseMapInfo<SDValue> {
232 static inline SDValue getEmptyKey() {
233 SDValue V;
234 V.ResNo = -1U;
235 return V;
236 }
237
238 static inline SDValue getTombstoneKey() {
239 SDValue V;
240 V.ResNo = -2U;
241 return V;
242 }
243
244 static unsigned getHashValue(const SDValue &Val) {
245 return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
246 (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
247 }
248
249 static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
250 return LHS == RHS;
251 }
252};
253
254/// Allow casting operators to work directly on
255/// SDValues as if they were SDNode*'s.
256template<> struct simplify_type<SDValue> {
257 using SimpleType = SDNode *;
258
259 static SimpleType getSimplifiedValue(SDValue &Val) {
260 return Val.getNode();
261 }
262};
263template<> struct simplify_type<const SDValue> {
264 using SimpleType = /*const*/ SDNode *;
265
266 static SimpleType getSimplifiedValue(const SDValue &Val) {
267 return Val.getNode();
268 }
269};
270
271/// Represents a use of a SDNode. This class holds an SDValue,
272/// which records the SDNode being used and the result number, a
273/// pointer to the SDNode using the value, and Next and Prev pointers,
274/// which link together all the uses of an SDNode.
275///
276class SDUse {
277 /// Val - The value being used.
278 SDValue Val;
279 /// User - The user of this value.
280 SDNode *User = nullptr;
281 /// Prev, Next - Pointers to the uses list of the SDNode referred by
282 /// this operand.
283 SDUse **Prev = nullptr;
284 SDUse *Next = nullptr;
285
286public:
287 SDUse() = default;
288 SDUse(const SDUse &U) = delete;
289 SDUse &operator=(const SDUse &) = delete;
290
291 /// Normally SDUse will just implicitly convert to an SDValue that it holds.
292 operator const SDValue&() const { return Val; }
293
294 /// If implicit conversion to SDValue doesn't work, the get() method returns
295 /// the SDValue.
296 const SDValue &get() const { return Val; }
297
298 /// This returns the SDNode that contains this Use.
299 SDNode *getUser() { return User; }
300
301 /// Get the next SDUse in the use list.
302 SDUse *getNext() const { return Next; }
303
304 /// Convenience function for get().getNode().
305 SDNode *getNode() const { return Val.getNode(); }
306 /// Convenience function for get().getResNo().
307 unsigned getResNo() const { return Val.getResNo(); }
308 /// Convenience function for get().getValueType().
309 EVT getValueType() const { return Val.getValueType(); }
310
311 /// Convenience function for get().operator==
312 bool operator==(const SDValue &V) const {
313 return Val == V;
314 }
315
316 /// Convenience function for get().operator!=
317 bool operator!=(const SDValue &V) const {
318 return Val != V;
319 }
320
321 /// Convenience function for get().operator<
322 bool operator<(const SDValue &V) const {
323 return Val < V;
324 }
325
326private:
327 friend class SelectionDAG;
328 friend class SDNode;
329 // TODO: unfriend HandleSDNode once we fix its operand handling.
330 friend class HandleSDNode;
331
332 void setUser(SDNode *p) { User = p; }
333
334 /// Remove this use from its existing use list, assign it the
335 /// given value, and add it to the new value's node's use list.
336 inline void set(const SDValue &V);
337 /// Like set, but only supports initializing a newly-allocated
338 /// SDUse with a non-null value.
339 inline void setInitial(const SDValue &V);
340 /// Like set, but only sets the Node portion of the value,
341 /// leaving the ResNo portion unmodified.
342 inline void setNode(SDNode *N);
343
344 void addToList(SDUse **List) {
345 Next = *List;
346 if (Next) Next->Prev = &Next;
347 Prev = List;
348 *List = this;
349 }
350
351 void removeFromList() {
352 *Prev = Next;
353 if (Next) Next->Prev = Prev;
354 }
355};
356
357/// simplify_type specializations - Allow casting operators to work directly on
358/// SDValues as if they were SDNode*'s.
359template<> struct simplify_type<SDUse> {
360 using SimpleType = SDNode *;
361
362 static SimpleType getSimplifiedValue(SDUse &Val) {
363 return Val.getNode();
364 }
365};
366
367/// These are IR-level optimization flags that may be propagated to SDNodes.
368/// TODO: This data structure should be shared by the IR optimizer and the
369/// the backend.
370struct SDNodeFlags {
371private:
372 bool NoUnsignedWrap : 1;
373 bool NoSignedWrap : 1;
374 bool Exact : 1;
375 bool NoNaNs : 1;
376 bool NoInfs : 1;
377 bool NoSignedZeros : 1;
378 bool AllowReciprocal : 1;
379 bool AllowContract : 1;
380 bool ApproximateFuncs : 1;
381 bool AllowReassociation : 1;
382
383 // We assume instructions do not raise floating-point exceptions by default,
384 // and only those marked explicitly may do so. We could choose to represent
385 // this via a positive "FPExcept" flags like on the MI level, but having a
386 // negative "NoFPExcept" flag here (that defaults to true) makes the flag
387 // intersection logic more straightforward.
388 bool NoFPExcept : 1;
389
390public:
391 /// Default constructor turns off all optimization flags.
392 SDNodeFlags()
393 : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false),
394 NoInfs(false), NoSignedZeros(false), AllowReciprocal(false),
395 AllowContract(false), ApproximateFuncs(false),
396 AllowReassociation(false), NoFPExcept(false) {}
397
398 /// Propagate the fast-math-flags from an IR FPMathOperator.
399 void copyFMF(const FPMathOperator &FPMO) {
400 setNoNaNs(FPMO.hasNoNaNs());
401 setNoInfs(FPMO.hasNoInfs());
402 setNoSignedZeros(FPMO.hasNoSignedZeros());
403 setAllowReciprocal(FPMO.hasAllowReciprocal());
404 setAllowContract(FPMO.hasAllowContract());
405 setApproximateFuncs(FPMO.hasApproxFunc());
406 setAllowReassociation(FPMO.hasAllowReassoc());
407 }
408
409 // These are mutators for each flag.
410 void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
411 void setNoSignedWrap(bool b) { NoSignedWrap = b; }
412 void setExact(bool b) { Exact = b; }
413 void setNoNaNs(bool b) { NoNaNs = b; }
414 void setNoInfs(bool b) { NoInfs = b; }
415 void setNoSignedZeros(bool b) { NoSignedZeros = b; }
416 void setAllowReciprocal(bool b) { AllowReciprocal = b; }
417 void setAllowContract(bool b) { AllowContract = b; }
418 void setApproximateFuncs(bool b) { ApproximateFuncs = b; }
419 void setAllowReassociation(bool b) { AllowReassociation = b; }
420 void setNoFPExcept(bool b) { NoFPExcept = b; }
421
422 // These are accessors for each flag.
423 bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
424 bool hasNoSignedWrap() const { return NoSignedWrap; }
425 bool hasExact() const { return Exact; }
426 bool hasNoNaNs() const { return NoNaNs; }
427 bool hasNoInfs() const { return NoInfs; }
428 bool hasNoSignedZeros() const { return NoSignedZeros; }
429 bool hasAllowReciprocal() const { return AllowReciprocal; }
430 bool hasAllowContract() const { return AllowContract; }
431 bool hasApproximateFuncs() const { return ApproximateFuncs; }
432 bool hasAllowReassociation() const { return AllowReassociation; }
433 bool hasNoFPExcept() const { return NoFPExcept; }
434
435 /// Clear any flags in this flag set that aren't also set in Flags. All
436 /// flags will be cleared if Flags are undefined.
437 void intersectWith(const SDNodeFlags Flags) {
438 NoUnsignedWrap &= Flags.NoUnsignedWrap;
439 NoSignedWrap &= Flags.NoSignedWrap;
440 Exact &= Flags.Exact;
441 NoNaNs &= Flags.NoNaNs;
442 NoInfs &= Flags.NoInfs;
443 NoSignedZeros &= Flags.NoSignedZeros;
444 AllowReciprocal &= Flags.AllowReciprocal;
445 AllowContract &= Flags.AllowContract;
446 ApproximateFuncs &= Flags.ApproximateFuncs;
447 AllowReassociation &= Flags.AllowReassociation;
448 NoFPExcept &= Flags.NoFPExcept;
449 }
450};
451
452/// Represents one node in the SelectionDAG.
453///
454class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
455private:
456 /// The operation that this node performs.
457 int16_t NodeType;
458
459protected:
460 // We define a set of mini-helper classes to help us interpret the bits in our
461 // SubclassData. These are designed to fit within a uint16_t so they pack
462 // with NodeType.
463
464#if defined(_AIX) && (!defined(__GNUC__4) || defined(__clang__1))
465// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
466// and give the `pack` pragma push semantics.
467#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")pack(2)
468#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")pack(pop)
469#else
470#define BEGIN_TWO_BYTE_PACK()
471#define END_TWO_BYTE_PACK()
472#endif
473
474BEGIN_TWO_BYTE_PACK()
475 class SDNodeBitfields {
476 friend class SDNode;
477 friend class MemIntrinsicSDNode;
478 friend class MemSDNode;
479 friend class SelectionDAG;
480
481 uint16_t HasDebugValue : 1;
482 uint16_t IsMemIntrinsic : 1;
483 uint16_t IsDivergent : 1;
484 };
485 enum { NumSDNodeBits = 3 };
486
487 class ConstantSDNodeBitfields {
488 friend class ConstantSDNode;
489
490 uint16_t : NumSDNodeBits;
491
492 uint16_t IsOpaque : 1;
493 };
494
495 class MemSDNodeBitfields {
496 friend class MemSDNode;
497 friend class MemIntrinsicSDNode;
498 friend class AtomicSDNode;
499
500 uint16_t : NumSDNodeBits;
501
502 uint16_t IsVolatile : 1;
503 uint16_t IsNonTemporal : 1;
504 uint16_t IsDereferenceable : 1;
505 uint16_t IsInvariant : 1;
506 };
507 enum { NumMemSDNodeBits = NumSDNodeBits + 4 };
508
509 class LSBaseSDNodeBitfields {
510 friend class LSBaseSDNode;
511 friend class VPLoadStoreSDNode;
512 friend class MaskedLoadStoreSDNode;
513 friend class MaskedGatherScatterSDNode;
514 friend class VPGatherScatterSDNode;
515
516 uint16_t : NumMemSDNodeBits;
517
518 // This storage is shared between disparate class hierarchies to hold an
519 // enumeration specific to the class hierarchy in use.
520 // LSBaseSDNode => enum ISD::MemIndexedMode
521 // VPLoadStoreBaseSDNode => enum ISD::MemIndexedMode
522 // MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode
523 // VPGatherScatterSDNode => enum ISD::MemIndexType
524 // MaskedGatherScatterSDNode => enum ISD::MemIndexType
525 uint16_t AddressingMode : 3;
526 };
527 enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };
528
529 class LoadSDNodeBitfields {
530 friend class LoadSDNode;
531 friend class VPLoadSDNode;
532 friend class MaskedLoadSDNode;
533 friend class MaskedGatherSDNode;
534 friend class VPGatherSDNode;
535
536 uint16_t : NumLSBaseSDNodeBits;
537
538 uint16_t ExtTy : 2; // enum ISD::LoadExtType
539 uint16_t IsExpanding : 1;
540 };
541
542 class StoreSDNodeBitfields {
543 friend class StoreSDNode;
544 friend class VPStoreSDNode;
545 friend class MaskedStoreSDNode;
546 friend class MaskedScatterSDNode;
547 friend class VPScatterSDNode;
548
549 uint16_t : NumLSBaseSDNodeBits;
550
551 uint16_t IsTruncating : 1;
552 uint16_t IsCompressing : 1;
553 };
554
555 union {
556 char RawSDNodeBits[sizeof(uint16_t)];
557 SDNodeBitfields SDNodeBits;
558 ConstantSDNodeBitfields ConstantSDNodeBits;
559 MemSDNodeBitfields MemSDNodeBits;
560 LSBaseSDNodeBitfields LSBaseSDNodeBits;
561 LoadSDNodeBitfields LoadSDNodeBits;
562 StoreSDNodeBitfields StoreSDNodeBits;
563 };
564END_TWO_BYTE_PACK()
565#undef BEGIN_TWO_BYTE_PACK
566#undef END_TWO_BYTE_PACK
567
568 // RawSDNodeBits must cover the entirety of the union. This means that all of
569 // the union's members must have size <= RawSDNodeBits. We write the RHS as
570 // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
571 static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
572 static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
573 static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
574 static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
575 static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
576 static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
577
578private:
579 friend class SelectionDAG;
580 // TODO: unfriend HandleSDNode once we fix its operand handling.
581 friend class HandleSDNode;
582
583 /// Unique id per SDNode in the DAG.
584 int NodeId = -1;
585
586 /// The values that are used by this operation.
587 SDUse *OperandList = nullptr;
588
589 /// The types of the values this node defines. SDNode's may
590 /// define multiple values simultaneously.
591 const EVT *ValueList;
592
593 /// List of uses for this SDNode.
594 SDUse *UseList = nullptr;
595
596 /// The number of entries in the Operand/Value list.
597 unsigned short NumOperands = 0;
598 unsigned short NumValues;
599
600 // The ordering of the SDNodes. It roughly corresponds to the ordering of the
601 // original LLVM instructions.
602 // This is used for turning off scheduling, because we'll forgo
603 // the normal scheduling algorithms and output the instructions according to
604 // this ordering.
605 unsigned IROrder;
606
607 /// Source line information.
608 DebugLoc debugLoc;
609
610 /// Return a pointer to the specified value type.
611 static const EVT *getValueTypeList(EVT VT);
612
613 SDNodeFlags Flags;
614
615public:
616 /// Unique and persistent id per SDNode in the DAG.
617 /// Used for debug printing.
618 uint16_t PersistentId;
619
620 //===--------------------------------------------------------------------===//
621 // Accessors
622 //
623
624 /// Return the SelectionDAG opcode value for this node. For
625 /// pre-isel nodes (those for which isMachineOpcode returns false), these
626 /// are the opcode values in the ISD and <target>ISD namespaces. For
627 /// post-isel opcodes, see getMachineOpcode.
628 unsigned getOpcode() const { return (unsigned short)NodeType; }
629
630 /// Test if this node has a target-specific opcode (in the
631 /// \<target\>ISD namespace).
632 bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
633
634 /// Test if this node has a target-specific opcode that may raise
635 /// FP exceptions (in the \<target\>ISD namespace and greater than
636 /// FIRST_TARGET_STRICTFP_OPCODE). Note that all target memory
637 /// opcode are currently automatically considered to possibly raise
638 /// FP exceptions as well.
639 bool isTargetStrictFPOpcode() const {
640 return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE;
641 }
642
643 /// Test if this node has a target-specific
644 /// memory-referencing opcode (in the \<target\>ISD namespace and
645 /// greater than FIRST_TARGET_MEMORY_OPCODE).
646 bool isTargetMemoryOpcode() const {
647 return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
648 }
649
650 /// Return true if the type of the node type undefined.
651 bool isUndef() const { return NodeType == ISD::UNDEF; }
652
653 /// Test if this node is a memory intrinsic (with valid pointer information).
654 /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
655 /// non-memory intrinsics (with chains) that are not really instances of
656 /// MemSDNode. For such nodes, we need some extra state to determine the
657 /// proper classof relationship.
658 bool isMemIntrinsic() const {
659 return (NodeType == ISD::INTRINSIC_W_CHAIN ||
660 NodeType == ISD::INTRINSIC_VOID) &&
661 SDNodeBits.IsMemIntrinsic;
662 }
663
664 /// Test if this node is a strict floating point pseudo-op.
665 bool isStrictFPOpcode() {
666 switch (NodeType) {
667 default:
668 return false;
669 case ISD::STRICT_FP16_TO_FP:
670 case ISD::STRICT_FP_TO_FP16:
671#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
672 case ISD::STRICT_##DAGN:
673#include "llvm/IR/ConstrainedOps.def"
674 return true;
675 }
676 }
677
678 /// Test if this node is a vector predication operation.
679 bool isVPOpcode() const { return ISD::isVPOpcode(getOpcode()); }
680
681 /// Test if this node has a post-isel opcode, directly
682 /// corresponding to a MachineInstr opcode.
683 bool isMachineOpcode() const { return NodeType < 0; }
684
685 /// This may only be called if isMachineOpcode returns
686 /// true. It returns the MachineInstr opcode value that the node's opcode
687 /// corresponds to.
688 unsigned getMachineOpcode() const {
689 assert(isMachineOpcode() && "Not a MachineInstr opcode!")(static_cast <bool> (isMachineOpcode() && "Not a MachineInstr opcode!"
) ? void (0) : __assert_fail ("isMachineOpcode() && \"Not a MachineInstr opcode!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 689, __extension__
__PRETTY_FUNCTION__))
;
690 return ~NodeType;
691 }
692
693 bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
694 void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }
695
696 bool isDivergent() const { return SDNodeBits.IsDivergent; }
697
698 /// Return true if there are no uses of this node.
699 bool use_empty() const { return UseList == nullptr; }
700
701 /// Return true if there is exactly one use of this node.
702 bool hasOneUse() const { return hasSingleElement(uses()); }
703
704 /// Return the number of uses of this node. This method takes
705 /// time proportional to the number of uses.
706 size_t use_size() const { return std::distance(use_begin(), use_end()); }
707
708 /// Return the unique node id.
709 int getNodeId() const { return NodeId; }
710
711 /// Set unique node id.
712 void setNodeId(int Id) { NodeId = Id; }
713
714 /// Return the node ordering.
715 unsigned getIROrder() const { return IROrder; }
716
717 /// Set the node ordering.
718 void setIROrder(unsigned Order) { IROrder = Order; }
719
720 /// Return the source location info.
721 const DebugLoc &getDebugLoc() const { return debugLoc; }
722
723 /// Set source location info. Try to avoid this, putting
724 /// it in the constructor is preferable.
725 void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
726
727 /// This class provides iterator support for SDUse
728 /// operands that use a specific SDNode.
729 class use_iterator {
730 friend class SDNode;
731
732 SDUse *Op = nullptr;
733
734 explicit use_iterator(SDUse *op) : Op(op) {}
735
736 public:
737 using iterator_category = std::forward_iterator_tag;
738 using value_type = SDUse;
739 using difference_type = std::ptrdiff_t;
740 using pointer = value_type *;
741 using reference = value_type &;
742
743 use_iterator() = default;
744 use_iterator(const use_iterator &I) : Op(I.Op) {}
745
746 bool operator==(const use_iterator &x) const {
747 return Op == x.Op;
748 }
749 bool operator!=(const use_iterator &x) const {
750 return !operator==(x);
751 }
752
753 /// Return true if this iterator is at the end of uses list.
754 bool atEnd() const { return Op == nullptr; }
755
756 // Iterator traversal: forward iteration only.
757 use_iterator &operator++() { // Preincrement
758 assert(Op && "Cannot increment end iterator!")(static_cast <bool> (Op && "Cannot increment end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot increment end iterator!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 758, __extension__
__PRETTY_FUNCTION__))
;
759 Op = Op->getNext();
760 return *this;
761 }
762
763 use_iterator operator++(int) { // Postincrement
764 use_iterator tmp = *this; ++*this; return tmp;
765 }
766
767 /// Retrieve a pointer to the current user node.
768 SDNode *operator*() const {
769 assert(Op && "Cannot dereference end iterator!")(static_cast <bool> (Op && "Cannot dereference end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 769, __extension__
__PRETTY_FUNCTION__))
;
770 return Op->getUser();
771 }
772
773 SDNode *operator->() const { return operator*(); }
774
775 SDUse &getUse() const { return *Op; }
776
777 /// Retrieve the operand # of this use in its user.
778 unsigned getOperandNo() const {
779 assert(Op && "Cannot dereference end iterator!")(static_cast <bool> (Op && "Cannot dereference end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 779, __extension__
__PRETTY_FUNCTION__))
;
780 return (unsigned)(Op - Op->getUser()->OperandList);
781 }
782 };
783
784 /// Provide iteration support to walk over all uses of an SDNode.
785 use_iterator use_begin() const {
786 return use_iterator(UseList);
787 }
788
789 static use_iterator use_end() { return use_iterator(nullptr); }
790
791 inline iterator_range<use_iterator> uses() {
792 return make_range(use_begin(), use_end());
793 }
794 inline iterator_range<use_iterator> uses() const {
795 return make_range(use_begin(), use_end());
796 }
797
798 /// Return true if there are exactly NUSES uses of the indicated value.
799 /// This method ignores uses of other values defined by this operation.
800 bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;
801
802 /// Return true if there are any use of the indicated value.
803 /// This method ignores uses of other values defined by this operation.
804 bool hasAnyUseOfValue(unsigned Value) const;
805
806 /// Return true if this node is the only use of N.
807 bool isOnlyUserOf(const SDNode *N) const;
808
809 /// Return true if this node is an operand of N.
810 bool isOperandOf(const SDNode *N) const;
811
812 /// Return true if this node is a predecessor of N.
813 /// NOTE: Implemented on top of hasPredecessor and every bit as
814 /// expensive. Use carefully.
815 bool isPredecessorOf(const SDNode *N) const {
816 return N->hasPredecessor(this);
817 }
818
819 /// Return true if N is a predecessor of this node.
820 /// N is either an operand of this node, or can be reached by recursively
821 /// traversing up the operands.
822 /// NOTE: This is an expensive method. Use it carefully.
823 bool hasPredecessor(const SDNode *N) const;
824
825 /// Returns true if N is a predecessor of any node in Worklist. This
826 /// helper keeps Visited and Worklist sets externally to allow union
827 /// searches to be performed in parallel, caching of results across
828 /// queries and incremental addition to Worklist. Stops early if N is
829 /// found but will resume. Remember to clear Visited and Worklists
830 /// if DAG changes. MaxSteps gives a maximum number of nodes to visit before
831 /// giving up. The TopologicalPrune flag signals that positive NodeIds are
832 /// topologically ordered (Operands have strictly smaller node id) and search
833 /// can be pruned leveraging this.
834 static bool hasPredecessorHelper(const SDNode *N,
835 SmallPtrSetImpl<const SDNode *> &Visited,
836 SmallVectorImpl<const SDNode *> &Worklist,
837 unsigned int MaxSteps = 0,
838 bool TopologicalPrune = false) {
// Nodes skipped by topological pruning; re-queued before returning so that
// subsequent incremental calls still consider them.
839 SmallVector<const SDNode *, 8> DeferredNodes;
840 if (Visited.count(N))
841 return true;
842
843 // Node Id's are assigned in three places: As a topological
844 // ordering (> 0), during legalization (results in values set to
845 // 0), new nodes (set to -1). If N has a topological id then we
846 // know that all nodes with ids smaller than it cannot be
847 // successors and we need not check them. Filter out all nodes
848 // that can't be matched. We add them to the worklist before exit
849 // in case of multiple calls. Note that during selection the topological id
850 // may be violated if a node's predecessor is selected before it. We mark
851 // this at selection negating the id of unselected successors and
852 // restricting topological pruning to positive ids.
853
854 int NId = N->getNodeId();
855 // If we invalidated the Id (negated during selection), reconstruct the
// original NId.
856 if (NId < -1)
857 NId = -(NId + 1);
858
859 bool Found = false;
860 while (!Worklist.empty()) {
861 const SDNode *M = Worklist.pop_back_val();
862 int MId = M->getNodeId();
863 if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
864 (MId > 0) && (MId < NId)) {
865 DeferredNodes.push_back(M);
866 continue;
867 }
// Expand M: visit each operand once; finish expanding M even after a match
// so Visited/Worklist stay consistent for future incremental queries.
868 for (const SDValue &OpV : M->op_values()) {
869 SDNode *Op = OpV.getNode();
870 if (Visited.insert(Op).second)
871 Worklist.push_back(Op);
872 if (Op == N)
873 Found = true;
874 }
875 if (Found)
876 break;
877 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
878 break;
879 }
880 // Push deferred nodes back on worklist.
881 Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
882 // If we bailed early, conservatively return found.
883 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
884 return true;
885 return Found;
886 }
887
888 /// Return true if all the users of N are contained in Nodes.
889 /// NOTE: Requires at least one match, but doesn't require them all.
890 static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);
891
892 /// Return the number of values used by this operation.
893 unsigned getNumOperands() const { return NumOperands; }
894
895 /// Return the maximum number of operands that a SDNode can hold.
896 static constexpr size_t getMaxNumOperands() {
897 return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
898 }
899
900 /// Helper method returns the integer value of a ConstantSDNode operand.
901 inline uint64_t getConstantOperandVal(unsigned Num) const;
902
903 /// Helper method returns the APInt of a ConstantSDNode operand.
904 inline const APInt &getConstantOperandAPInt(unsigned Num) const;
905
906 const SDValue &getOperand(unsigned Num) const {
907 assert(Num < NumOperands && "Invalid child # of SDNode!")(static_cast <bool> (Num < NumOperands && "Invalid child # of SDNode!"
) ? void (0) : __assert_fail ("Num < NumOperands && \"Invalid child # of SDNode!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 907, __extension__
__PRETTY_FUNCTION__))
;
908 return OperandList[Num];
909 }
910
911 using op_iterator = SDUse *;
912
913 op_iterator op_begin() const { return OperandList; }
914 op_iterator op_end() const { return OperandList+NumOperands; }
915 ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
916
917 /// Iterator for directly iterating over the operand SDValue's.
918 struct value_op_iterator
919 : iterator_adaptor_base<value_op_iterator, op_iterator,
920 std::random_access_iterator_tag, SDValue,
921 ptrdiff_t, value_op_iterator *,
922 value_op_iterator *> {
923 explicit value_op_iterator(SDUse *U = nullptr)
924 : iterator_adaptor_base(U) {}
925
926 const SDValue &operator*() const { return I->get(); }
927 };
928
929 iterator_range<value_op_iterator> op_values() const {
930 return make_range(value_op_iterator(op_begin()),
931 value_op_iterator(op_end()));
932 }
933
934 SDVTList getVTList() const {
935 SDVTList X = { ValueList, NumValues };
936 return X;
937 }
938
939 /// If this node has a glue operand, return the node
940 /// to which the glue operand points. Otherwise return NULL.
941 SDNode *getGluedNode() const {
942 if (getNumOperands() != 0 &&
943 getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
944 return getOperand(getNumOperands()-1).getNode();
945 return nullptr;
946 }
947
948 /// If this node has a glue value with a user, return
949 /// the user (there is at most one). Otherwise return NULL.
950 SDNode *getGluedUser() const {
951 for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
952 if (UI.getUse().get().getValueType() == MVT::Glue)
953 return *UI;
954 return nullptr;
955 }
956
957 SDNodeFlags getFlags() const { return Flags; }
958 void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }
959
960 /// Clear any flags in this node that aren't also set in Flags.
961 /// If Flags is not in a defined state then this has no effect.
962 void intersectFlagsWith(const SDNodeFlags Flags);
963
964 /// Return the number of values defined/returned by this operator.
965 unsigned getNumValues() const { return NumValues; }
966
967 /// Return the type of a specified result.
968 EVT getValueType(unsigned ResNo) const {
969 assert(ResNo < NumValues && "Illegal result number!")(static_cast <bool> (ResNo < NumValues && "Illegal result number!"
) ? void (0) : __assert_fail ("ResNo < NumValues && \"Illegal result number!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 969, __extension__
__PRETTY_FUNCTION__))
;
970 return ValueList[ResNo];
971 }
972
973 /// Return the type of a specified result as a simple type.
974 MVT getSimpleValueType(unsigned ResNo) const {
975 return getValueType(ResNo).getSimpleVT();
976 }
977
978 /// Returns MVT::getSizeInBits(getValueType(ResNo)).
979 ///
980 /// If the value type is a scalable vector type, the scalable property will
981 /// be set and the runtime size will be a positive integer multiple of the
982 /// base size.
983 TypeSize getValueSizeInBits(unsigned ResNo) const {
984 return getValueType(ResNo).getSizeInBits();
985 }
986
987 using value_iterator = const EVT *;
988
989 value_iterator value_begin() const { return ValueList; }
990 value_iterator value_end() const { return ValueList+NumValues; }
991 iterator_range<value_iterator> values() const {
992 return llvm::make_range(value_begin(), value_end());
993 }
994
995 /// Return the opcode of this operation for printing.
996 std::string getOperationName(const SelectionDAG *G = nullptr) const;
997 static const char* getIndexedModeName(ISD::MemIndexedMode AM);
998 void print_types(raw_ostream &OS, const SelectionDAG *G) const;
999 void print_details(raw_ostream &OS, const SelectionDAG *G) const;
1000 void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
1001 void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
1002
1003 /// Print a SelectionDAG node and all children down to
1004 /// the leaves. The given SelectionDAG allows target-specific nodes
1005 /// to be printed in human-readable form. Unlike printr, this will
1006 /// print the whole DAG, including children that appear multiple
1007 /// times.
1008 ///
1009 void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
1010
1011 /// Print a SelectionDAG node and children up to
1012 /// depth "depth." The given SelectionDAG allows target-specific
1013 /// nodes to be printed in human-readable form. Unlike printr, this
1014 /// will print children that appear multiple times wherever they are
1015 /// used.
1016 ///
1017 void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
1018 unsigned depth = 100) const;
1019
1020 /// Dump this node, for debugging.
1021 void dump() const;
1022
1023 /// Dump (recursively) this node and its use-def subgraph.
1024 void dumpr() const;
1025
1026 /// Dump this node, for debugging.
1027 /// The given SelectionDAG allows target-specific nodes to be printed
1028 /// in human-readable form.
1029 void dump(const SelectionDAG *G) const;
1030
1031 /// Dump (recursively) this node and its use-def subgraph.
1032 /// The given SelectionDAG allows target-specific nodes to be printed
1033 /// in human-readable form.
1034 void dumpr(const SelectionDAG *G) const;
1035
1036 /// printrFull to dbgs(). The given SelectionDAG allows
1037 /// target-specific nodes to be printed in human-readable form.
1038 /// Unlike dumpr, this will print the whole DAG, including children
1039 /// that appear multiple times.
1040 void dumprFull(const SelectionDAG *G = nullptr) const;
1041
1042 /// printrWithDepth to dbgs(). The given
1043 /// SelectionDAG allows target-specific nodes to be printed in
1044 /// human-readable form. Unlike dumpr, this will print children
1045 /// that appear multiple times wherever they are used.
1046 ///
1047 void dumprWithDepth(const SelectionDAG *G = nullptr,
1048 unsigned depth = 100) const;
1049
1050 /// Gather unique data for the node.
1051 void Profile(FoldingSetNodeID &ID) const;
1052
1053 /// This method should only be used by the SDUse class.
1054 void addUse(SDUse &U) { U.addToList(&UseList); }
1055
1056protected:
1057 static SDVTList getSDVTList(EVT VT) {
1058 SDVTList Ret = { getValueTypeList(VT), 1 };
1059 return Ret;
1060 }
1061
1062 /// Create an SDNode.
1063 ///
1064 /// SDNodes are created without any operands, and never own the operand
1065 /// storage. To add operands, see SelectionDAG::createOperands.
1066 SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
1067 : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
1068 IROrder(Order), debugLoc(std::move(dl)) {
1069 memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
1070 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor")(static_cast <bool> (debugLoc.hasTrivialDestructor() &&
"Expected trivial destructor") ? void (0) : __assert_fail ("debugLoc.hasTrivialDestructor() && \"Expected trivial destructor\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1070, __extension__
__PRETTY_FUNCTION__))
;
1071 assert(NumValues == VTs.NumVTs &&(static_cast <bool> (NumValues == VTs.NumVTs &&
"NumValues wasn't wide enough for its operands!") ? void (0)
: __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1072, __extension__
__PRETTY_FUNCTION__))
1072 "NumValues wasn't wide enough for its operands!")(static_cast <bool> (NumValues == VTs.NumVTs &&
"NumValues wasn't wide enough for its operands!") ? void (0)
: __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1072, __extension__
__PRETTY_FUNCTION__))
;
1073 }
1074
1075 /// Release the operands and set this node to have zero operands.
1076 void DropOperands();
1077};
1078
1079/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
1080/// into SDNode creation functions.
1081/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
1082/// from the original Instruction, and IROrder is the ordinal position of
1083/// the instruction.
1084/// When an SDNode is created after the DAG is being built, both DebugLoc and
1085/// the IROrder are propagated from the original SDNode.
1086/// So SDLoc class provides two constructors besides the default one, one to
1087/// be used by the DAGBuilder, the other to be used by others.
1088class SDLoc {
1089private:
1090 DebugLoc DL;
1091 int IROrder = 0;
1092
1093public:
1094 SDLoc() = default;
1095 SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
1096 SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
1097 SDLoc(const Instruction *I, int Order) : IROrder(Order) {
1098 assert(Order >= 0 && "bad IROrder")(static_cast <bool> (Order >= 0 && "bad IROrder"
) ? void (0) : __assert_fail ("Order >= 0 && \"bad IROrder\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1098, __extension__
__PRETTY_FUNCTION__))
;
1099 if (I)
1100 DL = I->getDebugLoc();
1101 }
1102
1103 unsigned getIROrder() const { return IROrder; }
1104 const DebugLoc &getDebugLoc() const { return DL; }
1105};
1106
1107// Define inline functions from the SDValue class.
1108
1109inline SDValue::SDValue(SDNode *node, unsigned resno)
1110 : Node(node), ResNo(resno) {
1111 // Explicitly check for !ResNo to avoid use-after-free, because there are
1112 // callers that use SDValue(N, 0) with a deleted N to indicate successful
1113 // combines.
1114 assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&(static_cast <bool> ((!Node || !ResNo || ResNo < Node
->getNumValues()) && "Invalid result number for the given node!"
) ? void (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1115, __extension__
__PRETTY_FUNCTION__))
1115 "Invalid result number for the given node!")(static_cast <bool> ((!Node || !ResNo || ResNo < Node
->getNumValues()) && "Invalid result number for the given node!"
) ? void (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1115, __extension__
__PRETTY_FUNCTION__))
;
1116 assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.")(static_cast <bool> (ResNo < -2U && "Cannot use result numbers reserved for DenseMaps."
) ? void (0) : __assert_fail ("ResNo < -2U && \"Cannot use result numbers reserved for DenseMaps.\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1116, __extension__
__PRETTY_FUNCTION__))
;
1117}
1118
1119inline unsigned SDValue::getOpcode() const {
1120 return Node->getOpcode();
1121}
1122
1123inline EVT SDValue::getValueType() const {
1124 return Node->getValueType(ResNo);
1125}
1126
1127inline unsigned SDValue::getNumOperands() const {
1128 return Node->getNumOperands();
1129}
1130
1131inline const SDValue &SDValue::getOperand(unsigned i) const {
1132 return Node->getOperand(i);
1133}
1134
1135inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
1136 return Node->getConstantOperandVal(i);
1137}
1138
1139inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
1140 return Node->getConstantOperandAPInt(i);
1141}
1142
1143inline bool SDValue::isTargetOpcode() const {
1144 return Node->isTargetOpcode();
1145}
1146
1147inline bool SDValue::isTargetMemoryOpcode() const {
1148 return Node->isTargetMemoryOpcode();
1149}
1150
1151inline bool SDValue::isMachineOpcode() const {
1152 return Node->isMachineOpcode();
1153}
1154
1155inline unsigned SDValue::getMachineOpcode() const {
1156 return Node->getMachineOpcode();
1157}
1158
1159inline bool SDValue::isUndef() const {
1160 return Node->isUndef();
1161}
1162
1163inline bool SDValue::use_empty() const {
1164 return !Node->hasAnyUseOfValue(ResNo);
1165}
1166
1167inline bool SDValue::hasOneUse() const {
1168 return Node->hasNUsesOfValue(1, ResNo);
1169}
1170
1171inline const DebugLoc &SDValue::getDebugLoc() const {
1172 return Node->getDebugLoc();
1173}
1174
1175inline void SDValue::dump() const {
1176 return Node->dump();
1177}
1178
1179inline void SDValue::dump(const SelectionDAG *G) const {
1180 return Node->dump(G);
1181}
1182
1183inline void SDValue::dumpr() const {
1184 return Node->dumpr();
1185}
1186
1187inline void SDValue::dumpr(const SelectionDAG *G) const {
1188 return Node->dumpr(G);
1189}
1190
1191// Define inline functions from the SDUse class.
1192
1193inline void SDUse::set(const SDValue &V) {
1194 if (Val.getNode()) removeFromList();
1195 Val = V;
1196 if (V.getNode()) V.getNode()->addUse(*this);
1197}
1198
1199inline void SDUse::setInitial(const SDValue &V) {
1200 Val = V;
1201 V.getNode()->addUse(*this);
1202}
1203
1204inline void SDUse::setNode(SDNode *N) {
1205 if (Val.getNode()) removeFromList();
1206 Val.setNode(N);
1207 if (N) N->addUse(*this);
1208}
1209
1210/// This class is used to form a handle around another node that
1211/// is persistent and is updated across invocations of replaceAllUsesWith on its
1212/// operand. This node should be directly created by end-users and not added to
1213/// the AllNodes list.
1214class HandleSDNode : public SDNode {
1215 SDUse Op;
1216
1217public:
1218 explicit HandleSDNode(SDValue X)
1219 : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
1220 // HandleSDNodes are never inserted into the DAG, so they won't be
1221 // auto-numbered. Use ID 65535 as a sentinel.
1222 PersistentId = 0xffff;
1223
1224 // Manually set up the operand list. This node type is special in that it's
1225 // always stack allocated and SelectionDAG does not manage its operands.
1226 // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
1227 // be so special.
1228 Op.setUser(this);
1229 Op.setInitial(X);
1230 NumOperands = 1;
1231 OperandList = &Op;
1232 }
1233 ~HandleSDNode();
1234
1235 const SDValue &getValue() const { return Op; }
1236};
1237
1238class AddrSpaceCastSDNode : public SDNode {
1239private:
1240 unsigned SrcAddrSpace;
1241 unsigned DestAddrSpace;
1242
1243public:
1244 AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
1245 unsigned SrcAS, unsigned DestAS);
1246
1247 unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
1248 unsigned getDestAddressSpace() const { return DestAddrSpace; }
1249
1250 static bool classof(const SDNode *N) {
1251 return N->getOpcode() == ISD::ADDRSPACECAST;
1252 }
1253};
1254
1255/// This is an abstract virtual class for memory operations.
1256class MemSDNode : public SDNode {
1257private:
1258 // VT of in-memory value.
1259 EVT MemoryVT;
1260
1261protected:
1262 /// Memory reference information.
1263 MachineMemOperand *MMO;
1264
1265public:
1266 MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
1267 EVT memvt, MachineMemOperand *MMO);
1268
1269 bool readMem() const { return MMO->isLoad(); }
1270 bool writeMem() const { return MMO->isStore(); }
1271
1272 /// Returns alignment and volatility of the memory access
1273 Align getOriginalAlign() const { return MMO->getBaseAlign(); }
1274 Align getAlign() const { return MMO->getAlign(); }
1275 // FIXME: Remove once transition to getAlign is over.
1276 unsigned getAlignment() const { return MMO->getAlign().value(); }
1277
1278 /// Return the SubclassData value, without HasDebugValue. This contains an
1279 /// encoding of the volatile flag, as well as bits used by subclasses. This
1280 /// function should only be used to compute a FoldingSetNodeID value.
1281 /// The HasDebugValue bit is masked out because CSE map needs to match
1282 /// nodes with debug info with nodes without debug info. Same is about
1283 /// isDivergent bit.
1284 unsigned getRawSubclassData() const {
1285 uint16_t Data;
1286 union {
1287 char RawSDNodeBits[sizeof(uint16_t)];
1288 SDNodeBitfields SDNodeBits;
1289 };
1290 memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
1291 SDNodeBits.HasDebugValue = 0;
1292 SDNodeBits.IsDivergent = false;
1293 memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
1294 return Data;
1295 }
1296
1297 bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
1298 bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
1299 bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
1300 bool isInvariant() const { return MemSDNodeBits.IsInvariant; }
1301
1302 // Returns the offset from the location of the access.
1303 int64_t getSrcValueOffset() const { return MMO->getOffset(); }
1304
1305 /// Returns the AA info that describes the dereference.
1306 AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }
1307
1308 /// Returns the Ranges that describes the dereference.
1309 const MDNode *getRanges() const { return MMO->getRanges(); }
1310
1311 /// Returns the synchronization scope ID for this memory operation.
1312 SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }
1313
1314 /// Return the atomic ordering requirements for this memory operation. For
1315 /// cmpxchg atomic operations, return the atomic ordering requirements when
1316 /// store occurs.
1317 AtomicOrdering getSuccessOrdering() const {
1318 return MMO->getSuccessOrdering();
1319 }
1320
1321 /// Return a single atomic ordering that is at least as strong as both the
1322 /// success and failure orderings for an atomic operation. (For operations
1323 /// other than cmpxchg, this is equivalent to getSuccessOrdering().)
1324 AtomicOrdering getMergedOrdering() const { return MMO->getMergedOrdering(); }
1325
1326 /// Return true if the memory operation ordering is Unordered or higher.
1327 bool isAtomic() const { return MMO->isAtomic(); }
1328
1329 /// Returns true if the memory operation doesn't imply any ordering
1330 /// constraints on surrounding memory operations beyond the normal memory
1331 /// aliasing rules.
1332 bool isUnordered() const { return MMO->isUnordered(); }
1333
1334 /// Returns true if the memory operation is neither atomic or volatile.
1335 bool isSimple() const { return !isAtomic() && !isVolatile(); }
14
Assuming the condition is true
15
Returning the value 1, which participates in a condition later
1336
1337 /// Return the type of the in-memory value.
1338 EVT getMemoryVT() const { return MemoryVT; }
1339
1340 /// Return a MachineMemOperand object describing the memory
1341 /// reference performed by operation.
1342 MachineMemOperand *getMemOperand() const { return MMO; }
1343
1344 const MachinePointerInfo &getPointerInfo() const {
1345 return MMO->getPointerInfo();
1346 }
1347
1348 /// Return the address space for the associated pointer
1349 unsigned getAddressSpace() const {
1350 return getPointerInfo().getAddrSpace();
1351 }
1352
1353 /// Update this MemSDNode's MachineMemOperand information
1354 /// to reflect the alignment of NewMMO, if it has a greater alignment.
1355 /// This must only be used when the new alignment applies to all users of
1356 /// this MachineMemOperand.
1357 void refineAlignment(const MachineMemOperand *NewMMO) {
1358 MMO->refineAlignment(NewMMO);
1359 }
1360
1361 const SDValue &getChain() const { return getOperand(0); }
1362
1363 const SDValue &getBasePtr() const {
1364 switch (getOpcode()) {
1365 case ISD::STORE:
1366 case ISD::VP_STORE:
1367 case ISD::MSTORE:
1368 case ISD::VP_SCATTER:
1369 return getOperand(2);
1370 case ISD::MGATHER:
1371 case ISD::MSCATTER:
1372 return getOperand(3);
1373 default:
1374 return getOperand(1);
1375 }
1376 }
1377
1378 // Methods to support isa and dyn_cast
1379 static bool classof(const SDNode *N) {
1380 // For some targets, we lower some target intrinsics to a MemIntrinsicNode
1381 // with either an intrinsic or a target opcode.
1382 switch (N->getOpcode()) {
1383 case ISD::LOAD:
1384 case ISD::STORE:
1385 case ISD::PREFETCH:
1386 case ISD::ATOMIC_CMP_SWAP:
1387 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
1388 case ISD::ATOMIC_SWAP:
1389 case ISD::ATOMIC_LOAD_ADD:
1390 case ISD::ATOMIC_LOAD_SUB:
1391 case ISD::ATOMIC_LOAD_AND:
1392 case ISD::ATOMIC_LOAD_CLR:
1393 case ISD::ATOMIC_LOAD_OR:
1394 case ISD::ATOMIC_LOAD_XOR:
1395 case ISD::ATOMIC_LOAD_NAND:
1396 case ISD::ATOMIC_LOAD_MIN:
1397 case ISD::ATOMIC_LOAD_MAX:
1398 case ISD::ATOMIC_LOAD_UMIN:
1399 case ISD::ATOMIC_LOAD_UMAX:
1400 case ISD::ATOMIC_LOAD_FADD:
1401 case ISD::ATOMIC_LOAD_FSUB:
1402 case ISD::ATOMIC_LOAD:
1403 case ISD::ATOMIC_STORE:
1404 case ISD::MLOAD:
1405 case ISD::MSTORE:
1406 case ISD::MGATHER:
1407 case ISD::MSCATTER:
1408 case ISD::VP_LOAD:
1409 case ISD::VP_STORE:
1410 case ISD::VP_GATHER:
1411 case ISD::VP_SCATTER:
1412 return true;
1413 default:
1414 return N->isMemIntrinsic() || N->isTargetMemoryOpcode();
1415 }
1416 }
1417};
1418
1419/// This is an SDNode representing atomic operations.
1420class AtomicSDNode : public MemSDNode {
1421public:
1422 AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
1423 EVT MemVT, MachineMemOperand *MMO)
1424 : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
1425 assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) ||(static_cast <bool> (((Opc != ISD::ATOMIC_LOAD &&
Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && "then why are we using an AtomicSDNode?"
) ? void (0) : __assert_fail ("((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && \"then why are we using an AtomicSDNode?\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1426, __extension__
__PRETTY_FUNCTION__))
1426 MMO->isAtomic()) && "then why are we using an AtomicSDNode?")(static_cast <bool> (((Opc != ISD::ATOMIC_LOAD &&
Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && "then why are we using an AtomicSDNode?"
) ? void (0) : __assert_fail ("((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && \"then why are we using an AtomicSDNode?\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1426, __extension__
__PRETTY_FUNCTION__))
;
1427 }
1428
1429 const SDValue &getBasePtr() const { return getOperand(1); }
1430 const SDValue &getVal() const { return getOperand(2); }
1431
1432 /// Returns true if this SDNode represents cmpxchg atomic operation, false
1433 /// otherwise.
1434 bool isCompareAndSwap() const {
1435 unsigned Op = getOpcode();
1436 return Op == ISD::ATOMIC_CMP_SWAP ||
1437 Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
1438 }
1439
1440 /// For cmpxchg atomic operations, return the atomic ordering requirements
1441 /// when store does not occur.
1442 AtomicOrdering getFailureOrdering() const {
1443 assert(isCompareAndSwap() && "Must be cmpxchg operation")(static_cast <bool> (isCompareAndSwap() && "Must be cmpxchg operation"
) ? void (0) : __assert_fail ("isCompareAndSwap() && \"Must be cmpxchg operation\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1443, __extension__
__PRETTY_FUNCTION__))
;
1444 return MMO->getFailureOrdering();
1445 }
1446
1447 // Methods to support isa and dyn_cast
1448 static bool classof(const SDNode *N) {
1449 return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1450 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1451 N->getOpcode() == ISD::ATOMIC_SWAP ||
1452 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1453 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1454 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1455 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1456 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1457 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1458 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1459 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1460 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1461 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1462 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1463 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1464 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1465 N->getOpcode() == ISD::ATOMIC_LOAD ||
1466 N->getOpcode() == ISD::ATOMIC_STORE;
1467 }
1468};
1469
1470/// This SDNode is used for target intrinsics that touch
1471/// memory and need an associated MachineMemOperand. Its opcode may be
1472/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
1473/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
1474class MemIntrinsicSDNode : public MemSDNode {
1475public:
1476 MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
1477 SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
1478 : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
1479 SDNodeBits.IsMemIntrinsic = true;
1480 }
1481
1482 // Methods to support isa and dyn_cast
1483 static bool classof(const SDNode *N) {
1484 // We lower some target intrinsics to their target opcode
1485 // early a node with a target opcode can be of this class
1486 return N->isMemIntrinsic() ||
1487 N->getOpcode() == ISD::PREFETCH ||
1488 N->isTargetMemoryOpcode();
1489 }
1490};
1491
1492/// This SDNode is used to implement the code generator
1493/// support for the llvm IR shufflevector instruction. It combines elements
1494/// from two input vectors into a new input vector, with the selection and
1495/// ordering of elements determined by an array of integers, referred to as
1496/// the shuffle mask. For input vectors of width N, mask indices of 0..N-1
1497/// refer to elements from the LHS input, and indices from N to 2N-1 the RHS.
1498/// An index of -1 is treated as undef, such that the code generator may put
1499/// any value in the corresponding element of the result.
1500class ShuffleVectorSDNode : public SDNode {
1501 // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
1502 // is freed when the SelectionDAG object is destroyed.
1503 const int *Mask;
1504
1505protected:
1506 friend class SelectionDAG;
1507
1508 ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
1509 : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}
1510
1511public:
1512 ArrayRef<int> getMask() const {
1513 EVT VT = getValueType(0);
1514 return makeArrayRef(Mask, VT.getVectorNumElements());
1515 }
1516
1517 int getMaskElt(unsigned Idx) const {
1518 assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!")(static_cast <bool> (Idx < getValueType(0).getVectorNumElements
() && "Idx out of range!") ? void (0) : __assert_fail
("Idx < getValueType(0).getVectorNumElements() && \"Idx out of range!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1518, __extension__
__PRETTY_FUNCTION__))
;
1519 return Mask[Idx];
1520 }
1521
1522 bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
1523
1524 int getSplatIndex() const {
1525 assert(isSplat() && "Cannot get splat index for non-splat!")(static_cast <bool> (isSplat() && "Cannot get splat index for non-splat!"
) ? void (0) : __assert_fail ("isSplat() && \"Cannot get splat index for non-splat!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1525, __extension__
__PRETTY_FUNCTION__))
;
1526 EVT VT = getValueType(0);
1527 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1528 if (Mask[i] >= 0)
1529 return Mask[i];
1530
1531 // We can choose any index value here and be correct because all elements
1532 // are undefined. Return 0 for better potential for callers to simplify.
1533 return 0;
1534 }
1535
1536 static bool isSplatMask(const int *Mask, EVT VT);
1537
1538 /// Change values in a shuffle permute mask assuming
1539 /// the two vector operands have swapped position.
1540 static void commuteMask(MutableArrayRef<int> Mask) {
1541 unsigned NumElems = Mask.size();
1542 for (unsigned i = 0; i != NumElems; ++i) {
1543 int idx = Mask[i];
1544 if (idx < 0)
1545 continue;
1546 else if (idx < (int)NumElems)
1547 Mask[i] = idx + NumElems;
1548 else
1549 Mask[i] = idx - NumElems;
1550 }
1551 }
1552
1553 static bool classof(const SDNode *N) {
1554 return N->getOpcode() == ISD::VECTOR_SHUFFLE;
1555 }
1556};
1557
1558class ConstantSDNode : public SDNode {
1559 friend class SelectionDAG;
1560
1561 const ConstantInt *Value;
1562
1563 ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT)
1564 : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(),
1565 getSDVTList(VT)),
1566 Value(val) {
1567 ConstantSDNodeBits.IsOpaque = isOpaque;
1568 }
1569
1570public:
1571 const ConstantInt *getConstantIntValue() const { return Value; }
1572 const APInt &getAPIntValue() const { return Value->getValue(); }
1573 uint64_t getZExtValue() const { return Value->getZExtValue(); }
1574 int64_t getSExtValue() const { return Value->getSExtValue(); }
1575 uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX(18446744073709551615UL)) {
1576 return Value->getLimitedValue(Limit);
1577 }
1578 MaybeAlign getMaybeAlignValue() const { return Value->getMaybeAlignValue(); }
1579 Align getAlignValue() const { return Value->getAlignValue(); }
1580
1581 bool isOne() const { return Value->isOne(); }
1582 bool isZero() const { return Value->isZero(); }
1583 // NOTE: This is soft-deprecated. Please use `isZero()` instead.
1584 bool isNullValue() const { return isZero(); }
1585 bool isAllOnes() const { return Value->isMinusOne(); }
1586 // NOTE: This is soft-deprecated. Please use `isAllOnes()` instead.
1587 bool isAllOnesValue() const { return isAllOnes(); }
1588 bool isMaxSignedValue() const { return Value->isMaxValue(true); }
1589 bool isMinSignedValue() const { return Value->isMinValue(true); }
1590
1591 bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }
1592
1593 static bool classof(const SDNode *N) {
1594 return N->getOpcode() == ISD::Constant ||
1595 N->getOpcode() == ISD::TargetConstant;
1596 }
1597};
1598
1599uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
1600 return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
1601}
1602
1603const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const {
1604 return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
1605}
1606
1607class ConstantFPSDNode : public SDNode {
1608 friend class SelectionDAG;
1609
1610 const ConstantFP *Value;
1611
1612 ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT)
1613 : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0,
1614 DebugLoc(), getSDVTList(VT)),
1615 Value(val) {}
1616
1617public:
1618 const APFloat& getValueAPF() const { return Value->getValueAPF(); }
1619 const ConstantFP *getConstantFPValue() const { return Value; }
1620
1621 /// Return true if the value is positive or negative zero.
1622 bool isZero() const { return Value->isZero(); }
1623
1624 /// Return true if the value is a NaN.
1625 bool isNaN() const { return Value->isNaN(); }
1626
1627 /// Return true if the value is an infinity
1628 bool isInfinity() const { return Value->isInfinity(); }
1629
1630 /// Return true if the value is negative.
1631 bool isNegative() const { return Value->isNegative(); }
1632
1633 /// We don't rely on operator== working on double values, as
1634 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
1635 /// As such, this method can be used to do an exact bit-for-bit comparison of
1636 /// two floating point values.
1637
1638 /// We leave the version with the double argument here because it's just so
1639 /// convenient to write "2.0" and the like. Without this function we'd
1640 /// have to duplicate its logic everywhere it's called.
1641 bool isExactlyValue(double V) const {
1642 return Value->getValueAPF().isExactlyValue(V);
1643 }
1644 bool isExactlyValue(const APFloat& V) const;
1645
1646 static bool isValueValidForType(EVT VT, const APFloat& Val);
1647
1648 static bool classof(const SDNode *N) {
1649 return N->getOpcode() == ISD::ConstantFP ||
1650 N->getOpcode() == ISD::TargetConstantFP;
1651 }
1652};
1653
1654/// Returns true if \p V is a constant integer zero.
1655bool isNullConstant(SDValue V);
1656
1657/// Returns true if \p V is an FP constant with a value of positive zero.
1658bool isNullFPConstant(SDValue V);
1659
1660/// Returns true if \p V is an integer constant with all bits set.
1661bool isAllOnesConstant(SDValue V);
1662
1663/// Returns true if \p V is a constant integer one.
1664bool isOneConstant(SDValue V);
1665
1666/// Return the non-bitcasted source operand of \p V if it exists.
1667/// If \p V is not a bitcasted value, it is returned as-is.
1668SDValue peekThroughBitcasts(SDValue V);
1669
1670/// Return the non-bitcasted and one-use source operand of \p V if it exists.
1671/// If \p V is not a bitcasted one-use value, it is returned as-is.
1672SDValue peekThroughOneUseBitcasts(SDValue V);
1673
1674/// Return the non-extracted vector source operand of \p V if it exists.
1675/// If \p V is not an extracted subvector, it is returned as-is.
1676SDValue peekThroughExtractSubvectors(SDValue V);
1677
1678/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
1679/// constant is canonicalized to be operand 1.
1680bool isBitwiseNot(SDValue V, bool AllowUndefs = false);
1681
1682/// Returns the SDNode if it is a constant splat BuildVector or constant int.
1683ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false,
1684 bool AllowTruncation = false);
1685
1686/// Returns the SDNode if it is a demanded constant splat BuildVector or
1687/// constant int.
1688ConstantSDNode *isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
1689 bool AllowUndefs = false,
1690 bool AllowTruncation = false);
1691
1692/// Returns the SDNode if it is a constant splat BuildVector or constant float.
1693ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false);
1694
1695/// Returns the SDNode if it is a demanded constant splat BuildVector or
1696/// constant float.
1697ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, const APInt &DemandedElts,
1698 bool AllowUndefs = false);
1699
1700/// Return true if the value is a constant 0 integer or a splatted vector of
1701/// a constant 0 integer (with no undefs by default).
1702/// Build vector implicit truncation is not an issue for null values.
1703bool isNullOrNullSplat(SDValue V, bool AllowUndefs = false);
1704
1705/// Return true if the value is a constant 1 integer or a splatted vector of a
1706/// constant 1 integer (with no undefs).
1707/// Does not permit build vector implicit truncation.
1708bool isOneOrOneSplat(SDValue V, bool AllowUndefs = false);
1709
1710/// Return true if the value is a constant -1 integer or a splatted vector of a
1711/// constant -1 integer (with no undefs).
1712/// Does not permit build vector implicit truncation.
1713bool isAllOnesOrAllOnesSplat(SDValue V, bool AllowUndefs = false);
1714
1715/// Return true if \p V is either a integer or FP constant.
1716inline bool isIntOrFPConstant(SDValue V) {
1717 return isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V);
1718}
1719
1720class GlobalAddressSDNode : public SDNode {
1721 friend class SelectionDAG;
1722
1723 const GlobalValue *TheGlobal;
1724 int64_t Offset;
1725 unsigned TargetFlags;
1726
1727 GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
1728 const GlobalValue *GA, EVT VT, int64_t o,
1729 unsigned TF);
1730
1731public:
1732 const GlobalValue *getGlobal() const { return TheGlobal; }
1733 int64_t getOffset() const { return Offset; }
1734 unsigned getTargetFlags() const { return TargetFlags; }
1735 // Return the address space this GlobalAddress belongs to.
1736 unsigned getAddressSpace() const;
1737
1738 static bool classof(const SDNode *N) {
1739 return N->getOpcode() == ISD::GlobalAddress ||
1740 N->getOpcode() == ISD::TargetGlobalAddress ||
1741 N->getOpcode() == ISD::GlobalTLSAddress ||
1742 N->getOpcode() == ISD::TargetGlobalTLSAddress;
1743 }
1744};
1745
1746class FrameIndexSDNode : public SDNode {
1747 friend class SelectionDAG;
1748
1749 int FI;
1750
1751 FrameIndexSDNode(int fi, EVT VT, bool isTarg)
1752 : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
1753 0, DebugLoc(), getSDVTList(VT)), FI(fi) {
1754 }
1755
1756public:
1757 int getIndex() const { return FI; }
1758
1759 static bool classof(const SDNode *N) {
1760 return N->getOpcode() == ISD::FrameIndex ||
1761 N->getOpcode() == ISD::TargetFrameIndex;
1762 }
1763};
1764
1765/// This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate
1766/// the offet and size that are started/ended in the underlying FrameIndex.
1767class LifetimeSDNode : public SDNode {
1768 friend class SelectionDAG;
1769 int64_t Size;
1770 int64_t Offset; // -1 if offset is unknown.
1771
1772 LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
1773 SDVTList VTs, int64_t Size, int64_t Offset)
1774 : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {}
1775public:
1776 int64_t getFrameIndex() const {
1777 return cast<FrameIndexSDNode>(getOperand(1))->getIndex();
1778 }
1779
1780 bool hasOffset() const { return Offset >= 0; }
1781 int64_t getOffset() const {
1782 assert(hasOffset() && "offset is unknown")(static_cast <bool> (hasOffset() && "offset is unknown"
) ? void (0) : __assert_fail ("hasOffset() && \"offset is unknown\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1782, __extension__
__PRETTY_FUNCTION__))
;
1783 return Offset;
1784 }
1785 int64_t getSize() const {
1786 assert(hasOffset() && "offset is unknown")(static_cast <bool> (hasOffset() && "offset is unknown"
) ? void (0) : __assert_fail ("hasOffset() && \"offset is unknown\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1786, __extension__
__PRETTY_FUNCTION__))
;
1787 return Size;
1788 }
1789
1790 // Methods to support isa and dyn_cast
1791 static bool classof(const SDNode *N) {
1792 return N->getOpcode() == ISD::LIFETIME_START ||
1793 N->getOpcode() == ISD::LIFETIME_END;
1794 }
1795};
1796
1797/// This SDNode is used for PSEUDO_PROBE values, which are the function guid and
1798/// the index of the basic block being probed. A pseudo probe serves as a place
1799/// holder and will be removed at the end of compilation. It does not have any
1800/// operand because we do not want the instruction selection to deal with any.
1801class PseudoProbeSDNode : public SDNode {
1802 friend class SelectionDAG;
1803 uint64_t Guid;
1804 uint64_t Index;
1805 uint32_t Attributes;
1806
1807 PseudoProbeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &Dl,
1808 SDVTList VTs, uint64_t Guid, uint64_t Index, uint32_t Attr)
1809 : SDNode(Opcode, Order, Dl, VTs), Guid(Guid), Index(Index),
1810 Attributes(Attr) {}
1811
1812public:
1813 uint64_t getGuid() const { return Guid; }
1814 uint64_t getIndex() const { return Index; }
1815 uint32_t getAttributes() const { return Attributes; }
1816
1817 // Methods to support isa and dyn_cast
1818 static bool classof(const SDNode *N) {
1819 return N->getOpcode() == ISD::PSEUDO_PROBE;
1820 }
1821};
1822
1823class JumpTableSDNode : public SDNode {
1824 friend class SelectionDAG;
1825
1826 int JTI;
1827 unsigned TargetFlags;
1828
1829 JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned TF)
1830 : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
1831 0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
1832 }
1833
1834public:
1835 int getIndex() const { return JTI; }
1836 unsigned getTargetFlags() const { return TargetFlags; }
1837
1838 static bool classof(const SDNode *N) {
1839 return N->getOpcode() == ISD::JumpTable ||
1840 N->getOpcode() == ISD::TargetJumpTable;
1841 }
1842};
1843
1844class ConstantPoolSDNode : public SDNode {
1845 friend class SelectionDAG;
1846
1847 union {
1848 const Constant *ConstVal;
1849 MachineConstantPoolValue *MachineCPVal;
1850 } Val;
1851 int Offset; // It's a MachineConstantPoolValue if top bit is set.
1852 Align Alignment; // Minimum alignment requirement of CP.
1853 unsigned TargetFlags;
1854
1855 ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
1856 Align Alignment, unsigned TF)
1857 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1858 DebugLoc(), getSDVTList(VT)),
1859 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1860 assert(Offset >= 0 && "Offset is too large")(static_cast <bool> (Offset >= 0 && "Offset is too large"
) ? void (0) : __assert_fail ("Offset >= 0 && \"Offset is too large\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1860, __extension__
__PRETTY_FUNCTION__))
;
1861 Val.ConstVal = c;
1862 }
1863
1864 ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v, EVT VT, int o,
1865 Align Alignment, unsigned TF)
1866 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1867 DebugLoc(), getSDVTList(VT)),
1868 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1869 assert(Offset >= 0 && "Offset is too large")(static_cast <bool> (Offset >= 0 && "Offset is too large"
) ? void (0) : __assert_fail ("Offset >= 0 && \"Offset is too large\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1869, __extension__
__PRETTY_FUNCTION__))
;
1870 Val.MachineCPVal = v;
1871 Offset |= 1 << (sizeof(unsigned)*CHAR_BIT8-1);
1872 }
1873
1874public:
1875 bool isMachineConstantPoolEntry() const {
1876 return Offset < 0;
1877 }
1878
1879 const Constant *getConstVal() const {
1880 assert(!isMachineConstantPoolEntry() && "Wrong constantpool type")(static_cast <bool> (!isMachineConstantPoolEntry() &&
"Wrong constantpool type") ? void (0) : __assert_fail ("!isMachineConstantPoolEntry() && \"Wrong constantpool type\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1880, __extension__
__PRETTY_FUNCTION__))
;
1881 return Val.ConstVal;
1882 }
1883
1884 MachineConstantPoolValue *getMachineCPVal() const {
1885 assert(isMachineConstantPoolEntry() && "Wrong constantpool type")(static_cast <bool> (isMachineConstantPoolEntry() &&
"Wrong constantpool type") ? void (0) : __assert_fail ("isMachineConstantPoolEntry() && \"Wrong constantpool type\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 1885, __extension__
__PRETTY_FUNCTION__))
;
1886 return Val.MachineCPVal;
1887 }
1888
1889 int getOffset() const {
1890 return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT8-1));
1891 }
1892
1893 // Return the alignment of this constant pool object, which is either 0 (for
1894 // default alignment) or the desired value.
1895 Align getAlign() const { return Alignment; }
1896 unsigned getTargetFlags() const { return TargetFlags; }
1897
1898 Type *getType() const;
1899
1900 static bool classof(const SDNode *N) {
1901 return N->getOpcode() == ISD::ConstantPool ||
1902 N->getOpcode() == ISD::TargetConstantPool;
1903 }
1904};
1905
1906/// Completely target-dependent object reference.
1907class TargetIndexSDNode : public SDNode {
1908 friend class SelectionDAG;
1909
1910 unsigned TargetFlags;
1911 int Index;
1912 int64_t Offset;
1913
1914public:
1915 TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned TF)
1916 : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
1917 TargetFlags(TF), Index(Idx), Offset(Ofs) {}
1918
1919 unsigned getTargetFlags() const { return TargetFlags; }
1920 int getIndex() const { return Index; }
1921 int64_t getOffset() const { return Offset; }
1922
1923 static bool classof(const SDNode *N) {
1924 return N->getOpcode() == ISD::TargetIndex;
1925 }
1926};
1927
1928class BasicBlockSDNode : public SDNode {
1929 friend class SelectionDAG;
1930
1931 MachineBasicBlock *MBB;
1932
1933 /// Debug info is meaningful and potentially useful here, but we create
1934 /// blocks out of order when they're jumped to, which makes it a bit
1935 /// harder. Let's see if we need it first.
1936 explicit BasicBlockSDNode(MachineBasicBlock *mbb)
1937 : SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb)
1938 {}
1939
1940public:
1941 MachineBasicBlock *getBasicBlock() const { return MBB; }
1942
1943 static bool classof(const SDNode *N) {
1944 return N->getOpcode() == ISD::BasicBlock;
1945 }
1946};
1947
1948/// A "pseudo-class" with methods for operating on BUILD_VECTORs.
1949class BuildVectorSDNode : public SDNode {
1950public:
1951 // These are constructed as SDNodes and then cast to BuildVectorSDNodes.
1952 explicit BuildVectorSDNode() = delete;
1953
1954 /// Check if this is a constant splat, and if so, find the
1955 /// smallest element size that splats the vector. If MinSplatBits is
1956 /// nonzero, the element size must be at least that large. Note that the
1957 /// splat element may be the entire vector (i.e., a one element vector).
1958 /// Returns the splat element value in SplatValue. Any undefined bits in
1959 /// that value are zero, and the corresponding bits in the SplatUndef mask
1960 /// are set. The SplatBitSize value is set to the splat element size in
1961 /// bits. HasAnyUndefs is set to true if any bits in the vector are
1962 /// undefined. isBigEndian describes the endianness of the target.
1963 bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
1964 unsigned &SplatBitSize, bool &HasAnyUndefs,
1965 unsigned MinSplatBits = 0,
1966 bool isBigEndian = false) const;
1967
1968 /// Returns the demanded splatted value or a null value if this is not a
1969 /// splat.
1970 ///
1971 /// The DemandedElts mask indicates the elements that must be in the splat.
1972 /// If passed a non-null UndefElements bitvector, it will resize it to match
1973 /// the vector width and set the bits where elements are undef.
1974 SDValue getSplatValue(const APInt &DemandedElts,
1975 BitVector *UndefElements = nullptr) const;
1976
1977 /// Returns the splatted value or a null value if this is not a splat.
1978 ///
1979 /// If passed a non-null UndefElements bitvector, it will resize it to match
1980 /// the vector width and set the bits where elements are undef.
1981 SDValue getSplatValue(BitVector *UndefElements = nullptr) const;
1982
1983 /// Find the shortest repeating sequence of values in the build vector.
1984 ///
1985 /// e.g. { u, X, u, X, u, u, X, u } -> { X }
1986 /// { X, Y, u, Y, u, u, X, u } -> { X, Y }
1987 ///
1988 /// Currently this must be a power-of-2 build vector.
1989 /// The DemandedElts mask indicates the elements that must be present,
1990 /// undemanded elements in Sequence may be null (SDValue()). If passed a
1991 /// non-null UndefElements bitvector, it will resize it to match the original
1992 /// vector width and set the bits where elements are undef. If result is
1993 /// false, Sequence will be empty.
1994 bool getRepeatedSequence(const APInt &DemandedElts,
1995 SmallVectorImpl<SDValue> &Sequence,
1996 BitVector *UndefElements = nullptr) const;
1997
1998 /// Find the shortest repeating sequence of values in the build vector.
1999 ///
2000 /// e.g. { u, X, u, X, u, u, X, u } -> { X }
2001 /// { X, Y, u, Y, u, u, X, u } -> { X, Y }
2002 ///
2003 /// Currently this must be a power-of-2 build vector.
2004 /// If passed a non-null UndefElements bitvector, it will resize it to match
2005 /// the original vector width and set the bits where elements are undef.
2006 /// If result is false, Sequence will be empty.
2007 bool getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
2008 BitVector *UndefElements = nullptr) const;
2009
2010 /// Returns the demanded splatted constant or null if this is not a constant
2011 /// splat.
2012 ///
2013 /// The DemandedElts mask indicates the elements that must be in the splat.
2014 /// If passed a non-null UndefElements bitvector, it will resize it to match
2015 /// the vector width and set the bits where elements are undef.
2016 ConstantSDNode *
2017 getConstantSplatNode(const APInt &DemandedElts,
2018 BitVector *UndefElements = nullptr) const;
2019
2020 /// Returns the splatted constant or null if this is not a constant
2021 /// splat.
2022 ///
2023 /// If passed a non-null UndefElements bitvector, it will resize it to match
2024 /// the vector width and set the bits where elements are undef.
2025 ConstantSDNode *
2026 getConstantSplatNode(BitVector *UndefElements = nullptr) const;
2027
2028 /// Returns the demanded splatted constant FP or null if this is not a
2029 /// constant FP splat.
2030 ///
2031 /// The DemandedElts mask indicates the elements that must be in the splat.
2032 /// If passed a non-null UndefElements bitvector, it will resize it to match
2033 /// the vector width and set the bits where elements are undef.
2034 ConstantFPSDNode *
2035 getConstantFPSplatNode(const APInt &DemandedElts,
2036 BitVector *UndefElements = nullptr) const;
2037
2038 /// Returns the splatted constant FP or null if this is not a constant
2039 /// FP splat.
2040 ///
2041 /// If passed a non-null UndefElements bitvector, it will resize it to match
2042 /// the vector width and set the bits where elements are undef.
2043 ConstantFPSDNode *
2044 getConstantFPSplatNode(BitVector *UndefElements = nullptr) const;
2045
2046 /// If this is a constant FP splat and the splatted constant FP is an
2047 /// exact power or 2, return the log base 2 integer value. Otherwise,
2048 /// return -1.
2049 ///
2050 /// The BitWidth specifies the necessary bit precision.
2051 int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
2052 uint32_t BitWidth) const;
2053
2054 /// Extract the raw bit data from a build vector of Undef, Constant or
2055 /// ConstantFP node elements. Each raw bit element will be \p
2056 /// DstEltSizeInBits wide, undef elements are treated as zero, and entirely
2057 /// undefined elements are flagged in \p UndefElements.
2058 bool getConstantRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits,
2059 SmallVectorImpl<APInt> &RawBitElements,
2060 BitVector &UndefElements) const;
2061
2062 bool isConstant() const;
2063
2064 /// Recast bit data \p SrcBitElements to \p DstEltSizeInBits wide elements.
2065 /// Undef elements are treated as zero, and entirely undefined elements are
2066 /// flagged in \p DstUndefElements.
2067 static void recastRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits,
2068 SmallVectorImpl<APInt> &DstBitElements,
2069 ArrayRef<APInt> SrcBitElements,
2070 BitVector &DstUndefElements,
2071 const BitVector &SrcUndefElements);
2072
2073 static bool classof(const SDNode *N) {
2074 return N->getOpcode() == ISD::BUILD_VECTOR;
2075 }
2076};
2077
2078/// An SDNode that holds an arbitrary LLVM IR Value. This is
2079/// used when the SelectionDAG needs to make a simple reference to something
2080/// in the LLVM IR representation.
2081///
2082class SrcValueSDNode : public SDNode {
2083 friend class SelectionDAG;
2084
2085 const Value *V;
2086
2087 /// Create a SrcValue for a general value.
2088 explicit SrcValueSDNode(const Value *v)
2089 : SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}
2090
2091public:
2092 /// Return the contained Value.
2093 const Value *getValue() const { return V; }
2094
2095 static bool classof(const SDNode *N) {
2096 return N->getOpcode() == ISD::SRCVALUE;
2097 }
2098};
2099
2100class MDNodeSDNode : public SDNode {
2101 friend class SelectionDAG;
2102
2103 const MDNode *MD;
2104
2105 explicit MDNodeSDNode(const MDNode *md)
2106 : SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md)
2107 {}
2108
2109public:
2110 const MDNode *getMD() const { return MD; }
2111
2112 static bool classof(const SDNode *N) {
2113 return N->getOpcode() == ISD::MDNODE_SDNODE;
2114 }
2115};
2116
2117class RegisterSDNode : public SDNode {
2118 friend class SelectionDAG;
2119
2120 Register Reg;
2121
2122 RegisterSDNode(Register reg, EVT VT)
2123 : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {}
2124
2125public:
2126 Register getReg() const { return Reg; }
2127
2128 static bool classof(const SDNode *N) {
2129 return N->getOpcode() == ISD::Register;
2130 }
2131};
2132
2133class RegisterMaskSDNode : public SDNode {
2134 friend class SelectionDAG;
2135
2136 // The memory for RegMask is not owned by the node.
2137 const uint32_t *RegMask;
2138
2139 RegisterMaskSDNode(const uint32_t *mask)
2140 : SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)),
2141 RegMask(mask) {}
2142
2143public:
2144 const uint32_t *getRegMask() const { return RegMask; }
2145
2146 static bool classof(const SDNode *N) {
2147 return N->getOpcode() == ISD::RegisterMask;
2148 }
2149};
2150
2151class BlockAddressSDNode : public SDNode {
2152 friend class SelectionDAG;
2153
2154 const BlockAddress *BA;
2155 int64_t Offset;
2156 unsigned TargetFlags;
2157
2158 BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
2159 int64_t o, unsigned Flags)
2160 : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
2161 BA(ba), Offset(o), TargetFlags(Flags) {}
2162
2163public:
2164 const BlockAddress *getBlockAddress() const { return BA; }
2165 int64_t getOffset() const { return Offset; }
2166 unsigned getTargetFlags() const { return TargetFlags; }
2167
2168 static bool classof(const SDNode *N) {
2169 return N->getOpcode() == ISD::BlockAddress ||
2170 N->getOpcode() == ISD::TargetBlockAddress;
2171 }
2172};
2173
2174class LabelSDNode : public SDNode {
2175 friend class SelectionDAG;
2176
2177 MCSymbol *Label;
2178
2179 LabelSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, MCSymbol *L)
2180 : SDNode(Opcode, Order, dl, getSDVTList(MVT::Other)), Label(L) {
2181 assert(LabelSDNode::classof(this) && "not a label opcode")(static_cast <bool> (LabelSDNode::classof(this) &&
"not a label opcode") ? void (0) : __assert_fail ("LabelSDNode::classof(this) && \"not a label opcode\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2181, __extension__
__PRETTY_FUNCTION__))
;
2182 }
2183
2184public:
2185 MCSymbol *getLabel() const { return Label; }
2186
2187 static bool classof(const SDNode *N) {
2188 return N->getOpcode() == ISD::EH_LABEL ||
2189 N->getOpcode() == ISD::ANNOTATION_LABEL;
2190 }
2191};
2192
2193class ExternalSymbolSDNode : public SDNode {
2194 friend class SelectionDAG;
2195
2196 const char *Symbol;
2197 unsigned TargetFlags;
2198
2199 ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned TF, EVT VT)
2200 : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol, 0,
2201 DebugLoc(), getSDVTList(VT)),
2202 Symbol(Sym), TargetFlags(TF) {}
2203
2204public:
2205 const char *getSymbol() const { return Symbol; }
2206 unsigned getTargetFlags() const { return TargetFlags; }
2207
2208 static bool classof(const SDNode *N) {
2209 return N->getOpcode() == ISD::ExternalSymbol ||
2210 N->getOpcode() == ISD::TargetExternalSymbol;
2211 }
2212};
2213
2214class MCSymbolSDNode : public SDNode {
2215 friend class SelectionDAG;
2216
2217 MCSymbol *Symbol;
2218
2219 MCSymbolSDNode(MCSymbol *Symbol, EVT VT)
2220 : SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {}
2221
2222public:
2223 MCSymbol *getMCSymbol() const { return Symbol; }
2224
2225 static bool classof(const SDNode *N) {
2226 return N->getOpcode() == ISD::MCSymbol;
2227 }
2228};
2229
2230class CondCodeSDNode : public SDNode {
2231 friend class SelectionDAG;
2232
2233 ISD::CondCode Condition;
2234
2235 explicit CondCodeSDNode(ISD::CondCode Cond)
2236 : SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2237 Condition(Cond) {}
2238
2239public:
2240 ISD::CondCode get() const { return Condition; }
2241
2242 static bool classof(const SDNode *N) {
2243 return N->getOpcode() == ISD::CONDCODE;
2244 }
2245};
2246
2247/// This class is used to represent EVT's, which are used
2248/// to parameterize some operations.
2249class VTSDNode : public SDNode {
2250 friend class SelectionDAG;
2251
2252 EVT ValueType;
2253
2254 explicit VTSDNode(EVT VT)
2255 : SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2256 ValueType(VT) {}
2257
2258public:
2259 EVT getVT() const { return ValueType; }
2260
2261 static bool classof(const SDNode *N) {
2262 return N->getOpcode() == ISD::VALUETYPE;
2263 }
2264};
2265
2266/// Base class for LoadSDNode and StoreSDNode
2267class LSBaseSDNode : public MemSDNode {
2268public:
2269 LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
2270 SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
2271 MachineMemOperand *MMO)
2272 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2273 LSBaseSDNodeBits.AddressingMode = AM;
2274 assert(getAddressingMode() == AM && "Value truncated")(static_cast <bool> (getAddressingMode() == AM &&
"Value truncated") ? void (0) : __assert_fail ("getAddressingMode() == AM && \"Value truncated\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2274, __extension__
__PRETTY_FUNCTION__))
;
2275 }
2276
2277 const SDValue &getOffset() const {
2278 return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
2279 }
2280
2281 /// Return the addressing mode for this load or store:
2282 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2283 ISD::MemIndexedMode getAddressingMode() const {
2284 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2285 }
2286
2287 /// Return true if this is a pre/post inc/dec load/store.
2288 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2289
2290 /// Return true if this is NOT a pre/post inc/dec load/store.
2291 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2292
2293 static bool classof(const SDNode *N) {
2294 return N->getOpcode() == ISD::LOAD ||
2295 N->getOpcode() == ISD::STORE;
2296 }
2297};
2298
2299/// This class is used to represent ISD::LOAD nodes.
2300class LoadSDNode : public LSBaseSDNode {
2301 friend class SelectionDAG;
2302
2303 LoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2304 ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
2305 MachineMemOperand *MMO)
2306 : LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) {
2307 LoadSDNodeBits.ExtTy = ETy;
2308 assert(readMem() && "Load MachineMemOperand is not a load!")(static_cast <bool> (readMem() && "Load MachineMemOperand is not a load!"
) ? void (0) : __assert_fail ("readMem() && \"Load MachineMemOperand is not a load!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2308, __extension__
__PRETTY_FUNCTION__))
;
2309 assert(!writeMem() && "Load MachineMemOperand is a store!")(static_cast <bool> (!writeMem() && "Load MachineMemOperand is a store!"
) ? void (0) : __assert_fail ("!writeMem() && \"Load MachineMemOperand is a store!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2309, __extension__
__PRETTY_FUNCTION__))
;
2310 }
2311
2312public:
2313 /// Return whether this is a plain node,
2314 /// or one of the varieties of value-extending loads.
2315 ISD::LoadExtType getExtensionType() const {
2316 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2317 }
2318
2319 const SDValue &getBasePtr() const { return getOperand(1); }
2320 const SDValue &getOffset() const { return getOperand(2); }
2321
2322 static bool classof(const SDNode *N) {
2323 return N->getOpcode() == ISD::LOAD;
2324 }
2325};
2326
2327/// This class is used to represent ISD::STORE nodes.
2328class StoreSDNode : public LSBaseSDNode {
2329 friend class SelectionDAG;
2330
2331 StoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2332 ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
2333 MachineMemOperand *MMO)
2334 : LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) {
2335 StoreSDNodeBits.IsTruncating = isTrunc;
2336 assert(!readMem() && "Store MachineMemOperand is a load!")(static_cast <bool> (!readMem() && "Store MachineMemOperand is a load!"
) ? void (0) : __assert_fail ("!readMem() && \"Store MachineMemOperand is a load!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2336, __extension__
__PRETTY_FUNCTION__))
;
2337 assert(writeMem() && "Store MachineMemOperand is not a store!")(static_cast <bool> (writeMem() && "Store MachineMemOperand is not a store!"
) ? void (0) : __assert_fail ("writeMem() && \"Store MachineMemOperand is not a store!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2337, __extension__
__PRETTY_FUNCTION__))
;
2338 }
2339
2340public:
2341 /// Return true if the op does a truncation before store.
2342 /// For integers this is the same as doing a TRUNCATE and storing the result.
2343 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2344 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2345 void setTruncatingStore(bool Truncating) {
2346 StoreSDNodeBits.IsTruncating = Truncating;
2347 }
2348
2349 const SDValue &getValue() const { return getOperand(1); }
2350 const SDValue &getBasePtr() const { return getOperand(2); }
2351 const SDValue &getOffset() const { return getOperand(3); }
2352
2353 static bool classof(const SDNode *N) {
2354 return N->getOpcode() == ISD::STORE;
2355 }
2356};
2357
2358/// This base class is used to represent VP_LOAD and VP_STORE nodes
2359class VPLoadStoreSDNode : public MemSDNode {
2360public:
2361 friend class SelectionDAG;
2362
2363 VPLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
2364 SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
2365 MachineMemOperand *MMO)
2366 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2367 LSBaseSDNodeBits.AddressingMode = AM;
2368 assert(getAddressingMode() == AM && "Value truncated")(static_cast <bool> (getAddressingMode() == AM &&
"Value truncated") ? void (0) : __assert_fail ("getAddressingMode() == AM && \"Value truncated\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2368, __extension__
__PRETTY_FUNCTION__))
;
2369 }
2370
2371 // VPLoadSDNode (Chain, Ptr, Offset, Mask, EVL)
2372 // VPStoreSDNode (Chain, Data, Ptr, Offset, Mask, EVL)
2373 // Mask is a vector of i1 elements;
2374 // the type of EVL is TLI.getVPExplicitVectorLengthTy().
2375 const SDValue &getOffset() const {
2376 return getOperand(getOpcode() == ISD::VP_LOAD ? 2 : 3);
2377 }
2378 const SDValue &getBasePtr() const {
2379 return getOperand(getOpcode() == ISD::VP_LOAD ? 1 : 2);
2380 }
2381 const SDValue &getMask() const {
2382 return getOperand(getOpcode() == ISD::VP_LOAD ? 3 : 4);
2383 }
2384 const SDValue &getVectorLength() const {
2385 return getOperand(getOpcode() == ISD::VP_LOAD ? 4 : 5);
2386 }
2387
2388 /// Return the addressing mode for this load or store:
2389 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2390 ISD::MemIndexedMode getAddressingMode() const {
2391 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2392 }
2393
2394 /// Return true if this is a pre/post inc/dec load/store.
2395 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2396
2397 /// Return true if this is NOT a pre/post inc/dec load/store.
2398 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2399
2400 static bool classof(const SDNode *N) {
2401 return N->getOpcode() == ISD::VP_LOAD || N->getOpcode() == ISD::VP_STORE;
2402 }
2403};
2404
2405/// This class is used to represent a VP_LOAD node
2406class VPLoadSDNode : public VPLoadStoreSDNode {
2407public:
2408 friend class SelectionDAG;
2409
2410 VPLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2411 ISD::MemIndexedMode AM, ISD::LoadExtType ETy, bool isExpanding,
2412 EVT MemVT, MachineMemOperand *MMO)
2413 : VPLoadStoreSDNode(ISD::VP_LOAD, Order, dl, VTs, AM, MemVT, MMO) {
2414 LoadSDNodeBits.ExtTy = ETy;
2415 LoadSDNodeBits.IsExpanding = isExpanding;
2416 }
2417
2418 ISD::LoadExtType getExtensionType() const {
2419 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2420 }
2421
2422 const SDValue &getBasePtr() const { return getOperand(1); }
2423 const SDValue &getOffset() const { return getOperand(2); }
2424 const SDValue &getMask() const { return getOperand(3); }
2425 const SDValue &getVectorLength() const { return getOperand(4); }
2426
2427 static bool classof(const SDNode *N) {
2428 return N->getOpcode() == ISD::VP_LOAD;
2429 }
2430 bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
2431};
2432
2433/// This class is used to represent a VP_STORE node
2434class VPStoreSDNode : public VPLoadStoreSDNode {
2435public:
2436 friend class SelectionDAG;
2437
2438 VPStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2439 ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing,
2440 EVT MemVT, MachineMemOperand *MMO)
2441 : VPLoadStoreSDNode(ISD::VP_STORE, Order, dl, VTs, AM, MemVT, MMO) {
2442 StoreSDNodeBits.IsTruncating = isTrunc;
2443 StoreSDNodeBits.IsCompressing = isCompressing;
2444 }
2445
2446 /// Return true if this is a truncating store.
2447 /// For integers this is the same as doing a TRUNCATE and storing the result.
2448 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2449 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2450
2451 /// Returns true if the op does a compression to the vector before storing.
2452 /// The node contiguously stores the active elements (integers or floats)
2453 /// in src (those with their respective bit set in writemask k) to unaligned
2454 /// memory at base_addr.
2455 bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
2456
2457 const SDValue &getValue() const { return getOperand(1); }
2458 const SDValue &getBasePtr() const { return getOperand(2); }
2459 const SDValue &getOffset() const { return getOperand(3); }
2460 const SDValue &getMask() const { return getOperand(4); }
2461 const SDValue &getVectorLength() const { return getOperand(5); }
2462
2463 static bool classof(const SDNode *N) {
2464 return N->getOpcode() == ISD::VP_STORE;
2465 }
2466};
2467
2468/// This base class is used to represent MLOAD and MSTORE nodes
2469class MaskedLoadStoreSDNode : public MemSDNode {
2470public:
2471 friend class SelectionDAG;
2472
2473 MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order,
2474 const DebugLoc &dl, SDVTList VTs,
2475 ISD::MemIndexedMode AM, EVT MemVT,
2476 MachineMemOperand *MMO)
2477 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2478 LSBaseSDNodeBits.AddressingMode = AM;
2479 assert(getAddressingMode() == AM && "Value truncated")(static_cast <bool> (getAddressingMode() == AM &&
"Value truncated") ? void (0) : __assert_fail ("getAddressingMode() == AM && \"Value truncated\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2479, __extension__
__PRETTY_FUNCTION__))
;
2480 }
2481
2482 // MaskedLoadSDNode (Chain, ptr, offset, mask, passthru)
2483 // MaskedStoreSDNode (Chain, data, ptr, offset, mask)
2484 // Mask is a vector of i1 elements
2485 const SDValue &getOffset() const {
2486 return getOperand(getOpcode() == ISD::MLOAD ? 2 : 3);
2487 }
2488 const SDValue &getMask() const {
2489 return getOperand(getOpcode() == ISD::MLOAD ? 3 : 4);
2490 }
2491
2492 /// Return the addressing mode for this load or store:
2493 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2494 ISD::MemIndexedMode getAddressingMode() const {
2495 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2496 }
2497
2498 /// Return true if this is a pre/post inc/dec load/store.
2499 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2500
2501 /// Return true if this is NOT a pre/post inc/dec load/store.
2502 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2503
2504 static bool classof(const SDNode *N) {
2505 return N->getOpcode() == ISD::MLOAD ||
2506 N->getOpcode() == ISD::MSTORE;
2507 }
2508};
2509
2510/// This class is used to represent an MLOAD node
2511class MaskedLoadSDNode : public MaskedLoadStoreSDNode {
2512public:
2513 friend class SelectionDAG;
2514
2515 MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2516 ISD::MemIndexedMode AM, ISD::LoadExtType ETy,
2517 bool IsExpanding, EVT MemVT, MachineMemOperand *MMO)
2518 : MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, VTs, AM, MemVT, MMO) {
2519 LoadSDNodeBits.ExtTy = ETy;
2520 LoadSDNodeBits.IsExpanding = IsExpanding;
2521 }
2522
2523 ISD::LoadExtType getExtensionType() const {
2524 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2525 }
2526
2527 const SDValue &getBasePtr() const { return getOperand(1); }
2528 const SDValue &getOffset() const { return getOperand(2); }
2529 const SDValue &getMask() const { return getOperand(3); }
2530 const SDValue &getPassThru() const { return getOperand(4); }
2531
2532 static bool classof(const SDNode *N) {
2533 return N->getOpcode() == ISD::MLOAD;
2534 }
2535
2536 bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
2537};
2538
2539/// This class is used to represent an MSTORE node
2540class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
2541public:
2542 friend class SelectionDAG;
2543
2544 MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2545 ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing,
2546 EVT MemVT, MachineMemOperand *MMO)
2547 : MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, AM, MemVT, MMO) {
2548 StoreSDNodeBits.IsTruncating = isTrunc;
2549 StoreSDNodeBits.IsCompressing = isCompressing;
2550 }
2551
2552 /// Return true if the op does a truncation before store.
2553 /// For integers this is the same as doing a TRUNCATE and storing the result.
2554 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2555 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2556
2557 /// Returns true if the op does a compression to the vector before storing.
2558 /// The node contiguously stores the active elements (integers or floats)
2559 /// in src (those with their respective bit set in writemask k) to unaligned
2560 /// memory at base_addr.
2561 bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
2562
2563 const SDValue &getValue() const { return getOperand(1); }
2564 const SDValue &getBasePtr() const { return getOperand(2); }
2565 const SDValue &getOffset() const { return getOperand(3); }
2566 const SDValue &getMask() const { return getOperand(4); }
2567
2568 static bool classof(const SDNode *N) {
2569 return N->getOpcode() == ISD::MSTORE;
2570 }
2571};
2572
2573/// This is a base class used to represent
2574/// VP_GATHER and VP_SCATTER nodes
2575///
2576class VPGatherScatterSDNode : public MemSDNode {
2577public:
2578 friend class SelectionDAG;
2579
2580 VPGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
2581 const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2582 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2583 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2584 LSBaseSDNodeBits.AddressingMode = IndexType;
2585 assert(getIndexType() == IndexType && "Value truncated")(static_cast <bool> (getIndexType() == IndexType &&
"Value truncated") ? void (0) : __assert_fail ("getIndexType() == IndexType && \"Value truncated\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2585, __extension__
__PRETTY_FUNCTION__))
;
2586 }
2587
2588 /// How is Index applied to BasePtr when computing addresses.
2589 ISD::MemIndexType getIndexType() const {
2590 return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
2591 }
2592 bool isIndexScaled() const {
2593 return (getIndexType() == ISD::SIGNED_SCALED) ||
2594 (getIndexType() == ISD::UNSIGNED_SCALED);
2595 }
2596 bool isIndexSigned() const {
2597 return (getIndexType() == ISD::SIGNED_SCALED) ||
2598 (getIndexType() == ISD::SIGNED_UNSCALED);
2599 }
2600
2601 // In the both nodes address is Op1, mask is Op2:
2602 // VPGatherSDNode (Chain, base, index, scale, mask, vlen)
2603 // VPScatterSDNode (Chain, value, base, index, scale, mask, vlen)
2604 // Mask is a vector of i1 elements
2605 const SDValue &getBasePtr() const {
2606 return getOperand((getOpcode() == ISD::VP_GATHER) ? 1 : 2);
2607 }
2608 const SDValue &getIndex() const {
2609 return getOperand((getOpcode() == ISD::VP_GATHER) ? 2 : 3);
2610 }
2611 const SDValue &getScale() const {
2612 return getOperand((getOpcode() == ISD::VP_GATHER) ? 3 : 4);
2613 }
2614 const SDValue &getMask() const {
2615 return getOperand((getOpcode() == ISD::VP_GATHER) ? 4 : 5);
2616 }
2617 const SDValue &getVectorLength() const {
2618 return getOperand((getOpcode() == ISD::VP_GATHER) ? 5 : 6);
2619 }
2620
2621 static bool classof(const SDNode *N) {
2622 return N->getOpcode() == ISD::VP_GATHER ||
2623 N->getOpcode() == ISD::VP_SCATTER;
2624 }
2625};
2626
2627/// This class is used to represent an VP_GATHER node
2628///
2629class VPGatherSDNode : public VPGatherScatterSDNode {
2630public:
2631 friend class SelectionDAG;
2632
2633 VPGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2634 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2635 : VPGatherScatterSDNode(ISD::VP_GATHER, Order, dl, VTs, MemVT, MMO,
2636 IndexType) {}
2637
2638 static bool classof(const SDNode *N) {
2639 return N->getOpcode() == ISD::VP_GATHER;
2640 }
2641};
2642
2643/// This class is used to represent an VP_SCATTER node
2644///
2645class VPScatterSDNode : public VPGatherScatterSDNode {
2646public:
2647 friend class SelectionDAG;
2648
2649 VPScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2650 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2651 : VPGatherScatterSDNode(ISD::VP_SCATTER, Order, dl, VTs, MemVT, MMO,
2652 IndexType) {}
2653
2654 const SDValue &getValue() const { return getOperand(1); }
2655
2656 static bool classof(const SDNode *N) {
2657 return N->getOpcode() == ISD::VP_SCATTER;
2658 }
2659};
2660
2661/// This is a base class used to represent
2662/// MGATHER and MSCATTER nodes
2663///
2664class MaskedGatherScatterSDNode : public MemSDNode {
2665public:
2666 friend class SelectionDAG;
2667
2668 MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
2669 const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2670 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2671 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2672 LSBaseSDNodeBits.AddressingMode = IndexType;
2673 assert(getIndexType() == IndexType && "Value truncated")(static_cast <bool> (getIndexType() == IndexType &&
"Value truncated") ? void (0) : __assert_fail ("getIndexType() == IndexType && \"Value truncated\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2673, __extension__
__PRETTY_FUNCTION__))
;
2674 }
2675
2676 /// How is Index applied to BasePtr when computing addresses.
2677 ISD::MemIndexType getIndexType() const {
2678 return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
2679 }
2680 void setIndexType(ISD::MemIndexType IndexType) {
2681 LSBaseSDNodeBits.AddressingMode = IndexType;
2682 }
2683 bool isIndexScaled() const {
2684 return (getIndexType() == ISD::SIGNED_SCALED) ||
2685 (getIndexType() == ISD::UNSIGNED_SCALED);
2686 }
2687 bool isIndexSigned() const {
2688 return (getIndexType() == ISD::SIGNED_SCALED) ||
2689 (getIndexType() == ISD::SIGNED_UNSCALED);
2690 }
2691
2692 // In the both nodes address is Op1, mask is Op2:
2693 // MaskedGatherSDNode (Chain, passthru, mask, base, index, scale)
2694 // MaskedScatterSDNode (Chain, value, mask, base, index, scale)
2695 // Mask is a vector of i1 elements
2696 const SDValue &getBasePtr() const { return getOperand(3); }
2697 const SDValue &getIndex() const { return getOperand(4); }
2698 const SDValue &getMask() const { return getOperand(2); }
2699 const SDValue &getScale() const { return getOperand(5); }
2700
2701 static bool classof(const SDNode *N) {
2702 return N->getOpcode() == ISD::MGATHER ||
2703 N->getOpcode() == ISD::MSCATTER;
2704 }
2705};
2706
2707/// This class is used to represent an MGATHER node
2708///
2709class MaskedGatherSDNode : public MaskedGatherScatterSDNode {
2710public:
2711 friend class SelectionDAG;
2712
2713 MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2714 EVT MemVT, MachineMemOperand *MMO,
2715 ISD::MemIndexType IndexType, ISD::LoadExtType ETy)
2716 : MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO,
2717 IndexType) {
2718 LoadSDNodeBits.ExtTy = ETy;
2719 }
2720
2721 const SDValue &getPassThru() const { return getOperand(1); }
2722
2723 ISD::LoadExtType getExtensionType() const {
2724 return ISD::LoadExtType(LoadSDNodeBits.ExtTy);
2725 }
2726
2727 static bool classof(const SDNode *N) {
2728 return N->getOpcode() == ISD::MGATHER;
2729 }
2730};
2731
2732/// This class is used to represent an MSCATTER node
2733///
2734class MaskedScatterSDNode : public MaskedGatherScatterSDNode {
2735public:
2736 friend class SelectionDAG;
2737
2738 MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2739 EVT MemVT, MachineMemOperand *MMO,
2740 ISD::MemIndexType IndexType, bool IsTrunc)
2741 : MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO,
2742 IndexType) {
2743 StoreSDNodeBits.IsTruncating = IsTrunc;
2744 }
2745
2746 /// Return true if the op does a truncation before store.
2747 /// For integers this is the same as doing a TRUNCATE and storing the result.
2748 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2749 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2750
2751 const SDValue &getValue() const { return getOperand(1); }
2752
2753 static bool classof(const SDNode *N) {
2754 return N->getOpcode() == ISD::MSCATTER;
2755 }
2756};
2757
2758/// An SDNode that represents everything that will be needed
2759/// to construct a MachineInstr. These nodes are created during the
2760/// instruction selection proper phase.
2761///
2762/// Note that the only supported way to set the `memoperands` is by calling the
2763/// `SelectionDAG::setNodeMemRefs` function as the memory management happens
2764/// inside the DAG rather than in the node.
2765class MachineSDNode : public SDNode {
2766private:
2767 friend class SelectionDAG;
2768
2769 MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs)
2770 : SDNode(Opc, Order, DL, VTs) {}
2771
2772 // We use a pointer union between a single `MachineMemOperand` pointer and
2773 // a pointer to an array of `MachineMemOperand` pointers. This is null when
2774 // the number of these is zero, the single pointer variant used when the
2775 // number is one, and the array is used for larger numbers.
2776 //
2777 // The array is allocated via the `SelectionDAG`'s allocator and so will
2778 // always live until the DAG is cleaned up and doesn't require ownership here.
2779 //
2780 // We can't use something simpler like `TinyPtrVector` here because `SDNode`
2781 // subclasses aren't managed in a conforming C++ manner. See the comments on
2782 // `SelectionDAG::MorphNodeTo` which details what all goes on, but the
2783 // constraint here is that these don't manage memory with their constructor or
2784 // destructor and can be initialized to a good state even if they start off
2785 // uninitialized.
2786 PointerUnion<MachineMemOperand *, MachineMemOperand **> MemRefs = {};
2787
2788 // Note that this could be folded into the above `MemRefs` member if doing so
2789 // is advantageous at some point. We don't need to store this in most cases.
2790 // However, at the moment this doesn't appear to make the allocation any
2791 // smaller and makes the code somewhat simpler to read.
2792 int NumMemRefs = 0;
2793
2794public:
2795 using mmo_iterator = ArrayRef<MachineMemOperand *>::const_iterator;
2796
2797 ArrayRef<MachineMemOperand *> memoperands() const {
2798 // Special case the common cases.
2799 if (NumMemRefs == 0)
2800 return {};
2801 if (NumMemRefs == 1)
2802 return makeArrayRef(MemRefs.getAddrOfPtr1(), 1);
2803
2804 // Otherwise we have an actual array.
2805 return makeArrayRef(MemRefs.get<MachineMemOperand **>(), NumMemRefs);
2806 }
2807 mmo_iterator memoperands_begin() const { return memoperands().begin(); }
2808 mmo_iterator memoperands_end() const { return memoperands().end(); }
2809 bool memoperands_empty() const { return memoperands().empty(); }
2810
2811 /// Clear out the memory reference descriptor list.
2812 void clearMemRefs() {
2813 MemRefs = nullptr;
2814 NumMemRefs = 0;
2815 }
2816
2817 static bool classof(const SDNode *N) {
2818 return N->isMachineOpcode();
2819 }
2820};
2821
2822/// An SDNode that records if a register contains a value that is guaranteed to
2823/// be aligned accordingly.
2824class AssertAlignSDNode : public SDNode {
2825 Align Alignment;
2826
2827public:
2828 AssertAlignSDNode(unsigned Order, const DebugLoc &DL, EVT VT, Align A)
2829 : SDNode(ISD::AssertAlign, Order, DL, getSDVTList(VT)), Alignment(A) {}
2830
2831 Align getAlign() const { return Alignment; }
2832
2833 static bool classof(const SDNode *N) {
2834 return N->getOpcode() == ISD::AssertAlign;
2835 }
2836};
2837
2838class SDNodeIterator {
2839 const SDNode *Node;
2840 unsigned Operand;
2841
2842 SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
2843
2844public:
2845 using iterator_category = std::forward_iterator_tag;
2846 using value_type = SDNode;
2847 using difference_type = std::ptrdiff_t;
2848 using pointer = value_type *;
2849 using reference = value_type &;
2850
2851 bool operator==(const SDNodeIterator& x) const {
2852 return Operand == x.Operand;
2853 }
2854 bool operator!=(const SDNodeIterator& x) const { return !operator==(x); }
2855
2856 pointer operator*() const {
2857 return Node->getOperand(Operand).getNode();
2858 }
2859 pointer operator->() const { return operator*(); }
2860
2861 SDNodeIterator& operator++() { // Preincrement
2862 ++Operand;
2863 return *this;
2864 }
2865 SDNodeIterator operator++(int) { // Postincrement
2866 SDNodeIterator tmp = *this; ++*this; return tmp;
2867 }
2868 size_t operator-(SDNodeIterator Other) const {
2869 assert(Node == Other.Node &&(static_cast <bool> (Node == Other.Node && "Cannot compare iterators of two different nodes!"
) ? void (0) : __assert_fail ("Node == Other.Node && \"Cannot compare iterators of two different nodes!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2870, __extension__
__PRETTY_FUNCTION__))
2870 "Cannot compare iterators of two different nodes!")(static_cast <bool> (Node == Other.Node && "Cannot compare iterators of two different nodes!"
) ? void (0) : __assert_fail ("Node == Other.Node && \"Cannot compare iterators of two different nodes!\""
, "llvm/include/llvm/CodeGen/SelectionDAGNodes.h", 2870, __extension__
__PRETTY_FUNCTION__))
;
2871 return Operand - Other.Operand;
2872 }
2873
2874 static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); }
2875 static SDNodeIterator end (const SDNode *N) {
2876 return SDNodeIterator(N, N->getNumOperands());
2877 }
2878
2879 unsigned getOperand() const { return Operand; }
2880 const SDNode *getNode() const { return Node; }
2881};
2882
2883template <> struct GraphTraits<SDNode*> {
2884 using NodeRef = SDNode *;
2885 using ChildIteratorType = SDNodeIterator;
2886
2887 static NodeRef getEntryNode(SDNode *N) { return N; }
2888
2889 static ChildIteratorType child_begin(NodeRef N) {
2890 return SDNodeIterator::begin(N);
2891 }
2892
2893 static ChildIteratorType child_end(NodeRef N) {
2894 return SDNodeIterator::end(N);
2895 }
2896};
2897
2898/// A representation of the largest SDNode, for use in sizeof().
2899///
2900/// This needs to be a union because the largest node differs on 32 bit systems
2901/// with 4 and 8 byte pointer alignment, respectively.
2902using LargestSDNode = AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
2903 BlockAddressSDNode,
2904 GlobalAddressSDNode,
2905 PseudoProbeSDNode>;
2906
2907/// The SDNode class with the greatest alignment requirement.
2908using MostAlignedSDNode = GlobalAddressSDNode;
2909
2910namespace ISD {
2911
2912 /// Returns true if the specified node is a non-extending and unindexed load.
2913 inline bool isNormalLoad(const SDNode *N) {
2914 const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N);
2915 return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD &&
2916 Ld->getAddressingMode() == ISD::UNINDEXED;
2917 }
2918
2919 /// Returns true if the specified node is a non-extending load.
2920 inline bool isNON_EXTLoad(const SDNode *N) {
2921 return isa<LoadSDNode>(N) &&
2922 cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
2923 }
2924
2925 /// Returns true if the specified node is a EXTLOAD.
2926 inline bool isEXTLoad(const SDNode *N) {
2927 return isa<LoadSDNode>(N) &&
2928 cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
2929 }
2930
2931 /// Returns true if the specified node is a SEXTLOAD.
2932 inline bool isSEXTLoad(const SDNode *N) {
2933 return isa<LoadSDNode>(N) &&
2934 cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
2935 }
2936
2937 /// Returns true if the specified node is a ZEXTLOAD.
2938 inline bool isZEXTLoad(const SDNode *N) {
2939 return isa<LoadSDNode>(N) &&
2940 cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
2941 }
2942
2943 /// Returns true if the specified node is an unindexed load.
2944 inline bool isUNINDEXEDLoad(const SDNode *N) {
2945 return isa<LoadSDNode>(N) &&
2946 cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
2947 }
2948
2949 /// Returns true if the specified node is a non-truncating
2950 /// and unindexed store.
2951 inline bool isNormalStore(const SDNode *N) {
2952 const StoreSDNode *St = dyn_cast<StoreSDNode>(N);
2953 return St && !St->isTruncatingStore() &&
2954 St->getAddressingMode() == ISD::UNINDEXED;
2955 }
2956
2957 /// Returns true if the specified node is an unindexed store.
2958 inline bool isUNINDEXEDStore(const SDNode *N) {
2959 return isa<StoreSDNode>(N) &&
2960 cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
2961 }
2962
2963 /// Attempt to match a unary predicate against a scalar/splat constant or
2964 /// every element of a constant BUILD_VECTOR.
2965 /// If AllowUndef is true, then UNDEF elements will pass nullptr to Match.
2966 bool matchUnaryPredicate(SDValue Op,
2967 std::function<bool(ConstantSDNode *)> Match,
2968 bool AllowUndefs = false);
2969
2970 /// Attempt to match a binary predicate against a pair of scalar/splat
2971 /// constants or every element of a pair of constant BUILD_VECTORs.
2972 /// If AllowUndef is true, then UNDEF elements will pass nullptr to Match.
2973 /// If AllowTypeMismatch is true then RetType + ArgTypes don't need to match.
2974 bool matchBinaryPredicate(
2975 SDValue LHS, SDValue RHS,
2976 std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
2977 bool AllowUndefs = false, bool AllowTypeMismatch = false);
2978
2979 /// Returns true if the specified value is the overflow result from one
2980 /// of the overflow intrinsic nodes.
2981 inline bool isOverflowIntrOpRes(SDValue Op) {
2982 unsigned Opc = Op.getOpcode();
2983 return (Op.getResNo() == 1 &&
2984 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
2985 Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO));
2986 }
2987
2988} // end namespace ISD
2989
2990} // end namespace llvm
2991
2992#endif // LLVM_CODEGEN_SELECTIONDAGNODES_H

/build/llvm-toolchain-snapshot-14~++20220116111435+edbb8a843c13/llvm/include/llvm/IR/GlobalVariable.h

1//===-- llvm/GlobalVariable.h - GlobalVariable class ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the declaration of the GlobalVariable class, which
10// represents a single global variable (or constant) in the VM.
11//
12// Global variables are constant pointers that refer to hunks of space that are
13// allocated by either the VM, or by the linker in a static compiler. A global
14// variable may have an initial value, which is copied into the executables .data
15// area. Global Constants are required to have initializers.
16//
17//===----------------------------------------------------------------------===//
18
19#ifndef LLVM_IR_GLOBALVARIABLE_H
20#define LLVM_IR_GLOBALVARIABLE_H
21
22#include "llvm/ADT/Twine.h"
23#include "llvm/ADT/ilist_node.h"
24#include "llvm/IR/Attributes.h"
25#include "llvm/IR/GlobalObject.h"
26#include "llvm/IR/OperandTraits.h"
27#include "llvm/IR/Value.h"
28#include <cassert>
29#include <cstddef>
30
31namespace llvm {
32
33class Constant;
34class Module;
35
36template <typename ValueSubClass> class SymbolTableListTraits;
37class DIGlobalVariableExpression;
38
39class GlobalVariable : public GlobalObject, public ilist_node<GlobalVariable> {
40 friend class SymbolTableListTraits<GlobalVariable>;
41
42 AttributeSet Attrs;
43 bool isConstantGlobal : 1; // Is this a global constant?
44 bool isExternallyInitializedConstant : 1; // Is this a global whose value
45 // can change from its initial
46 // value before global
47 // initializers are run?
48
49public:
50 /// GlobalVariable ctor - If a parent module is specified, the global is
51 /// automatically inserted into the end of the specified modules global list.
52 GlobalVariable(Type *Ty, bool isConstant, LinkageTypes Linkage,
53 Constant *Initializer = nullptr, const Twine &Name = "",
54 ThreadLocalMode = NotThreadLocal, unsigned AddressSpace = 0,
55 bool isExternallyInitialized = false);
56 /// GlobalVariable ctor - This creates a global and inserts it before the
57 /// specified other global.
58 GlobalVariable(Module &M, Type *Ty, bool isConstant, LinkageTypes Linkage,
59 Constant *Initializer, const Twine &Name = "",
60 GlobalVariable *InsertBefore = nullptr,
61 ThreadLocalMode = NotThreadLocal,
62 Optional<unsigned> AddressSpace = None,
63 bool isExternallyInitialized = false);
64 GlobalVariable(const GlobalVariable &) = delete;
65 GlobalVariable &operator=(const GlobalVariable &) = delete;
66
67 ~GlobalVariable() {
68 dropAllReferences();
69 }
70
71 // allocate space for exactly one operand
72 void *operator new(size_t s) {
73 return User::operator new(s, 1);
74 }
75
76 // delete space for exactly one operand as created in the corresponding new operator
77 void operator delete(void *ptr){
78 assert(ptr != nullptr && "must not be nullptr")(static_cast <bool> (ptr != nullptr && "must not be nullptr"
) ? void (0) : __assert_fail ("ptr != nullptr && \"must not be nullptr\""
, "llvm/include/llvm/IR/GlobalVariable.h", 78, __extension__ __PRETTY_FUNCTION__
))
;
79 User *Obj = static_cast<User *>(ptr);
80 // Number of operands can be set to 0 after construction and initialization. Make sure
81 // that number of operands is reset to 1, as this is needed in User::operator delete
82 Obj->setGlobalVariableNumOperands(1);
83 User::operator delete(Obj);
84 }
85
86 /// Provide fast operand accessors
87 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
88
89 /// Definitions have initializers, declarations don't.
90 ///
91 inline bool hasInitializer() const { return !isDeclaration(); }
31
Assuming the condition is true
32
Returning the value 1, which participates in a condition later
92
93 /// hasDefinitiveInitializer - Whether the global variable has an initializer,
94 /// and any other instances of the global (this can happen due to weak
95 /// linkage) are guaranteed to have the same initializer.
96 ///
97 /// Note that if you want to transform a global, you must use
98 /// hasUniqueInitializer() instead, because of the *_odr linkage type.
99 ///
100 /// Example:
101 ///
102 /// @a = global SomeType* null - Initializer is both definitive and unique.
103 ///
104 /// @b = global weak SomeType* null - Initializer is neither definitive nor
105 /// unique.
106 ///
107 /// @c = global weak_odr SomeType* null - Initializer is definitive, but not
108 /// unique.
109 inline bool hasDefinitiveInitializer() const {
110 return hasInitializer() &&
111 // The initializer of a global variable may change to something arbitrary
112 // at link time.
113 !isInterposable() &&
114 // The initializer of a global variable with the externally_initialized
115 // marker may change at runtime before C++ initializers are evaluated.
116 !isExternallyInitialized();
117 }
118
119 /// hasUniqueInitializer - Whether the global variable has an initializer, and
120 /// any changes made to the initializer will turn up in the final executable.
121 inline bool hasUniqueInitializer() const {
122 return
123 // We need to be sure this is the definition that will actually be used
124 isStrongDefinitionForLinker() &&
125 // It is not safe to modify initializers of global variables with the
126 // external_initializer marker since the value may be changed at runtime
127 // before C++ initializers are evaluated.
128 !isExternallyInitialized();
129 }
130
131 /// getInitializer - Return the initializer for this global variable. It is
132 /// illegal to call this method if the global is external, because we cannot
133 /// tell what the value is initialized to!
134 ///
135 inline const Constant *getInitializer() const {
136 assert(hasInitializer() && "GV doesn't have initializer!")(static_cast <bool> (hasInitializer() && "GV doesn't have initializer!"
) ? void (0) : __assert_fail ("hasInitializer() && \"GV doesn't have initializer!\""
, "llvm/include/llvm/IR/GlobalVariable.h", 136, __extension__
__PRETTY_FUNCTION__))
;
137 return static_cast<Constant*>(Op<0>().get());
138 }
139 inline Constant *getInitializer() {
140 assert(hasInitializer() && "GV doesn't have initializer!")(static_cast <bool> (hasInitializer() && "GV doesn't have initializer!"
) ? void (0) : __assert_fail ("hasInitializer() && \"GV doesn't have initializer!\""
, "llvm/include/llvm/IR/GlobalVariable.h", 140, __extension__
__PRETTY_FUNCTION__))
;
141 return static_cast<Constant*>(Op<0>().get());
142 }
143 /// setInitializer - Sets the initializer for this global variable, removing
144 /// any existing initializer if InitVal==NULL. If this GV has type T*, the
145 /// initializer must have type T.
146 void setInitializer(Constant *InitVal);
147
148 /// If the value is a global constant, its value is immutable throughout the
149 /// runtime execution of the program. Assigning a value into the constant
150 /// leads to undefined behavior.
151 ///
152 bool isConstant() const { return isConstantGlobal; }
153 void setConstant(bool Val) { isConstantGlobal = Val; }
154
155 bool isExternallyInitialized() const {
156 return isExternallyInitializedConstant;
157 }
158 void setExternallyInitialized(bool Val) {
159 isExternallyInitializedConstant = Val;
160 }
161
162 /// copyAttributesFrom - copy all additional attributes (those not needed to
163 /// create a GlobalVariable) from the GlobalVariable Src to this one.
164 void copyAttributesFrom(const GlobalVariable *Src);
165
166 /// removeFromParent - This method unlinks 'this' from the containing module,
167 /// but does not delete it.
168 ///
169 void removeFromParent();
170
171 /// eraseFromParent - This method unlinks 'this' from the containing module
172 /// and deletes it.
173 ///
174 void eraseFromParent();
175
176 /// Drop all references in preparation to destroy the GlobalVariable. This
177 /// drops not only the reference to the initializer but also to any metadata.
178 void dropAllReferences();
179
180 /// Attach a DIGlobalVariableExpression.
181 void addDebugInfo(DIGlobalVariableExpression *GV);
182
183 /// Fill the vector with all debug info attachements.
184 void getDebugInfo(SmallVectorImpl<DIGlobalVariableExpression *> &GVs) const;
185
186 /// Add attribute to this global.
187 void addAttribute(Attribute::AttrKind Kind) {
188 Attrs = Attrs.addAttribute(getContext(), Kind);
189 }
190
191 /// Add attribute to this global.
192 void addAttribute(StringRef Kind, StringRef Val = StringRef()) {
193 Attrs = Attrs.addAttribute(getContext(), Kind, Val);
194 }
195
196 /// Return true if the attribute exists.
197 bool hasAttribute(Attribute::AttrKind Kind) const {
198 return Attrs.hasAttribute(Kind);
199 }
200
201 /// Return true if the attribute exists.
202 bool hasAttribute(StringRef Kind) const {
203 return Attrs.hasAttribute(Kind);
204 }
205
206 /// Return true if any attributes exist.
207 bool hasAttributes() const {
208 return Attrs.hasAttributes();
209 }
210
211 /// Return the attribute object.
212 Attribute getAttribute(Attribute::AttrKind Kind) const {
213 return Attrs.getAttribute(Kind);
214 }
215
216 /// Return the attribute object.
217 Attribute getAttribute(StringRef Kind) const {
218 return Attrs.getAttribute(Kind);
219 }
220
221 /// Return the attribute set for this global
222 AttributeSet getAttributes() const {
223 return Attrs;
224 }
225
226 /// Return attribute set as list with index.
227 /// FIXME: This may not be required once ValueEnumerators
228 /// in bitcode-writer can enumerate attribute-set.
229 AttributeList getAttributesAsList(unsigned index) const {
230 if (!hasAttributes())
231 return AttributeList();
232 std::pair<unsigned, AttributeSet> AS[1] = {{index, Attrs}};
233 return AttributeList::get(getContext(), AS);
234 }
235
236 /// Set attribute list for this global
237 void setAttributes(AttributeSet A) {
238 Attrs = A;
239 }
240
241 /// Check if section name is present
242 bool hasImplicitSection() const {
243 return getAttributes().hasAttribute("bss-section") ||
244 getAttributes().hasAttribute("data-section") ||
245 getAttributes().hasAttribute("relro-section") ||
246 getAttributes().hasAttribute("rodata-section");
247 }
248
249 // Methods for support type inquiry through isa, cast, and dyn_cast:
250 static bool classof(const Value *V) {
251 return V->getValueID() == Value::GlobalVariableVal;
252 }
253};
254
255template <>
256struct OperandTraits<GlobalVariable> :
257 public OptionalOperandTraits<GlobalVariable> {
258};
259
260DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalVariable, Value)GlobalVariable::op_iterator GlobalVariable::op_begin() { return
OperandTraits<GlobalVariable>::op_begin(this); } GlobalVariable
::const_op_iterator GlobalVariable::op_begin() const { return
OperandTraits<GlobalVariable>::op_begin(const_cast<
GlobalVariable*>(this)); } GlobalVariable::op_iterator GlobalVariable
::op_end() { return OperandTraits<GlobalVariable>::op_end
(this); } GlobalVariable::const_op_iterator GlobalVariable::op_end
() const { return OperandTraits<GlobalVariable>::op_end
(const_cast<GlobalVariable*>(this)); } Value *GlobalVariable
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<GlobalVariable>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<GlobalVariable>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/GlobalVariable.h", 260, __extension__
__PRETTY_FUNCTION__)); return cast_or_null<Value>( OperandTraits
<GlobalVariable>::op_begin(const_cast<GlobalVariable
*>(this))[i_nocapture].get()); } void GlobalVariable::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { (static_cast <
bool> (i_nocapture < OperandTraits<GlobalVariable>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<GlobalVariable>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/GlobalVariable.h", 260, __extension__
__PRETTY_FUNCTION__)); OperandTraits<GlobalVariable>::
op_begin(this)[i_nocapture] = Val_nocapture; } unsigned GlobalVariable
::getNumOperands() const { return OperandTraits<GlobalVariable
>::operands(this); } template <int Idx_nocapture> Use
&GlobalVariable::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
GlobalVariable::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
261
262} // end namespace llvm
263
264#endif // LLVM_IR_GLOBALVARIABLE_H

/usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/bits/stl_tree.h

1// RB tree implementation -*- C++ -*-
2
3// Copyright (C) 2001-2020 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/*
26 *
27 * Copyright (c) 1996,1997
28 * Silicon Graphics Computer Systems, Inc.
29 *
30 * Permission to use, copy, modify, distribute and sell this software
31 * and its documentation for any purpose is hereby granted without fee,
32 * provided that the above copyright notice appear in all copies and
33 * that both that copyright notice and this permission notice appear
34 * in supporting documentation. Silicon Graphics makes no
35 * representations about the suitability of this software for any
36 * purpose. It is provided "as is" without express or implied warranty.
37 *
38 *
39 * Copyright (c) 1994
40 * Hewlett-Packard Company
41 *
42 * Permission to use, copy, modify, distribute and sell this software
43 * and its documentation for any purpose is hereby granted without fee,
44 * provided that the above copyright notice appear in all copies and
45 * that both that copyright notice and this permission notice appear
46 * in supporting documentation. Hewlett-Packard Company makes no
47 * representations about the suitability of this software for any
48 * purpose. It is provided "as is" without express or implied warranty.
49 *
50 *
51 */
52
53/** @file bits/stl_tree.h
54 * This is an internal header file, included by other library headers.
55 * Do not attempt to use it directly. @headername{map,set}
56 */
57
58#ifndef _STL_TREE_H1
59#define _STL_TREE_H1 1
60
61#pragma GCC system_header
62
63#include <bits/stl_algobase.h>
64#include <bits/allocator.h>
65#include <bits/stl_function.h>
66#include <bits/cpp_type_traits.h>
67#include <ext/alloc_traits.h>
68#if __cplusplus201402L >= 201103L
69# include <ext/aligned_buffer.h>
70#endif
71#if __cplusplus201402L > 201402L
72# include <bits/node_handle.h>
73#endif
74
75namespace std _GLIBCXX_VISIBILITY(default)__attribute__ ((__visibility__ ("default")))
76{
77_GLIBCXX_BEGIN_NAMESPACE_VERSION
78
79#if __cplusplus201402L > 201103L
80# define __cpp_lib_generic_associative_lookup201304 201304
81#endif
82
83 // Red-black tree class, designed for use in implementing STL
84 // associative containers (set, multiset, map, and multimap). The
85 // insertion and deletion algorithms are based on those in Cormen,
86 // Leiserson, and Rivest, Introduction to Algorithms (MIT Press,
87 // 1990), except that
88 //
89 // (1) the header cell is maintained with links not only to the root
90 // but also to the leftmost node of the tree, to enable constant
91 // time begin(), and to the rightmost node of the tree, to enable
92 // linear time performance when used with the generic set algorithms
93 // (set_union, etc.)
94 //
95 // (2) when a node being deleted has two children its successor node
96 // is relinked into its place, rather than copied, so that the only
97 // iterators invalidated are those referring to the deleted node.
98
99 enum _Rb_tree_color { _S_red = false, _S_black = true };
100
101 struct _Rb_tree_node_base
102 {
103 typedef _Rb_tree_node_base* _Base_ptr;
104 typedef const _Rb_tree_node_base* _Const_Base_ptr;
105
106 _Rb_tree_color _M_color;
107 _Base_ptr _M_parent;
108 _Base_ptr _M_left;
109 _Base_ptr _M_right;
110
111 static _Base_ptr
112 _S_minimum(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
113 {
114 while (__x->_M_left != 0) __x = __x->_M_left;
115 return __x;
116 }
117
118 static _Const_Base_ptr
119 _S_minimum(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
120 {
121 while (__x->_M_left != 0) __x = __x->_M_left;
122 return __x;
123 }
124
125 static _Base_ptr
126 _S_maximum(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
127 {
128 while (__x->_M_right != 0) __x = __x->_M_right;
129 return __x;
130 }
131
132 static _Const_Base_ptr
133 _S_maximum(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
134 {
135 while (__x->_M_right != 0) __x = __x->_M_right;
136 return __x;
137 }
138 };
139
140 // Helper type offering value initialization guarantee on the compare functor.
141 template<typename _Key_compare>
142 struct _Rb_tree_key_compare
143 {
144 _Key_compare _M_key_compare;
145
146 _Rb_tree_key_compare()
147 _GLIBCXX_NOEXCEPT_IF(noexcept(is_nothrow_default_constructible<_Key_compare>
::value)
148 is_nothrow_default_constructible<_Key_compare>::value)noexcept(is_nothrow_default_constructible<_Key_compare>
::value)
149 : _M_key_compare()
150 { }
151
152 _Rb_tree_key_compare(const _Key_compare& __comp)
153 : _M_key_compare(__comp)
154 { }
155
156#if __cplusplus201402L >= 201103L
157 // Copy constructor added for consistency with C++98 mode.
158 _Rb_tree_key_compare(const _Rb_tree_key_compare&) = default;
159
160 _Rb_tree_key_compare(_Rb_tree_key_compare&& __x)
161 noexcept(is_nothrow_copy_constructible<_Key_compare>::value)
162 : _M_key_compare(__x._M_key_compare)
163 { }
164#endif
165 };
166
167 // Helper type to manage default initialization of node count and header.
168 struct _Rb_tree_header
169 {
170 _Rb_tree_node_base _M_header;
171 size_t _M_node_count; // Keeps track of size of tree.
172
173 _Rb_tree_header() _GLIBCXX_NOEXCEPTnoexcept
174 {
175 _M_header._M_color = _S_red;
176 _M_reset();
177 }
178
179#if __cplusplus201402L >= 201103L
180 _Rb_tree_header(_Rb_tree_header&& __x) noexcept
181 {
182 if (__x._M_header._M_parent != nullptr)
183 _M_move_data(__x);
184 else
185 {
186 _M_header._M_color = _S_red;
187 _M_reset();
188 }
189 }
190#endif
191
192 void
193 _M_move_data(_Rb_tree_header& __from)
194 {
195 _M_header._M_color = __from._M_header._M_color;
196 _M_header._M_parent = __from._M_header._M_parent;
197 _M_header._M_left = __from._M_header._M_left;
198 _M_header._M_right = __from._M_header._M_right;
199 _M_header._M_parent->_M_parent = &_M_header;
200 _M_node_count = __from._M_node_count;
201
202 __from._M_reset();
203 }
204
205 void
206 _M_reset()
207 {
208 _M_header._M_parent = 0;
209 _M_header._M_left = &_M_header;
210 _M_header._M_right = &_M_header;
211 _M_node_count = 0;
212 }
213 };
214
215 template<typename _Val>
216 struct _Rb_tree_node : public _Rb_tree_node_base
217 {
218 typedef _Rb_tree_node<_Val>* _Link_type;
219
220#if __cplusplus201402L < 201103L
221 _Val _M_value_field;
222
223 _Val*
224 _M_valptr()
225 { return std::__addressof(_M_value_field); }
226
227 const _Val*
228 _M_valptr() const
229 { return std::__addressof(_M_value_field); }
230#else
231 __gnu_cxx::__aligned_membuf<_Val> _M_storage;
232
233 _Val*
234 _M_valptr()
235 { return _M_storage._M_ptr(); }
236
237 const _Val*
238 _M_valptr() const
239 { return _M_storage._M_ptr(); }
240#endif
241 };
242
243 _GLIBCXX_PURE__attribute__ ((__pure__)) _Rb_tree_node_base*
244 _Rb_tree_increment(_Rb_tree_node_base* __x) throw ();
245
246 _GLIBCXX_PURE__attribute__ ((__pure__)) const _Rb_tree_node_base*
247 _Rb_tree_increment(const _Rb_tree_node_base* __x) throw ();
248
249 _GLIBCXX_PURE__attribute__ ((__pure__)) _Rb_tree_node_base*
250 _Rb_tree_decrement(_Rb_tree_node_base* __x) throw ();
251
252 _GLIBCXX_PURE__attribute__ ((__pure__)) const _Rb_tree_node_base*
253 _Rb_tree_decrement(const _Rb_tree_node_base* __x) throw ();
254
255 template<typename _Tp>
256 struct _Rb_tree_iterator
257 {
258 typedef _Tp value_type;
259 typedef _Tp& reference;
260 typedef _Tp* pointer;
261
262 typedef bidirectional_iterator_tag iterator_category;
263 typedef ptrdiff_t difference_type;
264
265 typedef _Rb_tree_iterator<_Tp> _Self;
266 typedef _Rb_tree_node_base::_Base_ptr _Base_ptr;
267 typedef _Rb_tree_node<_Tp>* _Link_type;
268
269 _Rb_tree_iterator() _GLIBCXX_NOEXCEPTnoexcept
270 : _M_node() { }
271
272 explicit
273 _Rb_tree_iterator(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
274 : _M_node(__x) { }
275
276 reference
277 operator*() const _GLIBCXX_NOEXCEPTnoexcept
278 { return *static_cast<_Link_type>(_M_node)->_M_valptr(); }
279
280 pointer
281 operator->() const _GLIBCXX_NOEXCEPTnoexcept
282 { return static_cast<_Link_type> (_M_node)->_M_valptr(); }
283
284 _Self&
285 operator++() _GLIBCXX_NOEXCEPTnoexcept
286 {
287 _M_node = _Rb_tree_increment(_M_node);
288 return *this;
289 }
290
291 _Self
292 operator++(int) _GLIBCXX_NOEXCEPTnoexcept
293 {
294 _Self __tmp = *this;
295 _M_node = _Rb_tree_increment(_M_node);
296 return __tmp;
297 }
298
299 _Self&
300 operator--() _GLIBCXX_NOEXCEPTnoexcept
301 {
302 _M_node = _Rb_tree_decrement(_M_node);
303 return *this;
304 }
305
306 _Self
307 operator--(int) _GLIBCXX_NOEXCEPTnoexcept
308 {
309 _Self __tmp = *this;
310 _M_node = _Rb_tree_decrement(_M_node);
311 return __tmp;
312 }
313
314 friend bool
315 operator==(const _Self& __x, const _Self& __y) _GLIBCXX_NOEXCEPTnoexcept
316 { return __x._M_node == __y._M_node; }
317
318#if ! __cpp_lib_three_way_comparison
319 friend bool
320 operator!=(const _Self& __x, const _Self& __y) _GLIBCXX_NOEXCEPTnoexcept
321 { return __x._M_node != __y._M_node; }
37
Assuming '__x._M_node' is not equal to '__y._M_node'
38
Returning the value 1, which participates in a condition later
322#endif
323
324 _Base_ptr _M_node;
325 };
326
327 template<typename _Tp>
328 struct _Rb_tree_const_iterator
329 {
330 typedef _Tp value_type;
331 typedef const _Tp& reference;
332 typedef const _Tp* pointer;
333
334 typedef _Rb_tree_iterator<_Tp> iterator;
335
336 typedef bidirectional_iterator_tag iterator_category;
337 typedef ptrdiff_t difference_type;
338
339 typedef _Rb_tree_const_iterator<_Tp> _Self;
340 typedef _Rb_tree_node_base::_Const_Base_ptr _Base_ptr;
341 typedef const _Rb_tree_node<_Tp>* _Link_type;
342
343 _Rb_tree_const_iterator() _GLIBCXX_NOEXCEPTnoexcept
344 : _M_node() { }
345
346 explicit
347 _Rb_tree_const_iterator(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
348 : _M_node(__x) { }
349
350 _Rb_tree_const_iterator(const iterator& __it) _GLIBCXX_NOEXCEPTnoexcept
351 : _M_node(__it._M_node) { }
352
353 iterator
354 _M_const_cast() const _GLIBCXX_NOEXCEPTnoexcept
355 { return iterator(const_cast<typename iterator::_Base_ptr>(_M_node)); }
356
357 reference
358 operator*() const _GLIBCXX_NOEXCEPTnoexcept
359 { return *static_cast<_Link_type>(_M_node)->_M_valptr(); }
360
361 pointer
362 operator->() const _GLIBCXX_NOEXCEPTnoexcept
363 { return static_cast<_Link_type>(_M_node)->_M_valptr(); }
364
365 _Self&
366 operator++() _GLIBCXX_NOEXCEPTnoexcept
367 {
368 _M_node = _Rb_tree_increment(_M_node);
369 return *this;
370 }
371
372 _Self
373 operator++(int) _GLIBCXX_NOEXCEPTnoexcept
374 {
375 _Self __tmp = *this;
376 _M_node = _Rb_tree_increment(_M_node);
377 return __tmp;
378 }
379
380 _Self&
381 operator--() _GLIBCXX_NOEXCEPTnoexcept
382 {
383 _M_node = _Rb_tree_decrement(_M_node);
384 return *this;
385 }
386
387 _Self
388 operator--(int) _GLIBCXX_NOEXCEPTnoexcept
389 {
390 _Self __tmp = *this;
391 _M_node = _Rb_tree_decrement(_M_node);
392 return __tmp;
393 }
394
395 friend bool
396 operator==(const _Self& __x, const _Self& __y) _GLIBCXX_NOEXCEPTnoexcept
397 { return __x._M_node == __y._M_node; }
398
399#if ! __cpp_lib_three_way_comparison
400 friend bool
401 operator!=(const _Self& __x, const _Self& __y) _GLIBCXX_NOEXCEPTnoexcept
402 { return __x._M_node != __y._M_node; }
403#endif
404
405 _Base_ptr _M_node;
406 };
407
408 void
409 _Rb_tree_insert_and_rebalance(const bool __insert_left,
410 _Rb_tree_node_base* __x,
411 _Rb_tree_node_base* __p,
412 _Rb_tree_node_base& __header) throw ();
413
414 _Rb_tree_node_base*
415 _Rb_tree_rebalance_for_erase(_Rb_tree_node_base* const __z,
416 _Rb_tree_node_base& __header) throw ();
417
418#if __cplusplus201402L >= 201402L
419 template<typename _Cmp, typename _SfinaeType, typename = __void_t<>>
420 struct __has_is_transparent
421 { };
422
423 template<typename _Cmp, typename _SfinaeType>
424 struct __has_is_transparent<_Cmp, _SfinaeType,
425 __void_t<typename _Cmp::is_transparent>>
426 { typedef void type; };
427
428 template<typename _Cmp, typename _SfinaeType>
429 using __has_is_transparent_t
430 = typename __has_is_transparent<_Cmp, _SfinaeType>::type;
431#endif
432
433#if __cplusplus201402L > 201402L
434 template<typename _Tree1, typename _Cmp2>
435 struct _Rb_tree_merge_helper { };
436#endif
437
438 template<typename _Key, typename _Val, typename _KeyOfValue,
439 typename _Compare, typename _Alloc = allocator<_Val> >
440 class _Rb_tree
441 {
442 typedef typename __gnu_cxx::__alloc_traits<_Alloc>::template
443 rebind<_Rb_tree_node<_Val> >::other _Node_allocator;
444
445 typedef __gnu_cxx::__alloc_traits<_Node_allocator> _Alloc_traits;
446
447 protected:
448 typedef _Rb_tree_node_base* _Base_ptr;
449 typedef const _Rb_tree_node_base* _Const_Base_ptr;
450 typedef _Rb_tree_node<_Val>* _Link_type;
451 typedef const _Rb_tree_node<_Val>* _Const_Link_type;
452
453 private:
454 // Functor recycling a pool of nodes and using allocation once the pool
455 // is empty.
456 struct _Reuse_or_alloc_node
457 {
458 _Reuse_or_alloc_node(_Rb_tree& __t)
459 : _M_root(__t._M_root()), _M_nodes(__t._M_rightmost()), _M_t(__t)
460 {
461 if (_M_root)
462 {
463 _M_root->_M_parent = 0;
464
465 if (_M_nodes->_M_left)
466 _M_nodes = _M_nodes->_M_left;
467 }
468 else
469 _M_nodes = 0;
470 }
471
472#if __cplusplus201402L >= 201103L
473 _Reuse_or_alloc_node(const _Reuse_or_alloc_node&) = delete;
474#endif
475
476 ~_Reuse_or_alloc_node()
477 { _M_t._M_erase(static_cast<_Link_type>(_M_root)); }
478
479 template<typename _Arg>
480 _Link_type
481#if __cplusplus201402L < 201103L
482 operator()(const _Arg& __arg)
483#else
484 operator()(_Arg&& __arg)
485#endif
486 {
487 _Link_type __node = static_cast<_Link_type>(_M_extract());
488 if (__node)
489 {
490 _M_t._M_destroy_node(__node);
491 _M_t._M_construct_node(__node, _GLIBCXX_FORWARD(_Arg, __arg)std::forward<_Arg>(__arg));
492 return __node;
493 }
494
495 return _M_t._M_create_node(_GLIBCXX_FORWARD(_Arg, __arg)std::forward<_Arg>(__arg));
496 }
497
498 private:
499 _Base_ptr
500 _M_extract()
501 {
502 if (!_M_nodes)
503 return _M_nodes;
504
505 _Base_ptr __node = _M_nodes;
506 _M_nodes = _M_nodes->_M_parent;
507 if (_M_nodes)
508 {
509 if (_M_nodes->_M_right == __node)
510 {
511 _M_nodes->_M_right = 0;
512
513 if (_M_nodes->_M_left)
514 {
515 _M_nodes = _M_nodes->_M_left;
516
517 while (_M_nodes->_M_right)
518 _M_nodes = _M_nodes->_M_right;
519
520 if (_M_nodes->_M_left)
521 _M_nodes = _M_nodes->_M_left;
522 }
523 }
524 else // __node is on the left.
525 _M_nodes->_M_left = 0;
526 }
527 else
528 _M_root = 0;
529
530 return __node;
531 }
532
533 _Base_ptr _M_root;
534 _Base_ptr _M_nodes;
535 _Rb_tree& _M_t;
536 };
537
538 // Functor similar to the previous one but without any pool of nodes to
539 // recycle.
540 struct _Alloc_node
541 {
542 _Alloc_node(_Rb_tree& __t)
543 : _M_t(__t) { }
544
545 template<typename _Arg>
546 _Link_type
547#if __cplusplus201402L < 201103L
548 operator()(const _Arg& __arg) const
549#else
550 operator()(_Arg&& __arg) const
551#endif
552 { return _M_t._M_create_node(_GLIBCXX_FORWARD(_Arg, __arg)std::forward<_Arg>(__arg)); }
553
554 private:
555 _Rb_tree& _M_t;
556 };
557
558 public:
559 typedef _Key key_type;
560 typedef _Val value_type;
561 typedef value_type* pointer;
562 typedef const value_type* const_pointer;
563 typedef value_type& reference;
564 typedef const value_type& const_reference;
565 typedef size_t size_type;
566 typedef ptrdiff_t difference_type;
567 typedef _Alloc allocator_type;
568
569 _Node_allocator&
570 _M_get_Node_allocator() _GLIBCXX_NOEXCEPTnoexcept
571 { return this->_M_impl; }
572
573 const _Node_allocator&
574 _M_get_Node_allocator() const _GLIBCXX_NOEXCEPTnoexcept
575 { return this->_M_impl; }
576
577 allocator_type
578 get_allocator() const _GLIBCXX_NOEXCEPTnoexcept
579 { return allocator_type(_M_get_Node_allocator()); }
580
581 protected:
582 _Link_type
583 _M_get_node()
584 { return _Alloc_traits::allocate(_M_get_Node_allocator(), 1); }
585
586 void
587 _M_put_node(_Link_type __p) _GLIBCXX_NOEXCEPTnoexcept
588 { _Alloc_traits::deallocate(_M_get_Node_allocator(), __p, 1); }
589
590#if __cplusplus201402L < 201103L
591 void
592 _M_construct_node(_Link_type __node, const value_type& __x)
593 {
594 __tryif (true)
595 { get_allocator().construct(__node->_M_valptr(), __x); }
596 __catch(...)if (false)
597 {
598 _M_put_node(__node);
599 __throw_exception_again;
600 }
601 }
602
603 _Link_type
604 _M_create_node(const value_type& __x)
605 {
606 _Link_type __tmp = _M_get_node();
607 _M_construct_node(__tmp, __x);
608 return __tmp;
609 }
610#else
611 template<typename... _Args>
612 void
613 _M_construct_node(_Link_type __node, _Args&&... __args)
614 {
615 __tryif (true)
616 {
617 ::new(__node) _Rb_tree_node<_Val>;
618 _Alloc_traits::construct(_M_get_Node_allocator(),
619 __node->_M_valptr(),
620 std::forward<_Args>(__args)...);
621 }
622 __catch(...)if (false)
623 {
624 __node->~_Rb_tree_node<_Val>();
625 _M_put_node(__node);
626 __throw_exception_again;
627 }
628 }
629
630 template<typename... _Args>
631 _Link_type
632 _M_create_node(_Args&&... __args)
633 {
634 _Link_type __tmp = _M_get_node();
635 _M_construct_node(__tmp, std::forward<_Args>(__args)...);
636 return __tmp;
637 }
638#endif
639
640 void
641 _M_destroy_node(_Link_type __p) _GLIBCXX_NOEXCEPTnoexcept
642 {
643#if __cplusplus201402L < 201103L
644 get_allocator().destroy(__p->_M_valptr());
645#else
646 _Alloc_traits::destroy(_M_get_Node_allocator(), __p->_M_valptr());
647 __p->~_Rb_tree_node<_Val>();
648#endif
649 }
650
651 void
652 _M_drop_node(_Link_type __p) _GLIBCXX_NOEXCEPTnoexcept
653 {
654 _M_destroy_node(__p);
655 _M_put_node(__p);
656 }
657
658 template<typename _NodeGen>
659 _Link_type
660 _M_clone_node(_Const_Link_type __x, _NodeGen& __node_gen)
661 {
662 _Link_type __tmp = __node_gen(*__x->_M_valptr());
663 __tmp->_M_color = __x->_M_color;
664 __tmp->_M_left = 0;
665 __tmp->_M_right = 0;
666 return __tmp;
667 }
668
669 protected:
670#if _GLIBCXX_INLINE_VERSION0
671 template<typename _Key_compare>
672#else
673 // Unused _Is_pod_comparator is kept as it is part of mangled name.
674 template<typename _Key_compare,
675 bool /* _Is_pod_comparator */ = __is_pod(_Key_compare)>
676#endif
677 struct _Rb_tree_impl
678 : public _Node_allocator
679 , public _Rb_tree_key_compare<_Key_compare>
680 , public _Rb_tree_header
681 {
682 typedef _Rb_tree_key_compare<_Key_compare> _Base_key_compare;
683
684 _Rb_tree_impl()
685 _GLIBCXX_NOEXCEPT_IF(noexcept(is_nothrow_default_constructible<_Node_allocator>
::value && is_nothrow_default_constructible<_Base_key_compare
>::value)
686 is_nothrow_default_constructible<_Node_allocator>::valuenoexcept(is_nothrow_default_constructible<_Node_allocator>
::value && is_nothrow_default_constructible<_Base_key_compare
>::value)
687 && is_nothrow_default_constructible<_Base_key_compare>::value )noexcept(is_nothrow_default_constructible<_Node_allocator>
::value && is_nothrow_default_constructible<_Base_key_compare
>::value)
688 : _Node_allocator()
689 { }
690
691 _Rb_tree_impl(const _Rb_tree_impl& __x)
692 : _Node_allocator(_Alloc_traits::_S_select_on_copy(__x))
693 , _Base_key_compare(__x._M_key_compare)
694 { }
695
696#if __cplusplus201402L < 201103L
697 _Rb_tree_impl(const _Key_compare& __comp, const _Node_allocator& __a)
698 : _Node_allocator(__a), _Base_key_compare(__comp)
699 { }
700#else
701 _Rb_tree_impl(_Rb_tree_impl&&) = default;
702
703 explicit
704 _Rb_tree_impl(_Node_allocator&& __a)
705 : _Node_allocator(std::move(__a))
706 { }
707
708 _Rb_tree_impl(_Rb_tree_impl&& __x, _Node_allocator&& __a)
709 : _Node_allocator(std::move(__a)),
710 _Base_key_compare(std::move(__x)),
711 _Rb_tree_header(std::move(__x))
712 { }
713
714 _Rb_tree_impl(const _Key_compare& __comp, _Node_allocator&& __a)
715 : _Node_allocator(std::move(__a)), _Base_key_compare(__comp)
716 { }
717#endif
718 };
719
720 _Rb_tree_impl<_Compare> _M_impl;
721
722 protected:
723 _Base_ptr&
724 _M_root() _GLIBCXX_NOEXCEPTnoexcept
725 { return this->_M_impl._M_header._M_parent; }
726
727 _Const_Base_ptr
728 _M_root() const _GLIBCXX_NOEXCEPTnoexcept
729 { return this->_M_impl._M_header._M_parent; }
730
731 _Base_ptr&
732 _M_leftmost() _GLIBCXX_NOEXCEPTnoexcept
733 { return this->_M_impl._M_header._M_left; }
734
735 _Const_Base_ptr
736 _M_leftmost() const _GLIBCXX_NOEXCEPTnoexcept
737 { return this->_M_impl._M_header._M_left; }
738
739 _Base_ptr&
740 _M_rightmost() _GLIBCXX_NOEXCEPTnoexcept
741 { return this->_M_impl._M_header._M_right; }
742
743 _Const_Base_ptr
744 _M_rightmost() const _GLIBCXX_NOEXCEPTnoexcept
745 { return this->_M_impl._M_header._M_right; }
746
747 _Link_type
748 _M_begin() _GLIBCXX_NOEXCEPTnoexcept
749 { return static_cast<_Link_type>(this->_M_impl._M_header._M_parent); }
750
751 _Const_Link_type
752 _M_begin() const _GLIBCXX_NOEXCEPTnoexcept
753 {
754 return static_cast<_Const_Link_type>
755 (this->_M_impl._M_header._M_parent);
756 }
757
758 _Base_ptr
759 _M_end() _GLIBCXX_NOEXCEPTnoexcept
760 { return &this->_M_impl._M_header; }
761
762 _Const_Base_ptr
763 _M_end() const _GLIBCXX_NOEXCEPTnoexcept
764 { return &this->_M_impl._M_header; }
765
766 static const _Key&
767 _S_key(_Const_Link_type __x)
768 {
769#if __cplusplus201402L >= 201103L
770 // If we're asking for the key we're presumably using the comparison
771 // object, and so this is a good place to sanity check it.
772 static_assert(__is_invocable<_Compare&, const _Key&, const _Key&>{},
773 "comparison object must be invocable "
774 "with two arguments of key type");
775# if __cplusplus201402L >= 201703L
776 // _GLIBCXX_RESOLVE_LIB_DEFECTS
777 // 2542. Missing const requirements for associative containers
778 if constexpr (__is_invocable<_Compare&, const _Key&, const _Key&>{})
779 static_assert(
780 is_invocable_v<const _Compare&, const _Key&, const _Key&>,
781 "comparison object must be invocable as const");
782# endif // C++17
783#endif // C++11
784
785 return _KeyOfValue()(*__x->_M_valptr());
786 }
787
788 static _Link_type
789 _S_left(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
790 { return static_cast<_Link_type>(__x->_M_left); }
791
792 static _Const_Link_type
793 _S_left(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
794 { return static_cast<_Const_Link_type>(__x->_M_left); }
795
796 static _Link_type
797 _S_right(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
798 { return static_cast<_Link_type>(__x->_M_right); }
799
800 static _Const_Link_type
801 _S_right(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
802 { return static_cast<_Const_Link_type>(__x->_M_right); }
803
804 static const _Key&
805 _S_key(_Const_Base_ptr __x)
806 { return _S_key(static_cast<_Const_Link_type>(__x)); }
807
808 static _Base_ptr
809 _S_minimum(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
810 { return _Rb_tree_node_base::_S_minimum(__x); }
811
812 static _Const_Base_ptr
813 _S_minimum(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
814 { return _Rb_tree_node_base::_S_minimum(__x); }
815
816 static _Base_ptr
817 _S_maximum(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
818 { return _Rb_tree_node_base::_S_maximum(__x); }
819
820 static _Const_Base_ptr
821 _S_maximum(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
822 { return _Rb_tree_node_base::_S_maximum(__x); }
823
824 public:
825 typedef _Rb_tree_iterator<value_type> iterator;
826 typedef _Rb_tree_const_iterator<value_type> const_iterator;
827
828 typedef std::reverse_iterator<iterator> reverse_iterator;
829 typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
830
831#if __cplusplus201402L > 201402L
832 using node_type = _Node_handle<_Key, _Val, _Node_allocator>;
833 using insert_return_type = _Node_insert_return<
834 conditional_t<is_same_v<_Key, _Val>, const_iterator, iterator>,
835 node_type>;
836#endif
837
838 pair<_Base_ptr, _Base_ptr>
839 _M_get_insert_unique_pos(const key_type& __k);
840
841 pair<_Base_ptr, _Base_ptr>
842 _M_get_insert_equal_pos(const key_type& __k);
843
844 pair<_Base_ptr, _Base_ptr>
845 _M_get_insert_hint_unique_pos(const_iterator __pos,
846 const key_type& __k);
847
848 pair<_Base_ptr, _Base_ptr>
849 _M_get_insert_hint_equal_pos(const_iterator __pos,
850 const key_type& __k);
851
852 private:
853#if __cplusplus201402L >= 201103L
854 template<typename _Arg, typename _NodeGen>
855 iterator
856 _M_insert_(_Base_ptr __x, _Base_ptr __y, _Arg&& __v, _NodeGen&);
857
858 iterator
859 _M_insert_node(_Base_ptr __x, _Base_ptr __y, _Link_type __z);
860
861 template<typename _Arg>
862 iterator
863 _M_insert_lower(_Base_ptr __y, _Arg&& __v);
864
865 template<typename _Arg>
866 iterator
867 _M_insert_equal_lower(_Arg&& __x);
868
869 iterator
870 _M_insert_lower_node(_Base_ptr __p, _Link_type __z);
871
872 iterator
873 _M_insert_equal_lower_node(_Link_type __z);
874#else
875 template<typename _NodeGen>
876 iterator
877 _M_insert_(_Base_ptr __x, _Base_ptr __y,
878 const value_type& __v, _NodeGen&);
879
880 // _GLIBCXX_RESOLVE_LIB_DEFECTS
881 // 233. Insertion hints in associative containers.
882 iterator
883 _M_insert_lower(_Base_ptr __y, const value_type& __v);
884
885 iterator
886 _M_insert_equal_lower(const value_type& __x);
887#endif
888
889 template<typename _NodeGen>
890 _Link_type
891 _M_copy(_Const_Link_type __x, _Base_ptr __p, _NodeGen&);
892
893 template<typename _NodeGen>
894 _Link_type
895 _M_copy(const _Rb_tree& __x, _NodeGen& __gen)
896 {
897 _Link_type __root = _M_copy(__x._M_begin(), _M_end(), __gen);
898 _M_leftmost() = _S_minimum(__root);
899 _M_rightmost() = _S_maximum(__root);
900 _M_impl._M_node_count = __x._M_impl._M_node_count;
901 return __root;
902 }
903
904 _Link_type
905 _M_copy(const _Rb_tree& __x)
906 {
907 _Alloc_node __an(*this);
908 return _M_copy(__x, __an);
909 }
910
911 void
912 _M_erase(_Link_type __x);
913
914 iterator
915 _M_lower_bound(_Link_type __x, _Base_ptr __y,
916 const _Key& __k);
917
918 const_iterator
919 _M_lower_bound(_Const_Link_type __x, _Const_Base_ptr __y,
920 const _Key& __k) const;
921
922 iterator
923 _M_upper_bound(_Link_type __x, _Base_ptr __y,
924 const _Key& __k);
925
926 const_iterator
927 _M_upper_bound(_Const_Link_type __x, _Const_Base_ptr __y,
928 const _Key& __k) const;
929
930 public:
931 // allocation/deallocation
932#if __cplusplus201402L < 201103L
933 _Rb_tree() { }
934#else
935 _Rb_tree() = default;
936#endif
937
938 _Rb_tree(const _Compare& __comp,
939 const allocator_type& __a = allocator_type())
940 : _M_impl(__comp, _Node_allocator(__a)) { }
941
942 _Rb_tree(const _Rb_tree& __x)
943 : _M_impl(__x._M_impl)
944 {
945 if (__x._M_root() != 0)
946 _M_root() = _M_copy(__x);
947 }
948
949#if __cplusplus201402L >= 201103L
950 _Rb_tree(const allocator_type& __a)
951 : _M_impl(_Node_allocator(__a))
952 { }
953
954 _Rb_tree(const _Rb_tree& __x, const allocator_type& __a)
955 : _M_impl(__x._M_impl._M_key_compare, _Node_allocator(__a))
956 {
957 if (__x._M_root() != nullptr)
958 _M_root() = _M_copy(__x);
959 }
960
961 _Rb_tree(_Rb_tree&&) = default;
962
963 _Rb_tree(_Rb_tree&& __x, const allocator_type& __a)
964 : _Rb_tree(std::move(__x), _Node_allocator(__a))
965 { }
966
967 private:
968 _Rb_tree(_Rb_tree&& __x, _Node_allocator&& __a, true_type)
969 noexcept(is_nothrow_default_constructible<_Compare>::value)
970 : _M_impl(std::move(__x._M_impl), std::move(__a))
971 { }
972
973 _Rb_tree(_Rb_tree&& __x, _Node_allocator&& __a, false_type)
974 : _M_impl(__x._M_impl._M_key_compare, std::move(__a))
975 {
976 if (__x._M_root() != nullptr)
977 _M_move_data(__x, false_type{});
978 }
979
980 public:
981 _Rb_tree(_Rb_tree&& __x, _Node_allocator&& __a)
982 noexcept( noexcept(
983 _Rb_tree(std::declval<_Rb_tree&&>(), std::declval<_Node_allocator&&>(),
984 std::declval<typename _Alloc_traits::is_always_equal>())) )
985 : _Rb_tree(std::move(__x), std::move(__a),
986 typename _Alloc_traits::is_always_equal{})
987 { }
988#endif
989
990 ~_Rb_tree() _GLIBCXX_NOEXCEPTnoexcept
991 { _M_erase(_M_begin()); }
992
993 _Rb_tree&
994 operator=(const _Rb_tree& __x);
995
996 // Accessors.
997 _Compare
998 key_comp() const
999 { return _M_impl._M_key_compare; }
1000
1001 iterator
1002 begin() _GLIBCXX_NOEXCEPTnoexcept
1003 { return iterator(this->_M_impl._M_header._M_left); }
1004
1005 const_iterator
1006 begin() const _GLIBCXX_NOEXCEPTnoexcept
1007 { return const_iterator(this->_M_impl._M_header._M_left); }
1008
1009 iterator
1010 end() _GLIBCXX_NOEXCEPTnoexcept
1011 { return iterator(&this->_M_impl._M_header); }
1012
1013 const_iterator
1014 end() const _GLIBCXX_NOEXCEPTnoexcept
1015 { return const_iterator(&this->_M_impl._M_header); }
1016
1017 reverse_iterator
1018 rbegin() _GLIBCXX_NOEXCEPTnoexcept
1019 { return reverse_iterator(end()); }
1020
1021 const_reverse_iterator
1022 rbegin() const _GLIBCXX_NOEXCEPTnoexcept
1023 { return const_reverse_iterator(end()); }
1024
1025 reverse_iterator
1026 rend() _GLIBCXX_NOEXCEPTnoexcept
1027 { return reverse_iterator(begin()); }
1028
1029 const_reverse_iterator
1030 rend() const _GLIBCXX_NOEXCEPTnoexcept
1031 { return const_reverse_iterator(begin()); }
1032
1033 _GLIBCXX_NODISCARD bool
1034 empty() const _GLIBCXX_NOEXCEPTnoexcept
1035 { return _M_impl._M_node_count == 0; }
1036
1037 size_type
1038 size() const _GLIBCXX_NOEXCEPTnoexcept
1039 { return _M_impl._M_node_count; }
1040
1041 size_type
1042 max_size() const _GLIBCXX_NOEXCEPTnoexcept
1043 { return _Alloc_traits::max_size(_M_get_Node_allocator()); }
1044
1045 void
1046 swap(_Rb_tree& __t)
1047 _GLIBCXX_NOEXCEPT_IF(__is_nothrow_swappable<_Compare>::value)noexcept(__is_nothrow_swappable<_Compare>::value);
1048
1049 // Insert/erase.
1050#if __cplusplus201402L >= 201103L
1051 template<typename _Arg>
1052 pair<iterator, bool>
1053 _M_insert_unique(_Arg&& __x);
1054
1055 template<typename _Arg>
1056 iterator
1057 _M_insert_equal(_Arg&& __x);
1058
1059 template<typename _Arg, typename _NodeGen>
1060 iterator
1061 _M_insert_unique_(const_iterator __pos, _Arg&& __x, _NodeGen&);
1062
1063 template<typename _Arg>
1064 iterator
1065 _M_insert_unique_(const_iterator __pos, _Arg&& __x)
1066 {
1067 _Alloc_node __an(*this);
1068 return _M_insert_unique_(__pos, std::forward<_Arg>(__x), __an);
1069 }
1070
1071 template<typename _Arg, typename _NodeGen>
1072 iterator
1073 _M_insert_equal_(const_iterator __pos, _Arg&& __x, _NodeGen&);
1074
1075 template<typename _Arg>
1076 iterator
1077 _M_insert_equal_(const_iterator __pos, _Arg&& __x)
1078 {
1079 _Alloc_node __an(*this);
1080 return _M_insert_equal_(__pos, std::forward<_Arg>(__x), __an);
1081 }
1082
1083 template<typename... _Args>
1084 pair<iterator, bool>
1085 _M_emplace_unique(_Args&&... __args);
1086
1087 template<typename... _Args>
1088 iterator
1089 _M_emplace_equal(_Args&&... __args);
1090
1091 template<typename... _Args>
1092 iterator
1093 _M_emplace_hint_unique(const_iterator __pos, _Args&&... __args);
1094
1095 template<typename... _Args>
1096 iterator
1097 _M_emplace_hint_equal(const_iterator __pos, _Args&&... __args);
1098
1099 template<typename _Iter>
1100 using __same_value_type
1101 = is_same<value_type, typename iterator_traits<_Iter>::value_type>;
1102
1103 template<typename _InputIterator>
1104 __enable_if_t<__same_value_type<_InputIterator>::value>
1105 _M_insert_range_unique(_InputIterator __first, _InputIterator __last)
1106 {
1107 _Alloc_node __an(*this);
1108 for (; __first != __last; ++__first)
1109 _M_insert_unique_(end(), *__first, __an);
1110 }
1111
1112 template<typename _InputIterator>
1113 __enable_if_t<!__same_value_type<_InputIterator>::value>
1114 _M_insert_range_unique(_InputIterator __first, _InputIterator __last)
1115 {
1116 for (; __first != __last; ++__first)
1117 _M_emplace_unique(*__first);
1118 }
1119
1120 template<typename _InputIterator>
1121 __enable_if_t<__same_value_type<_InputIterator>::value>
1122 _M_insert_range_equal(_InputIterator __first, _InputIterator __last)
1123 {
1124 _Alloc_node __an(*this);
1125 for (; __first != __last; ++__first)
1126 _M_insert_equal_(end(), *__first, __an);
1127 }
1128
1129 template<typename _InputIterator>
1130 __enable_if_t<!__same_value_type<_InputIterator>::value>
1131 _M_insert_range_equal(_InputIterator __first, _InputIterator __last)
1132 {
1133 _Alloc_node __an(*this);
1134 for (; __first != __last; ++__first)
1135 _M_emplace_equal(*__first);
1136 }
1137#else
1138 pair<iterator, bool>
1139 _M_insert_unique(const value_type& __x);
1140
1141 iterator
1142 _M_insert_equal(const value_type& __x);
1143
1144 template<typename _NodeGen>
1145 iterator
1146 _M_insert_unique_(const_iterator __pos, const value_type& __x,
1147 _NodeGen&);
1148
1149 iterator
1150 _M_insert_unique_(const_iterator __pos, const value_type& __x)
1151 {
1152 _Alloc_node __an(*this);
1153 return _M_insert_unique_(__pos, __x, __an);
1154 }
1155
1156 template<typename _NodeGen>
1157 iterator
1158 _M_insert_equal_(const_iterator __pos, const value_type& __x,
1159 _NodeGen&);
1160 iterator
1161 _M_insert_equal_(const_iterator __pos, const value_type& __x)
1162 {
1163 _Alloc_node __an(*this);
1164 return _M_insert_equal_(__pos, __x, __an);
1165 }
1166
1167 template<typename _InputIterator>
1168 void
1169 _M_insert_range_unique(_InputIterator __first, _InputIterator __last)
1170 {
1171 _Alloc_node __an(*this);
1172 for (; __first != __last; ++__first)
1173 _M_insert_unique_(end(), *__first, __an);
1174 }
1175
1176 template<typename _InputIterator>
1177 void
1178 _M_insert_range_equal(_InputIterator __first, _InputIterator __last)
1179 {
1180 _Alloc_node __an(*this);
1181 for (; __first != __last; ++__first)
1182 _M_insert_equal_(end(), *__first, __an);
1183 }
1184#endif
1185
1186 private:
1187 void
1188 _M_erase_aux(const_iterator __position);
1189
1190 void
1191 _M_erase_aux(const_iterator __first, const_iterator __last);
1192
1193 public:
1194#if __cplusplus201402L >= 201103L
1195 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1196 // DR 130. Associative erase should return an iterator.
1197 _GLIBCXX_ABI_TAG_CXX11__attribute ((__abi_tag__ ("cxx11")))
1198 iterator
1199 erase(const_iterator __position)
1200 {
1201 __glibcxx_assert(__position != end());
1202 const_iterator __result = __position;
1203 ++__result;
1204 _M_erase_aux(__position);
1205 return __result._M_const_cast();
1206 }
1207
1208 // LWG 2059.
1209 _GLIBCXX_ABI_TAG_CXX11__attribute ((__abi_tag__ ("cxx11")))
1210 iterator
1211 erase(iterator __position)
1212 {
1213 __glibcxx_assert(__position != end());
1214 iterator __result = __position;
1215 ++__result;
1216 _M_erase_aux(__position);
1217 return __result;
1218 }
1219#else
1220 void
1221 erase(iterator __position)
1222 {
1223 __glibcxx_assert(__position != end());
1224 _M_erase_aux(__position);
1225 }
1226
1227 void
1228 erase(const_iterator __position)
1229 {
1230 __glibcxx_assert(__position != end());
1231 _M_erase_aux(__position);
1232 }
1233#endif
1234
1235 size_type
1236 erase(const key_type& __x);
1237
1238#if __cplusplus201402L >= 201103L
1239 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1240 // DR 130. Associative erase should return an iterator.
1241 _GLIBCXX_ABI_TAG_CXX11__attribute ((__abi_tag__ ("cxx11")))
1242 iterator
1243 erase(const_iterator __first, const_iterator __last)
1244 {
1245 _M_erase_aux(__first, __last);
1246 return __last._M_const_cast();
1247 }
1248#else
1249 void
1250 erase(iterator __first, iterator __last)
1251 { _M_erase_aux(__first, __last); }
1252
1253 void
1254 erase(const_iterator __first, const_iterator __last)
1255 { _M_erase_aux(__first, __last); }
1256#endif
1257
1258 void
1259 clear() _GLIBCXX_NOEXCEPTnoexcept
1260 {
1261 _M_erase(_M_begin());
1262 _M_impl._M_reset();
1263 }
1264
1265 // Set operations.
1266 iterator
1267 find(const key_type& __k);
1268
1269 const_iterator
1270 find(const key_type& __k) const;
1271
1272 size_type
1273 count(const key_type& __k) const;
1274
1275 iterator
1276 lower_bound(const key_type& __k)
1277 { return _M_lower_bound(_M_begin(), _M_end(), __k); }
1278
1279 const_iterator
1280 lower_bound(const key_type& __k) const
1281 { return _M_lower_bound(_M_begin(), _M_end(), __k); }
1282
1283 iterator
1284 upper_bound(const key_type& __k)
1285 { return _M_upper_bound(_M_begin(), _M_end(), __k); }
1286
1287 const_iterator
1288 upper_bound(const key_type& __k) const
1289 { return _M_upper_bound(_M_begin(), _M_end(), __k); }
1290
1291 pair<iterator, iterator>
1292 equal_range(const key_type& __k);
1293
1294 pair<const_iterator, const_iterator>
1295 equal_range(const key_type& __k) const;
1296
1297#if __cplusplus201402L >= 201402L
1298 template<typename _Kt,
1299 typename _Req = __has_is_transparent_t<_Compare, _Kt>>
1300 iterator
1301 _M_find_tr(const _Kt& __k)
1302 {
1303 const _Rb_tree* __const_this = this;
1304 return __const_this->_M_find_tr(__k)._M_const_cast();
1305 }
1306
1307 template<typename _Kt,
1308 typename _Req = __has_is_transparent_t<_Compare, _Kt>>
1309 const_iterator
1310 _M_find_tr(const _Kt& __k) const
1311 {
1312 auto __j = _M_lower_bound_tr(__k);
1313 if (__j != end() && _M_impl._M_key_compare(__k, _S_key(__j._M_node)))
1314 __j = end();
1315 return __j;
1316 }
1317
1318 template<typename _Kt,
1319 typename _Req = __has_is_transparent_t<_Compare, _Kt>>
1320 size_type
1321 _M_count_tr(const _Kt& __k) const
1322 {
1323 auto __p = _M_equal_range_tr(__k);
1324 return std::distance(__p.first, __p.second);
1325 }
1326
1327 template<typename _Kt,
1328 typename _Req = __has_is_transparent_t<_Compare, _Kt>>
1329 iterator
1330 _M_lower_bound_tr(const _Kt& __k)
1331 {
1332 const _Rb_tree* __const_this = this;
1333 return __const_this->_M_lower_bound_tr(__k)._M_const_cast();
1334 }
1335
1336 template<typename _Kt,
1337 typename _Req = __has_is_transparent_t<_Compare, _Kt>>
1338 const_iterator
1339 _M_lower_bound_tr(const _Kt& __k) const
1340 {
1341 auto __x = _M_begin();
1342 auto __y = _M_end();
1343 while (__x != 0)
1344 if (!_M_impl._M_key_compare(_S_key(__x), __k))
1345 {
1346 __y = __x;
1347 __x = _S_left(__x);
1348 }
1349 else
1350 __x = _S_right(__x);
1351 return const_iterator(__y);
1352 }
1353
1354 template<typename _Kt,
1355 typename _Req = __has_is_transparent_t<_Compare, _Kt>>
1356 iterator
1357 _M_upper_bound_tr(const _Kt& __k)
1358 {
1359 const _Rb_tree* __const_this = this;
1360 return __const_this->_M_upper_bound_tr(__k)._M_const_cast();
1361 }
1362
1363 template<typename _Kt,
1364 typename _Req = __has_is_transparent_t<_Compare, _Kt>>
1365 const_iterator
1366 _M_upper_bound_tr(const _Kt& __k) const
1367 {
1368 auto __x = _M_begin();
1369 auto __y = _M_end();
1370 while (__x != 0)
1371 if (_M_impl._M_key_compare(__k, _S_key(__x)))
1372 {
1373 __y = __x;
1374 __x = _S_left(__x);
1375 }
1376 else
1377 __x = _S_right(__x);
1378 return const_iterator(__y);
1379 }
1380
1381 template<typename _Kt,
1382 typename _Req = __has_is_transparent_t<_Compare, _Kt>>
1383 pair<iterator, iterator>
1384 _M_equal_range_tr(const _Kt& __k)
1385 {
1386 const _Rb_tree* __const_this = this;
1387 auto __ret = __const_this->_M_equal_range_tr(__k);
1388 return { __ret.first._M_const_cast(), __ret.second._M_const_cast() };
1389 }
1390
1391 template<typename _Kt,
1392 typename _Req = __has_is_transparent_t<_Compare, _Kt>>
1393 pair<const_iterator, const_iterator>
1394 _M_equal_range_tr(const _Kt& __k) const
1395 {
1396 auto __low = _M_lower_bound_tr(__k);
1397 auto __high = __low;
1398 auto& __cmp = _M_impl._M_key_compare;
1399 while (__high != end() && !__cmp(__k, _S_key(__high._M_node)))
1400 ++__high;
1401 return { __low, __high };
1402 }
1403#endif
1404
1405 // Debugging.
1406 bool
1407 __rb_verify() const;
1408
1409#if __cplusplus201402L >= 201103L
1410 _Rb_tree&
1411 operator=(_Rb_tree&&)
1412 noexcept(_Alloc_traits::_S_nothrow_move()
1413 && is_nothrow_move_assignable<_Compare>::value);
1414
1415 template<typename _Iterator>
1416 void
1417 _M_assign_unique(_Iterator, _Iterator);
1418
1419 template<typename _Iterator>
1420 void
1421 _M_assign_equal(_Iterator, _Iterator);
1422
1423 private:
1424 // Move elements from container with equal allocator.
1425 void
1426 _M_move_data(_Rb_tree& __x, true_type)
1427 { _M_impl._M_move_data(__x._M_impl); }
1428
1429 // Move elements from container with possibly non-equal allocator,
1430 // which might result in a copy not a move.
1431 void
1432 _M_move_data(_Rb_tree&, false_type);
1433
1434 // Move assignment from container with equal allocator.
1435 void
1436 _M_move_assign(_Rb_tree&, true_type);
1437
1438 // Move assignment from container with possibly non-equal allocator,
1439 // which might result in a copy not a move.
1440 void
1441 _M_move_assign(_Rb_tree&, false_type);
1442#endif
1443
1444#if __cplusplus201402L > 201402L
1445 public:
1446 /// Re-insert an extracted node.
1447 insert_return_type
1448 _M_reinsert_node_unique(node_type&& __nh)
1449 {
1450 insert_return_type __ret;
1451 if (__nh.empty())
1452 __ret.position = end();
1453 else
1454 {
1455 __glibcxx_assert(_M_get_Node_allocator() == *__nh._M_alloc);
1456
1457 auto __res = _M_get_insert_unique_pos(__nh._M_key());
1458 if (__res.second)
1459 {
1460 __ret.position
1461 = _M_insert_node(__res.first, __res.second, __nh._M_ptr);
1462 __nh._M_ptr = nullptr;
1463 __ret.inserted = true;
1464 }
1465 else
1466 {
1467 __ret.node = std::move(__nh);
1468 __ret.position = iterator(__res.first);
1469 __ret.inserted = false;
1470 }
1471 }
1472 return __ret;
1473 }
1474
1475 /// Re-insert an extracted node.
1476 iterator
1477 _M_reinsert_node_equal(node_type&& __nh)
1478 {
1479 iterator __ret;
1480 if (__nh.empty())
1481 __ret = end();
1482 else
1483 {
1484 __glibcxx_assert(_M_get_Node_allocator() == *__nh._M_alloc);
1485 auto __res = _M_get_insert_equal_pos(__nh._M_key());
1486 if (__res.second)
1487 __ret = _M_insert_node(__res.first, __res.second, __nh._M_ptr);
1488 else
1489 __ret = _M_insert_equal_lower_node(__nh._M_ptr);
1490 __nh._M_ptr = nullptr;
1491 }
1492 return __ret;
1493 }
1494
1495 /// Re-insert an extracted node.
1496 iterator
1497 _M_reinsert_node_hint_unique(const_iterator __hint, node_type&& __nh)
1498 {
1499 iterator __ret;
1500 if (__nh.empty())
1501 __ret = end();
1502 else
1503 {
1504 __glibcxx_assert(_M_get_Node_allocator() == *__nh._M_alloc);
1505 auto __res = _M_get_insert_hint_unique_pos(__hint, __nh._M_key());
1506 if (__res.second)
1507 {
1508 __ret = _M_insert_node(__res.first, __res.second, __nh._M_ptr);
1509 __nh._M_ptr = nullptr;
1510 }
1511 else
1512 __ret = iterator(__res.first);
1513 }
1514 return __ret;
1515 }
1516
1517 /// Re-insert an extracted node.
1518 iterator
1519 _M_reinsert_node_hint_equal(const_iterator __hint, node_type&& __nh)
1520 {
1521 iterator __ret;
1522 if (__nh.empty())
1523 __ret = end();
1524 else
1525 {
1526 __glibcxx_assert(_M_get_Node_allocator() == *__nh._M_alloc);
1527 auto __res = _M_get_insert_hint_equal_pos(__hint, __nh._M_key());
1528 if (__res.second)
1529 __ret = _M_insert_node(__res.first, __res.second, __nh._M_ptr);
1530 else
1531 __ret = _M_insert_equal_lower_node(__nh._M_ptr);
1532 __nh._M_ptr = nullptr;
1533 }
1534 return __ret;
1535 }
1536
1537 /// Extract a node.
1538 node_type
1539 extract(const_iterator __pos)
1540 {
1541 auto __ptr = _Rb_tree_rebalance_for_erase(
1542 __pos._M_const_cast()._M_node, _M_impl._M_header);
1543 --_M_impl._M_node_count;
1544 return { static_cast<_Link_type>(__ptr), _M_get_Node_allocator() };
1545 }
1546
1547 /// Extract a node.
1548 node_type
1549 extract(const key_type& __k)
1550 {
1551 node_type __nh;
1552 auto __pos = find(__k);
1553 if (__pos != end())
1554 __nh = extract(const_iterator(__pos));
1555 return __nh;
1556 }
1557
1558 template<typename _Compare2>
1559 using _Compatible_tree
1560 = _Rb_tree<_Key, _Val, _KeyOfValue, _Compare2, _Alloc>;
1561
1562 template<typename, typename>
1563 friend class _Rb_tree_merge_helper;
1564
1565 /// Merge from a compatible container into one with unique keys.
1566 template<typename _Compare2>
1567 void
1568 _M_merge_unique(_Compatible_tree<_Compare2>& __src) noexcept
1569 {
1570 using _Merge_helper = _Rb_tree_merge_helper<_Rb_tree, _Compare2>;
1571 for (auto __i = __src.begin(), __end = __src.end(); __i != __end;)
1572 {
1573 auto __pos = __i++;
1574 auto __res = _M_get_insert_unique_pos(_KeyOfValue()(*__pos));
1575 if (__res.second)
1576 {
1577 auto& __src_impl = _Merge_helper::_S_get_impl(__src);
1578 auto __ptr = _Rb_tree_rebalance_for_erase(
1579 __pos._M_node, __src_impl._M_header);
1580 --__src_impl._M_node_count;
1581 _M_insert_node(__res.first, __res.second,
1582 static_cast<_Link_type>(__ptr));
1583 }
1584 }
1585 }
1586
1587 /// Merge from a compatible container into one with equivalent keys.
1588 template<typename _Compare2>
1589 void
1590 _M_merge_equal(_Compatible_tree<_Compare2>& __src) noexcept
1591 {
1592 using _Merge_helper = _Rb_tree_merge_helper<_Rb_tree, _Compare2>;