Bug Summary

File: llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
Warning: line 297, column 9
Assigned value is garbage or undefined

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name BPFISelDAGToDAG.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/BPF -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/BPF -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/BPF -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include 
-internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/BPF -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp

1//===-- BPFISelDAGToDAG.cpp - A dag to dag inst selector for BPF ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines a DAG pattern matching instruction selector for BPF,
10// converting from a legalized dag to a BPF dag.
11//
12//===----------------------------------------------------------------------===//
13
14#include "BPF.h"
15#include "BPFRegisterInfo.h"
16#include "BPFSubtarget.h"
17#include "BPFTargetMachine.h"
18#include "llvm/CodeGen/FunctionLoweringInfo.h"
19#include "llvm/CodeGen/MachineConstantPool.h"
20#include "llvm/CodeGen/MachineFrameInfo.h"
21#include "llvm/CodeGen/MachineFunction.h"
22#include "llvm/CodeGen/MachineInstrBuilder.h"
23#include "llvm/CodeGen/MachineRegisterInfo.h"
24#include "llvm/CodeGen/SelectionDAGISel.h"
25#include "llvm/IR/Constants.h"
26#include "llvm/IR/IntrinsicInst.h"
27#include "llvm/IR/IntrinsicsBPF.h"
28#include "llvm/Support/Debug.h"
29#include "llvm/Support/Endian.h"
30#include "llvm/Support/ErrorHandling.h"
31#include "llvm/Support/raw_ostream.h"
32#include "llvm/Target/TargetMachine.h"
33
34using namespace llvm;
35
36#define DEBUG_TYPE"bpf-isel" "bpf-isel"
37
38// Instruction Selector Implementation
39namespace {
40
41class BPFDAGToDAGISel : public SelectionDAGISel {
42
43 /// Subtarget - Keep a pointer to the BPFSubtarget around so that we can
44 /// make the right decision when generating code for different subtargets.
45 const BPFSubtarget *Subtarget;
46
47public:
48 explicit BPFDAGToDAGISel(BPFTargetMachine &TM)
49 : SelectionDAGISel(TM), Subtarget(nullptr) {}
50
51 StringRef getPassName() const override {
52 return "BPF DAG->DAG Pattern Instruction Selection";
53 }
54
55 bool runOnMachineFunction(MachineFunction &MF) override {
56 // Reset the subtarget each time through.
57 Subtarget = &MF.getSubtarget<BPFSubtarget>();
58 return SelectionDAGISel::runOnMachineFunction(MF);
59 }
60
61 void PreprocessISelDAG() override;
62
63 bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintCode,
64 std::vector<SDValue> &OutOps) override;
65
66
67private:
68// Include the pieces autogenerated from the target description.
69#include "BPFGenDAGISel.inc"
70
71 void Select(SDNode *N) override;
72
73 // Complex Pattern for address selection.
74 bool SelectAddr(SDValue Addr, SDValue &Base, SDValue &Offset);
75 bool SelectFIAddr(SDValue Addr, SDValue &Base, SDValue &Offset);
76
77 // Node preprocessing cases
78 void PreprocessLoad(SDNode *Node, SelectionDAG::allnodes_iterator &I);
79 void PreprocessCopyToReg(SDNode *Node);
80 void PreprocessTrunc(SDNode *Node, SelectionDAG::allnodes_iterator &I);
81
82 // Find constants from a constant structure
83 typedef std::vector<unsigned char> val_vec_type;
84 bool fillGenericConstant(const DataLayout &DL, const Constant *CV,
85 val_vec_type &Vals, uint64_t Offset);
86 bool fillConstantDataArray(const DataLayout &DL, const ConstantDataArray *CDA,
87 val_vec_type &Vals, int Offset);
88 bool fillConstantArray(const DataLayout &DL, const ConstantArray *CA,
89 val_vec_type &Vals, int Offset);
90 bool fillConstantStruct(const DataLayout &DL, const ConstantStruct *CS,
91 val_vec_type &Vals, int Offset);
92 bool getConstantFieldValue(const GlobalAddressSDNode *Node, uint64_t Offset,
93 uint64_t Size, unsigned char *ByteSeq);
94 // Mapping from ConstantStruct global value to corresponding byte-list values
95 std::map<const void *, val_vec_type> cs_vals_;
96};
97} // namespace
98
99// ComplexPattern used on BPF Load/Store instructions
100bool BPFDAGToDAGISel::SelectAddr(SDValue Addr, SDValue &Base, SDValue &Offset) {
101 // if Address is FI, get the TargetFrameIndex.
102 SDLoc DL(Addr);
103 if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
104 Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
105 Offset = CurDAG->getTargetConstant(0, DL, MVT::i64);
106 return true;
107 }
108
109 if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
110 Addr.getOpcode() == ISD::TargetGlobalAddress)
111 return false;
112
113 // Addresses of the form Addr+const or Addr|const
114 if (CurDAG->isBaseWithConstantOffset(Addr)) {
115 auto *CN = cast<ConstantSDNode>(Addr.getOperand(1));
116 if (isInt<16>(CN->getSExtValue())) {
117 // If the first operand is a FI, get the TargetFI Node
118 if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0)))
119 Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
120 else
121 Base = Addr.getOperand(0);
122
123 Offset = CurDAG->getTargetConstant(CN->getSExtValue(), DL, MVT::i64);
124 return true;
125 }
126 }
127
128 Base = Addr;
129 Offset = CurDAG->getTargetConstant(0, DL, MVT::i64);
130 return true;
131}
132
133// ComplexPattern used on BPF FI instruction
134bool BPFDAGToDAGISel::SelectFIAddr(SDValue Addr, SDValue &Base,
135 SDValue &Offset) {
136 SDLoc DL(Addr);
137
138 if (!CurDAG->isBaseWithConstantOffset(Addr))
139 return false;
140
141 // Addresses of the form Addr+const or Addr|const
142 auto *CN = cast<ConstantSDNode>(Addr.getOperand(1));
143 if (isInt<16>(CN->getSExtValue())) {
144 // If the first operand is a FI, get the TargetFI Node
145 if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0)))
146 Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
147 else
148 return false;
149
150 Offset = CurDAG->getTargetConstant(CN->getSExtValue(), DL, MVT::i64);
151 return true;
152 }
153
154 return false;
155}
156
157bool BPFDAGToDAGISel::SelectInlineAsmMemoryOperand(
158 const SDValue &Op, unsigned ConstraintCode, std::vector<SDValue> &OutOps) {
159 SDValue Op0, Op1;
160 switch (ConstraintCode) {
161 default:
162 return true;
163 case InlineAsm::Constraint_m: // memory
164 if (!SelectAddr(Op, Op0, Op1))
165 return true;
166 break;
167 }
168
169 SDLoc DL(Op);
170 SDValue AluOp = CurDAG->getTargetConstant(ISD::ADD, DL, MVT::i32);;
171 OutOps.push_back(Op0);
172 OutOps.push_back(Op1);
173 OutOps.push_back(AluOp);
174 return false;
175}
176
177void BPFDAGToDAGISel::Select(SDNode *Node) {
178 unsigned Opcode = Node->getOpcode();
179
180 // If we have a custom node, we already have selected!
181 if (Node->isMachineOpcode()) {
182 LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n')do { } while (false);
183 return;
184 }
185
186 // tablegen selection should be handled here.
187 switch (Opcode) {
188 default:
189 break;
190 case ISD::SDIV: {
191 DebugLoc Empty;
192 const DebugLoc &DL = Node->getDebugLoc();
193 if (DL != Empty)
194 errs() << "Error at line " << DL.getLine() << ": ";
195 else
196 errs() << "Error: ";
197 errs() << "Unsupport signed division for DAG: ";
198 Node->print(errs(), CurDAG);
199 errs() << "Please convert to unsigned div/mod.\n";
200 break;
201 }
202 case ISD::INTRINSIC_W_CHAIN: {
203 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
204 switch (IntNo) {
205 case Intrinsic::bpf_load_byte:
206 case Intrinsic::bpf_load_half:
207 case Intrinsic::bpf_load_word: {
208 SDLoc DL(Node);
209 SDValue Chain = Node->getOperand(0);
210 SDValue N1 = Node->getOperand(1);
211 SDValue Skb = Node->getOperand(2);
212 SDValue N3 = Node->getOperand(3);
213
214 SDValue R6Reg = CurDAG->getRegister(BPF::R6, MVT::i64);
215 Chain = CurDAG->getCopyToReg(Chain, DL, R6Reg, Skb, SDValue());
216 Node = CurDAG->UpdateNodeOperands(Node, Chain, N1, R6Reg, N3);
217 break;
218 }
219 }
220 break;
221 }
222
223 case ISD::FrameIndex: {
224 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
225 EVT VT = Node->getValueType(0);
226 SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
227 unsigned Opc = BPF::MOV_rr;
228 if (Node->hasOneUse()) {
229 CurDAG->SelectNodeTo(Node, Opc, VT, TFI);
230 return;
231 }
232 ReplaceNode(Node, CurDAG->getMachineNode(Opc, SDLoc(Node), VT, TFI));
233 return;
234 }
235 }
236
237 // Select the default instruction
238 SelectCode(Node);
239}
240
241void BPFDAGToDAGISel::PreprocessLoad(SDNode *Node,
242 SelectionDAG::allnodes_iterator &I) {
243 union {
244 uint8_t c[8];
245 uint16_t s;
246 uint32_t i;
247 uint64_t d;
248 } new_val; // hold up the constant values replacing loads.
249 bool to_replace = false;
250 SDLoc DL(Node);
251 const LoadSDNode *LD = cast<LoadSDNode>(Node);
5
'Node' is a 'LoadSDNode'
252 uint64_t size = LD->getMemOperand()->getSize();
6
Calling 'MachineMemOperand::getSize'
9
Returning from 'MachineMemOperand::getSize'
253
254 if (!size || size > 8 || (size & (size - 1)) || !LD->isSimple())
10
Assuming 'size' is not equal to 0
11
Assuming 'size' is <= 8
12
Assuming the condition is false
13
Calling 'MemSDNode::isSimple'
16
Returning from 'MemSDNode::isSimple'
17
Taking false branch
255 return;
256
257 SDNode *LDAddrNode = LD->getOperand(1).getNode();
258 // Match LDAddr against either global_addr or (global_addr + offset)
259 unsigned opcode = LDAddrNode->getOpcode();
260 if (opcode == ISD::ADD) {
18
Assuming 'opcode' is equal to ADD
19
Taking true branch
261 SDValue OP1 = LDAddrNode->getOperand(0);
262 SDValue OP2 = LDAddrNode->getOperand(1);
263
264 // We want to find the pattern global_addr + offset
265 SDNode *OP1N = OP1.getNode();
266 if (OP1N->getOpcode() <= ISD::BUILTIN_OP_END || OP1N->getNumOperands() == 0)
20
Assuming the condition is false
21
Assuming the condition is false
22
Taking false branch
267 return;
268
269 LLVM_DEBUG(dbgs() << "Check candidate load: "; LD->dump(); dbgs() << '\n')do { } while (false);
23
Loop condition is false. Exiting loop
270
271 const GlobalAddressSDNode *GADN =
272 dyn_cast<GlobalAddressSDNode>(OP1N->getOperand(0).getNode());
24
Assuming the object is a 'GlobalAddressSDNode'
273 const ConstantSDNode *CDN = dyn_cast<ConstantSDNode>(OP2.getNode());
25
Assuming the object is a 'ConstantSDNode'
274 if (GADN
25.1
'GADN' is non-null
25.1
'GADN' is non-null
25.1
'GADN' is non-null
25.1
'GADN' is non-null
25.1
'GADN' is non-null
&& CDN
25.2
'CDN' is non-null
25.2
'CDN' is non-null
25.2
'CDN' is non-null
25.2
'CDN' is non-null
25.2
'CDN' is non-null
)
26
Taking true branch
275 to_replace =
276 getConstantFieldValue(GADN, CDN->getZExtValue(), size, new_val.c);
27
Calling 'BPFDAGToDAGISel::getConstantFieldValue'
50
Returning from 'BPFDAGToDAGISel::getConstantFieldValue'
277 } else if (LDAddrNode->getOpcode() > ISD::BUILTIN_OP_END &&
278 LDAddrNode->getNumOperands() > 0) {
279 LLVM_DEBUG(dbgs() << "Check candidate load: "; LD->dump(); dbgs() << '\n')do { } while (false);
280
281 SDValue OP1 = LDAddrNode->getOperand(0);
282 if (const GlobalAddressSDNode *GADN =
283 dyn_cast<GlobalAddressSDNode>(OP1.getNode()))
284 to_replace = getConstantFieldValue(GADN, 0, size, new_val.c);
285 }
286
287 if (!to_replace
50.1
'to_replace' is true
50.1
'to_replace' is true
50.1
'to_replace' is true
50.1
'to_replace' is true
50.1
'to_replace' is true
)
51
Taking false branch
288 return;
289
290 // replacing the old with a new value
291 uint64_t val;
292 if (size == 1)
52
Assuming 'size' is not equal to 1
53
Taking false branch
293 val = new_val.c[0];
294 else if (size == 2)
54
Assuming 'size' is not equal to 2
55
Taking false branch
295 val = new_val.s;
296 else if (size == 4)
56
Assuming 'size' is equal to 4
57
Taking true branch
297 val = new_val.i;
58
Assigned value is garbage or undefined
298 else {
299 val = new_val.d;
300 }
301
302 LLVM_DEBUG(dbgs() << "Replacing load of size " << size << " with constant "do { } while (false)
303 << val << '\n')do { } while (false);
304 SDValue NVal = CurDAG->getConstant(val, DL, LD->getValueType(0));
305
306 // After replacement, the current node is dead, we need to
307 // go backward one step to make iterator still work
308 I--;
309 SDValue From[] = {SDValue(Node, 0), SDValue(Node, 1)};
310 SDValue To[] = {NVal, NVal};
311 CurDAG->ReplaceAllUsesOfValuesWith(From, To, 2);
312 I++;
313 // It is safe to delete node now
314 CurDAG->DeleteNode(Node);
315}
316
317void BPFDAGToDAGISel::PreprocessISelDAG() {
318 // Iterate through all nodes, interested in the following case:
319 //
320 // . loads from ConstantStruct or ConstantArray of constructs
321 // which can be turns into constant itself, with this we can
322 // avoid reading from read-only section at runtime.
323 //
324 // . Removing redundant AND for intrinsic narrow loads.
325 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
1
Loop condition is true. Entering loop body
326 E = CurDAG->allnodes_end();
327 I != E;) {
328 SDNode *Node = &*I++;
329 unsigned Opcode = Node->getOpcode();
330 if (Opcode == ISD::LOAD)
2
Assuming 'Opcode' is equal to LOAD
3
Taking true branch
331 PreprocessLoad(Node, I);
4
Calling 'BPFDAGToDAGISel::PreprocessLoad'
332 else if (Opcode == ISD::AND)
333 PreprocessTrunc(Node, I);
334 }
335}
336
337bool BPFDAGToDAGISel::getConstantFieldValue(const GlobalAddressSDNode *Node,
338 uint64_t Offset, uint64_t Size,
339 unsigned char *ByteSeq) {
340 const GlobalVariable *V = dyn_cast<GlobalVariable>(Node->getGlobal());
28
Assuming the object is a 'GlobalVariable'
341
342 if (!V
28.1
'V' is non-null, which participates in a condition later
28.1
'V' is non-null, which participates in a condition later
28.1
'V' is non-null, which participates in a condition later
28.1
'V' is non-null, which participates in a condition later
28.1
'V' is non-null, which participates in a condition later
|| !V->hasInitializer() || !V->isConstant())
29
Calling 'GlobalVariable::hasInitializer'
32
Returning from 'GlobalVariable::hasInitializer'
33
Assuming the condition is false
34
Taking false branch
343 return false;
344
345 const Constant *Init = V->getInitializer();
346 const DataLayout &DL = CurDAG->getDataLayout();
347 val_vec_type TmpVal;
348
349 auto it = cs_vals_.find(static_cast<const void *>(Init));
350 if (it != cs_vals_.end()) {
35
Calling 'operator!='
38
Returning from 'operator!='
39
Taking false branch
351 TmpVal = it->second;
352 } else {
353 uint64_t total_size = 0;
354 if (const ConstantStruct *CS
40.1
'CS' is null
40.1
'CS' is null
40.1
'CS' is null
40.1
'CS' is null
40.1
'CS' is null
= dyn_cast<ConstantStruct>(Init))
40
Assuming 'Init' is not a 'ConstantStruct'
41
Taking false branch
355 total_size =
356 DL.getStructLayout(cast<StructType>(CS->getType()))->getSizeInBytes();
357 else if (const ConstantArray *CA
42.1
'CA' is non-null
42.1
'CA' is non-null
42.1
'CA' is non-null
42.1
'CA' is non-null
42.1
'CA' is non-null
= dyn_cast<ConstantArray>(Init))
42
Assuming 'Init' is a 'ConstantArray'
43
Taking true branch
358 total_size = DL.getTypeAllocSize(CA->getType()->getElementType()) *
359 CA->getNumOperands();
360 else
361 return false;
362
363 val_vec_type Vals(total_size, 0);
364 if (fillGenericConstant(DL, Init, Vals, 0) == false)
44
Taking false branch
365 return false;
366 cs_vals_[static_cast<const void *>(Init)] = Vals;
367 TmpVal = std::move(Vals);
368 }
369
370 // test whether host endianness matches target
371 union {
372 uint8_t c[2];
373 uint16_t s;
374 } test_buf;
375 uint16_t test_val = 0x2345;
376 if (DL.isLittleEndian())
45
Taking false branch
377 support::endian::write16le(test_buf.c, test_val);
378 else
379 support::endian::write16be(test_buf.c, test_val);
380
381 bool endian_match = test_buf.s == test_val;
46
Assuming 'test_val' is not equal to field 's'
382 for (uint64_t i = Offset, j = 0; i < Offset + Size; i++, j++)
47
Assuming the condition is false
48
Loop condition is false. Execution continues on line 385
383 ByteSeq[j] = endian_match ? TmpVal[i] : TmpVal[Offset + Size - 1 - j];
384
385 return true;
49
Returning the value 1, which participates in a condition later
386}
387
388bool BPFDAGToDAGISel::fillGenericConstant(const DataLayout &DL,
389 const Constant *CV,
390 val_vec_type &Vals, uint64_t Offset) {
391 uint64_t Size = DL.getTypeAllocSize(CV->getType());
392
393 if (isa<ConstantAggregateZero>(CV) || isa<UndefValue>(CV))
394 return true; // already done
395
396 if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
397 uint64_t val = CI->getZExtValue();
398 LLVM_DEBUG(dbgs() << "Byte array at offset " << Offset << " with value "do { } while (false)
399 << val << '\n')do { } while (false);
400
401 if (Size > 8 || (Size & (Size - 1)))
402 return false;
403
404 // Store based on target endian
405 for (uint64_t i = 0; i < Size; ++i) {
406 Vals[Offset + i] = DL.isLittleEndian()
407 ? ((val >> (i * 8)) & 0xFF)
408 : ((val >> ((Size - i - 1) * 8)) & 0xFF);
409 }
410 return true;
411 }
412
413 if (const ConstantDataArray *CDA = dyn_cast<ConstantDataArray>(CV))
414 return fillConstantDataArray(DL, CDA, Vals, Offset);
415
416 if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV))
417 return fillConstantArray(DL, CA, Vals, Offset);
418
419 if (const ConstantStruct *CVS = dyn_cast<ConstantStruct>(CV))
420 return fillConstantStruct(DL, CVS, Vals, Offset);
421
422 return false;
423}
424
425bool BPFDAGToDAGISel::fillConstantDataArray(const DataLayout &DL,
426 const ConstantDataArray *CDA,
427 val_vec_type &Vals, int Offset) {
428 for (unsigned i = 0, e = CDA->getNumElements(); i != e; ++i) {
429 if (fillGenericConstant(DL, CDA->getElementAsConstant(i), Vals, Offset) ==
430 false)
431 return false;
432 Offset += DL.getTypeAllocSize(CDA->getElementAsConstant(i)->getType());
433 }
434
435 return true;
436}
437
438bool BPFDAGToDAGISel::fillConstantArray(const DataLayout &DL,
439 const ConstantArray *CA,
440 val_vec_type &Vals, int Offset) {
441 for (unsigned i = 0, e = CA->getNumOperands(); i != e; ++i) {
442 if (fillGenericConstant(DL, CA->getOperand(i), Vals, Offset) == false)
443 return false;
444 Offset += DL.getTypeAllocSize(CA->getOperand(i)->getType());
445 }
446
447 return true;
448}
449
450bool BPFDAGToDAGISel::fillConstantStruct(const DataLayout &DL,
451 const ConstantStruct *CS,
452 val_vec_type &Vals, int Offset) {
453 const StructLayout *Layout = DL.getStructLayout(CS->getType());
454 for (unsigned i = 0, e = CS->getNumOperands(); i != e; ++i) {
455 const Constant *Field = CS->getOperand(i);
456 uint64_t SizeSoFar = Layout->getElementOffset(i);
457 if (fillGenericConstant(DL, Field, Vals, Offset + SizeSoFar) == false)
458 return false;
459 }
460 return true;
461}
462
463void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node,
464 SelectionDAG::allnodes_iterator &I) {
465 ConstantSDNode *MaskN = dyn_cast<ConstantSDNode>(Node->getOperand(1));
466 if (!MaskN)
467 return;
468
469 // The Reg operand should be a virtual register, which is defined
470 // outside the current basic block. DAG combiner has done a pretty
471 // good job in removing truncating inside a single basic block except
472 // when the Reg operand comes from bpf_load_[byte | half | word] for
473 // which the generic optimizer doesn't understand their results are
474 // zero extended.
475 SDValue BaseV = Node->getOperand(0);
476 if (BaseV.getOpcode() != ISD::INTRINSIC_W_CHAIN)
477 return;
478
479 unsigned IntNo = cast<ConstantSDNode>(BaseV->getOperand(1))->getZExtValue();
480 uint64_t MaskV = MaskN->getZExtValue();
481
482 if (!((IntNo == Intrinsic::bpf_load_byte && MaskV == 0xFF) ||
483 (IntNo == Intrinsic::bpf_load_half && MaskV == 0xFFFF) ||
484 (IntNo == Intrinsic::bpf_load_word && MaskV == 0xFFFFFFFF)))
485 return;
486
487 LLVM_DEBUG(dbgs() << "Remove the redundant AND operation in: ";do { } while (false)
488 Node->dump(); dbgs() << '\n')do { } while (false);
489
490 I--;
491 CurDAG->ReplaceAllUsesWith(SDValue(Node, 0), BaseV);
492 I++;
493 CurDAG->DeleteNode(Node);
494}
495
496FunctionPass *llvm::createBPFISelDag(BPFTargetMachine &TM) {
497 return new BPFDAGToDAGISel(TM);
498}

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/CodeGen/MachineMemOperand.h

1//==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the declaration of the MachineMemOperand class, which is a
10// description of a memory reference. It is used to help track dependencies
11// in the backend.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
16#define LLVM_CODEGEN_MACHINEMEMOPERAND_H
17
18#include "llvm/ADT/BitmaskEnum.h"
19#include "llvm/ADT/PointerUnion.h"
20#include "llvm/CodeGen/PseudoSourceValue.h"
21#include "llvm/IR/DerivedTypes.h"
22#include "llvm/IR/Value.h" // PointerLikeTypeTraits<Value*>
23#include "llvm/Support/AtomicOrdering.h"
24#include "llvm/Support/DataTypes.h"
25#include "llvm/Support/LowLevelTypeImpl.h"
26
27namespace llvm {
28
29class FoldingSetNodeID;
30class MDNode;
31class raw_ostream;
32class MachineFunction;
33class ModuleSlotTracker;
34
35/// This class contains a discriminated union of information about pointers in
36/// memory operands, relating them back to LLVM IR or to virtual locations (such
37/// as frame indices) that are exposed during codegen.
38struct MachinePointerInfo {
39 /// This is the IR pointer value for the access, or it is null if unknown.
40 /// If this is null, then the access is to a pointer in the default address
41 /// space.
42 PointerUnion<const Value *, const PseudoSourceValue *> V;
43
44 /// Offset - This is an offset from the base Value*.
45 int64_t Offset;
46
 /// AddrSpace - LLVM IR address space of the access; every constructor
 /// below recomputes it from the pointer it is given (0 when null).
47 unsigned AddrSpace = 0;
48
 /// StackID - NOTE(review): defaults to 0 in all constructors; presumably
 /// identifies an alternate stack — confirm semantics against
 /// MachineFrameInfo before relying on non-zero values.
49 uint8_t StackID;
50
51 explicit MachinePointerInfo(const Value *v, int64_t offset = 0,
52 uint8_t ID = 0)
53 : V(v), Offset(offset), StackID(ID) {
54 AddrSpace = v ? v->getType()->getPointerAddressSpace() : 0;
55 }
56
57 explicit MachinePointerInfo(const PseudoSourceValue *v, int64_t offset = 0,
58 uint8_t ID = 0)
59 : V(v), Offset(offset), StackID(ID) {
60 AddrSpace = v ? v->getAddressSpace() : 0;
61 }
62
63 explicit MachinePointerInfo(unsigned AddressSpace = 0, int64_t offset = 0)
64 : V((const Value *)nullptr), Offset(offset), AddrSpace(AddressSpace),
65 StackID(0) {}
66
67 explicit MachinePointerInfo(
68 PointerUnion<const Value *, const PseudoSourceValue *> v,
69 int64_t offset = 0,
70 uint8_t ID = 0)
71 : V(v), Offset(offset), StackID(ID) {
72 if (V) {
73 if (const auto *ValPtr = V.dyn_cast<const Value*>())
74 AddrSpace = ValPtr->getType()->getPointerAddressSpace();
75 else
76 AddrSpace = V.get<const PseudoSourceValue*>()->getAddressSpace();
77 }
78 }
79
 /// Return a copy of this record with O added to the offset; the pointer
 /// kind (IR Value, pseudo value, or bare address space) is preserved.
80 MachinePointerInfo getWithOffset(int64_t O) const {
81 if (V.isNull())
82 return MachinePointerInfo(AddrSpace, Offset + O);
83 if (V.is<const Value*>())
84 return MachinePointerInfo(V.get<const Value*>(), Offset + O, StackID);
85 return MachinePointerInfo(V.get<const PseudoSourceValue*>(), Offset + O,
86 StackID);
87 }
88
89 /// Return true if memory region [V, V+Offset+Size) is known to be
90 /// dereferenceable.
91 bool isDereferenceable(unsigned Size, LLVMContext &C,
92 const DataLayout &DL) const;
93
94 /// Return the LLVM IR address space number that this pointer points into.
95 unsigned getAddrSpace() const;
96
97 /// Return a MachinePointerInfo record that refers to the constant pool.
98 static MachinePointerInfo getConstantPool(MachineFunction &MF);
99
100 /// Return a MachinePointerInfo record that refers to the specified
101 /// FrameIndex.
102 static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI,
103 int64_t Offset = 0);
104
105 /// Return a MachinePointerInfo record that refers to a jump table entry.
106 static MachinePointerInfo getJumpTable(MachineFunction &MF);
107
108 /// Return a MachinePointerInfo record that refers to a GOT entry.
109 static MachinePointerInfo getGOT(MachineFunction &MF);
110
111 /// Stack pointer relative access.
112 static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset,
113 uint8_t ID = 0);
114
115 /// Stack memory without other information.
116 static MachinePointerInfo getUnknownStack(MachineFunction &MF);
117};
118
119
120//===----------------------------------------------------------------------===//
121/// A description of a memory reference used in the backend.
122/// Instead of holding a StoreInst or LoadInst, this class holds the address
123/// Value of the reference along with a byte size and offset. This allows it
124/// to describe lowered loads and stores. Also, the special PseudoSourceValue
125/// objects can be used to represent loads and stores to memory locations
126/// that aren't explicit in the regular LLVM IR.
127///
128class MachineMemOperand {
129public:
130 /// Flags values. These may be or'd together.
131 enum Flags : uint16_t {
132 // No flags set.
133 MONone = 0,
134 /// The memory access reads data.
135 MOLoad = 1u << 0,
136 /// The memory access writes data.
137 MOStore = 1u << 1,
138 /// The memory access is volatile.
139 MOVolatile = 1u << 2,
140 /// The memory access is non-temporal.
141 MONonTemporal = 1u << 3,
142 /// The memory access is dereferenceable (i.e., doesn't trap).
143 MODereferenceable = 1u << 4,
144 /// The memory access always returns the same value (or traps).
145 MOInvariant = 1u << 5,
146
147 // Reserved for use by target-specific passes.
148 // Targets may override getSerializableMachineMemOperandTargetFlags() to
149 // enable MIR serialization/parsing of these flags. If more of these flags
150 // are added, the MIR printing/parsing code will need to be updated as well.
151 MOTargetFlag1 = 1u << 6,
152 MOTargetFlag2 = 1u << 7,
153 MOTargetFlag3 = 1u << 8,
154
155 LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ MOTargetFlag3)LLVM_BITMASK_LARGEST_ENUMERATOR = MOTargetFlag3
156 };
157
158private:
159 /// Atomic information for this memory operation.
160 struct MachineAtomicInfo {
161 /// Synchronization scope ID for this memory operation.
162 unsigned SSID : 8; // SyncScope::ID
163 /// Atomic ordering requirements for this memory operation. For cmpxchg
164 /// atomic operations, atomic ordering requirements when store occurs.
165 unsigned Ordering : 4; // enum AtomicOrdering
166 /// For cmpxchg atomic operations, atomic ordering requirements when store
167 /// does not occur.
168 unsigned FailureOrdering : 4; // enum AtomicOrdering
169 };
170
171 MachinePointerInfo PtrInfo;
172
173 /// Track the memory type of the access. An access size which is unknown or
174 /// too large to be represented by LLT should use the invalid LLT.
175 LLT MemoryType;
176
177 Flags FlagVals;
178 Align BaseAlign;
179 MachineAtomicInfo AtomicInfo;
180 AAMDNodes AAInfo;
181 const MDNode *Ranges;
182
183public:
184 /// Construct a MachineMemOperand object with the specified PtrInfo, flags,
185 /// size, and base alignment. For atomic operations the synchronization scope
186 /// and atomic ordering requirements must also be specified. For cmpxchg
187 /// atomic operations the atomic ordering requirements when store does not
188 /// occur must also be specified.
189 MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, uint64_t s,
190 Align a, const AAMDNodes &AAInfo = AAMDNodes(),
191 const MDNode *Ranges = nullptr,
192 SyncScope::ID SSID = SyncScope::System,
193 AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
194 AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
195 MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, LLT type, Align a,
196 const AAMDNodes &AAInfo = AAMDNodes(),
197 const MDNode *Ranges = nullptr,
198 SyncScope::ID SSID = SyncScope::System,
199 AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
200 AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
201
/// Return the MachinePointerInfo (base value plus byte offset) this operand
/// describes.
202 const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }
203
204 /// Return the base address of the memory access. This may either be a normal
205 /// LLVM IR Value, or one of the special values used in CodeGen.
206 /// Special values are those obtained via
207 /// PseudoSourceValue::getFixedStack(int), PseudoSourceValue::getStack, and
208 /// other PseudoSourceValue member functions which return objects which stand
209 /// for frame/stack pointer relative references and other special references
210 /// which are not representable in the high-level IR.
/// Returns null when the base is a PseudoSourceValue (PointerUnion dyn_cast).
211 const Value *getValue() const { return PtrInfo.V.dyn_cast<const Value*>(); }
212
/// Counterpart of getValue(): null when the base is an IR Value.
213 const PseudoSourceValue *getPseudoValue() const {
214 return PtrInfo.V.dyn_cast<const PseudoSourceValue*>();
215 }
216
/// Raw pointer of whichever union alternative is stored; useful as a map key.
217 const void *getOpaqueValue() const { return PtrInfo.V.getOpaqueValue(); }
218
219 /// Return the raw flags of the source value, \see Flags.
220 Flags getFlags() const { return FlagVals; }
221
222 /// Bitwise OR the current flags with the given flags.
223 void setFlags(Flags f) { FlagVals |= f; }
224
225 /// For normal values, this is a byte offset added to the base address.
226 /// For PseudoSourceValue::FPRel values, this is the FrameIndex number.
227 int64_t getOffset() const { return PtrInfo.Offset; }
228
/// Address space of the access, taken from the pointer info.
229 unsigned getAddrSpace() const { return PtrInfo.getAddrSpace(); }
230
231 /// Return the memory type of the memory reference. This should only be relied
232 /// on for GlobalISel G_* operation legalization.
233 LLT getMemoryType() const { return MemoryType; }
234
235 /// Return the size in bytes of the memory reference.
236 uint64_t getSize() const {
237 return MemoryType.isValid() ? MemoryType.getSizeInBytes() : ~UINT64_C(0)0UL;
7
'?' condition is true
8
Returning value, which participates in a condition later
238 }
239
240 /// Return the size in bits of the memory reference.
241 uint64_t getSizeInBits() const {
242 return MemoryType.isValid() ? MemoryType.getSizeInBits() : ~UINT64_C(0)0UL;
243 }
244
/// Return the tracked memory type (same storage as getMemoryType()).
245 LLT getType() const {
246 return MemoryType;
247 }
248
249 /// Return the minimum known alignment in bytes of the actual memory
250 /// reference.
251 Align getAlign() const;
252
253 /// Return the minimum known alignment in bytes of the base address, without
254 /// the offset.
255 Align getBaseAlign() const { return BaseAlign; }
256
257 /// Return the AA tags for the memory reference.
258 AAMDNodes getAAInfo() const { return AAInfo; }
259
260 /// Return the range tag for the memory reference.
261 const MDNode *getRanges() const { return Ranges; }
262
263 /// Returns the synchronization scope ID for this memory operation.
264 SyncScope::ID getSyncScopeID() const {
265 return static_cast<SyncScope::ID>(AtomicInfo.SSID)
266 }
309
310 /// Update this MachineMemOperand to reflect the alignment of MMO, if it has a
311 /// greater alignment. This must only be used when the new alignment applies
312 /// to all users of this MachineMemOperand.
313 void refineAlignment(const MachineMemOperand *MMO);
314
315 /// Change the SourceValue for this MachineMemOperand. This should only be
316 /// used when an object is being relocated and all references to it are being
317 /// updated.
318 void setValue(const Value *NewSV) { PtrInfo.V = NewSV; }
319 void setValue(const PseudoSourceValue *NewSV) { PtrInfo.V = NewSV; }
320 void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; }
321
322 /// Reset the tracked memory type.
323 void setType(LLT NewTy) {
324 MemoryType = NewTy;
325 }
326
327 /// Profile - Gather unique data for the object.
328 ///
329 void Profile(FoldingSetNodeID &ID) const;
330
331 /// Support for operator<<.
332 /// @{
333 void print(raw_ostream &OS, ModuleSlotTracker &MST,
334 SmallVectorImpl<StringRef> &SSNs, const LLVMContext &Context,
335 const MachineFrameInfo *MFI, const TargetInstrInfo *TII) const;
336 /// @}
337
/// Structural equality: compares base value, size, offset, flags, AA info,
/// ranges, alignment and address space. NOTE: the atomic info (sync scope
/// and orderings) is visibly NOT part of this comparison.
338 friend bool operator==(const MachineMemOperand &LHS,
339 const MachineMemOperand &RHS) {
340 return LHS.getValue() == RHS.getValue() &&
341 LHS.getPseudoValue() == RHS.getPseudoValue() &&
342 LHS.getSize() == RHS.getSize() &&
343 LHS.getOffset() == RHS.getOffset() &&
344 LHS.getFlags() == RHS.getFlags() &&
345 LHS.getAAInfo() == RHS.getAAInfo() &&
346 LHS.getRanges() == RHS.getRanges() &&
347 LHS.getAlign() == RHS.getAlign() &&
348 LHS.getAddrSpace() == RHS.getAddrSpace();
349 }
350
/// Negation of operator==.
351 friend bool operator!=(const MachineMemOperand &LHS,
352 const MachineMemOperand &RHS) {
353 return !(LHS == RHS);
354 }
355};
356
357} // End llvm namespace
358
359#endif

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/CodeGen/SelectionDAGNodes.h

1//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SDNode class and derived classes, which are used to
10// represent the nodes and operations present in a SelectionDAG. These nodes
11// and operations are machine code level operations, with some similarities to
12// the GCC RTL representation.
13//
14// Clients should include the SelectionDAG.h file instead of this file directly.
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19#define LLVM_CODEGEN_SELECTIONDAGNODES_H
20
21#include "llvm/ADT/APFloat.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/FoldingSet.h"
25#include "llvm/ADT/GraphTraits.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/ilist_node.h"
29#include "llvm/ADT/iterator.h"
30#include "llvm/ADT/iterator_range.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/Register.h"
34#include "llvm/CodeGen/ValueTypes.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DebugLoc.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/Metadata.h"
40#include "llvm/IR/Operator.h"
41#include "llvm/Support/AlignOf.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/MachineValueType.h"
46#include "llvm/Support/TypeSize.h"
47#include <algorithm>
48#include <cassert>
49#include <climits>
50#include <cstddef>
51#include <cstdint>
52#include <cstring>
53#include <iterator>
54#include <string>
55#include <tuple>
56
57namespace llvm {
58
59class APInt;
60class Constant;
61template <typename T> struct DenseMapInfo;
62class GlobalValue;
63class MachineBasicBlock;
64class MachineConstantPoolValue;
65class MCSymbol;
66class raw_ostream;
67class SDNode;
68class SelectionDAG;
69class Type;
70class Value;
71
72void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
73 bool force = false);
74
75/// This represents a list of ValueType's that has been intern'd by
76/// a SelectionDAG. Instances of this simple value class are returned by
77/// SelectionDAG::getVTList(...).
78///
79struct SDVTList {
// Pointer into the interned array of value types; not owned by this struct.
80 const EVT *VTs;
// Number of entries in the VTs array.
81 unsigned int NumVTs;
82};
83
84namespace ISD {
85
86 /// Node predicates
87
// Declarations only — the definitions live elsewhere (not in this header).
88/// If N is a BUILD_VECTOR or SPLAT_VECTOR node whose elements are all the
89/// same constant or undefined, return true and return the constant value in
90/// \p SplatValue.
91bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
92
93/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
94/// all of the elements are ~0 or undef. If \p BuildVectorOnly is set to
95/// true, it only checks BUILD_VECTOR.
96bool isConstantSplatVectorAllOnes(const SDNode *N,
97 bool BuildVectorOnly = false);
98
99/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
100/// all of the elements are 0 or undef. If \p BuildVectorOnly is set to true, it
101/// only checks BUILD_VECTOR.
102bool isConstantSplatVectorAllZeros(const SDNode *N,
103 bool BuildVectorOnly = false);
104
105/// Return true if the specified node is a BUILD_VECTOR where all of the
106/// elements are ~0 or undef.
107bool isBuildVectorAllOnes(const SDNode *N);
108
109/// Return true if the specified node is a BUILD_VECTOR where all of the
110/// elements are 0 or undef.
111bool isBuildVectorAllZeros(const SDNode *N);
112
113/// Return true if the specified node is a BUILD_VECTOR node of all
114/// ConstantSDNode or undef.
115bool isBuildVectorOfConstantSDNodes(const SDNode *N);
116
117/// Return true if the specified node is a BUILD_VECTOR node of all
118/// ConstantFPSDNode or undef.
119bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
120
121/// Return true if the node has at least one operand and all operands of the
122/// specified node are ISD::UNDEF.
123bool allOperandsUndef(const SDNode *N);
124
125} // end namespace ISD
126
127//===----------------------------------------------------------------------===//
128/// Unlike LLVM values, Selection DAG nodes may return multiple
129/// values as the result of a computation. Many nodes return multiple values,
130/// from loads (which define a token and a return value) to ADDC (which returns
131/// a result and a carry value), to calls (which may return an arbitrary number
132/// of values).
133///
134/// As such, each use of a SelectionDAG computation must indicate the node that
135/// computes it as well as which return value to use from that node. This pair
136/// of information is represented with the SDValue value type.
137///
138class SDValue {
// DenseMapInfo<SDValue> (declared later in this file) pokes ResNo directly
// to build the empty/tombstone sentinel keys.
139 friend struct DenseMapInfo<SDValue>;
140
141 SDNode *Node = nullptr; // The node defining the value we are using.
142 unsigned ResNo = 0; // Which return value of the node we are using.
143
144public:
145 SDValue() = default;
146 SDValue(SDNode *node, unsigned resno);
147
148 /// get the index which selects a specific result in the SDNode
149 unsigned getResNo() const { return ResNo; }
150
151 /// get the SDNode which holds the desired result
152 SDNode *getNode() const { return Node; }
153
154 /// set the SDNode
155 void setNode(SDNode *N) { Node = N; }
156
// NOTE: no null check here — callers are responsible for Node being valid.
157 inline SDNode *operator->() const { return Node; }
158
// Equality/ordering compare the (Node, ResNo) pair.
159 bool operator==(const SDValue &O) const {
160 return Node == O.Node && ResNo == O.ResNo;
161 }
162 bool operator!=(const SDValue &O) const {
163 return !operator==(O);
164 }
165 bool operator<(const SDValue &O) const {
166 return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
167 }
168 explicit operator bool() const {
169 return Node != nullptr;
170 }
171
/// Return another result value (index R) of the same node.
172 SDValue getValue(unsigned R) const {
173 return SDValue(Node, R);
174 }
175
176 /// Return true if this node is an operand of N.
177 bool isOperandOf(const SDNode *N) const;
178
179 /// Return the ValueType of the referenced return value.
180 inline EVT getValueType() const;
181
182 /// Return the simple ValueType of the referenced return value.
183 MVT getSimpleValueType() const {
184 return getValueType().getSimpleVT();
185 }
186
187 /// Returns the size of the value in bits.
188 ///
189 /// If the value type is a scalable vector type, the scalable property will
190 /// be set and the runtime size will be a positive integer multiple of the
191 /// base size.
192 TypeSize getValueSizeInBits() const {
193 return getValueType().getSizeInBits();
194 }
195
/// Fixed bit width of the scalar element of this value's type.
196 uint64_t getScalarValueSizeInBits() const {
197 return getValueType().getScalarType().getFixedSizeInBits();
198 }
199
200 // Forwarding methods - These forward to the corresponding methods in SDNode.
201 inline unsigned getOpcode() const;
202 inline unsigned getNumOperands() const;
203 inline const SDValue &getOperand(unsigned i) const;
204 inline uint64_t getConstantOperandVal(unsigned i) const;
205 inline const APInt &getConstantOperandAPInt(unsigned i) const;
206 inline bool isTargetMemoryOpcode() const;
207 inline bool isTargetOpcode() const;
208 inline bool isMachineOpcode() const;
209 inline bool isUndef() const;
210 inline unsigned getMachineOpcode() const;
211 inline const DebugLoc &getDebugLoc() const;
212 inline void dump() const;
213 inline void dump(const SelectionDAG *G) const;
214 inline void dumpr() const;
215 inline void dumpr(const SelectionDAG *G) const;
216
217 /// Return true if this operand (which must be a chain) reaches the
218 /// specified operand without crossing any side-effecting instructions.
219 /// In practice, this looks through token factors and non-volatile loads.
220 /// In order to remain efficient, this only
221 /// looks a couple of nodes in, it does not do an exhaustive search.
222 bool reachesChainWithoutSideEffects(SDValue Dest,
223 unsigned Depth = 2) const;
224
225 /// Return true if there are no nodes using value ResNo of Node.
226 inline bool use_empty() const;
227
228 /// Return true if there is exactly one node using value ResNo of Node.
229 inline bool hasOneUse() const;
230};
231
232template<> struct DenseMapInfo<SDValue> {
233 static inline SDValue getEmptyKey() {
234 SDValue V;
235 V.ResNo = -1U;
236 return V;
237 }
238
239 static inline SDValue getTombstoneKey() {
240 SDValue V;
241 V.ResNo = -2U;
242 return V;
243 }
244
245 static unsigned getHashValue(const SDValue &Val) {
246 return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
247 (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
248 }
249
250 static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
251 return LHS == RHS;
252 }
253};
254
255/// Allow casting operators to work directly on
256/// SDValues as if they were SDNode*'s.
// These simplify_type specializations let isa<>/cast<>/dyn_cast<> accept an
// SDValue by unwrapping it to its underlying SDNode*.
257template<> struct simplify_type<SDValue> {
258 using SimpleType = SDNode *;
259
260 static SimpleType getSimplifiedValue(SDValue &Val) {
261 return Val.getNode();
262 }
263};
// Const variant; note the SimpleType deliberately stays a non-const SDNode*.
264template<> struct simplify_type<const SDValue> {
265 using SimpleType = /*const*/ SDNode *;
266
267 static SimpleType getSimplifiedValue(const SDValue &Val) {
268 return Val.getNode();
269 }
270};
271
272/// Represents a use of a SDNode. This class holds an SDValue,
273/// which records the SDNode being used and the result number, a
274/// pointer to the SDNode using the value, and Next and Prev pointers,
275/// which link together all the uses of an SDNode.
276///
277class SDUse {
278 /// Val - The value being used.
279 SDValue Val;
280 /// User - The user of this value.
281 SDNode *User = nullptr;
282 /// Prev, Next - Pointers to the uses list of the SDNode referred by
283 /// this operand.
// Prev points at the *pointer* that references this use (either the list
// head or the previous node's Next), which makes unlinking O(1).
284 SDUse **Prev = nullptr;
285 SDUse *Next = nullptr;
286
287public:
288 SDUse() = default;
// Non-copyable: a use is an intrusive list node tied to its position.
289 SDUse(const SDUse &U) = delete;
290 SDUse &operator=(const SDUse &) = delete;
291
292 /// Normally SDUse will just implicitly convert to an SDValue that it holds.
293 operator const SDValue&() const { return Val; }
294
295 /// If implicit conversion to SDValue doesn't work, the get() method returns
296 /// the SDValue.
297 const SDValue &get() const { return Val; }
298
299 /// This returns the SDNode that contains this Use.
300 SDNode *getUser() { return User; }
301
302 /// Get the next SDUse in the use list.
303 SDUse *getNext() const { return Next; }
304
305 /// Convenience function for get().getNode().
306 SDNode *getNode() const { return Val.getNode(); }
307 /// Convenience function for get().getResNo().
308 unsigned getResNo() const { return Val.getResNo(); }
309 /// Convenience function for get().getValueType().
310 EVT getValueType() const { return Val.getValueType(); }
311
312 /// Convenience function for get().operator==
313 bool operator==(const SDValue &V) const {
314 return Val == V;
315 }
316
317 /// Convenience function for get().operator!=
318 bool operator!=(const SDValue &V) const {
319 return Val != V;
320 }
321
322 /// Convenience function for get().operator<
323 bool operator<(const SDValue &V) const {
324 return Val < V;
325 }
326
327private:
328 friend class SelectionDAG;
329 friend class SDNode;
330 // TODO: unfriend HandleSDNode once we fix its operand handling.
331 friend class HandleSDNode;
332
333 void setUser(SDNode *p) { User = p; }
334
335 /// Remove this use from its existing use list, assign it the
336 /// given value, and add it to the new value's node's use list.
337 inline void set(const SDValue &V);
338 /// Like set, but only supports initializing a newly-allocated
339 /// SDUse with a non-null value.
340 inline void setInitial(const SDValue &V);
341 /// Like set, but only sets the Node portion of the value,
342 /// leaving the ResNo portion unmodified.
343 inline void setNode(SDNode *N);
344
// Splice this use onto the front of *List. Order matters: the old head's
// Prev must be redirected before the head pointer is overwritten.
345 void addToList(SDUse **List) {
346 Next = *List;
347 if (Next) Next->Prev = &Next;
348 Prev = List;
349 *List = this;
350 }
351
// Unlink: make whatever pointed at this use point past it, and fix up the
// successor's back-pointer.
352 void removeFromList() {
353 *Prev = Next;
354 if (Next) Next->Prev = Prev;
355 }
356};
357
358/// simplify_type specializations - Allow casting operators to work directly on
359/// SDValues as if they were SDNode*'s.
// Same unwrapping as the SDValue specializations above, but for SDUse:
// isa<>/cast<> on an SDUse operate on the used SDNode.
360template<> struct simplify_type<SDUse> {
361 using SimpleType = SDNode *;
362
363 static SimpleType getSimplifiedValue(SDUse &Val) {
364 return Val.getNode();
365 }
366};
367
368/// These are IR-level optimization flags that may be propagated to SDNodes.
369/// TODO: This data structure should be shared by the IR optimizer and the
370/// the backend.
371struct SDNodeFlags {
372private:
373 bool NoUnsignedWrap : 1;
374 bool NoSignedWrap : 1;
375 bool Exact : 1;
376 bool NoNaNs : 1;
377 bool NoInfs : 1;
378 bool NoSignedZeros : 1;
379 bool AllowReciprocal : 1;
380 bool AllowContract : 1;
381 bool ApproximateFuncs : 1;
382 bool AllowReassociation : 1;
383
384 // We assume instructions do not raise floating-point exceptions by default,
385 // and only those marked explicitly may do so. We could choose to represent
386 // this via a positive "FPExcept" flags like on the MI level, but having a
387 // negative "NoFPExcept" flag here (that defaults to true) makes the flag
388 // intersection logic more straightforward.
389 bool NoFPExcept : 1;
390
391public:
392 /// Default constructor turns off all optimization flags.
393 SDNodeFlags()
394 : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false),
395 NoInfs(false), NoSignedZeros(false), AllowReciprocal(false),
396 AllowContract(false), ApproximateFuncs(false),
397 AllowReassociation(false), NoFPExcept(false) {}
398
399 /// Propagate the fast-math-flags from an IR FPMathOperator.
400 void copyFMF(const FPMathOperator &FPMO) {
401 setNoNaNs(FPMO.hasNoNaNs());
402 setNoInfs(FPMO.hasNoInfs());
403 setNoSignedZeros(FPMO.hasNoSignedZeros());
404 setAllowReciprocal(FPMO.hasAllowReciprocal());
405 setAllowContract(FPMO.hasAllowContract());
406 setApproximateFuncs(FPMO.hasApproxFunc());
407 setAllowReassociation(FPMO.hasAllowReassoc());
408 }
409
410 // These are mutators for each flag.
411 void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
412 void setNoSignedWrap(bool b) { NoSignedWrap = b; }
413 void setExact(bool b) { Exact = b; }
414 void setNoNaNs(bool b) { NoNaNs = b; }
415 void setNoInfs(bool b) { NoInfs = b; }
416 void setNoSignedZeros(bool b) { NoSignedZeros = b; }
417 void setAllowReciprocal(bool b) { AllowReciprocal = b; }
418 void setAllowContract(bool b) { AllowContract = b; }
419 void setApproximateFuncs(bool b) { ApproximateFuncs = b; }
420 void setAllowReassociation(bool b) { AllowReassociation = b; }
421 void setNoFPExcept(bool b) { NoFPExcept = b; }
422
423 // These are accessors for each flag.
424 bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
425 bool hasNoSignedWrap() const { return NoSignedWrap; }
426 bool hasExact() const { return Exact; }
427 bool hasNoNaNs() const { return NoNaNs; }
428 bool hasNoInfs() const { return NoInfs; }
429 bool hasNoSignedZeros() const { return NoSignedZeros; }
430 bool hasAllowReciprocal() const { return AllowReciprocal; }
431 bool hasAllowContract() const { return AllowContract; }
432 bool hasApproximateFuncs() const { return ApproximateFuncs; }
433 bool hasAllowReassociation() const { return AllowReassociation; }
434 bool hasNoFPExcept() const { return NoFPExcept; }
435
436 /// Clear any flags in this flag set that aren't also set in Flags. All
437 /// flags will be cleared if Flags are undefined.
438 void intersectWith(const SDNodeFlags Flags) {
439 NoUnsignedWrap &= Flags.NoUnsignedWrap;
440 NoSignedWrap &= Flags.NoSignedWrap;
441 Exact &= Flags.Exact;
442 NoNaNs &= Flags.NoNaNs;
443 NoInfs &= Flags.NoInfs;
444 NoSignedZeros &= Flags.NoSignedZeros;
445 AllowReciprocal &= Flags.AllowReciprocal;
446 AllowContract &= Flags.AllowContract;
447 ApproximateFuncs &= Flags.ApproximateFuncs;
448 AllowReassociation &= Flags.AllowReassociation;
449 NoFPExcept &= Flags.NoFPExcept;
450 }
451};
452
453/// Represents one node in the SelectionDAG.
454///
455class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
456private:
457 /// The operation that this node performs.
458 int16_t NodeType;
459
460protected:
461 // We define a set of mini-helper classes to help us interpret the bits in our
462 // SubclassData. These are designed to fit within a uint16_t so they pack
463 // with NodeType.
464
465#if defined(_AIX) && (!defined(__GNUC__4) || defined(__clang__1))
466// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
467// and give the `pack` pragma push semantics.
468#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")pack(2)
469#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")pack(pop)
470#else
471#define BEGIN_TWO_BYTE_PACK()
472#define END_TWO_BYTE_PACK()
473#endif
474
475BEGIN_TWO_BYTE_PACK()
476 class SDNodeBitfields {
477 friend class SDNode;
478 friend class MemIntrinsicSDNode;
479 friend class MemSDNode;
480 friend class SelectionDAG;
481
482 uint16_t HasDebugValue : 1;
483 uint16_t IsMemIntrinsic : 1;
484 uint16_t IsDivergent : 1;
485 };
486 enum { NumSDNodeBits = 3 };
487
488 class ConstantSDNodeBitfields {
489 friend class ConstantSDNode;
490
491 uint16_t : NumSDNodeBits;
492
493 uint16_t IsOpaque : 1;
494 };
495
496 class MemSDNodeBitfields {
497 friend class MemSDNode;
498 friend class MemIntrinsicSDNode;
499 friend class AtomicSDNode;
500
501 uint16_t : NumSDNodeBits;
502
503 uint16_t IsVolatile : 1;
504 uint16_t IsNonTemporal : 1;
505 uint16_t IsDereferenceable : 1;
506 uint16_t IsInvariant : 1;
507 };
508 enum { NumMemSDNodeBits = NumSDNodeBits + 4 };
509
510 class LSBaseSDNodeBitfields {
511 friend class LSBaseSDNode;
512 friend class VPLoadStoreSDNode;
513 friend class MaskedLoadStoreSDNode;
514 friend class MaskedGatherScatterSDNode;
515 friend class VPGatherScatterSDNode;
516
517 uint16_t : NumMemSDNodeBits;
518
519 // This storage is shared between disparate class hierarchies to hold an
520 // enumeration specific to the class hierarchy in use.
521 // LSBaseSDNode => enum ISD::MemIndexedMode
522 // VPLoadStoreBaseSDNode => enum ISD::MemIndexedMode
523 // MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode
524 // VPGatherScatterSDNode => enum ISD::MemIndexType
525 // MaskedGatherScatterSDNode => enum ISD::MemIndexType
526 uint16_t AddressingMode : 3;
527 };
528 enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };
529
530 class LoadSDNodeBitfields {
531 friend class LoadSDNode;
532 friend class VPLoadSDNode;
533 friend class MaskedLoadSDNode;
534 friend class MaskedGatherSDNode;
535 friend class VPGatherSDNode;
536
537 uint16_t : NumLSBaseSDNodeBits;
538
539 uint16_t ExtTy : 2; // enum ISD::LoadExtType
540 uint16_t IsExpanding : 1;
541 };
542
543 class StoreSDNodeBitfields {
544 friend class StoreSDNode;
545 friend class VPStoreSDNode;
546 friend class MaskedStoreSDNode;
547 friend class MaskedScatterSDNode;
548 friend class VPScatterSDNode;
549
550 uint16_t : NumLSBaseSDNodeBits;
551
552 uint16_t IsTruncating : 1;
553 uint16_t IsCompressing : 1;
554 };
555
556 union {
557 char RawSDNodeBits[sizeof(uint16_t)];
558 SDNodeBitfields SDNodeBits;
559 ConstantSDNodeBitfields ConstantSDNodeBits;
560 MemSDNodeBitfields MemSDNodeBits;
561 LSBaseSDNodeBitfields LSBaseSDNodeBits;
562 LoadSDNodeBitfields LoadSDNodeBits;
563 StoreSDNodeBitfields StoreSDNodeBits;
564 };
565END_TWO_BYTE_PACK()
566#undef BEGIN_TWO_BYTE_PACK
567#undef END_TWO_BYTE_PACK
568
569 // RawSDNodeBits must cover the entirety of the union. This means that all of
570 // the union's members must have size <= RawSDNodeBits. We write the RHS as
571 // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
572 static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
573 static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
574 static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
575 static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
576 static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
577 static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
578
579private:
580 friend class SelectionDAG;
581 // TODO: unfriend HandleSDNode once we fix its operand handling.
582 friend class HandleSDNode;
583
584 /// Unique id per SDNode in the DAG.
585 int NodeId = -1;
586
587 /// The values that are used by this operation.
588 SDUse *OperandList = nullptr;
589
590 /// The types of the values this node defines. SDNode's may
591 /// define multiple values simultaneously.
592 const EVT *ValueList;
593
594 /// List of uses for this SDNode.
595 SDUse *UseList = nullptr;
596
597 /// The number of entries in the Operand/Value list.
598 unsigned short NumOperands = 0;
599 unsigned short NumValues;
600
601 // The ordering of the SDNodes. It roughly corresponds to the ordering of the
602 // original LLVM instructions.
603 // This is used for turning off scheduling, because we'll forgo
604 // the normal scheduling algorithms and output the instructions according to
605 // this ordering.
606 unsigned IROrder;
607
608 /// Source line information.
609 DebugLoc debugLoc;
610
611 /// Return a pointer to the specified value type.
612 static const EVT *getValueTypeList(EVT VT);
613
614 SDNodeFlags Flags;
615
616public:
617 /// Unique and persistent id per SDNode in the DAG.
618 /// Used for debug printing.
619 uint16_t PersistentId;
620
621 //===--------------------------------------------------------------------===//
622 // Accessors
623 //
624
625 /// Return the SelectionDAG opcode value for this node. For
626 /// pre-isel nodes (those for which isMachineOpcode returns false), these
627 /// are the opcode values in the ISD and <target>ISD namespaces. For
628 /// post-isel opcodes, see getMachineOpcode.
629 unsigned getOpcode() const { return (unsigned short)NodeType; }
630
631 /// Test if this node has a target-specific opcode (in the
632 /// \<target\>ISD namespace).
633 bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
634
635 /// Test if this node has a target-specific opcode that may raise
636 /// FP exceptions (in the \<target\>ISD namespace and greater than
637 /// FIRST_TARGET_STRICTFP_OPCODE). Note that all target memory
638 /// opcode are currently automatically considered to possibly raise
639 /// FP exceptions as well.
640 bool isTargetStrictFPOpcode() const {
641 return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE;
642 }
643
644 /// Test if this node has a target-specific
645 /// memory-referencing opcode (in the \<target\>ISD namespace and
646 /// greater than FIRST_TARGET_MEMORY_OPCODE).
647 bool isTargetMemoryOpcode() const {
648 return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
649 }
650
651 /// Return true if the type of the node type undefined.
652 bool isUndef() const { return NodeType == ISD::UNDEF; }
653
654 /// Test if this node is a memory intrinsic (with valid pointer information).
655 /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
656 /// non-memory intrinsics (with chains) that are not really instances of
657 /// MemSDNode. For such nodes, we need some extra state to determine the
658 /// proper classof relationship.
659 bool isMemIntrinsic() const {
660 return (NodeType == ISD::INTRINSIC_W_CHAIN ||
661 NodeType == ISD::INTRINSIC_VOID) &&
662 SDNodeBits.IsMemIntrinsic;
663 }
664
665 /// Test if this node is a strict floating point pseudo-op.
666 bool isStrictFPOpcode() {
667 switch (NodeType) {
668 default:
669 return false;
670 case ISD::STRICT_FP16_TO_FP:
671 case ISD::STRICT_FP_TO_FP16:
672#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
673 case ISD::STRICT_##DAGN:
674#include "llvm/IR/ConstrainedOps.def"
675 return true;
676 }
677 }
678
679 /// Test if this node has a post-isel opcode, directly
680 /// corresponding to a MachineInstr opcode.
681 bool isMachineOpcode() const { return NodeType < 0; }
682
683 /// This may only be called if isMachineOpcode returns
684 /// true. It returns the MachineInstr opcode value that the node's opcode
685 /// corresponds to.
686 unsigned getMachineOpcode() const {
687 assert(isMachineOpcode() && "Not a MachineInstr opcode!")(static_cast<void> (0));
688 return ~NodeType;
689 }
690
691 bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
692 void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }
693
694 bool isDivergent() const { return SDNodeBits.IsDivergent; }
695
696 /// Return true if there are no uses of this node.
697 bool use_empty() const { return UseList == nullptr; }
698
699 /// Return true if there is exactly one use of this node.
700 bool hasOneUse() const { return hasSingleElement(uses()); }
701
702 /// Return the number of uses of this node. This method takes
703 /// time proportional to the number of uses.
704 size_t use_size() const { return std::distance(use_begin(), use_end()); }
705
706 /// Return the unique node id.
707 int getNodeId() const { return NodeId; }
708
709 /// Set unique node id.
710 void setNodeId(int Id) { NodeId = Id; }
711
712 /// Return the node ordering.
713 unsigned getIROrder() const { return IROrder; }
714
715 /// Set the node ordering.
716 void setIROrder(unsigned Order) { IROrder = Order; }
717
718 /// Return the source location info.
719 const DebugLoc &getDebugLoc() const { return debugLoc; }
720
721 /// Set source location info. Try to avoid this, putting
722 /// it in the constructor is preferable.
723 void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
724
  /// This class provides iterator support for SDUse
  /// operands that use a specific SDNode.
  /// It walks the singly-linked list of SDUse records threaded through
  /// SDUse::getNext(); the end iterator is represented by a null Op.
  class use_iterator {
    friend class SDNode;

    // Current SDUse in the use list; nullptr means "end".
    SDUse *Op = nullptr;

    explicit use_iterator(SDUse *op) : Op(op) {}

  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = SDUse;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    use_iterator() = default;
    use_iterator(const use_iterator &I) : Op(I.Op) {}

    bool operator==(const use_iterator &x) const {
      return Op == x.Op;
    }
    bool operator!=(const use_iterator &x) const {
      return !operator==(x);
    }

    /// Return true if this iterator is at the end of uses list.
    bool atEnd() const { return Op == nullptr; }

    // Iterator traversal: forward iteration only.
    use_iterator &operator++() {                // Preincrement
      assert(Op && "Cannot increment end iterator!");
      Op = Op->getNext();
      return *this;
    }

    use_iterator operator++(int) {              // Postincrement
      use_iterator tmp = *this; ++*this; return tmp;
    }

    /// Retrieve a pointer to the current user node.
    SDNode *operator*() const {
      assert(Op && "Cannot dereference end iterator!");
      return Op->getUser();
    }

    SDNode *operator->() const { return operator*(); }

    /// Return the SDUse record this iterator currently points at.
    SDUse &getUse() const { return *Op; }

    /// Retrieve the operand # of this use in its user.
    unsigned getOperandNo() const {
      assert(Op && "Cannot dereference end iterator!");
      // SDUse records live contiguously in the user's OperandList, so the
      // operand number is the pointer distance from the list head.
      return (unsigned)(Op - Op->getUser()->OperandList);
    }
  };
781
  /// Provide iteration support to walk over all uses of an SDNode.
  use_iterator use_begin() const {
    return use_iterator(UseList);
  }

  /// The end iterator is the null SDUse pointer (see use_iterator::atEnd).
  static use_iterator use_end() { return use_iterator(nullptr); }

  /// Range covering every use of this node, for range-for iteration.
  inline iterator_range<use_iterator> uses() {
    return make_range(use_begin(), use_end());
  }
  inline iterator_range<use_iterator> uses() const {
    return make_range(use_begin(), use_end());
  }
795
  /// Return true if there are exactly NUSES uses of the indicated value.
  /// This method ignores uses of other values defined by this operation.
  bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;

  /// Return true if there are any use of the indicated value.
  /// This method ignores uses of other values defined by this operation.
  bool hasAnyUseOfValue(unsigned Value) const;

  /// Return true if this node is the only use of N.
  bool isOnlyUserOf(const SDNode *N) const;

  /// Return true if this node is an operand of N.
  bool isOperandOf(const SDNode *N) const;

  /// Return true if this node is a predecessor of N.
  /// NOTE: Implemented on top of hasPredecessor and every bit as
  /// expensive. Use carefully.
  bool isPredecessorOf(const SDNode *N) const {
    return N->hasPredecessor(this);
  }

  /// Return true if N is a predecessor of this node.
  /// N is either an operand of this node, or can be reached by recursively
  /// traversing up the operands.
  /// NOTE: This is an expensive method. Use it carefully.
  bool hasPredecessor(const SDNode *N) const;
822
  /// Returns true if N is a predecessor of any node in Worklist. This
  /// helper keeps Visited and Worklist sets externally to allow unions
  /// searches to be performed in parallel, caching of results across
  /// queries and incremental addition to Worklist. Stops early if N is
  /// found but will resume. Remember to clear Visited and Worklists
  /// if DAG changes. MaxSteps gives a maximum number of nodes to visit before
  /// giving up. The TopologicalPrune flag signals that positive NodeIds are
  /// topologically ordered (Operands have strictly smaller node id) and search
  /// can be pruned leveraging this.
  static bool hasPredecessorHelper(const SDNode *N,
                                   SmallPtrSetImpl<const SDNode *> &Visited,
                                   SmallVectorImpl<const SDNode *> &Worklist,
                                   unsigned int MaxSteps = 0,
                                   bool TopologicalPrune = false) {
    // Nodes skipped by topological pruning; pushed back on Worklist at exit
    // so a later call with more context can still visit them.
    SmallVector<const SDNode *, 8> DeferredNodes;
    if (Visited.count(N))
      return true;

    // Node Id's are assigned in three places: As a topological
    // ordering (> 0), during legalization (results in values set to
    // 0), new nodes (set to -1). If N has a topolgical id then we
    // know that all nodes with ids smaller than it cannot be
    // successors and we need not check them. Filter out all node
    // that can't be matches. We add them to the worklist before exit
    // in case of multiple calls. Note that during selection the topological id
    // may be violated if a node's predecessor is selected before it. We mark
    // this at selection negating the id of unselected successors and
    // restricting topological pruning to positive ids.

    int NId = N->getNodeId();
    // If we Invalidated the Id, reconstruct original NId.
    if (NId < -1)
      NId = -(NId + 1);

    bool Found = false;
    while (!Worklist.empty()) {
      const SDNode *M = Worklist.pop_back_val();
      int MId = M->getNodeId();
      // TokenFactor nodes are exempt from pruning: their id ordering does
      // not constrain reachability of their operands.
      if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
          (MId > 0) && (MId < NId)) {
        DeferredNodes.push_back(M);
        continue;
      }
      // Expand M: enqueue unvisited operands and check each against N.
      for (const SDValue &OpV : M->op_values()) {
        SDNode *Op = OpV.getNode();
        if (Visited.insert(Op).second)
          Worklist.push_back(Op);
        if (Op == N)
          Found = true;
      }
      if (Found)
        break;
      if (MaxSteps != 0 && Visited.size() >= MaxSteps)
        break;
    }
    // Push deferred nodes back on worklist.
    Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
    // If we bailed early, conservatively return found.
    if (MaxSteps != 0 && Visited.size() >= MaxSteps)
      return true;
    return Found;
  }
885
  /// Return true if all the users of N are contained in Nodes.
  /// NOTE: Requires at least one match, but doesn't require them all.
  static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);

  /// Return the number of values used by this operation.
  unsigned getNumOperands() const { return NumOperands; }

  /// Return the maximum number of operands that a SDNode can hold.
  /// Bounded by the width of the NumOperands field.
  static constexpr size_t getMaxNumOperands() {
    return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
  }

  /// Helper method returns the integer value of a ConstantSDNode operand.
  inline uint64_t getConstantOperandVal(unsigned Num) const;

  /// Helper method returns the APInt of a ConstantSDNode operand.
  inline const APInt &getConstantOperandAPInt(unsigned Num) const;

  const SDValue &getOperand(unsigned Num) const {
    assert(Num < NumOperands && "Invalid child # of SDNode!");
    return OperandList[Num];
  }
908
  using op_iterator = SDUse *;

  op_iterator op_begin() const { return OperandList; }
  op_iterator op_end() const { return OperandList+NumOperands; }
  /// View of the operand list as an ArrayRef of SDUse records.
  ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }

  /// Iterator for directly iterating over the operand SDValue's.
  /// Adapts op_iterator so dereferencing yields the SDValue held by each
  /// SDUse rather than the SDUse itself.
  struct value_op_iterator
      : iterator_adaptor_base<value_op_iterator, op_iterator,
                              std::random_access_iterator_tag, SDValue,
                              ptrdiff_t, value_op_iterator *,
                              value_op_iterator *> {
    explicit value_op_iterator(SDUse *U = nullptr)
      : iterator_adaptor_base(U) {}

    const SDValue &operator*() const { return I->get(); }
  };

  /// Range over the operands as SDValues, for range-for iteration.
  iterator_range<value_op_iterator> op_values() const {
    return make_range(value_op_iterator(op_begin()),
                      value_op_iterator(op_end()));
  }
931
  /// Return the list of result value types as an SDVTList.
  SDVTList getVTList() const {
    SDVTList X = { ValueList, NumValues };
    return X;
  }

  /// If this node has a glue operand, return the node
  /// to which the glue operand points. Otherwise return NULL.
  /// A glue operand, when present, is always the last operand.
  SDNode *getGluedNode() const {
    if (getNumOperands() != 0 &&
        getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
      return getOperand(getNumOperands()-1).getNode();
    return nullptr;
  }

  /// If this node has a glue value with a user, return
  /// the user (there is at most one). Otherwise return NULL.
  SDNode *getGluedUser() const {
    for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
      if (UI.getUse().get().getValueType() == MVT::Glue)
        return *UI;
    return nullptr;
  }
954
  /// Return the optimization flags (nsw/nuw/fast-math, etc.) for this node.
  SDNodeFlags getFlags() const { return Flags; }
  void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }

  /// Clear any flags in this node that aren't also set in Flags.
  /// If Flags is not in a defined state then this has no effect.
  void intersectFlagsWith(const SDNodeFlags Flags);

  /// Return the number of values defined/returned by this operator.
  unsigned getNumValues() const { return NumValues; }

  /// Return the type of a specified result.
  EVT getValueType(unsigned ResNo) const {
    assert(ResNo < NumValues && "Illegal result number!");
    return ValueList[ResNo];
  }

  /// Return the type of a specified result as a simple type.
  MVT getSimpleValueType(unsigned ResNo) const {
    return getValueType(ResNo).getSimpleVT();
  }

  /// Returns MVT::getSizeInBits(getValueType(ResNo)).
  ///
  /// If the value type is a scalable vector type, the scalable property will
  /// be set and the runtime size will be a positive integer multiple of the
  /// base size.
  TypeSize getValueSizeInBits(unsigned ResNo) const {
    return getValueType(ResNo).getSizeInBits();
  }

  using value_iterator = const EVT *;

  value_iterator value_begin() const { return ValueList; }
  value_iterator value_end() const { return ValueList+NumValues; }
  /// Range over the result value types, for range-for iteration.
  iterator_range<value_iterator> values() const {
    return llvm::make_range(value_begin(), value_end());
  }

  /// Return the opcode of this operation for printing.
  std::string getOperationName(const SelectionDAG *G = nullptr) const;
  static const char* getIndexedModeName(ISD::MemIndexedMode AM);
  void print_types(raw_ostream &OS, const SelectionDAG *G) const;
  void print_details(raw_ostream &OS, const SelectionDAG *G) const;
  void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
  void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
1000
  /// Print a SelectionDAG node and all children down to
  /// the leaves.  The given SelectionDAG allows target-specific nodes
  /// to be printed in human-readable form.  Unlike printr, this will
  /// print the whole DAG, including children that appear multiple
  /// times.
  ///
  void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;

  /// Print a SelectionDAG node and children up to
  /// depth "depth."  The given SelectionDAG allows target-specific
  /// nodes to be printed in human-readable form.  Unlike printr, this
  /// will print children that appear multiple times wherever they are
  /// used.
  ///
  void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
                       unsigned depth = 100) const;

  /// Dump this node, for debugging.
  void dump() const;

  /// Dump (recursively) this node and its use-def subgraph.
  void dumpr() const;

  /// Dump this node, for debugging.
  /// The given SelectionDAG allows target-specific nodes to be printed
  /// in human-readable form.
  void dump(const SelectionDAG *G) const;

  /// Dump (recursively) this node and its use-def subgraph.
  /// The given SelectionDAG allows target-specific nodes to be printed
  /// in human-readable form.
  void dumpr(const SelectionDAG *G) const;

  /// printrFull to dbgs().  The given SelectionDAG allows
  /// target-specific nodes to be printed in human-readable form.
  /// Unlike dumpr, this will print the whole DAG, including children
  /// that appear multiple times.
  void dumprFull(const SelectionDAG *G = nullptr) const;

  /// printrWithDepth to dbgs().  The given
  /// SelectionDAG allows target-specific nodes to be printed in
  /// human-readable form.  Unlike dumpr, this will print children
  /// that appear multiple times wherever they are used.
  ///
  void dumprWithDepth(const SelectionDAG *G = nullptr,
                      unsigned depth = 100) const;

  /// Gather unique data for the node.
  void Profile(FoldingSetNodeID &ID) const;

  /// This method should only be used by the SDUse class.
  void addUse(SDUse &U) { U.addToList(&UseList); }
1053
protected:
  /// Build a single-result SDVTList from VT, using the uniqued static
  /// value-type list.
  static SDVTList getSDVTList(EVT VT) {
    SDVTList Ret = { getValueTypeList(VT), 1 };
    return Ret;
  }

  /// Create an SDNode.
  ///
  /// SDNodes are created without any operands, and never own the operand
  /// storage. To add operands, see SelectionDAG::createOperands.
  SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
      : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
        IROrder(Order), debugLoc(std::move(dl)) {
    // Zero all subclass bitfields in one shot.
    memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
    assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
    assert(NumValues == VTs.NumVTs &&
           "NumValues wasn't wide enough for its operands!");
  }

  /// Release the operands and set this node to have zero operands.
  void DropOperands();
};
1076
/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
/// into SDNode creation functions.
/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
/// from the original Instruction, and IROrder is the ordinal position of
/// the instruction.
/// When an SDNode is created after the DAG is being built, both DebugLoc and
/// the IROrder are propagated from the original SDNode.
/// So SDLoc class provides two constructors besides the default one, one to
/// be used by the DAGBuilder, the other to be used by others.
class SDLoc {
private:
  DebugLoc DL;
  int IROrder = 0;

public:
  SDLoc() = default;
  /// Propagate location info from an existing node.
  SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
  SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
  /// DAGBuilder form: take location from the IR instruction (may be null).
  SDLoc(const Instruction *I, int Order) : IROrder(Order) {
    assert(Order >= 0 && "bad IROrder");
    if (I)
      DL = I->getDebugLoc();
  }

  unsigned getIROrder() const { return IROrder; }
  const DebugLoc &getDebugLoc() const { return DL; }
};
1104
// Define inline functions from the SDValue class.
// These are thin forwarders to the underlying SDNode, resolved against the
// result number (ResNo) where applicable.

inline SDValue::SDValue(SDNode *node, unsigned resno)
    : Node(node), ResNo(resno) {
  // Explicitly check for !ResNo to avoid use-after-free, because there are
  // callers that use SDValue(N, 0) with a deleted N to indicate successful
  // combines.
  assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&
         "Invalid result number for the given node!");
  assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.");
}

inline unsigned SDValue::getOpcode() const {
  return Node->getOpcode();
}

inline EVT SDValue::getValueType() const {
  return Node->getValueType(ResNo);
}

inline unsigned SDValue::getNumOperands() const {
  return Node->getNumOperands();
}

inline const SDValue &SDValue::getOperand(unsigned i) const {
  return Node->getOperand(i);
}

inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
  return Node->getConstantOperandVal(i);
}

inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
  return Node->getConstantOperandAPInt(i);
}

inline bool SDValue::isTargetOpcode() const {
  return Node->isTargetOpcode();
}

inline bool SDValue::isTargetMemoryOpcode() const {
  return Node->isTargetMemoryOpcode();
}

inline bool SDValue::isMachineOpcode() const {
  return Node->isMachineOpcode();
}

inline unsigned SDValue::getMachineOpcode() const {
  return Node->getMachineOpcode();
}

inline bool SDValue::isUndef() const {
  return Node->isUndef();
}

// Note: use_empty/hasOneUse here count uses of this particular result value,
// not uses of the node as a whole (contrast SDNode::use_empty/hasOneUse).
inline bool SDValue::use_empty() const {
  return !Node->hasAnyUseOfValue(ResNo);
}

inline bool SDValue::hasOneUse() const {
  return Node->hasNUsesOfValue(1, ResNo);
}

inline const DebugLoc &SDValue::getDebugLoc() const {
  return Node->getDebugLoc();
}

inline void SDValue::dump() const {
  return Node->dump();
}

inline void SDValue::dump(const SelectionDAG *G) const {
  return Node->dump(G);
}

inline void SDValue::dumpr() const {
  return Node->dumpr();
}

inline void SDValue::dumpr(const SelectionDAG *G) const {
  return Node->dumpr(G);
}
1188
// Define inline functions from the SDUse class.
// Each setter keeps the invariant that this SDUse is linked into the use
// list of exactly the node currently held in Val.

inline void SDUse::set(const SDValue &V) {
  if (Val.getNode()) removeFromList();
  Val = V;
  if (V.getNode()) V.getNode()->addUse(*this);
}

// setInitial skips the removeFromList step: it may only be called when this
// SDUse is not yet on any use list.
inline void SDUse::setInitial(const SDValue &V) {
  Val = V;
  V.getNode()->addUse(*this);
}

// Replace only the node, keeping the result number.
inline void SDUse::setNode(SDNode *N) {
  if (Val.getNode()) removeFromList();
  Val.setNode(N);
  if (N) N->addUse(*this);
}
1207
/// This class is used to form a handle around another node that
/// is persistent and is updated across invocations of replaceAllUsesWith on its
/// operand. This node should be directly created by end-users and not added to
/// the AllNodes list.
class HandleSDNode : public SDNode {
  // The single operand; owned inline because this node is stack-allocated
  // and SelectionDAG does not manage its operand storage.
  SDUse Op;

public:
  explicit HandleSDNode(SDValue X)
    : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
    // HandleSDNodes are never inserted into the DAG, so they won't be
    // auto-numbered. Use ID 65535 as a sentinel.
    PersistentId = 0xffff;

    // Manually set up the operand list. This node type is special in that it's
    // always stack allocated and SelectionDAG does not manage its operands.
    // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
    // be so special.
    Op.setUser(this);
    Op.setInitial(X);
    NumOperands = 1;
    OperandList = &Op;
  }
  ~HandleSDNode();

  /// Return the (possibly updated) value this handle tracks.
  const SDValue &getValue() const { return Op; }
};
1235
/// SDNode for ISD::ADDRSPACECAST: carries the source and destination
/// address-space numbers alongside the cast operation.
class AddrSpaceCastSDNode : public SDNode {
private:
  unsigned SrcAddrSpace;
  unsigned DestAddrSpace;

public:
  AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
                      unsigned SrcAS, unsigned DestAS);

  unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
  unsigned getDestAddressSpace() const { return DestAddrSpace; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::ADDRSPACECAST;
  }
};
1252
1253/// This is an abstract virtual class for memory operations.
1254class MemSDNode : public SDNode {
1255private:
1256 // VT of in-memory value.
1257 EVT MemoryVT;
1258
1259protected:
1260 /// Memory reference information.
1261 MachineMemOperand *MMO;
1262
1263public:
1264 MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
1265 EVT memvt, MachineMemOperand *MMO);
1266
1267 bool readMem() const { return MMO->isLoad(); }
1268 bool writeMem() const { return MMO->isStore(); }
1269
1270 /// Returns alignment and volatility of the memory access
1271 Align getOriginalAlign() const { return MMO->getBaseAlign(); }
1272 Align getAlign() const { return MMO->getAlign(); }
1273 // FIXME: Remove once transition to getAlign is over.
1274 unsigned getAlignment() const { return MMO->getAlign().value(); }
1275
1276 /// Return the SubclassData value, without HasDebugValue. This contains an
1277 /// encoding of the volatile flag, as well as bits used by subclasses. This
1278 /// function should only be used to compute a FoldingSetNodeID value.
1279 /// The HasDebugValue bit is masked out because CSE map needs to match
1280 /// nodes with debug info with nodes without debug info. Same is about
1281 /// isDivergent bit.
1282 unsigned getRawSubclassData() const {
1283 uint16_t Data;
1284 union {
1285 char RawSDNodeBits[sizeof(uint16_t)];
1286 SDNodeBitfields SDNodeBits;
1287 };
1288 memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
1289 SDNodeBits.HasDebugValue = 0;
1290 SDNodeBits.IsDivergent = false;
1291 memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
1292 return Data;
1293 }
1294
1295 bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
1296 bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
1297 bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
1298 bool isInvariant() const { return MemSDNodeBits.IsInvariant; }
1299
1300 // Returns the offset from the location of the access.
1301 int64_t getSrcValueOffset() const { return MMO->getOffset(); }
1302
1303 /// Returns the AA info that describes the dereference.
1304 AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }
1305
1306 /// Returns the Ranges that describes the dereference.
1307 const MDNode *getRanges() const { return MMO->getRanges(); }
1308
1309 /// Returns the synchronization scope ID for this memory operation.
1310 SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }
1311
1312 /// Return the atomic ordering requirements for this memory operation. For
1313 /// cmpxchg atomic operations, return the atomic ordering requirements when
1314 /// store occurs.
1315 AtomicOrdering getSuccessOrdering() const {
1316 return MMO->getSuccessOrdering();
1317 }
1318
1319 /// Return a single atomic ordering that is at least as strong as both the
1320 /// success and failure orderings for an atomic operation. (For operations
1321 /// other than cmpxchg, this is equivalent to getSuccessOrdering().)
1322 AtomicOrdering getMergedOrdering() const { return MMO->getMergedOrdering(); }
1323
1324 /// Return true if the memory operation ordering is Unordered or higher.
1325 bool isAtomic() const { return MMO->isAtomic(); }
1326
1327 /// Returns true if the memory operation doesn't imply any ordering
1328 /// constraints on surrounding memory operations beyond the normal memory
1329 /// aliasing rules.
1330 bool isUnordered() const { return MMO->isUnordered(); }
1331
1332 /// Returns true if the memory operation is neither atomic or volatile.
1333 bool isSimple() const { return !isAtomic() && !isVolatile(); }
14
Assuming the condition is true
15
Returning the value 1, which participates in a condition later
1334
1335 /// Return the type of the in-memory value.
1336 EVT getMemoryVT() const { return MemoryVT; }
1337
1338 /// Return a MachineMemOperand object describing the memory
1339 /// reference performed by operation.
1340 MachineMemOperand *getMemOperand() const { return MMO; }
1341
1342 const MachinePointerInfo &getPointerInfo() const {
1343 return MMO->getPointerInfo();
1344 }
1345
1346 /// Return the address space for the associated pointer
1347 unsigned getAddressSpace() const {
1348 return getPointerInfo().getAddrSpace();
1349 }
1350
1351 /// Update this MemSDNode's MachineMemOperand information
1352 /// to reflect the alignment of NewMMO, if it has a greater alignment.
1353 /// This must only be used when the new alignment applies to all users of
1354 /// this MachineMemOperand.
1355 void refineAlignment(const MachineMemOperand *NewMMO) {
1356 MMO->refineAlignment(NewMMO);
1357 }
1358
1359 const SDValue &getChain() const { return getOperand(0); }
1360
1361 const SDValue &getBasePtr() const {
1362 switch (getOpcode()) {
1363 case ISD::STORE:
1364 case ISD::VP_STORE:
1365 case ISD::MSTORE:
1366 return getOperand(2);
1367 case ISD::MGATHER:
1368 case ISD::MSCATTER:
1369 case ISD::VP_GATHER:
1370 case ISD::VP_SCATTER:
1371 return getOperand(3);
1372 default:
1373 return getOperand(1);
1374 }
1375 }
1376
1377 // Methods to support isa and dyn_cast
1378 static bool classof(const SDNode *N) {
1379 // For some targets, we lower some target intrinsics to a MemIntrinsicNode
1380 // with either an intrinsic or a target opcode.
1381 switch (N->getOpcode()) {
1382 case ISD::LOAD:
1383 case ISD::STORE:
1384 case ISD::PREFETCH:
1385 case ISD::ATOMIC_CMP_SWAP:
1386 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
1387 case ISD::ATOMIC_SWAP:
1388 case ISD::ATOMIC_LOAD_ADD:
1389 case ISD::ATOMIC_LOAD_SUB:
1390 case ISD::ATOMIC_LOAD_AND:
1391 case ISD::ATOMIC_LOAD_CLR:
1392 case ISD::ATOMIC_LOAD_OR:
1393 case ISD::ATOMIC_LOAD_XOR:
1394 case ISD::ATOMIC_LOAD_NAND:
1395 case ISD::ATOMIC_LOAD_MIN:
1396 case ISD::ATOMIC_LOAD_MAX:
1397 case ISD::ATOMIC_LOAD_UMIN:
1398 case ISD::ATOMIC_LOAD_UMAX:
1399 case ISD::ATOMIC_LOAD_FADD:
1400 case ISD::ATOMIC_LOAD_FSUB:
1401 case ISD::ATOMIC_LOAD:
1402 case ISD::ATOMIC_STORE:
1403 case ISD::MLOAD:
1404 case ISD::MSTORE:
1405 case ISD::MGATHER:
1406 case ISD::MSCATTER:
1407 case ISD::VP_LOAD:
1408 case ISD::VP_STORE:
1409 case ISD::VP_GATHER:
1410 case ISD::VP_SCATTER:
1411 return true;
1412 default:
1413 return N->isMemIntrinsic() || N->isTargetMemoryOpcode();
1414 }
1415 }
1416};
1417
/// This is an SDNode representing atomic operations.
class AtomicSDNode : public MemSDNode {
public:
  AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
               EVT MemVT, MachineMemOperand *MMO)
      : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
    assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) ||
            MMO->isAtomic()) && "then why are we using an AtomicSDNode?");
  }

  const SDValue &getBasePtr() const { return getOperand(1); }
  const SDValue &getVal() const { return getOperand(2); }

  /// Returns true if this SDNode represents cmpxchg atomic operation, false
  /// otherwise.
  bool isCompareAndSwap() const {
    unsigned Op = getOpcode();
    return Op == ISD::ATOMIC_CMP_SWAP ||
           Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
  }

  /// For cmpxchg atomic operations, return the atomic ordering requirements
  /// when store does not occur.
  AtomicOrdering getFailureOrdering() const {
    assert(isCompareAndSwap() && "Must be cmpxchg operation");
    return MMO->getFailureOrdering();
  }

  // Methods to support isa and dyn_cast
  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::ATOMIC_CMP_SWAP     ||
           N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
           N->getOpcode() == ISD::ATOMIC_SWAP         ||
           N->getOpcode() == ISD::ATOMIC_LOAD_ADD     ||
           N->getOpcode() == ISD::ATOMIC_LOAD_SUB     ||
           N->getOpcode() == ISD::ATOMIC_LOAD_AND     ||
           N->getOpcode() == ISD::ATOMIC_LOAD_CLR     ||
           N->getOpcode() == ISD::ATOMIC_LOAD_OR      ||
           N->getOpcode() == ISD::ATOMIC_LOAD_XOR     ||
           N->getOpcode() == ISD::ATOMIC_LOAD_NAND    ||
           N->getOpcode() == ISD::ATOMIC_LOAD_MIN     ||
           N->getOpcode() == ISD::ATOMIC_LOAD_MAX     ||
           N->getOpcode() == ISD::ATOMIC_LOAD_UMIN    ||
           N->getOpcode() == ISD::ATOMIC_LOAD_UMAX    ||
           N->getOpcode() == ISD::ATOMIC_LOAD_FADD    ||
           N->getOpcode() == ISD::ATOMIC_LOAD_FSUB    ||
           N->getOpcode() == ISD::ATOMIC_LOAD         ||
           N->getOpcode() == ISD::ATOMIC_STORE;
  }
};
1468
/// This SDNode is used for target intrinsics that touch
/// memory and need an associated MachineMemOperand. Its opcode may be
/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
class MemIntrinsicSDNode : public MemSDNode {
public:
  MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
                     SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
      : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
    // Tag the node so classof can recognize it even under a target opcode.
    SDNodeBits.IsMemIntrinsic = true;
  }

  // Methods to support isa and dyn_cast
  static bool classof(const SDNode *N) {
    // We lower some target intrinsics to their target opcode
    // early a node with a target opcode can be of this class
    return N->isMemIntrinsic() ||
           N->getOpcode() == ISD::PREFETCH ||
           N->isTargetMemoryOpcode();
  }
};
1490
/// This SDNode is used to implement the code generator
/// support for the llvm IR shufflevector instruction.  It combines elements
/// from two input vectors into a new input vector, with the selection and
/// ordering of elements determined by an array of integers, referred to as
/// the shuffle mask.  For input vectors of width N, mask indices of 0..N-1
/// refer to elements from the LHS input, and indices from N to 2N-1 the RHS.
/// An index of -1 is treated as undef, such that the code generator may put
/// any value in the corresponding element of the result.
class ShuffleVectorSDNode : public SDNode {
  // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
  // is freed when the SelectionDAG object is destroyed.
  const int *Mask;

protected:
  friend class SelectionDAG;

  ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
      : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}

public:
  /// Return the mask as an ArrayRef sized by the result vector width.
  ArrayRef<int> getMask() const {
    EVT VT = getValueType(0);
    return makeArrayRef(Mask, VT.getVectorNumElements());
  }

  int getMaskElt(unsigned Idx) const {
    assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!");
    return Mask[Idx];
  }

  bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }

  /// Return the index of the element being splatted (first non-negative
  /// mask entry); only valid when isSplat() is true.
  int getSplatIndex() const {
    assert(isSplat() && "Cannot get splat index for non-splat!");
    EVT VT = getValueType(0);
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      if (Mask[i] >= 0)
        return Mask[i];

    // We can choose any index value here and be correct because all elements
    // are undefined. Return 0 for better potential for callers to simplify.
    return 0;
  }

  static bool isSplatMask(const int *Mask, EVT VT);

  /// Change values in a shuffle permute mask assuming
  /// the two vector operands have swapped position.
  /// LHS indices (< NumElems) become RHS indices and vice versa;
  /// undef entries (< 0) are left alone.
  static void commuteMask(MutableArrayRef<int> Mask) {
    unsigned NumElems = Mask.size();
    for (unsigned i = 0; i != NumElems; ++i) {
      int idx = Mask[i];
      if (idx < 0)
        continue;
      else if (idx < (int)NumElems)
        Mask[i] = idx + NumElems;
      else
        Mask[i] = idx - NumElems;
    }
  }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::VECTOR_SHUFFLE;
  }
};
1556
/// SDNode wrapping a ConstantInt; opcode is ISD::Constant or
/// ISD::TargetConstant.
class ConstantSDNode : public SDNode {
  friend class SelectionDAG;

  const ConstantInt *Value;

  ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT)
      : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(),
               getSDVTList(VT)),
        Value(val) {
    ConstantSDNodeBits.IsOpaque = isOpaque;
  }

public:
  const ConstantInt *getConstantIntValue() const { return Value; }
  const APInt &getAPIntValue() const { return Value->getValue(); }
  uint64_t getZExtValue() const { return Value->getZExtValue(); }
  int64_t getSExtValue() const { return Value->getSExtValue(); }
  uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) {
    return Value->getLimitedValue(Limit);
  }
  MaybeAlign getMaybeAlignValue() const { return Value->getMaybeAlignValue(); }
  Align getAlignValue() const { return Value->getAlignValue(); }

  bool isOne() const { return Value->isOne(); }
  bool isNullValue() const { return Value->isZero(); }
  bool isAllOnesValue() const { return Value->isMinusOne(); }
  bool isMaxSignedValue() const { return Value->isMaxValue(true); }
  bool isMinSignedValue() const { return Value->isMinValue(true); }

  /// Opaque constants are not folded/canonicalized by the constant folder.
  bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::Constant ||
           N->getOpcode() == ISD::TargetConstant;
  }
};
1593
1594uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
1595 return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
1596}
1597
1598const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const {
1599 return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
1600}
1601
1602class ConstantFPSDNode : public SDNode {
1603 friend class SelectionDAG;
1604
1605 const ConstantFP *Value;
1606
1607 ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT)
1608 : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0,
1609 DebugLoc(), getSDVTList(VT)),
1610 Value(val) {}
1611
1612public:
1613 const APFloat& getValueAPF() const { return Value->getValueAPF(); }
1614 const ConstantFP *getConstantFPValue() const { return Value; }
1615
1616 /// Return true if the value is positive or negative zero.
1617 bool isZero() const { return Value->isZero(); }
1618
1619 /// Return true if the value is a NaN.
1620 bool isNaN() const { return Value->isNaN(); }
1621
1622 /// Return true if the value is an infinity
1623 bool isInfinity() const { return Value->isInfinity(); }
1624
1625 /// Return true if the value is negative.
1626 bool isNegative() const { return Value->isNegative(); }
1627
1628 /// We don't rely on operator== working on double values, as
1629 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
1630 /// As such, this method can be used to do an exact bit-for-bit comparison of
1631 /// two floating point values.
1632
1633 /// We leave the version with the double argument here because it's just so
1634 /// convenient to write "2.0" and the like. Without this function we'd
1635 /// have to duplicate its logic everywhere it's called.
1636 bool isExactlyValue(double V) const {
1637 return Value->getValueAPF().isExactlyValue(V);
1638 }
1639 bool isExactlyValue(const APFloat& V) const;
1640
1641 static bool isValueValidForType(EVT VT, const APFloat& Val);
1642
1643 static bool classof(const SDNode *N) {
1644 return N->getOpcode() == ISD::ConstantFP ||
1645 N->getOpcode() == ISD::TargetConstantFP;
1646 }
1647};
1648
// Convenience predicates and "peek-through" helpers for inspecting SDValues
// that wrap constants or splatted constant vectors. Definitions live in
// SelectionDAG.cpp.

/// Returns true if \p V is a constant integer zero.
bool isNullConstant(SDValue V);

/// Returns true if \p V is an FP constant with a value of positive zero.
bool isNullFPConstant(SDValue V);

/// Returns true if \p V is an integer constant with all bits set.
bool isAllOnesConstant(SDValue V);

/// Returns true if \p V is a constant integer one.
bool isOneConstant(SDValue V);

/// Return the non-bitcasted source operand of \p V if it exists.
/// If \p V is not a bitcasted value, it is returned as-is.
SDValue peekThroughBitcasts(SDValue V);

/// Return the non-bitcasted and one-use source operand of \p V if it exists.
/// If \p V is not a bitcasted one-use value, it is returned as-is.
SDValue peekThroughOneUseBitcasts(SDValue V);

/// Return the non-extracted vector source operand of \p V if it exists.
/// If \p V is not an extracted subvector, it is returned as-is.
SDValue peekThroughExtractSubvectors(SDValue V);

/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
/// constant is canonicalized to be operand 1.
bool isBitwiseNot(SDValue V, bool AllowUndefs = false);

/// Returns the SDNode if it is a constant splat BuildVector or constant int.
ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false,
                                    bool AllowTruncation = false);

/// Returns the SDNode if it is a demanded constant splat BuildVector or
/// constant int.
ConstantSDNode *isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
                                    bool AllowUndefs = false,
                                    bool AllowTruncation = false);

/// Returns the SDNode if it is a constant splat BuildVector or constant float.
ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false);

/// Returns the SDNode if it is a demanded constant splat BuildVector or
/// constant float.
ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, const APInt &DemandedElts,
                                        bool AllowUndefs = false);

/// Return true if the value is a constant 0 integer or a splatted vector of
/// a constant 0 integer (with no undefs by default).
/// Build vector implicit truncation is not an issue for null values.
bool isNullOrNullSplat(SDValue V, bool AllowUndefs = false);

/// Return true if the value is a constant 1 integer or a splatted vector of a
/// constant 1 integer (with no undefs).
/// Does not permit build vector implicit truncation.
bool isOneOrOneSplat(SDValue V, bool AllowUndefs = false);

/// Return true if the value is a constant -1 integer or a splatted vector of a
/// constant -1 integer (with no undefs).
/// Does not permit build vector implicit truncation.
bool isAllOnesOrAllOnesSplat(SDValue V, bool AllowUndefs = false);
1709
1710/// Return true if \p V is either a integer or FP constant.
1711inline bool isIntOrFPConstant(SDValue V) {
1712 return isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V);
1713}
1714
1715class GlobalAddressSDNode : public SDNode {
1716 friend class SelectionDAG;
1717
1718 const GlobalValue *TheGlobal;
1719 int64_t Offset;
1720 unsigned TargetFlags;
1721
1722 GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
1723 const GlobalValue *GA, EVT VT, int64_t o,
1724 unsigned TF);
1725
1726public:
1727 const GlobalValue *getGlobal() const { return TheGlobal; }
1728 int64_t getOffset() const { return Offset; }
1729 unsigned getTargetFlags() const { return TargetFlags; }
1730 // Return the address space this GlobalAddress belongs to.
1731 unsigned getAddressSpace() const;
1732
1733 static bool classof(const SDNode *N) {
1734 return N->getOpcode() == ISD::GlobalAddress ||
1735 N->getOpcode() == ISD::TargetGlobalAddress ||
1736 N->getOpcode() == ISD::GlobalTLSAddress ||
1737 N->getOpcode() == ISD::TargetGlobalTLSAddress;
1738 }
1739};
1740
1741class FrameIndexSDNode : public SDNode {
1742 friend class SelectionDAG;
1743
1744 int FI;
1745
1746 FrameIndexSDNode(int fi, EVT VT, bool isTarg)
1747 : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
1748 0, DebugLoc(), getSDVTList(VT)), FI(fi) {
1749 }
1750
1751public:
1752 int getIndex() const { return FI; }
1753
1754 static bool classof(const SDNode *N) {
1755 return N->getOpcode() == ISD::FrameIndex ||
1756 N->getOpcode() == ISD::TargetFrameIndex;
1757 }
1758};
1759
1760/// This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate
1761/// the offet and size that are started/ended in the underlying FrameIndex.
1762class LifetimeSDNode : public SDNode {
1763 friend class SelectionDAG;
1764 int64_t Size;
1765 int64_t Offset; // -1 if offset is unknown.
1766
1767 LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
1768 SDVTList VTs, int64_t Size, int64_t Offset)
1769 : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {}
1770public:
1771 int64_t getFrameIndex() const {
1772 return cast<FrameIndexSDNode>(getOperand(1))->getIndex();
1773 }
1774
1775 bool hasOffset() const { return Offset >= 0; }
1776 int64_t getOffset() const {
1777 assert(hasOffset() && "offset is unknown")(static_cast<void> (0));
1778 return Offset;
1779 }
1780 int64_t getSize() const {
1781 assert(hasOffset() && "offset is unknown")(static_cast<void> (0));
1782 return Size;
1783 }
1784
1785 // Methods to support isa and dyn_cast
1786 static bool classof(const SDNode *N) {
1787 return N->getOpcode() == ISD::LIFETIME_START ||
1788 N->getOpcode() == ISD::LIFETIME_END;
1789 }
1790};
1791
1792/// This SDNode is used for PSEUDO_PROBE values, which are the function guid and
1793/// the index of the basic block being probed. A pseudo probe serves as a place
1794/// holder and will be removed at the end of compilation. It does not have any
1795/// operand because we do not want the instruction selection to deal with any.
1796class PseudoProbeSDNode : public SDNode {
1797 friend class SelectionDAG;
1798 uint64_t Guid;
1799 uint64_t Index;
1800 uint32_t Attributes;
1801
1802 PseudoProbeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &Dl,
1803 SDVTList VTs, uint64_t Guid, uint64_t Index, uint32_t Attr)
1804 : SDNode(Opcode, Order, Dl, VTs), Guid(Guid), Index(Index),
1805 Attributes(Attr) {}
1806
1807public:
1808 uint64_t getGuid() const { return Guid; }
1809 uint64_t getIndex() const { return Index; }
1810 uint32_t getAttributes() const { return Attributes; }
1811
1812 // Methods to support isa and dyn_cast
1813 static bool classof(const SDNode *N) {
1814 return N->getOpcode() == ISD::PSEUDO_PROBE;
1815 }
1816};
1817
1818class JumpTableSDNode : public SDNode {
1819 friend class SelectionDAG;
1820
1821 int JTI;
1822 unsigned TargetFlags;
1823
1824 JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned TF)
1825 : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
1826 0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
1827 }
1828
1829public:
1830 int getIndex() const { return JTI; }
1831 unsigned getTargetFlags() const { return TargetFlags; }
1832
1833 static bool classof(const SDNode *N) {
1834 return N->getOpcode() == ISD::JumpTable ||
1835 N->getOpcode() == ISD::TargetJumpTable;
1836 }
1837};
1838
1839class ConstantPoolSDNode : public SDNode {
1840 friend class SelectionDAG;
1841
1842 union {
1843 const Constant *ConstVal;
1844 MachineConstantPoolValue *MachineCPVal;
1845 } Val;
1846 int Offset; // It's a MachineConstantPoolValue if top bit is set.
1847 Align Alignment; // Minimum alignment requirement of CP.
1848 unsigned TargetFlags;
1849
1850 ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
1851 Align Alignment, unsigned TF)
1852 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1853 DebugLoc(), getSDVTList(VT)),
1854 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1855 assert(Offset >= 0 && "Offset is too large")(static_cast<void> (0));
1856 Val.ConstVal = c;
1857 }
1858
1859 ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v, EVT VT, int o,
1860 Align Alignment, unsigned TF)
1861 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1862 DebugLoc(), getSDVTList(VT)),
1863 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1864 assert(Offset >= 0 && "Offset is too large")(static_cast<void> (0));
1865 Val.MachineCPVal = v;
1866 Offset |= 1 << (sizeof(unsigned)*CHAR_BIT8-1);
1867 }
1868
1869public:
1870 bool isMachineConstantPoolEntry() const {
1871 return Offset < 0;
1872 }
1873
1874 const Constant *getConstVal() const {
1875 assert(!isMachineConstantPoolEntry() && "Wrong constantpool type")(static_cast<void> (0));
1876 return Val.ConstVal;
1877 }
1878
1879 MachineConstantPoolValue *getMachineCPVal() const {
1880 assert(isMachineConstantPoolEntry() && "Wrong constantpool type")(static_cast<void> (0));
1881 return Val.MachineCPVal;
1882 }
1883
1884 int getOffset() const {
1885 return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT8-1));
1886 }
1887
1888 // Return the alignment of this constant pool object, which is either 0 (for
1889 // default alignment) or the desired value.
1890 Align getAlign() const { return Alignment; }
1891 unsigned getTargetFlags() const { return TargetFlags; }
1892
1893 Type *getType() const;
1894
1895 static bool classof(const SDNode *N) {
1896 return N->getOpcode() == ISD::ConstantPool ||
1897 N->getOpcode() == ISD::TargetConstantPool;
1898 }
1899};
1900
1901/// Completely target-dependent object reference.
1902class TargetIndexSDNode : public SDNode {
1903 friend class SelectionDAG;
1904
1905 unsigned TargetFlags;
1906 int Index;
1907 int64_t Offset;
1908
1909public:
1910 TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned TF)
1911 : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
1912 TargetFlags(TF), Index(Idx), Offset(Ofs) {}
1913
1914 unsigned getTargetFlags() const { return TargetFlags; }
1915 int getIndex() const { return Index; }
1916 int64_t getOffset() const { return Offset; }
1917
1918 static bool classof(const SDNode *N) {
1919 return N->getOpcode() == ISD::TargetIndex;
1920 }
1921};
1922
1923class BasicBlockSDNode : public SDNode {
1924 friend class SelectionDAG;
1925
1926 MachineBasicBlock *MBB;
1927
1928 /// Debug info is meaningful and potentially useful here, but we create
1929 /// blocks out of order when they're jumped to, which makes it a bit
1930 /// harder. Let's see if we need it first.
1931 explicit BasicBlockSDNode(MachineBasicBlock *mbb)
1932 : SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb)
1933 {}
1934
1935public:
1936 MachineBasicBlock *getBasicBlock() const { return MBB; }
1937
1938 static bool classof(const SDNode *N) {
1939 return N->getOpcode() == ISD::BasicBlock;
1940 }
1941};
1942
/// A "pseudo-class" with methods for operating on BUILD_VECTORs.
class BuildVectorSDNode : public SDNode {
public:
  // These are constructed as SDNodes and then cast to BuildVectorSDNodes.
  explicit BuildVectorSDNode() = delete;

  /// Check if this is a constant splat, and if so, find the
  /// smallest element size that splats the vector. If MinSplatBits is
  /// nonzero, the element size must be at least that large. Note that the
  /// splat element may be the entire vector (i.e., a one element vector).
  /// Returns the splat element value in SplatValue. Any undefined bits in
  /// that value are zero, and the corresponding bits in the SplatUndef mask
  /// are set. The SplatBitSize value is set to the splat element size in
  /// bits. HasAnyUndefs is set to true if any bits in the vector are
  /// undefined. isBigEndian describes the endianness of the target.
  bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
                       unsigned &SplatBitSize, bool &HasAnyUndefs,
                       unsigned MinSplatBits = 0,
                       bool isBigEndian = false) const;

  /// Returns the demanded splatted value or a null value if this is not a
  /// splat.
  ///
  /// The DemandedElts mask indicates the elements that must be in the splat.
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  SDValue getSplatValue(const APInt &DemandedElts,
                        BitVector *UndefElements = nullptr) const;

  /// Returns the splatted value or a null value if this is not a splat.
  ///
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  SDValue getSplatValue(BitVector *UndefElements = nullptr) const;

  /// Find the shortest repeating sequence of values in the build vector.
  ///
  /// e.g. { u, X, u, X, u, u, X, u } -> { X }
  ///      { X, Y, u, Y, u, u, X, u } -> { X, Y }
  ///
  /// Currently this must be a power-of-2 build vector.
  /// The DemandedElts mask indicates the elements that must be present,
  /// undemanded elements in Sequence may be null (SDValue()). If passed a
  /// non-null UndefElements bitvector, it will resize it to match the original
  /// vector width and set the bits where elements are undef. If result is
  /// false, Sequence will be empty.
  bool getRepeatedSequence(const APInt &DemandedElts,
                           SmallVectorImpl<SDValue> &Sequence,
                           BitVector *UndefElements = nullptr) const;

  /// Find the shortest repeating sequence of values in the build vector.
  ///
  /// e.g. { u, X, u, X, u, u, X, u } -> { X }
  ///      { X, Y, u, Y, u, u, X, u } -> { X, Y }
  ///
  /// Currently this must be a power-of-2 build vector.
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the original vector width and set the bits where elements are undef.
  /// If result is false, Sequence will be empty.
  bool getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
                           BitVector *UndefElements = nullptr) const;

  /// Returns the demanded splatted constant or null if this is not a constant
  /// splat.
  ///
  /// The DemandedElts mask indicates the elements that must be in the splat.
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  ConstantSDNode *
  getConstantSplatNode(const APInt &DemandedElts,
                       BitVector *UndefElements = nullptr) const;

  /// Returns the splatted constant or null if this is not a constant
  /// splat.
  ///
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  ConstantSDNode *
  getConstantSplatNode(BitVector *UndefElements = nullptr) const;

  /// Returns the demanded splatted constant FP or null if this is not a
  /// constant FP splat.
  ///
  /// The DemandedElts mask indicates the elements that must be in the splat.
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  ConstantFPSDNode *
  getConstantFPSplatNode(const APInt &DemandedElts,
                         BitVector *UndefElements = nullptr) const;

  /// Returns the splatted constant FP or null if this is not a constant
  /// FP splat.
  ///
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  ConstantFPSDNode *
  getConstantFPSplatNode(BitVector *UndefElements = nullptr) const;

  /// If this is a constant FP splat and the splatted constant FP is an
  /// exact power or 2, return the log base 2 integer value.  Otherwise,
  /// return -1.
  ///
  /// The BitWidth specifies the necessary bit precision.
  int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                          uint32_t BitWidth) const;

  /// Returns true if this build vector consists only of constants
  /// (defined out of line in SelectionDAG.cpp).
  bool isConstant() const;

  // Methods to support isa and dyn_cast
  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::BUILD_VECTOR;
  }
};
2055
2056/// An SDNode that holds an arbitrary LLVM IR Value. This is
2057/// used when the SelectionDAG needs to make a simple reference to something
2058/// in the LLVM IR representation.
2059///
2060class SrcValueSDNode : public SDNode {
2061 friend class SelectionDAG;
2062
2063 const Value *V;
2064
2065 /// Create a SrcValue for a general value.
2066 explicit SrcValueSDNode(const Value *v)
2067 : SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}
2068
2069public:
2070 /// Return the contained Value.
2071 const Value *getValue() const { return V; }
2072
2073 static bool classof(const SDNode *N) {
2074 return N->getOpcode() == ISD::SRCVALUE;
2075 }
2076};
2077
2078class MDNodeSDNode : public SDNode {
2079 friend class SelectionDAG;
2080
2081 const MDNode *MD;
2082
2083 explicit MDNodeSDNode(const MDNode *md)
2084 : SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md)
2085 {}
2086
2087public:
2088 const MDNode *getMD() const { return MD; }
2089
2090 static bool classof(const SDNode *N) {
2091 return N->getOpcode() == ISD::MDNODE_SDNODE;
2092 }
2093};
2094
2095class RegisterSDNode : public SDNode {
2096 friend class SelectionDAG;
2097
2098 Register Reg;
2099
2100 RegisterSDNode(Register reg, EVT VT)
2101 : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {}
2102
2103public:
2104 Register getReg() const { return Reg; }
2105
2106 static bool classof(const SDNode *N) {
2107 return N->getOpcode() == ISD::Register;
2108 }
2109};
2110
2111class RegisterMaskSDNode : public SDNode {
2112 friend class SelectionDAG;
2113
2114 // The memory for RegMask is not owned by the node.
2115 const uint32_t *RegMask;
2116
2117 RegisterMaskSDNode(const uint32_t *mask)
2118 : SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)),
2119 RegMask(mask) {}
2120
2121public:
2122 const uint32_t *getRegMask() const { return RegMask; }
2123
2124 static bool classof(const SDNode *N) {
2125 return N->getOpcode() == ISD::RegisterMask;
2126 }
2127};
2128
2129class BlockAddressSDNode : public SDNode {
2130 friend class SelectionDAG;
2131
2132 const BlockAddress *BA;
2133 int64_t Offset;
2134 unsigned TargetFlags;
2135
2136 BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
2137 int64_t o, unsigned Flags)
2138 : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
2139 BA(ba), Offset(o), TargetFlags(Flags) {}
2140
2141public:
2142 const BlockAddress *getBlockAddress() const { return BA; }
2143 int64_t getOffset() const { return Offset; }
2144 unsigned getTargetFlags() const { return TargetFlags; }
2145
2146 static bool classof(const SDNode *N) {
2147 return N->getOpcode() == ISD::BlockAddress ||
2148 N->getOpcode() == ISD::TargetBlockAddress;
2149 }
2150};
2151
2152class LabelSDNode : public SDNode {
2153 friend class SelectionDAG;
2154
2155 MCSymbol *Label;
2156
2157 LabelSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, MCSymbol *L)
2158 : SDNode(Opcode, Order, dl, getSDVTList(MVT::Other)), Label(L) {
2159 assert(LabelSDNode::classof(this) && "not a label opcode")(static_cast<void> (0));
2160 }
2161
2162public:
2163 MCSymbol *getLabel() const { return Label; }
2164
2165 static bool classof(const SDNode *N) {
2166 return N->getOpcode() == ISD::EH_LABEL ||
2167 N->getOpcode() == ISD::ANNOTATION_LABEL;
2168 }
2169};
2170
2171class ExternalSymbolSDNode : public SDNode {
2172 friend class SelectionDAG;
2173
2174 const char *Symbol;
2175 unsigned TargetFlags;
2176
2177 ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned TF, EVT VT)
2178 : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol, 0,
2179 DebugLoc(), getSDVTList(VT)),
2180 Symbol(Sym), TargetFlags(TF) {}
2181
2182public:
2183 const char *getSymbol() const { return Symbol; }
2184 unsigned getTargetFlags() const { return TargetFlags; }
2185
2186 static bool classof(const SDNode *N) {
2187 return N->getOpcode() == ISD::ExternalSymbol ||
2188 N->getOpcode() == ISD::TargetExternalSymbol;
2189 }
2190};
2191
2192class MCSymbolSDNode : public SDNode {
2193 friend class SelectionDAG;
2194
2195 MCSymbol *Symbol;
2196
2197 MCSymbolSDNode(MCSymbol *Symbol, EVT VT)
2198 : SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {}
2199
2200public:
2201 MCSymbol *getMCSymbol() const { return Symbol; }
2202
2203 static bool classof(const SDNode *N) {
2204 return N->getOpcode() == ISD::MCSymbol;
2205 }
2206};
2207
2208class CondCodeSDNode : public SDNode {
2209 friend class SelectionDAG;
2210
2211 ISD::CondCode Condition;
2212
2213 explicit CondCodeSDNode(ISD::CondCode Cond)
2214 : SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2215 Condition(Cond) {}
2216
2217public:
2218 ISD::CondCode get() const { return Condition; }
2219
2220 static bool classof(const SDNode *N) {
2221 return N->getOpcode() == ISD::CONDCODE;
2222 }
2223};
2224
2225/// This class is used to represent EVT's, which are used
2226/// to parameterize some operations.
2227class VTSDNode : public SDNode {
2228 friend class SelectionDAG;
2229
2230 EVT ValueType;
2231
2232 explicit VTSDNode(EVT VT)
2233 : SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2234 ValueType(VT) {}
2235
2236public:
2237 EVT getVT() const { return ValueType; }
2238
2239 static bool classof(const SDNode *N) {
2240 return N->getOpcode() == ISD::VALUETYPE;
2241 }
2242};
2243
2244/// Base class for LoadSDNode and StoreSDNode
2245class LSBaseSDNode : public MemSDNode {
2246public:
2247 LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
2248 SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
2249 MachineMemOperand *MMO)
2250 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2251 LSBaseSDNodeBits.AddressingMode = AM;
2252 assert(getAddressingMode() == AM && "Value truncated")(static_cast<void> (0));
2253 }
2254
2255 const SDValue &getOffset() const {
2256 return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
2257 }
2258
2259 /// Return the addressing mode for this load or store:
2260 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2261 ISD::MemIndexedMode getAddressingMode() const {
2262 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2263 }
2264
2265 /// Return true if this is a pre/post inc/dec load/store.
2266 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2267
2268 /// Return true if this is NOT a pre/post inc/dec load/store.
2269 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2270
2271 static bool classof(const SDNode *N) {
2272 return N->getOpcode() == ISD::LOAD ||
2273 N->getOpcode() == ISD::STORE;
2274 }
2275};
2276
2277/// This class is used to represent ISD::LOAD nodes.
2278class LoadSDNode : public LSBaseSDNode {
2279 friend class SelectionDAG;
2280
2281 LoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2282 ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
2283 MachineMemOperand *MMO)
2284 : LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) {
2285 LoadSDNodeBits.ExtTy = ETy;
2286 assert(readMem() && "Load MachineMemOperand is not a load!")(static_cast<void> (0));
2287 assert(!writeMem() && "Load MachineMemOperand is a store!")(static_cast<void> (0));
2288 }
2289
2290public:
2291 /// Return whether this is a plain node,
2292 /// or one of the varieties of value-extending loads.
2293 ISD::LoadExtType getExtensionType() const {
2294 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2295 }
2296
2297 const SDValue &getBasePtr() const { return getOperand(1); }
2298 const SDValue &getOffset() const { return getOperand(2); }
2299
2300 static bool classof(const SDNode *N) {
2301 return N->getOpcode() == ISD::LOAD;
2302 }
2303};
2304
2305/// This class is used to represent ISD::STORE nodes.
2306class StoreSDNode : public LSBaseSDNode {
2307 friend class SelectionDAG;
2308
2309 StoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2310 ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
2311 MachineMemOperand *MMO)
2312 : LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) {
2313 StoreSDNodeBits.IsTruncating = isTrunc;
2314 assert(!readMem() && "Store MachineMemOperand is a load!")(static_cast<void> (0));
2315 assert(writeMem() && "Store MachineMemOperand is not a store!")(static_cast<void> (0));
2316 }
2317
2318public:
2319 /// Return true if the op does a truncation before store.
2320 /// For integers this is the same as doing a TRUNCATE and storing the result.
2321 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2322 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2323 void setTruncatingStore(bool Truncating) {
2324 StoreSDNodeBits.IsTruncating = Truncating;
2325 }
2326
2327 const SDValue &getValue() const { return getOperand(1); }
2328 const SDValue &getBasePtr() const { return getOperand(2); }
2329 const SDValue &getOffset() const { return getOperand(3); }
2330
2331 static bool classof(const SDNode *N) {
2332 return N->getOpcode() == ISD::STORE;
2333 }
2334};
2335
2336/// This base class is used to represent VP_LOAD and VP_STORE nodes
2337class VPLoadStoreSDNode : public MemSDNode {
2338public:
2339 friend class SelectionDAG;
2340
2341 VPLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
2342 SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
2343 MachineMemOperand *MMO)
2344 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2345 LSBaseSDNodeBits.AddressingMode = AM;
2346 assert(getAddressingMode() == AM && "Value truncated")(static_cast<void> (0));
2347 }
2348
2349 // VPLoadSDNode (Chain, Ptr, Offset, Mask, EVL)
2350 // VPStoreSDNode (Chain, Data, Ptr, Offset, Mask, EVL)
2351 // Mask is a vector of i1 elements;
2352 // the type of EVL is TLI.getVPExplicitVectorLengthTy().
2353 const SDValue &getOffset() const {
2354 return getOperand(getOpcode() == ISD::MLOAD ? 2 : 3);
2355 }
2356 const SDValue &getBasePtr() const {
2357 return getOperand(getOpcode() == ISD::VP_LOAD ? 1 : 2);
2358 }
2359 const SDValue &getMask() const {
2360 return getOperand(getOpcode() == ISD::VP_LOAD ? 3 : 4);
2361 }
2362 const SDValue &getVectorLength() const {
2363 return getOperand(getOpcode() == ISD::VP_LOAD ? 4 : 5);
2364 }
2365
2366 /// Return the addressing mode for this load or store:
2367 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2368 ISD::MemIndexedMode getAddressingMode() const {
2369 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2370 }
2371
2372 /// Return true if this is a pre/post inc/dec load/store.
2373 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2374
2375 /// Return true if this is NOT a pre/post inc/dec load/store.
2376 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2377
2378 static bool classof(const SDNode *N) {
2379 return N->getOpcode() == ISD::VP_LOAD || N->getOpcode() == ISD::VP_STORE;
2380 }
2381};
2382
2383/// This class is used to represent a VP_LOAD node
2384class VPLoadSDNode : public VPLoadStoreSDNode {
2385public:
2386 friend class SelectionDAG;
2387
2388 VPLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2389 ISD::MemIndexedMode AM, ISD::LoadExtType ETy, bool isExpanding,
2390 EVT MemVT, MachineMemOperand *MMO)
2391 : VPLoadStoreSDNode(ISD::VP_LOAD, Order, dl, VTs, AM, MemVT, MMO) {
2392 LoadSDNodeBits.ExtTy = ETy;
2393 LoadSDNodeBits.IsExpanding = isExpanding;
2394 }
2395
2396 ISD::LoadExtType getExtensionType() const {
2397 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2398 }
2399
2400 const SDValue &getBasePtr() const { return getOperand(1); }
2401 const SDValue &getOffset() const { return getOperand(2); }
2402 const SDValue &getMask() const { return getOperand(3); }
2403 const SDValue &getVectorLength() const { return getOperand(4); }
2404
2405 static bool classof(const SDNode *N) {
2406 return N->getOpcode() == ISD::VP_LOAD;
2407 }
2408 bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
2409};
2410
2411/// This class is used to represent a VP_STORE node
2412class VPStoreSDNode : public VPLoadStoreSDNode {
2413public:
2414 friend class SelectionDAG;
2415
2416 VPStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2417 ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing,
2418 EVT MemVT, MachineMemOperand *MMO)
2419 : VPLoadStoreSDNode(ISD::VP_STORE, Order, dl, VTs, AM, MemVT, MMO) {
2420 StoreSDNodeBits.IsTruncating = isTrunc;
2421 StoreSDNodeBits.IsCompressing = isCompressing;
2422 }
2423
2424 /// Return true if this is a truncating store.
2425 /// For integers this is the same as doing a TRUNCATE and storing the result.
2426 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2427 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2428
2429 /// Returns true if the op does a compression to the vector before storing.
2430 /// The node contiguously stores the active elements (integers or floats)
2431 /// in src (those with their respective bit set in writemask k) to unaligned
2432 /// memory at base_addr.
2433 bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
2434
2435 const SDValue &getValue() const { return getOperand(1); }
2436 const SDValue &getBasePtr() const { return getOperand(2); }
2437 const SDValue &getOffset() const { return getOperand(3); }
2438 const SDValue &getMask() const { return getOperand(4); }
2439 const SDValue &getVectorLength() const { return getOperand(5); }
2440
2441 static bool classof(const SDNode *N) {
2442 return N->getOpcode() == ISD::VP_STORE;
2443 }
2444};
2445
2446/// This base class is used to represent MLOAD and MSTORE nodes
2447class MaskedLoadStoreSDNode : public MemSDNode {
2448public:
2449 friend class SelectionDAG;
2450
2451 MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order,
2452 const DebugLoc &dl, SDVTList VTs,
2453 ISD::MemIndexedMode AM, EVT MemVT,
2454 MachineMemOperand *MMO)
2455 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2456 LSBaseSDNodeBits.AddressingMode = AM;
2457 assert(getAddressingMode() == AM && "Value truncated")(static_cast<void> (0));
2458 }
2459
2460 // MaskedLoadSDNode (Chain, ptr, offset, mask, passthru)
2461 // MaskedStoreSDNode (Chain, data, ptr, offset, mask)
2462 // Mask is a vector of i1 elements
2463 const SDValue &getOffset() const {
2464 return getOperand(getOpcode() == ISD::MLOAD ? 2 : 3);
2465 }
2466 const SDValue &getMask() const {
2467 return getOperand(getOpcode() == ISD::MLOAD ? 3 : 4);
2468 }
2469
2470 /// Return the addressing mode for this load or store:
2471 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2472 ISD::MemIndexedMode getAddressingMode() const {
2473 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2474 }
2475
2476 /// Return true if this is a pre/post inc/dec load/store.
2477 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2478
2479 /// Return true if this is NOT a pre/post inc/dec load/store.
2480 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2481
2482 static bool classof(const SDNode *N) {
2483 return N->getOpcode() == ISD::MLOAD ||
2484 N->getOpcode() == ISD::MSTORE;
2485 }
2486};
2487
2488/// This class is used to represent an MLOAD node
2489class MaskedLoadSDNode : public MaskedLoadStoreSDNode {
2490public:
2491 friend class SelectionDAG;
2492
2493 MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2494 ISD::MemIndexedMode AM, ISD::LoadExtType ETy,
2495 bool IsExpanding, EVT MemVT, MachineMemOperand *MMO)
2496 : MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, VTs, AM, MemVT, MMO) {
2497 LoadSDNodeBits.ExtTy = ETy;
2498 LoadSDNodeBits.IsExpanding = IsExpanding;
2499 }
2500
2501 ISD::LoadExtType getExtensionType() const {
2502 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2503 }
2504
2505 const SDValue &getBasePtr() const { return getOperand(1); }
2506 const SDValue &getOffset() const { return getOperand(2); }
2507 const SDValue &getMask() const { return getOperand(3); }
2508 const SDValue &getPassThru() const { return getOperand(4); }
2509
2510 static bool classof(const SDNode *N) {
2511 return N->getOpcode() == ISD::MLOAD;
2512 }
2513
2514 bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
2515};
2516
2517/// This class is used to represent an MSTORE node
2518class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
2519public:
2520 friend class SelectionDAG;
2521
2522 MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2523 ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing,
2524 EVT MemVT, MachineMemOperand *MMO)
2525 : MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, AM, MemVT, MMO) {
2526 StoreSDNodeBits.IsTruncating = isTrunc;
2527 StoreSDNodeBits.IsCompressing = isCompressing;
2528 }
2529
2530 /// Return true if the op does a truncation before store.
2531 /// For integers this is the same as doing a TRUNCATE and storing the result.
2532 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2533 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2534
2535 /// Returns true if the op does a compression to the vector before storing.
2536 /// The node contiguously stores the active elements (integers or floats)
2537 /// in src (those with their respective bit set in writemask k) to unaligned
2538 /// memory at base_addr.
2539 bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
2540
2541 const SDValue &getValue() const { return getOperand(1); }
2542 const SDValue &getBasePtr() const { return getOperand(2); }
2543 const SDValue &getOffset() const { return getOperand(3); }
2544 const SDValue &getMask() const { return getOperand(4); }
2545
2546 static bool classof(const SDNode *N) {
2547 return N->getOpcode() == ISD::MSTORE;
2548 }
2549};
2550
2551/// This is a base class used to represent
2552/// VP_GATHER and VP_SCATTER nodes
2553///
2554class VPGatherScatterSDNode : public MemSDNode {
2555public:
2556 friend class SelectionDAG;
2557
2558 VPGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
2559 const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2560 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2561 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2562 LSBaseSDNodeBits.AddressingMode = IndexType;
2563 assert(getIndexType() == IndexType && "Value truncated")(static_cast<void> (0));
2564 }
2565
2566 /// How is Index applied to BasePtr when computing addresses.
2567 ISD::MemIndexType getIndexType() const {
2568 return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
2569 }
2570 bool isIndexScaled() const {
2571 return (getIndexType() == ISD::SIGNED_SCALED) ||
2572 (getIndexType() == ISD::UNSIGNED_SCALED);
2573 }
2574 bool isIndexSigned() const {
2575 return (getIndexType() == ISD::SIGNED_SCALED) ||
2576 (getIndexType() == ISD::SIGNED_UNSCALED);
2577 }
2578
2579 // In the both nodes address is Op1, mask is Op2:
2580 // VPGatherSDNode (Chain, base, index, scale, mask, vlen)
2581 // VPScatterSDNode (Chain, value, base, index, scale, mask, vlen)
2582 // Mask is a vector of i1 elements
2583 const SDValue &getBasePtr() const {
2584 return getOperand((getOpcode() == ISD::VP_GATHER) ? 1 : 2);
2585 }
2586 const SDValue &getIndex() const {
2587 return getOperand((getOpcode() == ISD::VP_GATHER) ? 2 : 3);
2588 }
2589 const SDValue &getScale() const {
2590 return getOperand((getOpcode() == ISD::VP_GATHER) ? 3 : 4);
2591 }
2592 const SDValue &getMask() const {
2593 return getOperand((getOpcode() == ISD::VP_GATHER) ? 4 : 5);
2594 }
2595 const SDValue &getVectorLength() const {
2596 return getOperand((getOpcode() == ISD::VP_GATHER) ? 5 : 6);
2597 }
2598
2599 static bool classof(const SDNode *N) {
2600 return N->getOpcode() == ISD::VP_GATHER ||
2601 N->getOpcode() == ISD::VP_SCATTER;
2602 }
2603};
2604
2605/// This class is used to represent an VP_GATHER node
2606///
2607class VPGatherSDNode : public VPGatherScatterSDNode {
2608public:
2609 friend class SelectionDAG;
2610
2611 VPGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2612 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2613 : VPGatherScatterSDNode(ISD::VP_GATHER, Order, dl, VTs, MemVT, MMO,
2614 IndexType) {}
2615
2616 static bool classof(const SDNode *N) {
2617 return N->getOpcode() == ISD::VP_GATHER;
2618 }
2619};
2620
2621/// This class is used to represent an VP_SCATTER node
2622///
2623class VPScatterSDNode : public VPGatherScatterSDNode {
2624public:
2625 friend class SelectionDAG;
2626
2627 VPScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2628 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2629 : VPGatherScatterSDNode(ISD::VP_SCATTER, Order, dl, VTs, MemVT, MMO,
2630 IndexType) {}
2631
2632 const SDValue &getValue() const { return getOperand(1); }
2633
2634 static bool classof(const SDNode *N) {
2635 return N->getOpcode() == ISD::VP_SCATTER;
2636 }
2637};
2638
2639/// This is a base class used to represent
2640/// MGATHER and MSCATTER nodes
2641///
2642class MaskedGatherScatterSDNode : public MemSDNode {
2643public:
2644 friend class SelectionDAG;
2645
2646 MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
2647 const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2648 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2649 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2650 LSBaseSDNodeBits.AddressingMode = IndexType;
2651 assert(getIndexType() == IndexType && "Value truncated")(static_cast<void> (0));
2652 }
2653
2654 /// How is Index applied to BasePtr when computing addresses.
2655 ISD::MemIndexType getIndexType() const {
2656 return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
2657 }
2658 void setIndexType(ISD::MemIndexType IndexType) {
2659 LSBaseSDNodeBits.AddressingMode = IndexType;
2660 }
2661 bool isIndexScaled() const {
2662 return (getIndexType() == ISD::SIGNED_SCALED) ||
2663 (getIndexType() == ISD::UNSIGNED_SCALED);
2664 }
2665 bool isIndexSigned() const {
2666 return (getIndexType() == ISD::SIGNED_SCALED) ||
2667 (getIndexType() == ISD::SIGNED_UNSCALED);
2668 }
2669
2670 // In the both nodes address is Op1, mask is Op2:
2671 // MaskedGatherSDNode (Chain, passthru, mask, base, index, scale)
2672 // MaskedScatterSDNode (Chain, value, mask, base, index, scale)
2673 // Mask is a vector of i1 elements
2674 const SDValue &getBasePtr() const { return getOperand(3); }
2675 const SDValue &getIndex() const { return getOperand(4); }
2676 const SDValue &getMask() const { return getOperand(2); }
2677 const SDValue &getScale() const { return getOperand(5); }
2678
2679 static bool classof(const SDNode *N) {
2680 return N->getOpcode() == ISD::MGATHER ||
2681 N->getOpcode() == ISD::MSCATTER;
2682 }
2683};
2684
2685/// This class is used to represent an MGATHER node
2686///
2687class MaskedGatherSDNode : public MaskedGatherScatterSDNode {
2688public:
2689 friend class SelectionDAG;
2690
2691 MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2692 EVT MemVT, MachineMemOperand *MMO,
2693 ISD::MemIndexType IndexType, ISD::LoadExtType ETy)
2694 : MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO,
2695 IndexType) {
2696 LoadSDNodeBits.ExtTy = ETy;
2697 }
2698
2699 const SDValue &getPassThru() const { return getOperand(1); }
2700
2701 ISD::LoadExtType getExtensionType() const {
2702 return ISD::LoadExtType(LoadSDNodeBits.ExtTy);
2703 }
2704
2705 static bool classof(const SDNode *N) {
2706 return N->getOpcode() == ISD::MGATHER;
2707 }
2708};
2709
2710/// This class is used to represent an MSCATTER node
2711///
2712class MaskedScatterSDNode : public MaskedGatherScatterSDNode {
2713public:
2714 friend class SelectionDAG;
2715
2716 MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2717 EVT MemVT, MachineMemOperand *MMO,
2718 ISD::MemIndexType IndexType, bool IsTrunc)
2719 : MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO,
2720 IndexType) {
2721 StoreSDNodeBits.IsTruncating = IsTrunc;
2722 }
2723
2724 /// Return true if the op does a truncation before store.
2725 /// For integers this is the same as doing a TRUNCATE and storing the result.
2726 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2727 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2728
2729 const SDValue &getValue() const { return getOperand(1); }
2730
2731 static bool classof(const SDNode *N) {
2732 return N->getOpcode() == ISD::MSCATTER;
2733 }
2734};
2735
2736/// An SDNode that represents everything that will be needed
2737/// to construct a MachineInstr. These nodes are created during the
2738/// instruction selection proper phase.
2739///
2740/// Note that the only supported way to set the `memoperands` is by calling the
2741/// `SelectionDAG::setNodeMemRefs` function as the memory management happens
2742/// inside the DAG rather than in the node.
2743class MachineSDNode : public SDNode {
2744private:
2745 friend class SelectionDAG;
2746
2747 MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs)
2748 : SDNode(Opc, Order, DL, VTs) {}
2749
2750 // We use a pointer union between a single `MachineMemOperand` pointer and
2751 // a pointer to an array of `MachineMemOperand` pointers. This is null when
2752 // the number of these is zero, the single pointer variant used when the
2753 // number is one, and the array is used for larger numbers.
2754 //
2755 // The array is allocated via the `SelectionDAG`'s allocator and so will
2756 // always live until the DAG is cleaned up and doesn't require ownership here.
2757 //
2758 // We can't use something simpler like `TinyPtrVector` here because `SDNode`
2759 // subclasses aren't managed in a conforming C++ manner. See the comments on
2760 // `SelectionDAG::MorphNodeTo` which details what all goes on, but the
2761 // constraint here is that these don't manage memory with their constructor or
2762 // destructor and can be initialized to a good state even if they start off
2763 // uninitialized.
2764 PointerUnion<MachineMemOperand *, MachineMemOperand **> MemRefs = {};
2765
2766 // Note that this could be folded into the above `MemRefs` member if doing so
2767 // is advantageous at some point. We don't need to store this in most cases.
2768 // However, at the moment this doesn't appear to make the allocation any
2769 // smaller and makes the code somewhat simpler to read.
2770 int NumMemRefs = 0;
2771
2772public:
2773 using mmo_iterator = ArrayRef<MachineMemOperand *>::const_iterator;
2774
2775 ArrayRef<MachineMemOperand *> memoperands() const {
2776 // Special case the common cases.
2777 if (NumMemRefs == 0)
2778 return {};
2779 if (NumMemRefs == 1)
2780 return makeArrayRef(MemRefs.getAddrOfPtr1(), 1);
2781
2782 // Otherwise we have an actual array.
2783 return makeArrayRef(MemRefs.get<MachineMemOperand **>(), NumMemRefs);
2784 }
2785 mmo_iterator memoperands_begin() const { return memoperands().begin(); }
2786 mmo_iterator memoperands_end() const { return memoperands().end(); }
2787 bool memoperands_empty() const { return memoperands().empty(); }
2788
2789 /// Clear out the memory reference descriptor list.
2790 void clearMemRefs() {
2791 MemRefs = nullptr;
2792 NumMemRefs = 0;
2793 }
2794
2795 static bool classof(const SDNode *N) {
2796 return N->isMachineOpcode();
2797 }
2798};
2799
2800/// An SDNode that records if a register contains a value that is guaranteed to
2801/// be aligned accordingly.
2802class AssertAlignSDNode : public SDNode {
2803 Align Alignment;
2804
2805public:
2806 AssertAlignSDNode(unsigned Order, const DebugLoc &DL, EVT VT, Align A)
2807 : SDNode(ISD::AssertAlign, Order, DL, getSDVTList(VT)), Alignment(A) {}
2808
2809 Align getAlign() const { return Alignment; }
2810
2811 static bool classof(const SDNode *N) {
2812 return N->getOpcode() == ISD::AssertAlign;
2813 }
2814};
2815
2816class SDNodeIterator {
2817 const SDNode *Node;
2818 unsigned Operand;
2819
2820 SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
2821
2822public:
2823 using iterator_category = std::forward_iterator_tag;
2824 using value_type = SDNode;
2825 using difference_type = std::ptrdiff_t;
2826 using pointer = value_type *;
2827 using reference = value_type &;
2828
2829 bool operator==(const SDNodeIterator& x) const {
2830 return Operand == x.Operand;
2831 }
2832 bool operator!=(const SDNodeIterator& x) const { return !operator==(x); }
2833
2834 pointer operator*() const {
2835 return Node->getOperand(Operand).getNode();
2836 }
2837 pointer operator->() const { return operator*(); }
2838
2839 SDNodeIterator& operator++() { // Preincrement
2840 ++Operand;
2841 return *this;
2842 }
2843 SDNodeIterator operator++(int) { // Postincrement
2844 SDNodeIterator tmp = *this; ++*this; return tmp;
2845 }
2846 size_t operator-(SDNodeIterator Other) const {
2847 assert(Node == Other.Node &&(static_cast<void> (0))
2848 "Cannot compare iterators of two different nodes!")(static_cast<void> (0));
2849 return Operand - Other.Operand;
2850 }
2851
2852 static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); }
2853 static SDNodeIterator end (const SDNode *N) {
2854 return SDNodeIterator(N, N->getNumOperands());
2855 }
2856
2857 unsigned getOperand() const { return Operand; }
2858 const SDNode *getNode() const { return Node; }
2859};
2860
2861template <> struct GraphTraits<SDNode*> {
2862 using NodeRef = SDNode *;
2863 using ChildIteratorType = SDNodeIterator;
2864
2865 static NodeRef getEntryNode(SDNode *N) { return N; }
2866
2867 static ChildIteratorType child_begin(NodeRef N) {
2868 return SDNodeIterator::begin(N);
2869 }
2870
2871 static ChildIteratorType child_end(NodeRef N) {
2872 return SDNodeIterator::end(N);
2873 }
2874};
2875
2876/// A representation of the largest SDNode, for use in sizeof().
2877///
2878/// This needs to be a union because the largest node differs on 32 bit systems
2879/// with 4 and 8 byte pointer alignment, respectively.
2880using LargestSDNode = AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
2881 BlockAddressSDNode,
2882 GlobalAddressSDNode,
2883 PseudoProbeSDNode>;
2884
2885/// The SDNode class with the greatest alignment requirement.
2886using MostAlignedSDNode = GlobalAddressSDNode;
2887
2888namespace ISD {
2889
2890 /// Returns true if the specified node is a non-extending and unindexed load.
2891 inline bool isNormalLoad(const SDNode *N) {
2892 const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N);
2893 return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD &&
2894 Ld->getAddressingMode() == ISD::UNINDEXED;
2895 }
2896
2897 /// Returns true if the specified node is a non-extending load.
2898 inline bool isNON_EXTLoad(const SDNode *N) {
2899 return isa<LoadSDNode>(N) &&
2900 cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
2901 }
2902
2903 /// Returns true if the specified node is a EXTLOAD.
2904 inline bool isEXTLoad(const SDNode *N) {
2905 return isa<LoadSDNode>(N) &&
2906 cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
2907 }
2908
2909 /// Returns true if the specified node is a SEXTLOAD.
2910 inline bool isSEXTLoad(const SDNode *N) {
2911 return isa<LoadSDNode>(N) &&
2912 cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
2913 }
2914
2915 /// Returns true if the specified node is a ZEXTLOAD.
2916 inline bool isZEXTLoad(const SDNode *N) {
2917 return isa<LoadSDNode>(N) &&
2918 cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
2919 }
2920
2921 /// Returns true if the specified node is an unindexed load.
2922 inline bool isUNINDEXEDLoad(const SDNode *N) {
2923 return isa<LoadSDNode>(N) &&
2924 cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
2925 }
2926
2927 /// Returns true if the specified node is a non-truncating
2928 /// and unindexed store.
2929 inline bool isNormalStore(const SDNode *N) {
2930 const StoreSDNode *St = dyn_cast<StoreSDNode>(N);
2931 return St && !St->isTruncatingStore() &&
2932 St->getAddressingMode() == ISD::UNINDEXED;
2933 }
2934
2935 /// Returns true if the specified node is an unindexed store.
2936 inline bool isUNINDEXEDStore(const SDNode *N) {
2937 return isa<StoreSDNode>(N) &&
2938 cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
2939 }
2940
2941 /// Attempt to match a unary predicate against a scalar/splat constant or
2942 /// every element of a constant BUILD_VECTOR.
2943 /// If AllowUndef is true, then UNDEF elements will pass nullptr to Match.
2944 bool matchUnaryPredicate(SDValue Op,
2945 std::function<bool(ConstantSDNode *)> Match,
2946 bool AllowUndefs = false);
2947
2948 /// Attempt to match a binary predicate against a pair of scalar/splat
2949 /// constants or every element of a pair of constant BUILD_VECTORs.
2950 /// If AllowUndef is true, then UNDEF elements will pass nullptr to Match.
2951 /// If AllowTypeMismatch is true then RetType + ArgTypes don't need to match.
2952 bool matchBinaryPredicate(
2953 SDValue LHS, SDValue RHS,
2954 std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
2955 bool AllowUndefs = false, bool AllowTypeMismatch = false);
2956
2957 /// Returns true if the specified value is the overflow result from one
2958 /// of the overflow intrinsic nodes.
2959 inline bool isOverflowIntrOpRes(SDValue Op) {
2960 unsigned Opc = Op.getOpcode();
2961 return (Op.getResNo() == 1 &&
2962 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
2963 Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO));
2964 }
2965
2966} // end namespace ISD
2967
2968} // end namespace llvm
2969
2970#endif // LLVM_CODEGEN_SELECTIONDAGNODES_H

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/IR/GlobalVariable.h

1//===-- llvm/GlobalVariable.h - GlobalVariable class ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the declaration of the GlobalVariable class, which
10// represents a single global variable (or constant) in the VM.
11//
12// Global variables are constant pointers that refer to hunks of space that are
13// allocated by either the VM, or by the linker in a static compiler. A global
14// variable may have an initial value, which is copied into the executables .data
15// area. Global Constants are required to have initializers.
16//
17//===----------------------------------------------------------------------===//
18
19#ifndef LLVM_IR_GLOBALVARIABLE_H
20#define LLVM_IR_GLOBALVARIABLE_H
21
22#include "llvm/ADT/Twine.h"
23#include "llvm/ADT/ilist_node.h"
24#include "llvm/IR/Attributes.h"
25#include "llvm/IR/GlobalObject.h"
26#include "llvm/IR/OperandTraits.h"
27#include "llvm/IR/Value.h"
28#include <cassert>
29#include <cstddef>
30
31namespace llvm {
32
33class Constant;
34class Module;
35
36template <typename ValueSubClass> class SymbolTableListTraits;
37class DIGlobalVariable;
38class DIGlobalVariableExpression;
39
40class GlobalVariable : public GlobalObject, public ilist_node<GlobalVariable> {
41 friend class SymbolTableListTraits<GlobalVariable>;
42
43 AttributeSet Attrs;
44 bool isConstantGlobal : 1; // Is this a global constant?
45 bool isExternallyInitializedConstant : 1; // Is this a global whose value
46 // can change from its initial
47 // value before global
48 // initializers are run?
49
50public:
51 /// GlobalVariable ctor - If a parent module is specified, the global is
52 /// automatically inserted into the end of the specified modules global list.
53 GlobalVariable(Type *Ty, bool isConstant, LinkageTypes Linkage,
54 Constant *Initializer = nullptr, const Twine &Name = "",
55 ThreadLocalMode = NotThreadLocal, unsigned AddressSpace = 0,
56 bool isExternallyInitialized = false);
57 /// GlobalVariable ctor - This creates a global and inserts it before the
58 /// specified other global.
59 GlobalVariable(Module &M, Type *Ty, bool isConstant, LinkageTypes Linkage,
60 Constant *Initializer, const Twine &Name = "",
61 GlobalVariable *InsertBefore = nullptr,
62 ThreadLocalMode = NotThreadLocal,
63 Optional<unsigned> AddressSpace = None,
64 bool isExternallyInitialized = false);
65 GlobalVariable(const GlobalVariable &) = delete;
66 GlobalVariable &operator=(const GlobalVariable &) = delete;
67
68 ~GlobalVariable() {
69 dropAllReferences();
70 }
71
72 // allocate space for exactly one operand
73 void *operator new(size_t s) {
74 return User::operator new(s, 1);
75 }
76
77 // delete space for exactly one operand as created in the corresponding new operator
78 void operator delete(void *ptr){
79 assert(ptr != nullptr && "must not be nullptr")(static_cast<void> (0));
80 User *Obj = static_cast<User *>(ptr);
81 // Number of operands can be set to 0 after construction and initialization. Make sure
82 // that number of operands is reset to 1, as this is needed in User::operator delete
83 Obj->setGlobalVariableNumOperands(1);
84 User::operator delete(Obj);
85 }
86
87 /// Provide fast operand accessors
88 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
89
90 /// Definitions have initializers, declarations don't.
91 ///
92 inline bool hasInitializer() const { return !isDeclaration(); }
30
Assuming the condition is true
31
Returning the value 1, which participates in a condition later
93
94 /// hasDefinitiveInitializer - Whether the global variable has an initializer,
95 /// and any other instances of the global (this can happen due to weak
96 /// linkage) are guaranteed to have the same initializer.
97 ///
98 /// Note that if you want to transform a global, you must use
99 /// hasUniqueInitializer() instead, because of the *_odr linkage type.
100 ///
101 /// Example:
102 ///
103 /// @a = global SomeType* null - Initializer is both definitive and unique.
104 ///
105 /// @b = global weak SomeType* null - Initializer is neither definitive nor
106 /// unique.
107 ///
108 /// @c = global weak_odr SomeType* null - Initializer is definitive, but not
109 /// unique.
110 inline bool hasDefinitiveInitializer() const {
111 return hasInitializer() &&
112 // The initializer of a global variable may change to something arbitrary
113 // at link time.
114 !isInterposable() &&
115 // The initializer of a global variable with the externally_initialized
116 // marker may change at runtime before C++ initializers are evaluated.
117 !isExternallyInitialized();
118 }
119
120 /// hasUniqueInitializer - Whether the global variable has an initializer, and
121 /// any changes made to the initializer will turn up in the final executable.
122 inline bool hasUniqueInitializer() const {
123 return
124 // We need to be sure this is the definition that will actually be used
125 isStrongDefinitionForLinker() &&
126 // It is not safe to modify initializers of global variables with the
127 // external_initializer marker since the value may be changed at runtime
128 // before C++ initializers are evaluated.
129 !isExternallyInitialized();
130 }
131
132 /// getInitializer - Return the initializer for this global variable. It is
133 /// illegal to call this method if the global is external, because we cannot
134 /// tell what the value is initialized to!
135 ///
136 inline const Constant *getInitializer() const {
137 assert(hasInitializer() && "GV doesn't have initializer!")(static_cast<void> (0));
138 return static_cast<Constant*>(Op<0>().get());
139 }
140 inline Constant *getInitializer() {
141 assert(hasInitializer() && "GV doesn't have initializer!")(static_cast<void> (0));
142 return static_cast<Constant*>(Op<0>().get());
143 }
144 /// setInitializer - Sets the initializer for this global variable, removing
145 /// any existing initializer if InitVal==NULL. If this GV has type T*, the
146 /// initializer must have type T.
147 void setInitializer(Constant *InitVal);
148
149 /// If the value is a global constant, its value is immutable throughout the
150 /// runtime execution of the program. Assigning a value into the constant
151 /// leads to undefined behavior.
152 ///
153 bool isConstant() const { return isConstantGlobal; }
154 void setConstant(bool Val) { isConstantGlobal = Val; }
155
156 bool isExternallyInitialized() const {
157 return isExternallyInitializedConstant;
158 }
159 void setExternallyInitialized(bool Val) {
160 isExternallyInitializedConstant = Val;
161 }
162
163 /// copyAttributesFrom - copy all additional attributes (those not needed to
164 /// create a GlobalVariable) from the GlobalVariable Src to this one.
165 void copyAttributesFrom(const GlobalVariable *Src);
166
167 /// removeFromParent - This method unlinks 'this' from the containing module,
168 /// but does not delete it.
169 ///
170 void removeFromParent();
171
172 /// eraseFromParent - This method unlinks 'this' from the containing module
173 /// and deletes it.
174 ///
175 void eraseFromParent();
176
177 /// Drop all references in preparation to destroy the GlobalVariable. This
178 /// drops not only the reference to the initializer but also to any metadata.
179 void dropAllReferences();
180
181 /// Attach a DIGlobalVariableExpression.
182 void addDebugInfo(DIGlobalVariableExpression *GV);
183
184 /// Fill the vector with all debug info attachements.
185 void getDebugInfo(SmallVectorImpl<DIGlobalVariableExpression *> &GVs) const;
186
187 /// Add attribute to this global.
188 void addAttribute(Attribute::AttrKind Kind) {
189 Attrs = Attrs.addAttribute(getContext(), Kind);
190 }
191
192 /// Add attribute to this global.
193 void addAttribute(StringRef Kind, StringRef Val = StringRef()) {
194 Attrs = Attrs.addAttribute(getContext(), Kind, Val);
195 }
196
197 /// Return true if the attribute exists.
198 bool hasAttribute(Attribute::AttrKind Kind) const {
199 return Attrs.hasAttribute(Kind);
200 }
201
202 /// Return true if the attribute exists.
203 bool hasAttribute(StringRef Kind) const {
204 return Attrs.hasAttribute(Kind);
205 }
206
207 /// Return true if any attributes exist.
208 bool hasAttributes() const {
209 return Attrs.hasAttributes();
210 }
211
212 /// Return the attribute object.
213 Attribute getAttribute(Attribute::AttrKind Kind) const {
214 return Attrs.getAttribute(Kind);
215 }
216
217 /// Return the attribute object.
218 Attribute getAttribute(StringRef Kind) const {
219 return Attrs.getAttribute(Kind);
220 }
221
222 /// Return the attribute set for this global
223 AttributeSet getAttributes() const {
224 return Attrs;
225 }
226
227 /// Return attribute set as list with index.
228 /// FIXME: This may not be required once ValueEnumerators
229 /// in bitcode-writer can enumerate attribute-set.
230 AttributeList getAttributesAsList(unsigned index) const {
231 if (!hasAttributes())
232 return AttributeList();
233 std::pair<unsigned, AttributeSet> AS[1] = {{index, Attrs}};
234 return AttributeList::get(getContext(), AS);
235 }
236
237 /// Set attribute list for this global
238 void setAttributes(AttributeSet A) {
239 Attrs = A;
240 }
241
242 /// Check if section name is present
243 bool hasImplicitSection() const {
244 return getAttributes().hasAttribute("bss-section") ||
245 getAttributes().hasAttribute("data-section") ||
246 getAttributes().hasAttribute("relro-section") ||
247 getAttributes().hasAttribute("rodata-section");
248 }
249
250 // Methods for support type inquiry through isa, cast, and dyn_cast:
251 static bool classof(const Value *V) {
252 return V->getValueID() == Value::GlobalVariableVal;
253 }
254};
255
256template <>
257struct OperandTraits<GlobalVariable> :
258 public OptionalOperandTraits<GlobalVariable> {
259};
260
261DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalVariable, Value)GlobalVariable::op_iterator GlobalVariable::op_begin() { return
OperandTraits<GlobalVariable>::op_begin(this); } GlobalVariable
::const_op_iterator GlobalVariable::op_begin() const { return
OperandTraits<GlobalVariable>::op_begin(const_cast<
GlobalVariable*>(this)); } GlobalVariable::op_iterator GlobalVariable
::op_end() { return OperandTraits<GlobalVariable>::op_end
(this); } GlobalVariable::const_op_iterator GlobalVariable::op_end
() const { return OperandTraits<GlobalVariable>::op_end
(const_cast<GlobalVariable*>(this)); } Value *GlobalVariable
::getOperand(unsigned i_nocapture) const { (static_cast<void
> (0)); return cast_or_null<Value>( OperandTraits<
GlobalVariable>::op_begin(const_cast<GlobalVariable*>
(this))[i_nocapture].get()); } void GlobalVariable::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { (static_cast<
void> (0)); OperandTraits<GlobalVariable>::op_begin(
this)[i_nocapture] = Val_nocapture; } unsigned GlobalVariable
::getNumOperands() const { return OperandTraits<GlobalVariable
>::operands(this); } template <int Idx_nocapture> Use
&GlobalVariable::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
GlobalVariable::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
262
263} // end namespace llvm
264
265#endif // LLVM_IR_GLOBALVARIABLE_H

/usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/bits/stl_tree.h

1// RB tree implementation -*- C++ -*-
2
3// Copyright (C) 2001-2020 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/*
26 *
27 * Copyright (c) 1996,1997
28 * Silicon Graphics Computer Systems, Inc.
29 *
30 * Permission to use, copy, modify, distribute and sell this software
31 * and its documentation for any purpose is hereby granted without fee,
32 * provided that the above copyright notice appear in all copies and
33 * that both that copyright notice and this permission notice appear
34 * in supporting documentation. Silicon Graphics makes no
35 * representations about the suitability of this software for any
36 * purpose. It is provided "as is" without express or implied warranty.
37 *
38 *
39 * Copyright (c) 1994
40 * Hewlett-Packard Company
41 *
42 * Permission to use, copy, modify, distribute and sell this software
43 * and its documentation for any purpose is hereby granted without fee,
44 * provided that the above copyright notice appear in all copies and
45 * that both that copyright notice and this permission notice appear
46 * in supporting documentation. Hewlett-Packard Company makes no
47 * representations about the suitability of this software for any
48 * purpose. It is provided "as is" without express or implied warranty.
49 *
50 *
51 */
52
53/** @file bits/stl_tree.h
54 * This is an internal header file, included by other library headers.
55 * Do not attempt to use it directly. @headername{map,set}
56 */
57
58#ifndef _STL_TREE_H1
59#define _STL_TREE_H1 1
60
61#pragma GCC system_header
62
63#include <bits/stl_algobase.h>
64#include <bits/allocator.h>
65#include <bits/stl_function.h>
66#include <bits/cpp_type_traits.h>
67#include <ext/alloc_traits.h>
68#if __cplusplus201402L >= 201103L
69# include <ext/aligned_buffer.h>
70#endif
71#if __cplusplus201402L > 201402L
72# include <bits/node_handle.h>
73#endif
74
75namespace std _GLIBCXX_VISIBILITY(default)__attribute__ ((__visibility__ ("default")))
76{
77_GLIBCXX_BEGIN_NAMESPACE_VERSION
78
79#if __cplusplus201402L > 201103L
80# define __cpp_lib_generic_associative_lookup201304 201304
81#endif
82
83 // Red-black tree class, designed for use in implementing STL
84 // associative containers (set, multiset, map, and multimap). The
85 // insertion and deletion algorithms are based on those in Cormen,
86 // Leiserson, and Rivest, Introduction to Algorithms (MIT Press,
87 // 1990), except that
88 //
89 // (1) the header cell is maintained with links not only to the root
90 // but also to the leftmost node of the tree, to enable constant
91 // time begin(), and to the rightmost node of the tree, to enable
92 // linear time performance when used with the generic set algorithms
93 // (set_union, etc.)
94 //
95 // (2) when a node being deleted has two children its successor node
96 // is relinked into its place, rather than copied, so that the only
97 // iterators invalidated are those referring to the deleted node.
98
99 enum _Rb_tree_color { _S_red = false, _S_black = true };
100
101 struct _Rb_tree_node_base
102 {
103 typedef _Rb_tree_node_base* _Base_ptr;
104 typedef const _Rb_tree_node_base* _Const_Base_ptr;
105
106 _Rb_tree_color _M_color;
107 _Base_ptr _M_parent;
108 _Base_ptr _M_left;
109 _Base_ptr _M_right;
110
111 static _Base_ptr
112 _S_minimum(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
113 {
114 while (__x->_M_left != 0) __x = __x->_M_left;
115 return __x;
116 }
117
118 static _Const_Base_ptr
119 _S_minimum(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
120 {
121 while (__x->_M_left != 0) __x = __x->_M_left;
122 return __x;
123 }
124
125 static _Base_ptr
126 _S_maximum(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
127 {
128 while (__x->_M_right != 0) __x = __x->_M_right;
129 return __x;
130 }
131
132 static _Const_Base_ptr
133 _S_maximum(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
134 {
135 while (__x->_M_right != 0) __x = __x->_M_right;
136 return __x;
137 }
138 };
139
140 // Helper type offering value initialization guarantee on the compare functor.
141 template<typename _Key_compare>
142 struct _Rb_tree_key_compare
143 {
144 _Key_compare _M_key_compare;
145
146 _Rb_tree_key_compare()
147 _GLIBCXX_NOEXCEPT_IF(noexcept(is_nothrow_default_constructible<_Key_compare>
::value)
148 is_nothrow_default_constructible<_Key_compare>::value)noexcept(is_nothrow_default_constructible<_Key_compare>
::value)
149 : _M_key_compare()
150 { }
151
152 _Rb_tree_key_compare(const _Key_compare& __comp)
153 : _M_key_compare(__comp)
154 { }
155
156#if __cplusplus201402L >= 201103L
157 // Copy constructor added for consistency with C++98 mode.
158 _Rb_tree_key_compare(const _Rb_tree_key_compare&) = default;
159
160 _Rb_tree_key_compare(_Rb_tree_key_compare&& __x)
161 noexcept(is_nothrow_copy_constructible<_Key_compare>::value)
162 : _M_key_compare(__x._M_key_compare)
163 { }
164#endif
165 };
166
167 // Helper type to manage default initialization of node count and header.
168 struct _Rb_tree_header
169 {
170 _Rb_tree_node_base _M_header;
171 size_t _M_node_count; // Keeps track of size of tree.
172
173 _Rb_tree_header() _GLIBCXX_NOEXCEPTnoexcept
174 {
175 _M_header._M_color = _S_red;
176 _M_reset();
177 }
178
179#if __cplusplus201402L >= 201103L
180 _Rb_tree_header(_Rb_tree_header&& __x) noexcept
181 {
182 if (__x._M_header._M_parent != nullptr)
183 _M_move_data(__x);
184 else
185 {
186 _M_header._M_color = _S_red;
187 _M_reset();
188 }
189 }
190#endif
191
192 void
193 _M_move_data(_Rb_tree_header& __from)
194 {
195 _M_header._M_color = __from._M_header._M_color;
196 _M_header._M_parent = __from._M_header._M_parent;
197 _M_header._M_left = __from._M_header._M_left;
198 _M_header._M_right = __from._M_header._M_right;
199 _M_header._M_parent->_M_parent = &_M_header;
200 _M_node_count = __from._M_node_count;
201
202 __from._M_reset();
203 }
204
205 void
206 _M_reset()
207 {
208 _M_header._M_parent = 0;
209 _M_header._M_left = &_M_header;
210 _M_header._M_right = &_M_header;
211 _M_node_count = 0;
212 }
213 };
214
215 template<typename _Val>
216 struct _Rb_tree_node : public _Rb_tree_node_base
217 {
218 typedef _Rb_tree_node<_Val>* _Link_type;
219
220#if __cplusplus201402L < 201103L
221 _Val _M_value_field;
222
223 _Val*
224 _M_valptr()
225 { return std::__addressof(_M_value_field); }
226
227 const _Val*
228 _M_valptr() const
229 { return std::__addressof(_M_value_field); }
230#else
231 __gnu_cxx::__aligned_membuf<_Val> _M_storage;
232
233 _Val*
234 _M_valptr()
235 { return _M_storage._M_ptr(); }
236
237 const _Val*
238 _M_valptr() const
239 { return _M_storage._M_ptr(); }
240#endif
241 };
242
243 _GLIBCXX_PURE__attribute__ ((__pure__)) _Rb_tree_node_base*
244 _Rb_tree_increment(_Rb_tree_node_base* __x) throw ();
245
246 _GLIBCXX_PURE__attribute__ ((__pure__)) const _Rb_tree_node_base*
247 _Rb_tree_increment(const _Rb_tree_node_base* __x) throw ();
248
249 _GLIBCXX_PURE__attribute__ ((__pure__)) _Rb_tree_node_base*
250 _Rb_tree_decrement(_Rb_tree_node_base* __x) throw ();
251
252 _GLIBCXX_PURE__attribute__ ((__pure__)) const _Rb_tree_node_base*
253 _Rb_tree_decrement(const _Rb_tree_node_base* __x) throw ();
254
255 template<typename _Tp>
256 struct _Rb_tree_iterator
257 {
258 typedef _Tp value_type;
259 typedef _Tp& reference;
260 typedef _Tp* pointer;
261
262 typedef bidirectional_iterator_tag iterator_category;
263 typedef ptrdiff_t difference_type;
264
265 typedef _Rb_tree_iterator<_Tp> _Self;
266 typedef _Rb_tree_node_base::_Base_ptr _Base_ptr;
267 typedef _Rb_tree_node<_Tp>* _Link_type;
268
269 _Rb_tree_iterator() _GLIBCXX_NOEXCEPTnoexcept
270 : _M_node() { }
271
272 explicit
273 _Rb_tree_iterator(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
274 : _M_node(__x) { }
275
276 reference
277 operator*() const _GLIBCXX_NOEXCEPTnoexcept
278 { return *static_cast<_Link_type>(_M_node)->_M_valptr(); }
279
280 pointer
281 operator->() const _GLIBCXX_NOEXCEPTnoexcept
282 { return static_cast<_Link_type> (_M_node)->_M_valptr(); }
283
284 _Self&
285 operator++() _GLIBCXX_NOEXCEPTnoexcept
286 {
287 _M_node = _Rb_tree_increment(_M_node);
288 return *this;
289 }
290
291 _Self
292 operator++(int) _GLIBCXX_NOEXCEPTnoexcept
293 {
294 _Self __tmp = *this;
295 _M_node = _Rb_tree_increment(_M_node);
296 return __tmp;
297 }
298
299 _Self&
300 operator--() _GLIBCXX_NOEXCEPTnoexcept
301 {
302 _M_node = _Rb_tree_decrement(_M_node);
303 return *this;
304 }
305
306 _Self
307 operator--(int) _GLIBCXX_NOEXCEPTnoexcept
308 {
309 _Self __tmp = *this;
310 _M_node = _Rb_tree_decrement(_M_node);
311 return __tmp;
312 }
313
314 friend bool
315 operator==(const _Self& __x, const _Self& __y) _GLIBCXX_NOEXCEPTnoexcept
316 { return __x._M_node == __y._M_node; }
317
318#if ! __cpp_lib_three_way_comparison
319 friend bool
320 operator!=(const _Self& __x, const _Self& __y) _GLIBCXX_NOEXCEPTnoexcept
321 { return __x._M_node != __y._M_node; }
36
Assuming '__x._M_node' is equal to '__y._M_node'
37
Returning zero, which participates in a condition later
322#endif
323
324 _Base_ptr _M_node;
325 };
326
327 template<typename _Tp>
328 struct _Rb_tree_const_iterator
329 {
330 typedef _Tp value_type;
331 typedef const _Tp& reference;
332 typedef const _Tp* pointer;
333
334 typedef _Rb_tree_iterator<_Tp> iterator;
335
336 typedef bidirectional_iterator_tag iterator_category;
337 typedef ptrdiff_t difference_type;
338
339 typedef _Rb_tree_const_iterator<_Tp> _Self;
340 typedef _Rb_tree_node_base::_Const_Base_ptr _Base_ptr;
341 typedef const _Rb_tree_node<_Tp>* _Link_type;
342
343 _Rb_tree_const_iterator() _GLIBCXX_NOEXCEPTnoexcept
344 : _M_node() { }
345
346 explicit
347 _Rb_tree_const_iterator(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
348 : _M_node(__x) { }
349
350 _Rb_tree_const_iterator(const iterator& __it) _GLIBCXX_NOEXCEPTnoexcept
351 : _M_node(__it._M_node) { }
352
353 iterator
354 _M_const_cast() const _GLIBCXX_NOEXCEPTnoexcept
355 { return iterator(const_cast<typename iterator::_Base_ptr>(_M_node)); }
356
357 reference
358 operator*() const _GLIBCXX_NOEXCEPTnoexcept
359 { return *static_cast<_Link_type>(_M_node)->_M_valptr(); }
360
361 pointer
362 operator->() const _GLIBCXX_NOEXCEPTnoexcept
363 { return static_cast<_Link_type>(_M_node)->_M_valptr(); }
364
365 _Self&
366 operator++() _GLIBCXX_NOEXCEPTnoexcept
367 {
368 _M_node = _Rb_tree_increment(_M_node);
369 return *this;
370 }
371
372 _Self
373 operator++(int) _GLIBCXX_NOEXCEPTnoexcept
374 {
375 _Self __tmp = *this;
376 _M_node = _Rb_tree_increment(_M_node);
377 return __tmp;
378 }
379
380 _Self&
381 operator--() _GLIBCXX_NOEXCEPTnoexcept
382 {
383 _M_node = _Rb_tree_decrement(_M_node);
384 return *this;
385 }
386
387 _Self
388 operator--(int) _GLIBCXX_NOEXCEPTnoexcept
389 {
390 _Self __tmp = *this;
391 _M_node = _Rb_tree_decrement(_M_node);
392 return __tmp;
393 }
394
395 friend bool
396 operator==(const _Self& __x, const _Self& __y) _GLIBCXX_NOEXCEPTnoexcept
397 { return __x._M_node == __y._M_node; }
398
399#if ! __cpp_lib_three_way_comparison
400 friend bool
401 operator!=(const _Self& __x, const _Self& __y) _GLIBCXX_NOEXCEPTnoexcept
402 { return __x._M_node != __y._M_node; }
403#endif
404
405 _Base_ptr _M_node;
406 };
407
408 void
409 _Rb_tree_insert_and_rebalance(const bool __insert_left,
410 _Rb_tree_node_base* __x,
411 _Rb_tree_node_base* __p,
412 _Rb_tree_node_base& __header) throw ();
413
414 _Rb_tree_node_base*
415 _Rb_tree_rebalance_for_erase(_Rb_tree_node_base* const __z,
416 _Rb_tree_node_base& __header) throw ();
417
418#if __cplusplus201402L >= 201402L
419 template<typename _Cmp, typename _SfinaeType, typename = __void_t<>>
420 struct __has_is_transparent
421 { };
422
423 template<typename _Cmp, typename _SfinaeType>
424 struct __has_is_transparent<_Cmp, _SfinaeType,
425 __void_t<typename _Cmp::is_transparent>>
426 { typedef void type; };
427
428 template<typename _Cmp, typename _SfinaeType>
429 using __has_is_transparent_t
430 = typename __has_is_transparent<_Cmp, _SfinaeType>::type;
431#endif
432
433#if __cplusplus201402L > 201402L
434 template<typename _Tree1, typename _Cmp2>
435 struct _Rb_tree_merge_helper { };
436#endif
437
438 template<typename _Key, typename _Val, typename _KeyOfValue,
439 typename _Compare, typename _Alloc = allocator<_Val> >
440 class _Rb_tree
441 {
442 typedef typename __gnu_cxx::__alloc_traits<_Alloc>::template
443 rebind<_Rb_tree_node<_Val> >::other _Node_allocator;
444
445 typedef __gnu_cxx::__alloc_traits<_Node_allocator> _Alloc_traits;
446
447 protected:
448 typedef _Rb_tree_node_base* _Base_ptr;
449 typedef const _Rb_tree_node_base* _Const_Base_ptr;
450 typedef _Rb_tree_node<_Val>* _Link_type;
451 typedef const _Rb_tree_node<_Val>* _Const_Link_type;
452
453 private:
454 // Functor recycling a pool of nodes and using allocation once the pool
455 // is empty.
456 struct _Reuse_or_alloc_node
457 {
458 _Reuse_or_alloc_node(_Rb_tree& __t)
459 : _M_root(__t._M_root()), _M_nodes(__t._M_rightmost()), _M_t(__t)
460 {
461 if (_M_root)
462 {
463 _M_root->_M_parent = 0;
464
465 if (_M_nodes->_M_left)
466 _M_nodes = _M_nodes->_M_left;
467 }
468 else
469 _M_nodes = 0;
470 }
471
472#if __cplusplus201402L >= 201103L
473 _Reuse_or_alloc_node(const _Reuse_or_alloc_node&) = delete;
474#endif
475
476 ~_Reuse_or_alloc_node()
477 { _M_t._M_erase(static_cast<_Link_type>(_M_root)); }
478
479 template<typename _Arg>
480 _Link_type
481#if __cplusplus201402L < 201103L
482 operator()(const _Arg& __arg)
483#else
484 operator()(_Arg&& __arg)
485#endif
486 {
487 _Link_type __node = static_cast<_Link_type>(_M_extract());
488 if (__node)
489 {
490 _M_t._M_destroy_node(__node);
491 _M_t._M_construct_node(__node, _GLIBCXX_FORWARD(_Arg, __arg)std::forward<_Arg>(__arg));
492 return __node;
493 }
494
495 return _M_t._M_create_node(_GLIBCXX_FORWARD(_Arg, __arg)std::forward<_Arg>(__arg));
496 }
497
498 private:
499 _Base_ptr
500 _M_extract()
501 {
502 if (!_M_nodes)
503 return _M_nodes;
504
505 _Base_ptr __node = _M_nodes;
506 _M_nodes = _M_nodes->_M_parent;
507 if (_M_nodes)
508 {
509 if (_M_nodes->_M_right == __node)
510 {
511 _M_nodes->_M_right = 0;
512
513 if (_M_nodes->_M_left)
514 {
515 _M_nodes = _M_nodes->_M_left;
516
517 while (_M_nodes->_M_right)
518 _M_nodes = _M_nodes->_M_right;
519
520 if (_M_nodes->_M_left)
521 _M_nodes = _M_nodes->_M_left;
522 }
523 }
524 else // __node is on the left.
525 _M_nodes->_M_left = 0;
526 }
527 else
528 _M_root = 0;
529
530 return __node;
531 }
532
533 _Base_ptr _M_root;
534 _Base_ptr _M_nodes;
535 _Rb_tree& _M_t;
536 };
537
538 // Functor similar to the previous one but without any pool of nodes to
539 // recycle.
540 struct _Alloc_node
541 {
542 _Alloc_node(_Rb_tree& __t)
543 : _M_t(__t) { }
544
545 template<typename _Arg>
546 _Link_type
547#if __cplusplus201402L < 201103L
548 operator()(const _Arg& __arg) const
549#else
550 operator()(_Arg&& __arg) const
551#endif
552 { return _M_t._M_create_node(_GLIBCXX_FORWARD(_Arg, __arg)std::forward<_Arg>(__arg)); }
553
554 private:
555 _Rb_tree& _M_t;
556 };
557
558 public:
559 typedef _Key key_type;
560 typedef _Val value_type;
561 typedef value_type* pointer;
562 typedef const value_type* const_pointer;
563 typedef value_type& reference;
564 typedef const value_type& const_reference;
565 typedef size_t size_type;
566 typedef ptrdiff_t difference_type;
567 typedef _Alloc allocator_type;
568
569 _Node_allocator&
570 _M_get_Node_allocator() _GLIBCXX_NOEXCEPTnoexcept
571 { return this->_M_impl; }
572
573 const _Node_allocator&
574 _M_get_Node_allocator() const _GLIBCXX_NOEXCEPTnoexcept
575 { return this->_M_impl; }
576
577 allocator_type
578 get_allocator() const _GLIBCXX_NOEXCEPTnoexcept
579 { return allocator_type(_M_get_Node_allocator()); }
580
581 protected:
582 _Link_type
583 _M_get_node()
584 { return _Alloc_traits::allocate(_M_get_Node_allocator(), 1); }
585
586 void
587 _M_put_node(_Link_type __p) _GLIBCXX_NOEXCEPTnoexcept
588 { _Alloc_traits::deallocate(_M_get_Node_allocator(), __p, 1); }
589
590#if __cplusplus201402L < 201103L
591 void
592 _M_construct_node(_Link_type __node, const value_type& __x)
593 {
594 __tryif (true)
595 { get_allocator().construct(__node->_M_valptr(), __x); }
596 __catch(...)if (false)
597 {
598 _M_put_node(__node);
599 __throw_exception_again;
600 }
601 }
602
603 _Link_type
604 _M_create_node(const value_type& __x)
605 {
606 _Link_type __tmp = _M_get_node();
607 _M_construct_node(__tmp, __x);
608 return __tmp;
609 }
610#else
611 template<typename... _Args>
612 void
613 _M_construct_node(_Link_type __node, _Args&&... __args)
614 {
615 __tryif (true)
616 {
617 ::new(__node) _Rb_tree_node<_Val>;
618 _Alloc_traits::construct(_M_get_Node_allocator(),
619 __node->_M_valptr(),
620 std::forward<_Args>(__args)...);
621 }
622 __catch(...)if (false)
623 {
624 __node->~_Rb_tree_node<_Val>();
625 _M_put_node(__node);
626 __throw_exception_again;
627 }
628 }
629
630 template<typename... _Args>
631 _Link_type
632 _M_create_node(_Args&&... __args)
633 {
634 _Link_type __tmp = _M_get_node();
635 _M_construct_node(__tmp, std::forward<_Args>(__args)...);
636 return __tmp;
637 }
638#endif
639
640 void
641 _M_destroy_node(_Link_type __p) _GLIBCXX_NOEXCEPTnoexcept
642 {
643#if __cplusplus201402L < 201103L
644 get_allocator().destroy(__p->_M_valptr());
645#else
646 _Alloc_traits::destroy(_M_get_Node_allocator(), __p->_M_valptr());
647 __p->~_Rb_tree_node<_Val>();
648#endif
649 }
650
651 void
652 _M_drop_node(_Link_type __p) _GLIBCXX_NOEXCEPTnoexcept
653 {
654 _M_destroy_node(__p);
655 _M_put_node(__p);
656 }
657
658 template<typename _NodeGen>
659 _Link_type
660 _M_clone_node(_Const_Link_type __x, _NodeGen& __node_gen)
661 {
662 _Link_type __tmp = __node_gen(*__x->_M_valptr());
663 __tmp->_M_color = __x->_M_color;
664 __tmp->_M_left = 0;
665 __tmp->_M_right = 0;
666 return __tmp;
667 }
668
669 protected:
670#if _GLIBCXX_INLINE_VERSION0
671 template<typename _Key_compare>
672#else
673 // Unused _Is_pod_comparator is kept as it is part of mangled name.
674 template<typename _Key_compare,
675 bool /* _Is_pod_comparator */ = __is_pod(_Key_compare)>
676#endif
677 struct _Rb_tree_impl
678 : public _Node_allocator
679 , public _Rb_tree_key_compare<_Key_compare>
680 , public _Rb_tree_header
681 {
682 typedef _Rb_tree_key_compare<_Key_compare> _Base_key_compare;
683
684 _Rb_tree_impl()
685 _GLIBCXX_NOEXCEPT_IF(noexcept(is_nothrow_default_constructible<_Node_allocator>
::value && is_nothrow_default_constructible<_Base_key_compare
>::value)
686 is_nothrow_default_constructible<_Node_allocator>::valuenoexcept(is_nothrow_default_constructible<_Node_allocator>
::value && is_nothrow_default_constructible<_Base_key_compare
>::value)
687 && is_nothrow_default_constructible<_Base_key_compare>::value )noexcept(is_nothrow_default_constructible<_Node_allocator>
::value && is_nothrow_default_constructible<_Base_key_compare
>::value)
688 : _Node_allocator()
689 { }
690
691 _Rb_tree_impl(const _Rb_tree_impl& __x)
692 : _Node_allocator(_Alloc_traits::_S_select_on_copy(__x))
693 , _Base_key_compare(__x._M_key_compare)
694 { }
695
696#if __cplusplus201402L < 201103L
697 _Rb_tree_impl(const _Key_compare& __comp, const _Node_allocator& __a)
698 : _Node_allocator(__a), _Base_key_compare(__comp)
699 { }
700#else
701 _Rb_tree_impl(_Rb_tree_impl&&) = default;
702
703 explicit
704 _Rb_tree_impl(_Node_allocator&& __a)
705 : _Node_allocator(std::move(__a))
706 { }
707
708 _Rb_tree_impl(_Rb_tree_impl&& __x, _Node_allocator&& __a)
709 : _Node_allocator(std::move(__a)),
710 _Base_key_compare(std::move(__x)),
711 _Rb_tree_header(std::move(__x))
712 { }
713
714 _Rb_tree_impl(const _Key_compare& __comp, _Node_allocator&& __a)
715 : _Node_allocator(std::move(__a)), _Base_key_compare(__comp)
716 { }
717#endif
718 };
719
720 _Rb_tree_impl<_Compare> _M_impl;
721
722 protected:
723 _Base_ptr&
724 _M_root() _GLIBCXX_NOEXCEPTnoexcept
725 { return this->_M_impl._M_header._M_parent; }
726
727 _Const_Base_ptr
728 _M_root() const _GLIBCXX_NOEXCEPTnoexcept
729 { return this->_M_impl._M_header._M_parent; }
730
731 _Base_ptr&
732 _M_leftmost() _GLIBCXX_NOEXCEPTnoexcept
733 { return this->_M_impl._M_header._M_left; }
734
735 _Const_Base_ptr
736 _M_leftmost() const _GLIBCXX_NOEXCEPTnoexcept
737 { return this->_M_impl._M_header._M_left; }
738
739 _Base_ptr&
740 _M_rightmost() _GLIBCXX_NOEXCEPTnoexcept
741 { return this->_M_impl._M_header._M_right; }
742
743 _Const_Base_ptr
744 _M_rightmost() const _GLIBCXX_NOEXCEPTnoexcept
745 { return this->_M_impl._M_header._M_right; }
746
747 _Link_type
748 _M_begin() _GLIBCXX_NOEXCEPTnoexcept
749 { return static_cast<_Link_type>(this->_M_impl._M_header._M_parent); }
750
751 _Const_Link_type
752 _M_begin() const _GLIBCXX_NOEXCEPTnoexcept
753 {
754 return static_cast<_Const_Link_type>
755 (this->_M_impl._M_header._M_parent);
756 }
757
758 _Base_ptr
759 _M_end() _GLIBCXX_NOEXCEPTnoexcept
760 { return &this->_M_impl._M_header; }
761
762 _Const_Base_ptr
763 _M_end() const _GLIBCXX_NOEXCEPTnoexcept
764 { return &this->_M_impl._M_header; }
765
766 static const _Key&
767 _S_key(_Const_Link_type __x)
768 {
769#if __cplusplus201402L >= 201103L
770 // If we're asking for the key we're presumably using the comparison
771 // object, and so this is a good place to sanity check it.
772 static_assert(__is_invocable<_Compare&, const _Key&, const _Key&>{},
773 "comparison object must be invocable "
774 "with two arguments of key type");
775# if __cplusplus201402L >= 201703L
776 // _GLIBCXX_RESOLVE_LIB_DEFECTS
777 // 2542. Missing const requirements for associative containers
778 if constexpr (__is_invocable<_Compare&, const _Key&, const _Key&>{})
779 static_assert(
780 is_invocable_v<const _Compare&, const _Key&, const _Key&>,
781 "comparison object must be invocable as const");
782# endif // C++17
783#endif // C++11
784
785 return _KeyOfValue()(*__x->_M_valptr());
786 }
787
788 static _Link_type
789 _S_left(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
790 { return static_cast<_Link_type>(__x->_M_left); }
791
792 static _Const_Link_type
793 _S_left(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
794 { return static_cast<_Const_Link_type>(__x->_M_left); }
795
796 static _Link_type
797 _S_right(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
798 { return static_cast<_Link_type>(__x->_M_right); }
799
800 static _Const_Link_type
801 _S_right(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
802 { return static_cast<_Const_Link_type>(__x->_M_right); }
803
804 static const _Key&
805 _S_key(_Const_Base_ptr __x)
806 { return _S_key(static_cast<_Const_Link_type>(__x)); }
807
808 static _Base_ptr
809 _S_minimum(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
810 { return _Rb_tree_node_base::_S_minimum(__x); }
811
812 static _Const_Base_ptr
813 _S_minimum(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
814 { return _Rb_tree_node_base::_S_minimum(__x); }
815
816 static _Base_ptr
817 _S_maximum(_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
818 { return _Rb_tree_node_base::_S_maximum(__x); }
819
820 static _Const_Base_ptr
821 _S_maximum(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPTnoexcept
822 { return _Rb_tree_node_base::_S_maximum(__x); }
823
824 public:
825 typedef _Rb_tree_iterator<value_type> iterator;
826 typedef _Rb_tree_const_iterator<value_type> const_iterator;
827
828 typedef std::reverse_iterator<iterator> reverse_iterator;
829 typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
830
831#if __cplusplus201402L > 201402L
832 using node_type = _Node_handle<_Key, _Val, _Node_allocator>;
833 using insert_return_type = _Node_insert_return<
834 conditional_t<is_same_v<_Key, _Val>, const_iterator, iterator>,
835 node_type>;
836#endif
837
838 pair<_Base_ptr, _Base_ptr>
839 _M_get_insert_unique_pos(const key_type& __k);
840
841 pair<_Base_ptr, _Base_ptr>
842 _M_get_insert_equal_pos(const key_type& __k);
843
844 pair<_Base_ptr, _Base_ptr>
845 _M_get_insert_hint_unique_pos(const_iterator __pos,
846 const key_type& __k);
847
848 pair<_Base_ptr, _Base_ptr>
849 _M_get_insert_hint_equal_pos(const_iterator __pos,
850 const key_type& __k);
851
852 private:
853#if __cplusplus201402L >= 201103L
854 template<typename _Arg, typename _NodeGen>
855 iterator
856 _M_insert_(_Base_ptr __x, _Base_ptr __y, _Arg&& __v, _NodeGen&);
857
858 iterator
859 _M_insert_node(_Base_ptr __x, _Base_ptr __y, _Link_type __z);
860
861 template<typename _Arg>
862 iterator
863 _M_insert_lower(_Base_ptr __y, _Arg&& __v);
864
865 template<typename _Arg>
866 iterator
867 _M_insert_equal_lower(_Arg&& __x);
868
869 iterator
870 _M_insert_lower_node(_Base_ptr __p, _Link_type __z);
871
872 iterator
873 _M_insert_equal_lower_node(_Link_type __z);
874#else
875 template<typename _NodeGen>
876 iterator
877 _M_insert_(_Base_ptr __x, _Base_ptr __y,
878 const value_type& __v, _NodeGen&);
879
880 // _GLIBCXX_RESOLVE_LIB_DEFECTS
881 // 233. Insertion hints in associative containers.
882 iterator
883 _M_insert_lower(_Base_ptr __y, const value_type& __v);
884
885 iterator
886 _M_insert_equal_lower(const value_type& __x);
887#endif
888
889 template<typename _NodeGen>
890 _Link_type
891 _M_copy(_Const_Link_type __x, _Base_ptr __p, _NodeGen&);
892
893 template<typename _NodeGen>
894 _Link_type
895 _M_copy(const _Rb_tree& __x, _NodeGen& __gen)
896 {
897 _Link_type __root = _M_copy(__x._M_begin(), _M_end(), __gen);
898 _M_leftmost() = _S_minimum(__root);
899 _M_rightmost() = _S_maximum(__root);
900 _M_impl._M_node_count = __x._M_impl._M_node_count;
901 return __root;
902 }
903
904 _Link_type
905 _M_copy(const _Rb_tree& __x)
906 {
907 _Alloc_node __an(*this);
908 return _M_copy(__x, __an);
909 }
910
911 void
912 _M_erase(_Link_type __x);
913
914 iterator
915 _M_lower_bound(_Link_type __x, _Base_ptr __y,
916 const _Key& __k);
917
918 const_iterator
919 _M_lower_bound(_Const_Link_type __x, _Const_Base_ptr __y,
920 const _Key& __k) const;
921
922 iterator
923 _M_upper_bound(_Link_type __x, _Base_ptr __y,
924 const _Key& __k);
925
926 const_iterator
927 _M_upper_bound(_Const_Link_type __x, _Const_Base_ptr __y,
928 const _Key& __k) const;
929
930 public:
931 // allocation/deallocation
932#if __cplusplus201402L < 201103L
933 _Rb_tree() { }
934#else
935 _Rb_tree() = default;
936#endif
937
938 _Rb_tree(const _Compare& __comp,
939 const allocator_type& __a = allocator_type())
940 : _M_impl(__comp, _Node_allocator(__a)) { }
941
942 _Rb_tree(const _Rb_tree& __x)
943 : _M_impl(__x._M_impl)
944 {
945 if (__x._M_root() != 0)
946 _M_root() = _M_copy(__x);
947 }
948
949#if __cplusplus201402L >= 201103L
950 _Rb_tree(const allocator_type& __a)
951 : _M_impl(_Node_allocator(__a))
952 { }
953
954 _Rb_tree(const _Rb_tree& __x, const allocator_type& __a)
955 : _M_impl(__x._M_impl._M_key_compare, _Node_allocator(__a))
956 {
957 if (__x._M_root() != nullptr)
958 _M_root() = _M_copy(__x);
959 }
960
961 _Rb_tree(_Rb_tree&&) = default;
962
963 _Rb_tree(_Rb_tree&& __x, const allocator_type& __a)
964 : _Rb_tree(std::move(__x), _Node_allocator(__a))
965 { }
966
967 private:
968 _Rb_tree(_Rb_tree&& __x, _Node_allocator&& __a, true_type)
969 noexcept(is_nothrow_default_constructible<_Compare>::value)
970 : _M_impl(std::move(__x._M_impl), std::move(__a))
971 { }
972
973 _Rb_tree(_Rb_tree&& __x, _Node_allocator&& __a, false_type)
974 : _M_impl(__x._M_impl._M_key_compare, std::move(__a))
975 {
976 if (__x._M_root() != nullptr)
977 _M_move_data(__x, false_type{});
978 }
979
980 public:
981 _Rb_tree(_Rb_tree&& __x, _Node_allocator&& __a)
982 noexcept( noexcept(
983 _Rb_tree(std::declval<_Rb_tree&&>(), std::declval<_Node_allocator&&>(),
984 std::declval<typename _Alloc_traits::is_always_equal>())) )
985 : _Rb_tree(std::move(__x), std::move(__a),
986 typename _Alloc_traits::is_always_equal{})
987 { }
988#endif
989
990 ~_Rb_tree() _GLIBCXX_NOEXCEPTnoexcept
991 { _M_erase(_M_begin()); }
992
993 _Rb_tree&
994 operator=(const _Rb_tree& __x);
995
996 // Accessors.
997 _Compare
998 key_comp() const
999 { return _M_impl._M_key_compare; }
1000
1001 iterator
1002 begin() _GLIBCXX_NOEXCEPTnoexcept
1003 { return iterator(this->_M_impl._M_header._M_left); }
1004
1005 const_iterator
1006 begin() const _GLIBCXX_NOEXCEPTnoexcept
1007 { return const_iterator(this->_M_impl._M_header._M_left); }
1008
1009 iterator
1010 end() _GLIBCXX_NOEXCEPTnoexcept
1011 { return iterator(&this->_M_impl._M_header); }
1012
1013 const_iterator
1014 end() const _GLIBCXX_NOEXCEPTnoexcept
1015 { return const_iterator(&this->_M_impl._M_header); }
1016
1017 reverse_iterator
1018 rbegin() _GLIBCXX_NOEXCEPTnoexcept
1019 { return reverse_iterator(end()); }
1020
1021 const_reverse_iterator
1022 rbegin() const _GLIBCXX_NOEXCEPTnoexcept
1023 { return const_reverse_iterator(end()); }
1024
1025 reverse_iterator
1026 rend() _GLIBCXX_NOEXCEPTnoexcept
1027 { return reverse_iterator(begin()); }
1028
1029 const_reverse_iterator
1030 rend() const _GLIBCXX_NOEXCEPTnoexcept
1031 { return const_reverse_iterator(begin()); }
1032
1033 _GLIBCXX_NODISCARD bool
1034 empty() const _GLIBCXX_NOEXCEPTnoexcept
1035 { return _M_impl._M_node_count == 0; }
1036
1037 size_type
1038 size() const _GLIBCXX_NOEXCEPTnoexcept
1039 { return _M_impl._M_node_count; }
1040
1041 size_type
1042 max_size() const _GLIBCXX_NOEXCEPTnoexcept
1043 { return _Alloc_traits::max_size(_M_get_Node_allocator()); }
1044
1045 void
1046 swap(_Rb_tree& __t)
1047 _GLIBCXX_NOEXCEPT_IF(__is_nothrow_swappable<_Compare>::value)noexcept(__is_nothrow_swappable<_Compare>::value);
1048
1049 // Insert/erase.
1050#if __cplusplus201402L >= 201103L
1051 template<typename _Arg>
1052 pair<iterator, bool>
1053 _M_insert_unique(_Arg&& __x);
1054
1055 template<typename _Arg>
1056 iterator
1057 _M_insert_equal(_Arg&& __x);
1058
1059 template<typename _Arg, typename _NodeGen>
1060 iterator
1061 _M_insert_unique_(const_iterator __pos, _Arg&& __x, _NodeGen&);
1062
1063 template<typename _Arg>
1064 iterator
1065 _M_insert_unique_(const_iterator __pos, _Arg&& __x)
1066 {
1067 _Alloc_node __an(*this);
1068 return _M_insert_unique_(__pos, std::forward<_Arg>(__x), __an);
1069 }
1070
1071 template<typename _Arg, typename _NodeGen>
1072 iterator
1073 _M_insert_equal_(const_iterator __pos, _Arg&& __x, _NodeGen&);
1074
1075 template<typename _Arg>
1076 iterator
1077 _M_insert_equal_(const_iterator __pos, _Arg&& __x)
1078 {
1079 _Alloc_node __an(*this);
1080 return _M_insert_equal_(__pos, std::forward<_Arg>(__x), __an);
1081 }
1082
1083 template<typename... _Args>
1084 pair<iterator, bool>
1085 _M_emplace_unique(_Args&&... __args);
1086
1087 template<typename... _Args>
1088 iterator
1089 _M_emplace_equal(_Args&&... __args);
1090
1091 template<typename... _Args>
1092 iterator
1093 _M_emplace_hint_unique(const_iterator __pos, _Args&&... __args);
1094
1095 template<typename... _Args>
1096 iterator
1097 _M_emplace_hint_equal(const_iterator __pos, _Args&&... __args);
1098
1099 template<typename _Iter>
1100 using __same_value_type
1101 = is_same<value_type, typename iterator_traits<_Iter>::value_type>;
1102
// Range-insert helpers (C++11 path). Each pair of overloads is split by
// SFINAE on __same_value_type: when the iterator's value_type matches the
// container's value_type, nodes are built directly via one shared
// _Alloc_node generator; otherwise each element is converted through the
// emplace path.
1103 template<typename _InputIterator>
1104 __enable_if_t<__same_value_type<_InputIterator>::value>
1105 _M_insert_range_unique(_InputIterator __first, _InputIterator __last)
1106 {
1107 _Alloc_node __an(*this);
1108 for (; __first != __last; ++__first)
1109 _M_insert_unique_(end(), *__first, __an);
1110 }
1111
1112 template<typename _InputIterator>
1113 __enable_if_t<!__same_value_type<_InputIterator>::value>
1114 _M_insert_range_unique(_InputIterator __first, _InputIterator __last)
1115 {
1116 for (; __first != __last; ++__first)
1117 _M_emplace_unique(*__first);
1118 }
1119
1120 template<typename _InputIterator>
1121 __enable_if_t<__same_value_type<_InputIterator>::value>
1122 _M_insert_range_equal(_InputIterator __first, _InputIterator __last)
1123 {
1124 _Alloc_node __an(*this);
1125 for (; __first != __last; ++__first)
1126 _M_insert_equal_(end(), *__first, __an);
1127 }
1128
1129 template<typename _InputIterator>
1130 __enable_if_t<!__same_value_type<_InputIterator>::value>
1131 _M_insert_range_equal(_InputIterator __first, _InputIterator __last)
1132 {
// NOTE(review): __an appears unused in this overload (the loop goes through
// _M_emplace_equal, not the _NodeGen path) — harmless, kept as upstream has it.
1133 _Alloc_node __an(*this);
1134 for (; __first != __last; ++__first)
1135 _M_emplace_equal(*__first);
1136 }
1137#else
1138 pair<iterator, bool>
1139 _M_insert_unique(const value_type& __x);
1140
1141 iterator
1142 _M_insert_equal(const value_type& __x);
1143
1144 template<typename _NodeGen>
1145 iterator
1146 _M_insert_unique_(const_iterator __pos, const value_type& __x,
1147 _NodeGen&);
1148
// Pre-C++11 (#else) counterparts of the insert helpers above: same shapes,
// but taking const lvalue references since there is no perfect forwarding.
// The one-argument hinted overloads simply supply the default _Alloc_node
// generator and delegate to the _NodeGen-taking overloads.
1149 iterator
1150 _M_insert_unique_(const_iterator __pos, const value_type& __x)
1151 {
1152 _Alloc_node __an(*this);
1153 return _M_insert_unique_(__pos, __x, __an);
1154 }
1155
1156 template<typename _NodeGen>
1157 iterator
1158 _M_insert_equal_(const_iterator __pos, const value_type& __x,
1159 _NodeGen&);
1160 iterator
1161 _M_insert_equal_(const_iterator __pos, const value_type& __x)
1162 {
1163 _Alloc_node __an(*this);
1164 return _M_insert_equal_(__pos, __x, __an);
1165 }
1166
// Range inserts: one shared node generator, each element appended with the
// end() hint (optimal for already-sorted input).
1167 template<typename _InputIterator>
1168 void
1169 _M_insert_range_unique(_InputIterator __first, _InputIterator __last)
1170 {
1171 _Alloc_node __an(*this);
1172 for (; __first != __last; ++__first)
1173 _M_insert_unique_(end(), *__first, __an);
1174 }
1175
1176 template<typename _InputIterator>
1177 void
1178 _M_insert_range_equal(_InputIterator __first, _InputIterator __last)
1179 {
1180 _Alloc_node __an(*this);
1181 for (; __first != __last; ++__first)
1182 _M_insert_equal_(end(), *__first, __an);
1183 }
1184#endif
1185
1186 private:
1187 void
1188 _M_erase_aux(const_iterator __position);
1189
1190 void
1191 _M_erase_aux(const_iterator __first, const_iterator __last);
1192
1193 public:
1194#if __cplusplus201402L >= 201103L
1195 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1196 // DR 130. Associative erase should return an iterator.
// C++11 erase: per DR 130, returns an iterator to the element following the
// erased one. The successor is captured *before* _M_erase_aux unlinks the
// node, because erasure invalidates __position.
1197 _GLIBCXX_ABI_TAG_CXX11__attribute ((__abi_tag__ ("cxx11")))
1198 iterator
1199 erase(const_iterator __position)
1200 {
1201 __glibcxx_assert(__position != end());
1202 const_iterator __result = __position;
1203 ++__result;
1204 _M_erase_aux(__position);
1205 return __result._M_const_cast();
1206 }
1207
// Non-const iterator overload added by LWG 2059 (so erase(it) does not
// ambiguously match the key_type overload).
1208 // LWG 2059.
1209 _GLIBCXX_ABI_TAG_CXX11__attribute ((__abi_tag__ ("cxx11")))
1210 iterator
1211 erase(iterator __position)
1212 {
1213 __glibcxx_assert(__position != end());
1214 iterator __result = __position;
1215 ++__result;
1216 _M_erase_aux(__position);
1217 return __result;
1218 }
1219#else
// Pre-C++11 erase: returns void (no DR 130 iterator result); both
// overloads just validate and forward to _M_erase_aux.
1220 void
1221 erase(iterator __position)
1222 {
1223 __glibcxx_assert(__position != end());
1224 _M_erase_aux(__position);
1225 }
1226
1227 void
1228 erase(const_iterator __position)
1229 {
1230 __glibcxx_assert(__position != end());
1231 _M_erase_aux(__position);
1232 }
1233#endif
1234
1235 size_type
1236 erase(const key_type& __x);
1237
1238#if __cplusplus201402L >= 201103L
1239 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1240 // DR 130. Associative erase should return an iterator.
// C++11 range erase (DR 130): erases [__first, __last) and returns __last
// as a mutable iterator. __last itself is never invalidated by the erase.
1241 _GLIBCXX_ABI_TAG_CXX11__attribute ((__abi_tag__ ("cxx11")))
1242 iterator
1243 erase(const_iterator __first, const_iterator __last)
1244 {
1245 _M_erase_aux(__first, __last);
1246 return __last._M_const_cast();
1247 }
1248#else
// Pre-C++11 range erase: void-returning forwarders to _M_erase_aux.
1249 void
1250 erase(iterator __first, iterator __last)
1251 { _M_erase_aux(__first, __last); }
1252
1253 void
1254 erase(const_iterator __first, const_iterator __last)
1255 { _M_erase_aux(__first, __last); }
1256#endif
1257
// Destroy every node starting from the root (_M_erase), then reset the
// header/bookkeeping to the empty state. Presumably _M_reset restores
// the sentinel links and zeroes the node count — defined elsewhere in
// this header.
1258 void
1259 clear() _GLIBCXX_NOEXCEPTnoexcept
1260 {
1261 _M_erase(_M_begin());
1262 _M_impl._M_reset();
1263 }
1264
1265 // Set operations.
1266 iterator
1267 find(const key_type& __k);
1268
1269 const_iterator
1270 find(const key_type& __k) const;
1271
1272 size_type
1273 count(const key_type& __k) const;
1274
// Public bound queries: thin wrappers that search the whole tree
// (root .. header) via the internal _M_lower_bound/_M_upper_bound helpers.
1275 iterator
1276 lower_bound(const key_type& __k)
1277 { return _M_lower_bound(_M_begin(), _M_end(), __k); }
1278
1279 const_iterator
1280 lower_bound(const key_type& __k) const
1281 { return _M_lower_bound(_M_begin(), _M_end(), __k); }
1282
1283 iterator
1284 upper_bound(const key_type& __k)
1285 { return _M_upper_bound(_M_begin(), _M_end(), __k); }
1286
1287 const_iterator
1288 upper_bound(const key_type& __k) const
1289 { return _M_upper_bound(_M_begin(), _M_end(), __k); }
1290
1291 pair<iterator, iterator>
1292 equal_range(const key_type& __k);
1293
1294 pair<const_iterator, const_iterator>
1295 equal_range(const key_type& __k) const;
1296
1297#if __cplusplus201402L >= 201402L
// Heterogeneous ("transparent") lookup, C++14 path: each member is
// SFINAE-gated on _Compare having is_transparent (via
// __has_is_transparent_t), so arbitrary key-like types _Kt can be compared
// without constructing a key_type.
// Non-const find delegates to the const overload and casts the result back.
1298 template<typename _Kt,
1299 typename _Req = __has_is_transparent_t<_Compare, _Kt>>
1300 iterator
1301 _M_find_tr(const _Kt& __k)
1302 {
1303 const _Rb_tree* __const_this = this;
1304 return __const_this->_M_find_tr(__k)._M_const_cast();
1305 }
1306
// find = lower_bound, then verify the element found is not strictly
// greater than the key (i.e. it is equivalent); otherwise report end().
1307 template<typename _Kt,
1308 typename _Req = __has_is_transparent_t<_Compare, _Kt>>
1309 const_iterator
1310 _M_find_tr(const _Kt& __k) const
1311 {
1312 auto __j = _M_lower_bound_tr(__k);
1313 if (__j != end() && _M_impl._M_key_compare(__k, _S_key(__j._M_node)))
1314 __j = end();
1315 return __j;
1316 }
1317
// count = size of the equal range (linear in the number of matches).
1318 template<typename _Kt,
1319 typename _Req = __has_is_transparent_t<_Compare, _Kt>>
1320 size_type
1321 _M_count_tr(const _Kt& __k) const
1322 {
1323 auto __p = _M_equal_range_tr(__k);
1324 return std::distance(__p.first, __p.second);
1325 }
1326
// Transparent lower_bound. Non-const overload delegates to const and
// casts back.
1327 template<typename _Kt,
1328 typename _Req = __has_is_transparent_t<_Compare, _Kt>>
1329 iterator
1330 _M_lower_bound_tr(const _Kt& __k)
1331 {
1332 const _Rb_tree* __const_this = this;
1333 return __const_this->_M_lower_bound_tr(__k)._M_const_cast();
1334 }
1335
// Standard binary-search descent: __y tracks the last node whose key was
// NOT less than __k (the lower-bound candidate); walk left when the
// current key is >= __k, right otherwise. Falls back to _M_end() (the
// header) when no such node exists.
1336 template<typename _Kt,
1337 typename _Req = __has_is_transparent_t<_Compare, _Kt>>
1338 const_iterator
1339 _M_lower_bound_tr(const _Kt& __k) const
1340 {
1341 auto __x = _M_begin();
1342 auto __y = _M_end();
1343 while (__x != 0)
1344 if (!_M_impl._M_key_compare(_S_key(__x), __k))
1345 {
1346 __y = __x;
1347 __x = _S_left(__x);
1348 }
1349 else
1350 __x = _S_right(__x);
1351 return const_iterator(__y);
1352 }
1353
// Transparent upper_bound. Non-const overload delegates to const and
// casts back.
1354 template<typename _Kt,
1355 typename _Req = __has_is_transparent_t<_Compare, _Kt>>
1356 iterator
1357 _M_upper_bound_tr(const _Kt& __k)
1358 {
1359 const _Rb_tree* __const_this = this;
1360 return __const_this->_M_upper_bound_tr(__k)._M_const_cast();
1361 }
1362
// Mirror of _M_lower_bound_tr with the comparison reversed: __y tracks
// the last node whose key compares strictly GREATER than __k.
1363 template<typename _Kt,
1364 typename _Req = __has_is_transparent_t<_Compare, _Kt>>
1365 const_iterator
1366 _M_upper_bound_tr(const _Kt& __k) const
1367 {
1368 auto __x = _M_begin();
1369 auto __y = _M_end();
1370 while (__x != 0)
1371 if (_M_impl._M_key_compare(__k, _S_key(__x)))
1372 {
1373 __y = __x;
1374 __x = _S_left(__x);
1375 }
1376 else
1377 __x = _S_right(__x);
1378 return const_iterator(__y);
1379 }
1380
// Transparent equal_range. Non-const overload delegates to const and
// casts both iterators back.
1381 template<typename _Kt,
1382 typename _Req = __has_is_transparent_t<_Compare, _Kt>>
1383 pair<iterator, iterator>
1384 _M_equal_range_tr(const _Kt& __k)
1385 {
1386 const _Rb_tree* __const_this = this;
1387 auto __ret = __const_this->_M_equal_range_tr(__k);
1388 return { __ret.first._M_const_cast(), __ret.second._M_const_cast() };
1389 }
1390
// One tree descent for the lower bound, then a linear walk forward while
// elements remain equivalent to __k (i.e. __k is not less than them).
// Linear in the number of equivalent elements, unlike the homogeneous
// equal_range which does two descents.
1391 template<typename _Kt,
1392 typename _Req = __has_is_transparent_t<_Compare, _Kt>>
1393 pair<const_iterator, const_iterator>
1394 _M_equal_range_tr(const _Kt& __k) const
1395 {
1396 auto __low = _M_lower_bound_tr(__k);
1397 auto __high = __low;
1398 auto& __cmp = _M_impl._M_key_compare;
1399 while (__high != end() && !__cmp(__k, _S_key(__high._M_node)))
1400 ++__high;
1401 return { __low, __high };
1402 }
1403#endif
1404
1405 // Debugging.
1406 bool
1407 __rb_verify() const;
1408
1409#if __cplusplus201402L >= 201103L
1410 _Rb_tree&
1411 operator=(_Rb_tree&&)
1412 noexcept(_Alloc_traits::_S_nothrow_move()
1413 && is_nothrow_move_assignable<_Compare>::value);
1414
1415 template<typename _Iterator>
1416 void
1417 _M_assign_unique(_Iterator, _Iterator);
1418
1419 template<typename _Iterator>
1420 void
1421 _M_assign_equal(_Iterator, _Iterator);
1422
1423 private:
1424 // Move elements from container with equal allocator.
// true_type tag = allocators are known equal, so node ownership can be
// transferred wholesale by moving the impl (pointer swap, no per-node work).
1425 void
1426 _M_move_data(_Rb_tree& __x, true_type)
1427 { _M_impl._M_move_data(__x._M_impl); }
1428
1429 // Move elements from container with possibly non-equal allocator,
1430 // which might result in a copy not a move.
1431 void
1432 _M_move_data(_Rb_tree&, false_type);
1433
1434 // Move assignment from container with equal allocator.
1435 void
1436 _M_move_assign(_Rb_tree&, true_type);
1437
1438 // Move assignment from container with possibly non-equal allocator,
1439 // which might result in a copy not a move.
1440 void
1441 _M_move_assign(_Rb_tree&, false_type);
1442#endif
1443
1444#if __cplusplus201402L > 201402L
1445 public:
1446 /// Re-insert an extracted node.
// Implements unique-key insert(node_type&&): on success the handle
// releases ownership (_M_ptr = nullptr) and inserted=true; on a key
// collision the handle is returned to the caller inside __ret.node with
// position pointing at the blocking element (C++17 node-handle protocol).
1447 insert_return_type
1448 _M_reinsert_node_unique(node_type&& __nh)
1449 {
1450 insert_return_type __ret;
1451 if (__nh.empty())
1452 __ret.position = end();
1453 else
1454 {
// Node handles may only be re-inserted into a container whose allocator
// compares equal to the one the node was extracted with.
1455 __glibcxx_assert(_M_get_Node_allocator() == *__nh._M_alloc)
;
1456
1457 auto __res = _M_get_insert_unique_pos(__nh._M_key());
1458 if (__res.second)
1459 {
1460 __ret.position
1461 = _M_insert_node(__res.first, __res.second, __nh._M_ptr);
1462 __nh._M_ptr = nullptr;
1463 __ret.inserted = true;
1464 }
1465 else
1466 {
1467 __ret.node = std::move(__nh);
1468 __ret.position = iterator(__res.first);
1469 __ret.inserted = false;
1470 }
1471 }
1472 return __ret;
1473 }
1474
1475 /// Re-insert an extracted node.
// Equivalent-keys variant: insertion always succeeds, so the handle is
// always emptied. _M_insert_equal_lower_node handles the case where the
// position pair signals a rightmost/lower insertion.
1476 iterator
1477 _M_reinsert_node_equal(node_type&& __nh)
1478 {
1479 iterator __ret;
1480 if (__nh.empty())
1481 __ret = end();
1482 else
1483 {
1484 __glibcxx_assert(_M_get_Node_allocator() == *__nh._M_alloc);
1485 auto __res = _M_get_insert_equal_pos(__nh._M_key());
1486 if (__res.second)
1487 __ret = _M_insert_node(__res.first, __res.second, __nh._M_ptr);
1488 else
1489 __ret = _M_insert_equal_lower_node(__nh._M_ptr);
1490 __nh._M_ptr = nullptr;
1491 }
1492 return __ret;
1493 }
1494
1495 /// Re-insert an extracted node.
// Hinted unique-key variant of node re-insertion: on a key collision the
// handle keeps ownership (caller's __nh still holds the node) and the
// iterator to the blocking element is returned.
1496 iterator
1497 _M_reinsert_node_hint_unique(const_iterator __hint, node_type&& __nh)
1498 {
1499 iterator __ret;
1500 if (__nh.empty())
1501 __ret = end();
1502 else
1503 {
1504 __glibcxx_assert(_M_get_Node_allocator() == *__nh._M_alloc);
1505 auto __res = _M_get_insert_hint_unique_pos(__hint, __nh._M_key());
1506 if (__res.second)
1507 {
1508 __ret = _M_insert_node(__res.first, __res.second, __nh._M_ptr);
1509 __nh._M_ptr = nullptr;
1510 }
1511 else
1512 __ret = iterator(__res.first);
1513 }
1514 return __ret;
1515 }
1516
1517 /// Re-insert an extracted node.
// Hinted equivalent-keys variant: always succeeds, always empties the
// handle; mirrors _M_reinsert_node_equal but uses the hint to locate the
// insertion position.
1518 iterator
1519 _M_reinsert_node_hint_equal(const_iterator __hint, node_type&& __nh)
1520 {
1521 iterator __ret;
1522 if (__nh.empty())
1523 __ret = end();
1524 else
1525 {
1526 __glibcxx_assert(_M_get_Node_allocator() == *__nh._M_alloc);
1527 auto __res = _M_get_insert_hint_equal_pos(__hint, __nh._M_key());
1528 if (__res.second)
1529 __ret = _M_insert_node(__res.first, __res.second, __nh._M_ptr);
1530 else
1531 __ret = _M_insert_equal_lower_node(__nh._M_ptr);
1532 __nh._M_ptr = nullptr;
1533 }
1534 return __ret;
1535 }
1536
1537 /// Extract a node.
// Unlinks the node at __pos from the tree (rebalancing as needed),
// decrements the count, and wraps the detached node in a node_type handle
// that owns it together with this tree's node allocator.
1538 node_type
1539 extract(const_iterator __pos)
1540 {
1541 auto __ptr = _Rb_tree_rebalance_for_erase(
1542 __pos._M_const_cast()._M_node, _M_impl._M_header);
1543 --_M_impl._M_node_count;
1544 return { static_cast<_Link_type>(__ptr), _M_get_Node_allocator() };
1545 }
1546
1547 /// Extract a node.
// Key-based extract: find the element first; returns an empty handle when
// the key is absent.
1548 node_type
1549 extract(const key_type& __k)
1550 {
1551 node_type __nh;
1552 auto __pos = find(__k);
1553 if (__pos != end())
1554 __nh = extract(const_iterator(__pos));
1555 return __nh;
1556 }
1557
1558 template<typename _Compare2>
1559 using _Compatible_tree
1560 = _Rb_tree<_Key, _Val, _KeyOfValue, _Compare2, _Alloc>;
1561
1562 template<typename, typename>
1563 friend class _Rb_tree_merge_helper;
1564
1565 /// Merge from a compatible container into one with unique keys.
// Splices nodes (no copies, no reallocation) from __src into *this.
// Elements whose key already exists here are left in __src. __i is
// advanced before the current node is unlinked, so iteration over __src
// stays valid while its nodes are removed.
1566 template<typename _Compare2>
1567 void
1568 _M_merge_unique(_Compatible_tree<_Compare2>& __src) noexcept
1569 {
1570 using _Merge_helper = _Rb_tree_merge_helper<_Rb_tree, _Compare2>;
1571 for (auto __i = __src.begin(), __end = __src.end(); __i != __end;)
1572 {
1573 auto __pos = __i++;
1574 auto __res = _M_get_insert_unique_pos(_KeyOfValue()(*__pos));
1575 if (__res.second)
1576 {
// Detach from the source tree's bookkeeping (friend access via the
// merge helper), then re-link the same node into this tree.
1577 auto& __src_impl = _Merge_helper::_S_get_impl(__src);
1578 auto __ptr = _Rb_tree_rebalance_for_erase(
1579 __pos._M_node, __src_impl._M_header);
1580 --__src_impl._M_node_count;
1581 _M_insert_node(__res.first, __res.second,
1582 static_cast<_Link_type>(__ptr));
1583 }
1584 }
1585 }
1586
1587 /// Merge from a compatible container into one with equivalent keys.
// Same splice loop as _M_merge_unique, but using the equivalent-keys
// position lookup; in practice every node can be moved. Kept structurally
// parallel to the unique version (including the __res.second guard).
1588 template<typename _Compare2>
1589 void
1590 _M_merge_equal(_Compatible_tree<_Compare2>& __src) noexcept
1591 {
1592 using _Merge_helper = _Rb_tree_merge_helper<_Rb_tree, _Compare2>;
1593 for (auto __i = __src.begin(), __end = __src.end(); __i != __end;)
1594 {
1595 auto __pos = __i++;
1596 auto __res = _M_get_insert_equal_pos(_KeyOfValue()(*__pos));
1597 if (__res.second)
1598 {
1599 auto& __src_impl = _Merge_helper::_S_get_impl(__src);
1600 auto __ptr = _Rb_tree_rebalance_for_erase(
1601 __pos._M_node, __src_impl._M_header);
1602 --__src_impl._M_node_count;
1603 _M_insert_node(__res.first, __res.second,
1604 static_cast<_Link_type>(__ptr));
1605 }
1606 }
1607 }
1608#endif // C++17
1609
// Element-wise equality: sizes must match first (so the one-range
// std::equal cannot run past __y's end), then compare in order.
1610 friend bool
1611 operator==(const _Rb_tree& __x, const _Rb_tree& __y)
1612 {
1613 return __x.size() == __y.size()
1614 && std::equal(__x.begin(), __x.end(), __y.begin());
1615 }
1616
1617#if __cpp_lib_three_way_comparison
// C++20 three-way comparison, constrained (via the requires-clause on
// __synth3way_t) to value types that are themselves three-way-comparable
// or ordered; uses synthesized three-way comparison for types with only
// operator<.
1618 friend auto
1619 operator<=>(const _Rb_tree& __x, const _Rb_tree& __y)
1620 {
1621 if constexpr (requires { typename __detail::__synth3way_t<_Val>; })
1622 return std::lexicographical_compare_three_way(__x.begin(), __x.end(),
1623 __y.begin(), __y.end(),
1624 __detail::__synth3way);
1625 }
1626#else
// Pre-spaceship relational operators: operator< is the primitive
// (lexicographic); the remaining four are derived from == and < and are
// flagged deprecated (superseded by <=> in newer standards).
1627 friend bool
1628 operator<(const _Rb_tree& __x, const _Rb_tree& __y)
1629 {
1630 return std::lexicographical_compare(__x.begin(), __x.end(),
1631 __y.begin(), __y.end());
1632 }
1633
1634 friend bool _GLIBCXX_DEPRECATED__attribute__ ((__deprecated__))
1635 operator!=(const _Rb_tree& __x, const _Rb_tree& __y)
1636 { return !(__x == __y); }
1637
1638 friend bool _GLIBCXX_DEPRECATED__attribute__ ((__deprecated__))
1639 operator>(const _Rb_tree& __x, const _Rb_tree& __y)
1640 { return __y < __x; }
1641
1642 friend bool _GLIBCXX_DEPRECATED__attribute__ ((__deprecated__))
1643 operator<=(const _Rb_tree& __x, const _Rb_tree& __y)
1644 { return !(__y < __x); }
1645
1646 friend bool _GLIBCXX_DEPRECATED__attribute__ ((__deprecated__))
1647 operator>=(const _Rb_tree& __x, const _Rb_tree& __y)
1648 { return !(__x < __y); }
1649#endif
1650 };
1651
1652 template<typename _Key, typename _Val, typename