Bug Summary

File: lib/Target/X86/X86ISelDAGToDAG.cpp
Warning: line 581, column 13
Called C++ object pointer is null
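
For context, a minimal self-contained sketch (not LLVM code) of the pattern this checker diagnoses: calling a member function through an object pointer that is null on some feasible path.

    struct Node {
      int getOpcode() const;
    };

    // If a caller can pass Root == nullptr, the analyzer's assumption
    // that U == Root makes the call below a null object-pointer dereference.
    int classify(const Node *U, const Node *Root) {
      if (U == Root)
        return U->getOpcode(); // warning: called C++ object pointer is null
      return -1;
    }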

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name X86ISelDAGToDAG.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~svn372087/build-llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86 -I /build/llvm-toolchain-snapshot-10~svn372087/build-llvm/include -I /build/llvm-toolchain-snapshot-10~svn372087/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~svn372087/build-llvm/lib/Target/X86 -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~svn372087=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-09-17-145504-7198-1 -x c++ /build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp

/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp

1//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines a DAG pattern matching instruction selector for X86,
10// converting from a legalized dag to an X86 dag.
11//
12//===----------------------------------------------------------------------===//
13
14#include "X86.h"
15#include "X86MachineFunctionInfo.h"
16#include "X86RegisterInfo.h"
17#include "X86Subtarget.h"
18#include "X86TargetMachine.h"
19#include "llvm/ADT/Statistic.h"
20#include "llvm/CodeGen/MachineFrameInfo.h"
21#include "llvm/CodeGen/MachineFunction.h"
22#include "llvm/CodeGen/SelectionDAGISel.h"
23#include "llvm/Config/llvm-config.h"
24#include "llvm/IR/ConstantRange.h"
25#include "llvm/IR/Function.h"
26#include "llvm/IR/Instructions.h"
27#include "llvm/IR/Intrinsics.h"
28#include "llvm/IR/Type.h"
29#include "llvm/Support/Debug.h"
30#include "llvm/Support/ErrorHandling.h"
31#include "llvm/Support/KnownBits.h"
32#include "llvm/Support/MathExtras.h"
33#include "llvm/Support/raw_ostream.h"
34#include "llvm/Target/TargetMachine.h"
35#include "llvm/Target/TargetOptions.h"
36#include <stdint.h>
37using namespace llvm;
38
39#define DEBUG_TYPE "x86-isel"
40
41STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
42
43static cl::opt<bool> AndImmShrink("x86-and-imm-shrink", cl::init(true),
44 cl::desc("Enable setting constant bits to reduce size of mask immediates"),
45 cl::Hidden);
46
47//===----------------------------------------------------------------------===//
48// Pattern Matcher Implementation
49//===----------------------------------------------------------------------===//
50
51namespace {
52 /// This corresponds to X86AddressMode, but uses SDValues instead of register
53 /// numbers for the leaves of the matched tree.
54 struct X86ISelAddressMode {
55 enum {
56 RegBase,
57 FrameIndexBase
58 } BaseType;
59
60 // This is really a union, discriminated by BaseType!
61 SDValue Base_Reg;
62 int Base_FrameIndex;
63
64 unsigned Scale;
65 SDValue IndexReg;
66 int32_t Disp;
67 SDValue Segment;
68 const GlobalValue *GV;
69 const Constant *CP;
70 const BlockAddress *BlockAddr;
71 const char *ES;
72 MCSymbol *MCSym;
73 int JT;
74 unsigned Align; // CP alignment.
75 unsigned char SymbolFlags; // X86II::MO_*
76 bool NegateIndex = false;
77
78 X86ISelAddressMode()
79 : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
80 Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
81 MCSym(nullptr), JT(-1), Align(0), SymbolFlags(X86II::MO_NO_FLAG) {}
82
83 bool hasSymbolicDisplacement() const {
84 return GV != nullptr || CP != nullptr || ES != nullptr ||
85 MCSym != nullptr || JT != -1 || BlockAddr != nullptr;
86 }
87
88 bool hasBaseOrIndexReg() const {
89 return BaseType == FrameIndexBase ||
90 IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
91 }
92
93 /// Return true if this addressing mode is already RIP-relative.
94 bool isRIPRelative() const {
95 if (BaseType != RegBase) return false;
96 if (RegisterSDNode *RegNode =
97 dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
98 return RegNode->getReg() == X86::RIP;
99 return false;
100 }
101
102 void setBaseReg(SDValue Reg) {
103 BaseType = RegBase;
104 Base_Reg = Reg;
105 }
106
107#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
108 void dump(SelectionDAG *DAG = nullptr) {
109 dbgs() << "X86ISelAddressMode " << this << '\n';
110 dbgs() << "Base_Reg ";
111 if (Base_Reg.getNode())
112 Base_Reg.getNode()->dump(DAG);
113 else
114 dbgs() << "nul\n";
115 if (BaseType == FrameIndexBase)
116 dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n';
117 dbgs() << " Scale " << Scale << '\n'
118 << "IndexReg ";
119 if (NegateIndex)
120 dbgs() << "negate ";
121 if (IndexReg.getNode())
122 IndexReg.getNode()->dump(DAG);
123 else
124 dbgs() << "nul\n";
125 dbgs() << " Disp " << Disp << '\n'
126 << "GV ";
127 if (GV)
128 GV->dump();
129 else
130 dbgs() << "nul";
131 dbgs() << " CP ";
132 if (CP)
133 CP->dump();
134 else
135 dbgs() << "nul";
136 dbgs() << '\n'
137 << "ES ";
138 if (ES)
139 dbgs() << ES;
140 else
141 dbgs() << "nul";
142 dbgs() << " MCSym ";
143 if (MCSym)
144 dbgs() << MCSym;
145 else
146 dbgs() << "nul";
147 dbgs() << " JT" << JT << " Align" << Align << '\n';
148 }
149#endif
150 };
151}
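
For orientation, the fields above model the general x86 memory operand form Segment:[Base + Scale*Index + Disp + symbol]. A hypothetical mapping (not taken from this file) for the operand %gs:16(%rbx,%rcx,4) would be:

    // BaseType = RegBase, Base_Reg = RBX
    // Scale    = 4,       IndexReg = RCX
    // Disp     = 16,      Segment  = GS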
152
153namespace {
154 //===--------------------------------------------------------------------===//
155 /// ISel - X86-specific code to select X86 machine instructions for
156 /// SelectionDAG operations.
157 ///
158 class X86DAGToDAGISel final : public SelectionDAGISel {
159 /// Keep a pointer to the X86Subtarget around so that we can
160 /// make the right decision when generating code for different targets.
161 const X86Subtarget *Subtarget;
162
163 /// If true, selector should try to optimize for code size instead of
164 /// performance.
165 bool OptForSize;
166
167 /// If true, selector should try to optimize for minimum code size.
168 bool OptForMinSize;
169
170 /// Disable direct TLS access through segment registers.
171 bool IndirectTlsSegRefs;
172
173 public:
174 explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
175 : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr), OptForSize(false),
176 OptForMinSize(false), IndirectTlsSegRefs(false) {}
177
178 StringRef getPassName() const override {
179 return "X86 DAG->DAG Instruction Selection";
180 }
181
182 bool runOnMachineFunction(MachineFunction &MF) override {
183 // Reset the subtarget each time through.
184 Subtarget = &MF.getSubtarget<X86Subtarget>();
185 IndirectTlsSegRefs = MF.getFunction().hasFnAttribute(
186 "indirect-tls-seg-refs");
187
188 // OptFor[Min]Size are used in pattern predicates that isel is matching.
189 OptForSize = MF.getFunction().hasOptSize();
190 OptForMinSize = MF.getFunction().hasMinSize();
191 assert((!OptForMinSize || OptForSize) &&
192 "OptForMinSize implies OptForSize");
193
194 SelectionDAGISel::runOnMachineFunction(MF);
195 return true;
196 }
197
198 void EmitFunctionEntryCode() override;
199
200 bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;
201
202 void PreprocessISelDAG() override;
203 void PostprocessISelDAG() override;
204
205// Include the pieces autogenerated from the target description.
206#include "X86GenDAGISel.inc"
207
208 private:
209 void Select(SDNode *N) override;
210
211 bool foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
212 bool matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
213 bool matchWrapper(SDValue N, X86ISelAddressMode &AM);
214 bool matchAddress(SDValue N, X86ISelAddressMode &AM);
215 bool matchVectorAddress(SDValue N, X86ISelAddressMode &AM);
216 bool matchAdd(SDValue &N, X86ISelAddressMode &AM, unsigned Depth);
217 bool matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
218 unsigned Depth);
219 bool matchAddressBase(SDValue N, X86ISelAddressMode &AM);
220 bool selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
221 SDValue &Scale, SDValue &Index, SDValue &Disp,
222 SDValue &Segment);
223 bool selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
224 SDValue &Scale, SDValue &Index, SDValue &Disp,
225 SDValue &Segment);
226 bool selectMOV64Imm32(SDValue N, SDValue &Imm);
227 bool selectLEAAddr(SDValue N, SDValue &Base,
228 SDValue &Scale, SDValue &Index, SDValue &Disp,
229 SDValue &Segment);
230 bool selectLEA64_32Addr(SDValue N, SDValue &Base,
231 SDValue &Scale, SDValue &Index, SDValue &Disp,
232 SDValue &Segment);
233 bool selectTLSADDRAddr(SDValue N, SDValue &Base,
234 SDValue &Scale, SDValue &Index, SDValue &Disp,
235 SDValue &Segment);
236 bool selectScalarSSELoad(SDNode *Root, SDNode *Parent, SDValue N,
237 SDValue &Base, SDValue &Scale,
238 SDValue &Index, SDValue &Disp,
239 SDValue &Segment,
240 SDValue &NodeWithChain);
241 bool selectRelocImm(SDValue N, SDValue &Op);
242
243 bool tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
244 SDValue &Base, SDValue &Scale,
245 SDValue &Index, SDValue &Disp,
246 SDValue &Segment);
247
248 // Convenience method where P is also root.
249 bool tryFoldLoad(SDNode *P, SDValue N,
250 SDValue &Base, SDValue &Scale,
251 SDValue &Index, SDValue &Disp,
252 SDValue &Segment) {
253 return tryFoldLoad(P, P, N, Base, Scale, Index, Disp, Segment);
254 }
255
256 /// Implement addressing mode selection for inline asm expressions.
257 bool SelectInlineAsmMemoryOperand(const SDValue &Op,
258 unsigned ConstraintID,
259 std::vector<SDValue> &OutOps) override;
260
261 void emitSpecialCodeForMain();
262
263 inline void getAddressOperands(X86ISelAddressMode &AM, const SDLoc &DL,
264 MVT VT, SDValue &Base, SDValue &Scale,
265 SDValue &Index, SDValue &Disp,
266 SDValue &Segment) {
267 if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
268 Base = CurDAG->getTargetFrameIndex(
269 AM.Base_FrameIndex, TLI->getPointerTy(CurDAG->getDataLayout()));
270 else if (AM.Base_Reg.getNode())
271 Base = AM.Base_Reg;
272 else
273 Base = CurDAG->getRegister(0, VT);
274
275 Scale = getI8Imm(AM.Scale, DL);
276
277 // Negate the index if needed.
278 if (AM.NegateIndex) {
279 unsigned NegOpc = VT == MVT::i64 ? X86::NEG64r : X86::NEG32r;
280 SDValue Neg = SDValue(CurDAG->getMachineNode(NegOpc, DL, VT, MVT::i32,
281 AM.IndexReg), 0);
282 AM.IndexReg = Neg;
283 }
284
285 if (AM.IndexReg.getNode())
286 Index = AM.IndexReg;
287 else
288 Index = CurDAG->getRegister(0, VT);
289
290 // These are 32-bit even in 64-bit mode since RIP-relative offset
291 // is 32-bit.
292 if (AM.GV)
293 Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
294 MVT::i32, AM.Disp,
295 AM.SymbolFlags);
296 else if (AM.CP)
297 Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
298 AM.Align, AM.Disp, AM.SymbolFlags);
299 else if (AM.ES) {
300 assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
301 Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
302 } else if (AM.MCSym) {
303 assert(!AM.Disp && "Non-zero displacement is ignored with MCSym.");
304 assert(AM.SymbolFlags == 0 && "oo");
305 Disp = CurDAG->getMCSymbol(AM.MCSym, MVT::i32);
306 } else if (AM.JT != -1) {
307 assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
308 Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
309 } else if (AM.BlockAddr)
310 Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
311 AM.SymbolFlags);
312 else
313 Disp = CurDAG->getTargetConstant(AM.Disp, DL, MVT::i32);
314
315 if (AM.Segment.getNode())
316 Segment = AM.Segment;
317 else
318 Segment = CurDAG->getRegister(0, MVT::i16);
319 }
320
321 // Utility function to determine whether we should avoid selecting
322 // immediate forms of instructions for better code size.
323 // At a high level, we'd like to avoid such instructions when
324 // we have similar constants used within the same basic block
325 // that can be kept in a register.
326 //
327 bool shouldAvoidImmediateInstFormsForSize(SDNode *N) const {
328 uint32_t UseCount = 0;
329
330 // Do not want to hoist if we're not optimizing for size.
331 // TODO: We'd like to remove this restriction.
332 // See the comment in X86InstrInfo.td for more info.
333 if (!OptForSize)
334 return false;
335
336 // Walk all the users of the immediate.
337 for (SDNode::use_iterator UI = N->use_begin(),
338 UE = N->use_end(); (UI != UE) && (UseCount < 2); ++UI) {
339
340 SDNode *User = *UI;
341
342 // This user is already selected. Count it as a legitimate use and
343 // move on.
344 if (User->isMachineOpcode()) {
345 UseCount++;
346 continue;
347 }
348
349 // We want to count stores of immediates as real uses.
350 if (User->getOpcode() == ISD::STORE &&
351 User->getOperand(1).getNode() == N) {
352 UseCount++;
353 continue;
354 }
355
356 // We don't currently match users that have > 2 operands (except
357 // for stores, which are handled above).
358 // Those instructions won't match in ISEL, for now, and would
359 // be counted incorrectly.
360 // This may change in the future as we add additional instruction
361 // types.
362 if (User->getNumOperands() != 2)
363 continue;
364
365 // If this can match to INC/DEC, don't count it as a use.
366 if (User->getOpcode() == ISD::ADD &&
367 (isOneConstant(SDValue(N, 0)) || isAllOnesConstant(SDValue(N, 0))))
368 continue;
369
370 // Immediates that are used for offsets as part of stack
371 // manipulation should be left alone. These are typically
372 // used to indicate SP offsets for argument passing and
373 // will get pulled into stores/pushes (implicitly).
374 if (User->getOpcode() == X86ISD::ADD ||
375 User->getOpcode() == ISD::ADD ||
376 User->getOpcode() == X86ISD::SUB ||
377 User->getOpcode() == ISD::SUB) {
378
379 // Find the other operand of the add/sub.
380 SDValue OtherOp = User->getOperand(0);
381 if (OtherOp.getNode() == N)
382 OtherOp = User->getOperand(1);
383
384 // Don't count if the other operand is SP.
385 RegisterSDNode *RegNode;
386 if (OtherOp->getOpcode() == ISD::CopyFromReg &&
387 (RegNode = dyn_cast_or_null<RegisterSDNode>(
388 OtherOp->getOperand(1).getNode())))
389 if ((RegNode->getReg() == X86::ESP) ||
390 (RegNode->getReg() == X86::RSP))
391 continue;
392 }
393
394 // ... otherwise, count this and move on.
395 UseCount++;
396 }
397
398 // If we have more than 1 use, then recommend for hoisting.
399 return (UseCount > 1);
400 }
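
As a hedged illustration of the heuristic above (hypothetical code): under -Os, an immediate stored more than once in a block is usually smaller when materialized once in a register, e.g.

    movl $305419896, 16(%esp)            movl $305419896, %eax
    movl $305419896, 20(%esp)    vs.     movl %eax, 16(%esp)
                                         movl %eax, 20(%esp)

The register form of each store drops the 4-byte imm32 field, paying once for the initial mov.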
401
402 /// Return a target constant with the specified value of type i8.
403 inline SDValue getI8Imm(unsigned Imm, const SDLoc &DL) {
404 return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
405 }
406
407 /// Return a target constant with the specified value, of type i32.
408 inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
409 return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
410 }
411
412 /// Return a target constant with the specified value, of type i64.
413 inline SDValue getI64Imm(uint64_t Imm, const SDLoc &DL) {
414 return CurDAG->getTargetConstant(Imm, DL, MVT::i64);
415 }
416
417 SDValue getExtractVEXTRACTImmediate(SDNode *N, unsigned VecWidth,
418 const SDLoc &DL) {
419 assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
420 uint64_t Index = N->getConstantOperandVal(1);
421 MVT VecVT = N->getOperand(0).getSimpleValueType();
422 return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
423 }
424
425 SDValue getInsertVINSERTImmediate(SDNode *N, unsigned VecWidth,
426 const SDLoc &DL) {
427 assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
428 uint64_t Index = N->getConstantOperandVal(2);
429 MVT VecVT = N->getSimpleValueType(0);
430 return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
431 }
432
433 // Helper to detect unneeded and instructions on shift amounts. Called
434 // from PatFrags in tablegen.
435 bool isUnneededShiftMask(SDNode *N, unsigned Width) const {
436 assert(N->getOpcode() == ISD::AND && "Unexpected opcode");
437 const APInt &Val = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
438
439 if (Val.countTrailingOnes() >= Width)
440 return true;
441
442 APInt Mask = Val | CurDAG->computeKnownBits(N->getOperand(0)).Zero;
443 return Mask.countTrailingOnes() >= Width;
444 }
445
446 /// Return an SDNode that returns the value of the global base register.
447 /// Output instructions required to initialize the global base register,
448 /// if necessary.
449 SDNode *getGlobalBaseReg();
450
451 /// Return a reference to the TargetMachine, casted to the target-specific
452 /// type.
453 const X86TargetMachine &getTargetMachine() const {
454 return static_cast<const X86TargetMachine &>(TM);
455 }
456
457 /// Return a reference to the TargetInstrInfo, casted to the target-specific
458 /// type.
459 const X86InstrInfo *getInstrInfo() const {
460 return Subtarget->getInstrInfo();
461 }
462
463 /// Address-mode matching performs shift-of-and to and-of-shift
464 /// reassociation in order to expose more scaled addressing
465 /// opportunities.
466 bool ComplexPatternFuncMutatesDAG() const override {
467 return true;
468 }
469
470 bool isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const;
471
472 /// Returns whether this is a relocatable immediate in the range
473 /// [-2^Width .. 2^Width-1].
474 template <unsigned Width> bool isSExtRelocImm(SDNode *N) const {
475 if (auto *CN = dyn_cast<ConstantSDNode>(N))
476 return isInt<Width>(CN->getSExtValue());
477 return isSExtAbsoluteSymbolRef(Width, N);
478 }
479
480 // Indicates we should prefer to use a non-temporal load for this load.
481 bool useNonTemporalLoad(LoadSDNode *N) const {
482 if (!N->isNonTemporal())
[54] Assuming the condition is true
[55] Taking true branch
483 return false;
[56] Returning zero, which participates in a condition later
484
485 unsigned StoreSize = N->getMemoryVT().getStoreSize();
486
487 if (N->getAlignment() < StoreSize)
488 return false;
489
490 switch (StoreSize) {
491 default: llvm_unreachable("Unsupported store size");
492 case 4:
493 case 8:
494 return false;
495 case 16:
496 return Subtarget->hasSSE41();
497 case 32:
498 return Subtarget->hasAVX2();
499 case 64:
500 return Subtarget->hasAVX512();
501 }
502 }
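
A minimal sketch of the same gating outside the LLVM API (the instructions named in the comments are the non-temporal loads each feature bit provides):

    // True when a non-temporal vector load exists for this load size,
    // mirroring the switch above.
    static bool hasNonTemporalLoadInst(unsigned Bytes, bool SSE41, bool AVX2,
                                       bool AVX512) {
      switch (Bytes) {
      case 16: return SSE41;  // MOVNTDQA (SSE4.1)
      case 32: return AVX2;   // VMOVNTDQA ymm (AVX2)
      case 64: return AVX512; // VMOVNTDQA zmm (AVX-512)
      default: return false;  // 4/8-byte sizes keep using ordinary loads
      }
    }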
503
504 bool foldLoadStoreIntoMemOperand(SDNode *Node);
505 MachineSDNode *matchBEXTRFromAndImm(SDNode *Node);
506 bool matchBitExtract(SDNode *Node);
507 bool shrinkAndImmediate(SDNode *N);
508 bool isMaskZeroExtended(SDNode *N) const;
509 bool tryShiftAmountMod(SDNode *N);
510 bool combineIncDecVector(SDNode *Node);
511 bool tryShrinkShlLogicImm(SDNode *N);
512 bool tryVPTESTM(SDNode *Root, SDValue Setcc, SDValue Mask);
513
514 MachineSDNode *emitPCMPISTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
515 const SDLoc &dl, MVT VT, SDNode *Node);
516 MachineSDNode *emitPCMPESTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
517 const SDLoc &dl, MVT VT, SDNode *Node,
518 SDValue &InFlag);
519
520 bool tryOptimizeRem8Extend(SDNode *N);
521
522 bool onlyUsesZeroFlag(SDValue Flags) const;
523 bool hasNoSignFlagUses(SDValue Flags) const;
524 bool hasNoCarryFlagUses(SDValue Flags) const;
525 };
526}
527
528
529// Returns true if this masked compare can be implemented legally with this
530// type.
531static bool isLegalMaskCompare(SDNode *N, const X86Subtarget *Subtarget) {
532 unsigned Opcode = N->getOpcode();
533 if (Opcode == X86ISD::CMPM || Opcode == ISD::SETCC ||
534 Opcode == X86ISD::CMPM_SAE || Opcode == X86ISD::VFPCLASS) {
535 // We can get 256-bit 8 element types here without VLX being enabled. When
536 // this happens we will use 512-bit operations and the mask will not be
537 // zero extended.
538 EVT OpVT = N->getOperand(0).getValueType();
539 if (OpVT.is256BitVector() || OpVT.is128BitVector())
540 return Subtarget->hasVLX();
541
542 return true;
543 }
544 // Scalar opcodes use 128 bit registers, but aren't subject to the VLX check.
545 if (Opcode == X86ISD::VFPCLASSS || Opcode == X86ISD::FSETCCM ||
546 Opcode == X86ISD::FSETCCM_SAE)
547 return true;
548
549 return false;
550}
551
552// Returns true if we can assume the writer of the mask has zero extended it
553// for us.
554bool X86DAGToDAGISel::isMaskZeroExtended(SDNode *N) const {
555 // If this is an AND, check if we have a compare on either side. As long as
556 // one side guarantees the mask is zero extended, the AND will preserve those
557 // zeros.
558 if (N->getOpcode() == ISD::AND)
559 return isLegalMaskCompare(N->getOperand(0).getNode(), Subtarget) ||
560 isLegalMaskCompare(N->getOperand(1).getNode(), Subtarget);
561
562 return isLegalMaskCompare(N, Subtarget);
563}
564
565bool
566X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
567 if (OptLevel == CodeGenOpt::None) return false;
[38] Assuming field 'OptLevel' is not equal to None
[39] Taking false branch
568
569 if (!N.hasOneUse())
[40] Assuming the condition is false
[41] Taking false branch
570 return false;
571
572 if (N.getOpcode() != ISD::LOAD)
[42] Assuming the condition is false
[43] Taking false branch
573 return true;
574
575 // Don't fold non-temporal loads if we have an instruction for them.
576 if (useNonTemporalLoad(cast<LoadSDNode>(N)))
[44] Calling 'cast<llvm::LoadSDNode, llvm::SDValue>'
[52] Returning from 'cast<llvm::LoadSDNode, llvm::SDValue>'
[53] Calling 'X86DAGToDAGISel::useNonTemporalLoad'
[57] Returning from 'X86DAGToDAGISel::useNonTemporalLoad'
[58] Taking false branch
577 return false;
578
579 // If N is a load, do additional profitability checks.
580 if (U == Root) {
[59] Assuming 'U' is equal to 'Root'
[60] Taking true branch
581 switch (U->getOpcode()) {
[61] Called C++ object pointer is null
582 default: break;
583 case X86ISD::ADD:
584 case X86ISD::ADC:
585 case X86ISD::SUB:
586 case X86ISD::SBB:
587 case X86ISD::AND:
588 case X86ISD::XOR:
589 case X86ISD::OR:
590 case ISD::ADD:
591 case ISD::ADDCARRY:
592 case ISD::AND:
593 case ISD::OR:
594 case ISD::XOR: {
595 SDValue Op1 = U->getOperand(1);
596
597 // If the other operand is a 8-bit immediate we should fold the immediate
598 // instead. This reduces code size.
599 // e.g.
600 // movl 4(%esp), %eax
601 // addl $4, %eax
602 // vs.
603 // movl $4, %eax
604 // addl 4(%esp), %eax
605 // The former is 2 bytes shorter. In the case where the increment is 1,
606 // the saving can be 4 bytes (by using incl %eax).
607 if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1)) {
608 if (Imm->getAPIntValue().isSignedIntN(8))
609 return false;
610
611 // If this is a 64-bit AND with an immediate that fits in 32-bits,
612 // prefer using the smaller and over folding the load. This is needed to
613 // make sure immediates created by shrinkAndImmediate are always folded.
614 // Ideally we would narrow the load during DAG combine and get the
615 // best of both worlds.
616 if (U->getOpcode() == ISD::AND &&
617 Imm->getAPIntValue().getBitWidth() == 64 &&
618 Imm->getAPIntValue().isIntN(32))
619 return false;
620
621 // If this is really a zext_inreg that can be represented with a movzx
622 // instruction, prefer that.
623 // TODO: We could shrink the load and fold if it is non-volatile.
624 if (U->getOpcode() == ISD::AND &&
625 (Imm->getAPIntValue() == UINT8_MAX ||
626 Imm->getAPIntValue() == UINT16_MAX ||
627 Imm->getAPIntValue() == UINT32_MAX))
628 return false;
629
630 // ADD/SUB can negate the immediate and use the opposite operation
631 // to fit 128 into a sign extended 8 bit immediate.
632 if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB) &&
633 (-Imm->getAPIntValue()).isSignedIntN(8))
634 return false;
635 }
636
637 // If the other operand is a TLS address, we should fold it instead.
638 // This produces
639 // movl %gs:0, %eax
640 // leal i@NTPOFF(%eax), %eax
641 // instead of
642 // movl $i@NTPOFF, %eax
643 // addl %gs:0, %eax
644 // if the block also has an access to a second TLS address this will save
645 // a load.
646 // FIXME: This is probably also true for non-TLS addresses.
647 if (Op1.getOpcode() == X86ISD::Wrapper) {
648 SDValue Val = Op1.getOperand(0);
649 if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
650 return false;
651 }
652
653 // Don't fold load if this matches the BTS/BTR/BTC patterns.
654 // BTS: (or X, (shl 1, n))
655 // BTR: (and X, (rotl -2, n))
656 // BTC: (xor X, (shl 1, n))
657 if (U->getOpcode() == ISD::OR || U->getOpcode() == ISD::XOR) {
658 if (U->getOperand(0).getOpcode() == ISD::SHL &&
659 isOneConstant(U->getOperand(0).getOperand(0)))
660 return false;
661
662 if (U->getOperand(1).getOpcode() == ISD::SHL &&
663 isOneConstant(U->getOperand(1).getOperand(0)))
664 return false;
665 }
666 if (U->getOpcode() == ISD::AND) {
667 SDValue U0 = U->getOperand(0);
668 SDValue U1 = U->getOperand(1);
669 if (U0.getOpcode() == ISD::ROTL) {
670 auto *C = dyn_cast<ConstantSDNode>(U0.getOperand(0));
671 if (C && C->getSExtValue() == -2)
672 return false;
673 }
674
675 if (U1.getOpcode() == ISD::ROTL) {
676 auto *C = dyn_cast<ConstantSDNode>(U1.getOperand(0));
677 if (C && C->getSExtValue() == -2)
678 return false;
679 }
680 }
681
682 break;
683 }
684 case ISD::SHL:
685 case ISD::SRA:
686 case ISD::SRL:
687 // Don't fold a load into a shift by immediate. The BMI2 instructions
688 // support folding a load, but not an immediate. The legacy instructions
689 // support folding an immediate, but can't fold a load. Folding an
690 // immediate is preferable to folding a load.
691 if (isa<ConstantSDNode>(U->getOperand(1)))
692 return false;
693
694 break;
695 }
696 }
697
698 // Prevent folding a load if this can be implemented with an insert_subreg or
699 // a move that implicitly zeroes.
700 if (Root->getOpcode() == ISD::INSERT_SUBVECTOR &&
701 isNullConstant(Root->getOperand(2)) &&
702 (Root->getOperand(0).isUndef() ||
703 ISD::isBuildVectorAllZeros(Root->getOperand(0).getNode())))
704 return false;
705
706 return true;
707}
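
Editorial note on the diagnostic: the path only dereferences null if IsProfitableToFold can actually be entered with U == nullptr (and hence Root == nullptr, since step [59] assumes 'U' is equal to 'Root'). Whether any caller does so is an assumption the analyzer makes, not something this report establishes. A defensive guard of the following shape would discharge the warning:

    if (!U) // hypothetical guard; callers may already guarantee a non-null user
      return false;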
708
709/// Replace the original chain operand of the call with
710/// load's chain operand and move load below the call's chain operand.
711static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
712 SDValue Call, SDValue OrigChain) {
713 SmallVector<SDValue, 8> Ops;
714 SDValue Chain = OrigChain.getOperand(0);
715 if (Chain.getNode() == Load.getNode())
716 Ops.push_back(Load.getOperand(0));
717 else {
718 assert(Chain.getOpcode() == ISD::TokenFactor &&
719 "Unexpected chain operand");
720 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
721 if (Chain.getOperand(i).getNode() == Load.getNode())
722 Ops.push_back(Load.getOperand(0));
723 else
724 Ops.push_back(Chain.getOperand(i));
725 SDValue NewChain =
726 CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
727 Ops.clear();
728 Ops.push_back(NewChain);
729 }
730 Ops.append(OrigChain->op_begin() + 1, OrigChain->op_end());
731 CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
732 CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
733 Load.getOperand(1), Load.getOperand(2));
734
735 Ops.clear();
736 Ops.push_back(SDValue(Load.getNode(), 1));
737 Ops.append(Call->op_begin() + 1, Call->op_end());
738 CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
739}
740
741/// Return true if call address is a load and it can be
742/// moved below CALLSEQ_START and the chains leading up to the call.
743/// Return the CALLSEQ_START by reference as a second output.
744/// In the case of a tail call, there isn't a callseq node between the call
745/// chain and the load.
746static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
747 // The transformation is somewhat dangerous if the call's chain was glued to
748 // the call. After MoveBelowOrigChain the load is moved between the call and
749 // the chain, this can create a cycle if the load is not folded. So it is
750 // *really* important that we are sure the load will be folded.
751 if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
752 return false;
753 LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
754 if (!LD ||
755 !LD->isSimple() ||
756 LD->getAddressingMode() != ISD::UNINDEXED ||
757 LD->getExtensionType() != ISD::NON_EXTLOAD)
758 return false;
759
760 // Now let's find the callseq_start.
761 while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
762 if (!Chain.hasOneUse())
763 return false;
764 Chain = Chain.getOperand(0);
765 }
766
767 if (!Chain.getNumOperands())
768 return false;
769 // Since we are not checking for AA here, conservatively abort if the chain
770 // writes to memory. It's not safe to move the callee (a load) across a store.
771 if (isa<MemSDNode>(Chain.getNode()) &&
772 cast<MemSDNode>(Chain.getNode())->writeMem())
773 return false;
774 if (Chain.getOperand(0).getNode() == Callee.getNode())
775 return true;
776 if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
777 Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
778 Callee.getValue(1).hasOneUse())
779 return true;
780 return false;
781}
782
783void X86DAGToDAGISel::PreprocessISelDAG() {
784 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
785 E = CurDAG->allnodes_end(); I != E; ) {
786 SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.
787
788 // If this is a target specific AND node with no flag usages, turn it back
789 // into ISD::AND to enable test instruction matching.
790 if (N->getOpcode() == X86ISD::AND && !N->hasAnyUseOfValue(1)) {
791 SDValue Res = CurDAG->getNode(ISD::AND, SDLoc(N), N->getValueType(0),
792 N->getOperand(0), N->getOperand(1));
793 --I;
794 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
795 ++I;
796 CurDAG->DeleteNode(N);
797 continue;
798 }
799
800 switch (N->getOpcode()) {
801 case ISD::FP_TO_SINT:
802 case ISD::FP_TO_UINT: {
803 // Replace vector fp_to_s/uint with their X86 specific equivalent so we
804 // don't need 2 sets of patterns.
805 if (!N->getSimpleValueType(0).isVector())
806 break;
807
808 unsigned NewOpc;
809 switch (N->getOpcode()) {
810 default: llvm_unreachable("Unexpected opcode!");
811 case ISD::FP_TO_SINT: NewOpc = X86ISD::CVTTP2SI; break;
812 case ISD::FP_TO_UINT: NewOpc = X86ISD::CVTTP2UI; break;
813 }
814 SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
815 N->getOperand(0));
816 --I;
817 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
818 ++I;
819 CurDAG->DeleteNode(N);
820 continue;
821 }
822 case ISD::SHL:
823 case ISD::SRA:
824 case ISD::SRL: {
825 // Replace vector shifts with their X86 specific equivalent so we don't
826 // need 2 sets of patterns.
827 if (!N->getValueType(0).isVector())
828 break;
829
830 unsigned NewOpc;
831 switch (N->getOpcode()) {
832 default: llvm_unreachable("Unexpected opcode!");
833 case ISD::SHL: NewOpc = X86ISD::VSHLV; break;
834 case ISD::SRA: NewOpc = X86ISD::VSRAV; break;
835 case ISD::SRL: NewOpc = X86ISD::VSRLV; break;
836 }
837 SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
838 N->getOperand(0), N->getOperand(1));
839 --I;
840 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
841 ++I;
842 CurDAG->DeleteNode(N);
843 continue;
844 }
845 case ISD::ANY_EXTEND:
846 case ISD::ANY_EXTEND_VECTOR_INREG: {
847 // Replace vector any extend with the zero extend equivalents so we don't
848 // need 2 sets of patterns. Ignore vXi1 extensions.
849 if (!N->getValueType(0).isVector() ||
850 N->getOperand(0).getScalarValueSizeInBits() == 1)
851 break;
852
853 unsigned NewOpc = N->getOpcode() == ISD::ANY_EXTEND
854 ? ISD::ZERO_EXTEND
855 : ISD::ZERO_EXTEND_VECTOR_INREG;
856
857 SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
858 N->getOperand(0));
859 --I;
860 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
861 ++I;
862 CurDAG->DeleteNode(N);
863 continue;
864 }
865 case ISD::FCEIL:
866 case ISD::FFLOOR:
867 case ISD::FTRUNC:
868 case ISD::FNEARBYINT:
869 case ISD::FRINT: {
870 // Replace fp rounding with their X86 specific equivalent so we don't
871 // need 2 sets of patterns.
872 unsigned Imm;
873 switch (N->getOpcode()) {
874 default: llvm_unreachable("Unexpected opcode!");
875 case ISD::FCEIL: Imm = 0xA; break;
876 case ISD::FFLOOR: Imm = 0x9; break;
877 case ISD::FTRUNC: Imm = 0xB; break;
878 case ISD::FNEARBYINT: Imm = 0xC; break;
879 case ISD::FRINT: Imm = 0x4; break;
880 }
881 SDLoc dl(N);
882 SDValue Res = CurDAG->getNode(X86ISD::VRNDSCALE, dl,
883 N->getValueType(0),
884 N->getOperand(0),
885 CurDAG->getConstant(Imm, dl, MVT::i8));
886 --I;
887 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
888 ++I;
889 CurDAG->DeleteNode(N);
890 continue;
891 }
892 case X86ISD::FANDN:
893 case X86ISD::FAND:
894 case X86ISD::FOR:
895 case X86ISD::FXOR: {
896 // Widen scalar fp logic ops to vector to reduce isel patterns.
897 // FIXME: Can we do this during lowering/combine.
898 MVT VT = N->getSimpleValueType(0);
899 if (VT.isVector() || VT == MVT::f128)
900 break;
901
902 MVT VecVT = VT == MVT::f64 ? MVT::v2f64 : MVT::v4f32;
903 SDLoc dl(N);
904 SDValue Op0 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
905 N->getOperand(0));
906 SDValue Op1 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
907 N->getOperand(1));
908
909 SDValue Res;
910 if (Subtarget->hasSSE2()) {
911 EVT IntVT = EVT(VecVT).changeVectorElementTypeToInteger();
912 Op0 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op0);
913 Op1 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op1);
914 unsigned Opc;
915 switch (N->getOpcode()) {
916 default: llvm_unreachable("Unexpected opcode!");
917 case X86ISD::FANDN: Opc = X86ISD::ANDNP; break;
918 case X86ISD::FAND: Opc = ISD::AND; break;
919 case X86ISD::FOR: Opc = ISD::OR; break;
920 case X86ISD::FXOR: Opc = ISD::XOR; break;
921 }
922 Res = CurDAG->getNode(Opc, dl, IntVT, Op0, Op1);
923 Res = CurDAG->getNode(ISD::BITCAST, dl, VecVT, Res);
924 } else {
925 Res = CurDAG->getNode(N->getOpcode(), dl, VecVT, Op0, Op1);
926 }
927 Res = CurDAG->getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res,
928 CurDAG->getIntPtrConstant(0, dl));
929 --I;
930 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
931 ++I;
932 CurDAG->DeleteNode(N);
933 continue;
934 }
935 }
936
937 if (OptLevel != CodeGenOpt::None &&
938 // Only do this when the target can fold the load into the call or
939 // jmp.
940 !Subtarget->useRetpolineIndirectCalls() &&
941 ((N->getOpcode() == X86ISD::CALL && !Subtarget->slowTwoMemOps()) ||
942 (N->getOpcode() == X86ISD::TC_RETURN &&
943 (Subtarget->is64Bit() ||
944 !getTargetMachine().isPositionIndependent())))) {
945 /// Also try moving call address load from outside callseq_start to just
946 /// before the call to allow it to be folded.
947 ///
948 /// [Load chain]
949 /// ^
950 /// |
951 /// [Load]
952 /// ^ ^
953 /// | |
954 /// / \--
955 /// / |
956 ///[CALLSEQ_START] |
957 /// ^ |
958 /// | |
959 /// [LOAD/C2Reg] |
960 /// | |
961 /// \ /
962 /// \ /
963 /// [CALL]
964 bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
965 SDValue Chain = N->getOperand(0);
966 SDValue Load = N->getOperand(1);
967 if (!isCalleeLoad(Load, Chain, HasCallSeq))
968 continue;
969 moveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
970 ++NumLoadMoved;
971 continue;
972 }
973
974 // Lower fpround and fpextend nodes that target the FP stack to be store and
975 // load to the stack. This is a gross hack. We would like to simply mark
976 // these as being illegal, but when we do that, legalize produces these when
977 // it expands calls, then expands these in the same legalize pass. We would
978 // like dag combine to be able to hack on these between the call expansion
979 // and the node legalization. As such this pass basically does "really
980 // late" legalization of these inline with the X86 isel pass.
981 // FIXME: This should only happen when not compiled with -O0.
982 switch (N->getOpcode()) {
983 default: continue;
984 case ISD::FP_ROUND:
985 case ISD::FP_EXTEND:
986 {
987 MVT SrcVT = N->getOperand(0).getSimpleValueType();
988 MVT DstVT = N->getSimpleValueType(0);
989
990 // If any of the sources are vectors, no fp stack involved.
991 if (SrcVT.isVector() || DstVT.isVector())
992 continue;
993
994 // If the source and destination are SSE registers, then this is a legal
995 // conversion that should not be lowered.
996 const X86TargetLowering *X86Lowering =
997 static_cast<const X86TargetLowering *>(TLI);
998 bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
999 bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
1000 if (SrcIsSSE && DstIsSSE)
1001 continue;
1002
1003 if (!SrcIsSSE && !DstIsSSE) {
1004 // If this is an FPStack extension, it is a noop.
1005 if (N->getOpcode() == ISD::FP_EXTEND)
1006 continue;
1007 // If this is a value-preserving FPStack truncation, it is a noop.
1008 if (N->getConstantOperandVal(1))
1009 continue;
1010 }
1011
1012 // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
1013 // FPStack has extload and truncstore. SSE can fold direct loads into other
1014 // operations. Based on this, decide what we want to do.
1015 MVT MemVT;
1016 if (N->getOpcode() == ISD::FP_ROUND)
1017 MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
1018 else
1019 MemVT = SrcIsSSE ? SrcVT : DstVT;
1020
1021 SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
1022 SDLoc dl(N);
1023
1024 // FIXME: optimize the case where the src/dest is a load or store?
1025
1026 SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl, N->getOperand(0),
1027 MemTmp, MachinePointerInfo(), MemVT);
1028 SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
1029 MachinePointerInfo(), MemVT);
1030
1031 // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
1032 // extload we created. This will cause general havoc on the dag because
1033 // anything below the conversion could be folded into other existing nodes.
1034 // To avoid invalidating 'I', back it up to the convert node.
1035 --I;
1036 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
1037 break;
1038 }
1039
1040 // The sequence of events for lowering STRICT_FP versions of these nodes requires
1041 // dealing with the chain differently, as there is already a preexisting chain.
1042 case ISD::STRICT_FP_ROUND:
1043 case ISD::STRICT_FP_EXTEND:
1044 {
1045 MVT SrcVT = N->getOperand(1).getSimpleValueType();
1046 MVT DstVT = N->getSimpleValueType(0);
1047
1048 // If any of the sources are vectors, no fp stack involved.
1049 if (SrcVT.isVector() || DstVT.isVector())
1050 continue;
1051
1052 // If the source and destination are SSE registers, then this is a legal
1053 // conversion that should not be lowered.
1054 const X86TargetLowering *X86Lowering =
1055 static_cast<const X86TargetLowering *>(TLI);
1056 bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
1057 bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
1058 if (SrcIsSSE && DstIsSSE)
1059 continue;
1060
1061 if (!SrcIsSSE && !DstIsSSE) {
1062 // If this is an FPStack extension, it is a noop.
1063 if (N->getOpcode() == ISD::STRICT_FP_EXTEND)
1064 continue;
1065 // If this is a value-preserving FPStack truncation, it is a noop.
1066 if (N->getConstantOperandVal(2))
1067 continue;
1068 }
1069
1070 // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
1071 // FPStack has extload and truncstore. SSE can fold direct loads into other
1072 // operations. Based on this, decide what we want to do.
1073 MVT MemVT;
1074 if (N->getOpcode() == ISD::STRICT_FP_ROUND)
1075 MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
1076 else
1077 MemVT = SrcIsSSE ? SrcVT : DstVT;
1078
1079 SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
1080 SDLoc dl(N);
1081
1082 // FIXME: optimize the case where the src/dest is a load or store?
1083
1084 // Since the operation is StrictFP, use the preexisting chain.
1085 SDValue Store = CurDAG->getTruncStore(N->getOperand(0), dl, N->getOperand(1),
1086 MemTmp, MachinePointerInfo(), MemVT);
1087 SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
1088 MachinePointerInfo(), MemVT);
1089
1090 // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
1092 // extload we created. This will cause general havoc on the dag because
1092 // anything below the conversion could be folded into other existing nodes.
1093 // To avoid invalidating 'I', back it up to the convert node.
1094 --I;
1095 CurDAG->ReplaceAllUsesWith(N, Result.getNode());
1096 break;
1097 }
1098 }
1099
1100
1101 // Now that we did that, the node is dead. Increment the iterator to the
1102 // next node to process, then delete N.
1103 ++I;
1104 CurDAG->DeleteNode(N);
1105 }
1106
1107 // The load+call transform above can leave some dead nodes in the graph. Make
1108 // sure we remove them. It's possible some of the other transforms do too, so
1109 // just remove dead nodes unconditionally.
1110 CurDAG->RemoveDeadNodes();
1111}
1112
1113// Look for a redundant movzx/movsx that can occur after an 8-bit divrem.
1114bool X86DAGToDAGISel::tryOptimizeRem8Extend(SDNode *N) {
1115 unsigned Opc = N->getMachineOpcode();
1116 if (Opc != X86::MOVZX32rr8 && Opc != X86::MOVSX32rr8 &&
1117 Opc != X86::MOVSX64rr8)
1118 return false;
1119
1120 SDValue N0 = N->getOperand(0);
1121
1122 // We need to be extracting the lower bit of an extend.
1123 if (!N0.isMachineOpcode() ||
1124 N0.getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG ||
1125 N0.getConstantOperandVal(1) != X86::sub_8bit)
1126 return false;
1127
1128 // We're looking for either a movsx or movzx to match the original opcode.
1129 unsigned ExpectedOpc = Opc == X86::MOVZX32rr8 ? X86::MOVZX32rr8_NOREX
1130 : X86::MOVSX32rr8_NOREX;
1131 SDValue N00 = N0.getOperand(0);
1132 if (!N00.isMachineOpcode() || N00.getMachineOpcode() != ExpectedOpc)
1133 return false;
1134
1135 if (Opc == X86::MOVSX64rr8) {
1136 // If we had a sign extend from 8 to 64 bits. We still need to go from 32
1137 // to 64.
1138 MachineSDNode *Extend = CurDAG->getMachineNode(X86::MOVSX64rr32, SDLoc(N),
1139 MVT::i64, N00);
1140 ReplaceUses(N, Extend);
1141 } else {
1142 // Ok we can drop this extend and just use the original extend.
1143 ReplaceUses(N, N00.getNode());
1144 }
1145
1146 return true;
1147}
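
Illustrative before/after for the peephole above (hypothetical registers): the second extend re-extends the low 8 bits of a value the first extend already zero-extended, so it can be dropped:

    movzbl %ah, %ecx   ; MOVZX32rr8_NOREX, copies the 8-bit remainder out of AH
    movzbl %cl, %edx   ; redundant MOVZX32rr8 -- uses of %edx can take %ecx instead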
1148
1149void X86DAGToDAGISel::PostprocessISelDAG() {
1150 // Skip peepholes at -O0.
1151 if (TM.getOptLevel() == CodeGenOpt::None)
1152 return;
1153
1154 SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
1155
1156 bool MadeChange = false;
1157 while (Position != CurDAG->allnodes_begin()) {
1158 SDNode *N = &*--Position;
1159 // Skip dead nodes and any non-machine opcodes.
1160 if (N->use_empty() || !N->isMachineOpcode())
1161 continue;
1162
1163 if (tryOptimizeRem8Extend(N)) {
1164 MadeChange = true;
1165 continue;
1166 }
1167
1168 // Look for a TESTrr+ANDrr pattern where both operands of the test are
1169 // the same. Rewrite to remove the AND.
1170 unsigned Opc = N->getMachineOpcode();
1171 if ((Opc == X86::TEST8rr || Opc == X86::TEST16rr ||
1172 Opc == X86::TEST32rr || Opc == X86::TEST64rr) &&
1173 N->getOperand(0) == N->getOperand(1) &&
1174 N->isOnlyUserOf(N->getOperand(0).getNode()) &&
1175 N->getOperand(0).isMachineOpcode()) {
1176 SDValue And = N->getOperand(0);
1177 unsigned N0Opc = And.getMachineOpcode();
1178 if (N0Opc == X86::AND8rr || N0Opc == X86::AND16rr ||
1179 N0Opc == X86::AND32rr || N0Opc == X86::AND64rr) {
1180 MachineSDNode *Test = CurDAG->getMachineNode(Opc, SDLoc(N),
1181 MVT::i32,
1182 And.getOperand(0),
1183 And.getOperand(1));
1184 ReplaceUses(N, Test);
1185 MadeChange = true;
1186 continue;
1187 }
1188 if (N0Opc == X86::AND8rm || N0Opc == X86::AND16rm ||
1189 N0Opc == X86::AND32rm || N0Opc == X86::AND64rm) {
1190 unsigned NewOpc;
1191 switch (N0Opc) {
1192 case X86::AND8rm: NewOpc = X86::TEST8mr; break;
1193 case X86::AND16rm: NewOpc = X86::TEST16mr; break;
1194 case X86::AND32rm: NewOpc = X86::TEST32mr; break;
1195 case X86::AND64rm: NewOpc = X86::TEST64mr; break;
1196 }
1197
1198 // Need to swap the memory and register operand.
1199 SDValue Ops[] = { And.getOperand(1),
1200 And.getOperand(2),
1201 And.getOperand(3),
1202 And.getOperand(4),
1203 And.getOperand(5),
1204 And.getOperand(0),
1205 And.getOperand(6) /* Chain */ };
1206 MachineSDNode *Test = CurDAG->getMachineNode(NewOpc, SDLoc(N),
1207 MVT::i32, MVT::Other, Ops);
1208 ReplaceUses(N, Test);
1209 MadeChange = true;
1210 continue;
1211 }
1212 }
1213
1214 // Look for a KAND+KORTEST and turn it into KTEST if only the zero flag is
1215 // used. We're doing this late so we can prefer to fold the AND into masked
1216 // comparisons. Doing that can be better for the live range of the mask
1217 // register.
1218 if ((Opc == X86::KORTESTBrr || Opc == X86::KORTESTWrr ||
1219 Opc == X86::KORTESTDrr || Opc == X86::KORTESTQrr) &&
1220 N->getOperand(0) == N->getOperand(1) &&
1221 N->isOnlyUserOf(N->getOperand(0).getNode()) &&
1222 N->getOperand(0).isMachineOpcode() &&
1223 onlyUsesZeroFlag(SDValue(N, 0))) {
1224 SDValue And = N->getOperand(0);
1225 unsigned N0Opc = And.getMachineOpcode();
1226 // KANDW is legal with AVX512F, but KTESTW requires AVX512DQ. The other
1227 // KAND instructions and KTEST use the same ISA feature.
1228 if (N0Opc == X86::KANDBrr ||
1229 (N0Opc == X86::KANDWrr && Subtarget->hasDQI()) ||
1230 N0Opc == X86::KANDDrr || N0Opc == X86::KANDQrr) {
1231 unsigned NewOpc;
1232 switch (Opc) {
1233 default: llvm_unreachable("Unexpected opcode!");
1234 case X86::KORTESTBrr: NewOpc = X86::KTESTBrr; break;
1235 case X86::KORTESTWrr: NewOpc = X86::KTESTWrr; break;
1236 case X86::KORTESTDrr: NewOpc = X86::KTESTDrr; break;
1237 case X86::KORTESTQrr: NewOpc = X86::KTESTQrr; break;
1238 }
1239 MachineSDNode *KTest = CurDAG->getMachineNode(NewOpc, SDLoc(N),
1240 MVT::i32,
1241 And.getOperand(0),
1242 And.getOperand(1));
1243 ReplaceUses(N, KTest);
1244 MadeChange = true;
1245 continue;
1246 }
1247 }
1248
1249 // Attempt to remove vector moves that were inserted to zero upper bits.
1250 if (Opc != TargetOpcode::SUBREG_TO_REG)
1251 continue;
1252
1253 unsigned SubRegIdx = N->getConstantOperandVal(2);
1254 if (SubRegIdx != X86::sub_xmm && SubRegIdx != X86::sub_ymm)
1255 continue;
1256
1257 SDValue Move = N->getOperand(1);
1258 if (!Move.isMachineOpcode())
1259 continue;
1260
1261 // Make sure it's one of the move opcodes we recognize.
1262 switch (Move.getMachineOpcode()) {
1263 default:
1264 continue;
1265 case X86::VMOVAPDrr: case X86::VMOVUPDrr:
1266 case X86::VMOVAPSrr: case X86::VMOVUPSrr:
1267 case X86::VMOVDQArr: case X86::VMOVDQUrr:
1268 case X86::VMOVAPDYrr: case X86::VMOVUPDYrr:
1269 case X86::VMOVAPSYrr: case X86::VMOVUPSYrr:
1270 case X86::VMOVDQAYrr: case X86::VMOVDQUYrr:
1271 case X86::VMOVAPDZ128rr: case X86::VMOVUPDZ128rr:
1272 case X86::VMOVAPSZ128rr: case X86::VMOVUPSZ128rr:
1273 case X86::VMOVDQA32Z128rr: case X86::VMOVDQU32Z128rr:
1274 case X86::VMOVDQA64Z128rr: case X86::VMOVDQU64Z128rr:
1275 case X86::VMOVAPDZ256rr: case X86::VMOVUPDZ256rr:
1276 case X86::VMOVAPSZ256rr: case X86::VMOVUPSZ256rr:
1277 case X86::VMOVDQA32Z256rr: case X86::VMOVDQU32Z256rr:
1278 case X86::VMOVDQA64Z256rr: case X86::VMOVDQU64Z256rr:
1279 break;
1280 }
1281
1282 SDValue In = Move.getOperand(0);
1283 if (!In.isMachineOpcode() ||
1284 In.getMachineOpcode() <= TargetOpcode::GENERIC_OP_END)
1285 continue;
1286
1287 // Make sure the instruction has a VEX, XOP, or EVEX prefix. This covers
1288 // the SHA instructions which use a legacy encoding.
1289 uint64_t TSFlags = getInstrInfo()->get(In.getMachineOpcode()).TSFlags;
1290 if ((TSFlags & X86II::EncodingMask) != X86II::VEX &&
1291 (TSFlags & X86II::EncodingMask) != X86II::EVEX &&
1292 (TSFlags & X86II::EncodingMask) != X86II::XOP)
1293 continue;
1294
1295 // The producing instruction is another vector instruction, so we can drop
1296 // the move.
1297 CurDAG->UpdateNodeOperands(N, N->getOperand(0), In, N->getOperand(2));
1298 MadeChange = true;
1299 }
1300
1301 if (MadeChange)
1302 CurDAG->RemoveDeadNodes();
1303}
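
As a hedged illustration of the TESTrr+ANDrr rewrite above (hypothetical registers): TEST computes the same AND for flag purposes without writing a result, so when the test is the AND's only user,

    andl  %esi, %edi
    testl %edi, %edi

collapses to

    testl %esi, %edi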
1304
1305
1306/// Emit any code that needs to be executed only in the main function.
1307void X86DAGToDAGISel::emitSpecialCodeForMain() {
1308 if (Subtarget->isTargetCygMing()) {
1309 TargetLowering::ArgListTy Args;
1310 auto &DL = CurDAG->getDataLayout();
1311
1312 TargetLowering::CallLoweringInfo CLI(*CurDAG);
1313 CLI.setChain(CurDAG->getRoot())
1314 .setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
1315 CurDAG->getExternalSymbol("__main", TLI->getPointerTy(DL)),
1316 std::move(Args));
1317 const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
1318 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
1319 CurDAG->setRoot(Result.second);
1320 }
1321}
1322
1323void X86DAGToDAGISel::EmitFunctionEntryCode() {
1324 // If this is main, emit special code for main.
1325 const Function &F = MF->getFunction();
1326 if (F.hasExternalLinkage() && F.getName() == "main")
1327 emitSpecialCodeForMain();
1328}
1329
1330static bool isDispSafeForFrameIndex(int64_t Val) {
1331 // On 64-bit platforms, we can run into an issue where a frame index
1332 // includes a displacement that, when added to the explicit displacement,
1333 // will overflow the displacement field. Assuming that the frame index
1334 // displacement fits into a 31-bit integer (which is only slightly more
1335 // aggressive than the current fundamental assumption that it fits into
1336 // a 32-bit integer), a 31-bit disp should always be safe.
1337 return isInt<31>(Val);
1338}
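
Concretely, isInt<31>(Val) accepts Val in [-2^30, 2^30 - 1], one bit tighter than the signed 32-bit displacement field's [-2^31, 2^31 - 1], so a frame-index displacement folded in later still has headroom before the field overflows.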
1339
1340bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
1341 X86ISelAddressMode &AM) {
1342 // If there's no offset to fold, we don't need to do any work.
1343 if (Offset == 0)
1344 return false;
1345
1346 // Cannot combine ExternalSymbol displacements with integer offsets.
1347 if (AM.ES || AM.MCSym)
1348 return true;
1349
1350 int64_t Val = AM.Disp + Offset;
1351 CodeModel::Model M = TM.getCodeModel();
1352 if (Subtarget->is64Bit()) {
1353 if (!X86::isOffsetSuitableForCodeModel(Val, M,
1354 AM.hasSymbolicDisplacement()))
1355 return true;
1356 // In addition to the checks required for a register base, check that
1357 // we do not try to use an unsafe Disp with a frame index.
1358 if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
1359 !isDispSafeForFrameIndex(Val))
1360 return true;
1361 }
1362 AM.Disp = Val;
1363 return false;
1364
1365}
1366
1367bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
1368 SDValue Address = N->getOperand(1);
1369
1370 // load gs:0 -> GS segment register.
1371 // load fs:0 -> FS segment register.
1372 //
1373 // This optimization is valid because the GNU TLS model defines that
1374 // gs:0 (or fs:0 on X86-64) contains its own address.
1375 // For more information see http://people.redhat.com/drepper/tls.pdf
1376 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
1377 if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
1378 !IndirectTlsSegRefs &&
1379 (Subtarget->isTargetGlibc() || Subtarget->isTargetAndroid() ||
1380 Subtarget->isTargetFuchsia()))
1381 switch (N->getPointerInfo().getAddrSpace()) {
1382 case 256:
1383 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
1384 return false;
1385 case 257:
1386 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
1387 return false;
1388 // Address space 258 is not handled here, because it is not used to
1389 // address TLS areas.
1390 }
1391
1392 return true;
1393}
1394
1395/// Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes into an addressing
1396/// mode. These wrap things that will resolve down into a symbol reference.
1397/// If no match is possible, this returns true, otherwise it returns false.
1398bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) {
1399 // If the addressing mode already has a symbol as the displacement, we can
1400 // never match another symbol.
1401 if (AM.hasSymbolicDisplacement())
1402 return true;
1403
1404 bool IsRIPRelTLS = false;
1405 bool IsRIPRel = N.getOpcode() == X86ISD::WrapperRIP;
1406 if (IsRIPRel) {
1407 SDValue Val = N.getOperand(0);
1408 if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
1409 IsRIPRelTLS = true;
1410 }
1411
1412 // We can't use an addressing mode in the 64-bit large code model.
1413 // Global TLS addressing is an exception. In the medium code model,
1414 // we can use a mode when RIP wrappers are present.
1415 // That signifies access to globals that are known to be "near",
1416 // such as the GOT itself.
1417 CodeModel::Model M = TM.getCodeModel();
1418 if (Subtarget->is64Bit() &&
1419 ((M == CodeModel::Large && !IsRIPRelTLS) ||
1420 (M == CodeModel::Medium && !IsRIPRel)))
1421 return true;
1422
1423 // Base and index reg must be 0 in order to use %rip as base.
1424 if (IsRIPRel && AM.hasBaseOrIndexReg())
1425 return true;
1426
1427 // Make a local copy in case we can't do this fold.
1428 X86ISelAddressMode Backup = AM;
1429
1430 int64_t Offset = 0;
1431 SDValue N0 = N.getOperand(0);
1432 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
1433 AM.GV = G->getGlobal();
1434 AM.SymbolFlags = G->getTargetFlags();
1435 Offset = G->getOffset();
1436 } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
1437 AM.CP = CP->getConstVal();
1438 AM.Align = CP->getAlignment();
1439 AM.SymbolFlags = CP->getTargetFlags();
1440 Offset = CP->getOffset();
1441 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
1442 AM.ES = S->getSymbol();
1443 AM.SymbolFlags = S->getTargetFlags();
1444 } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
1445 AM.MCSym = S->getMCSymbol();
1446 } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
1447 AM.JT = J->getIndex();
1448 AM.SymbolFlags = J->getTargetFlags();
1449 } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
1450 AM.BlockAddr = BA->getBlockAddress();
1451 AM.SymbolFlags = BA->getTargetFlags();
1452 Offset = BA->getOffset();
1453 } else
1454 llvm_unreachable("Unhandled symbol reference node.")::llvm::llvm_unreachable_internal("Unhandled symbol reference node."
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 1454)
;
1455
1456 if (foldOffsetIntoAddress(Offset, AM)) {
1457 AM = Backup;
1458 return true;
1459 }
1460
1461 if (IsRIPRel)
1462 AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
1463
1464 // Commit the changes now that we know this fold is safe.
1465 return false;
1466}
1467
1468/// Add the specified node to the specified addressing mode, returning true if
1469/// it cannot be done. This just pattern matches for the addressing mode.
1470bool X86DAGToDAGISel::matchAddress(SDValue N, X86ISelAddressMode &AM) {
1471 if (matchAddressRecursively(N, AM, 0))
1472 return true;
1473
1474 // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
1475 // a smaller encoding and avoids a scaled index.
1476 if (AM.Scale == 2 &&
1477 AM.BaseType == X86ISelAddressMode::RegBase &&
1478 AM.Base_Reg.getNode() == nullptr) {
1479 AM.Base_Reg = AM.IndexReg;
1480 AM.Scale = 1;
1481 }
1482
1483 // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
1484 // because it has a smaller encoding.
1485 // TODO: Which other code models can use this?
1486 switch (TM.getCodeModel()) {
1487 default: break;
1488 case CodeModel::Small:
1489 case CodeModel::Kernel:
1490 if (Subtarget->is64Bit() &&
1491 AM.Scale == 1 &&
1492 AM.BaseType == X86ISelAddressMode::RegBase &&
1493 AM.Base_Reg.getNode() == nullptr &&
1494 AM.IndexReg.getNode() == nullptr &&
1495 AM.SymbolFlags == X86II::MO_NO_FLAG &&
1496 AM.hasSymbolicDisplacement())
1497 AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);
1498 break;
1499 }
1500
1501 return false;
1502}
1503
1504bool X86DAGToDAGISel::matchAdd(SDValue &N, X86ISelAddressMode &AM,
1505 unsigned Depth) {
1506 // Add an artificial use to this node so that we can keep track of
1507 // it if it gets CSE'd with a different node.
1508 HandleSDNode Handle(N);
1509
1510 X86ISelAddressMode Backup = AM;
1511 if (!matchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
1512 !matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
1513 return false;
1514 AM = Backup;
1515
1516 // Try again after commuting the operands.
1517 if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1) &&
1518 !matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
1519 return false;
1520 AM = Backup;
1521
1522 // If we couldn't fold both operands into the address at the same time,
1523 // see if we can just put each operand into a register and fold at least
1524 // the add.
1525 if (AM.BaseType == X86ISelAddressMode::RegBase &&
1526 !AM.Base_Reg.getNode() &&
1527 !AM.IndexReg.getNode()) {
1528 N = Handle.getValue();
1529 AM.Base_Reg = N.getOperand(0);
1530 AM.IndexReg = N.getOperand(1);
1531 AM.Scale = 1;
1532 return false;
1533 }
1534 N = Handle.getValue();
1535 return true;
1536}
1537
1538// Insert a node into the DAG at least before the Pos node's position. This
1539// will reposition the node as needed, and will assign it a node ID that is <=
1540// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
1541// IDs! The selection DAG must no longer depend on their uniqueness when this
1542// is used.
1543static void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
1544 if (N->getNodeId() == -1 ||
1545 (SelectionDAGISel::getUninvalidatedNodeId(N.getNode()) >
1546 SelectionDAGISel::getUninvalidatedNodeId(Pos.getNode()))) {
1547 DAG.RepositionNode(Pos->getIterator(), N.getNode());
1548 // Mark the node as invalid for pruning, as after this it may be a
1549 // successor to a selected node but otherwise be in the same position as
1550 // Pos. Conservatively mark it with the same -abs(Id) to ensure the
1551 // node id invariant is preserved.
1552 N->setNodeId(Pos->getNodeId());
1553 SelectionDAGISel::InvalidateNodeId(N.getNode());
1554 }
1555}
1556
1557// Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
1558 // safe. This allows us to convert the shift and the AND into an h-register
1559// extract and a scaled index. Returns false if the simplification is
1560// performed.
1561static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
1562 uint64_t Mask,
1563 SDValue Shift, SDValue X,
1564 X86ISelAddressMode &AM) {
1565 if (Shift.getOpcode() != ISD::SRL ||
1566 !isa<ConstantSDNode>(Shift.getOperand(1)) ||
1567 !Shift.hasOneUse())
1568 return true;
1569
1570 int ScaleLog = 8 - Shift.getConstantOperandVal(1);
1571 if (ScaleLog <= 0 || ScaleLog >= 4 ||
1572 Mask != (0xffu << ScaleLog))
1573 return true;
1574
1575 MVT VT = N.getSimpleValueType();
1576 SDLoc DL(N);
1577 SDValue Eight = DAG.getConstant(8, DL, MVT::i8);
1578 SDValue NewMask = DAG.getConstant(0xff, DL, VT);
1579 SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
1580 SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
1581 SDValue ShlCount = DAG.getConstant(ScaleLog, DL, MVT::i8);
1582 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);
1583
1584 // Insert the new nodes into the topological ordering. We must do this in
1585 // a valid topological ordering as nothing is going to go back and re-sort
1586 // these nodes. We continually insert before 'N' in sequence as this is
1587 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1588 // hierarchy left to express.
1589 insertDAGNode(DAG, N, Eight);
1590 insertDAGNode(DAG, N, Srl);
1591 insertDAGNode(DAG, N, NewMask);
1592 insertDAGNode(DAG, N, And);
1593 insertDAGNode(DAG, N, ShlCount);
1594 insertDAGNode(DAG, N, Shl);
1595 DAG.ReplaceAllUsesWith(N, Shl);
1596 DAG.RemoveDeadNode(N.getNode());
1597 AM.IndexReg = And;
1598 AM.Scale = (1 << ScaleLog);
1599 return false;
1600}
1601
1602// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
1603// allows us to fold the shift into this addressing mode. Returns false if the
1604// transform succeeded.
1605static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
1606 X86ISelAddressMode &AM) {
1607 SDValue Shift = N.getOperand(0);
1608
1609 // Use a signed mask so that shifting right will insert sign bits. These
1610 // bits will be removed when we shift the result left so it doesn't matter
1611 // what we use. This might allow a smaller immediate encoding.
1612 int64_t Mask = cast<ConstantSDNode>(N->getOperand(1))->getSExtValue();
1613
1614 // If we have an any_extend feeding the AND, look through it to see if there
1615 // is a shift behind it. But only if the AND doesn't use the extended bits.
1616 // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
1617 bool FoundAnyExtend = false;
1618 if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
1619 Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
1620 isUInt<32>(Mask)) {
1621 FoundAnyExtend = true;
1622 Shift = Shift.getOperand(0);
1623 }
1624
1625 if (Shift.getOpcode() != ISD::SHL ||
1626 !isa<ConstantSDNode>(Shift.getOperand(1)))
1627 return true;
1628
1629 SDValue X = Shift.getOperand(0);
1630
1631 // Not likely to be profitable if either the AND or SHIFT node has more
1632 // than one use (unless all uses are for address computation). Besides,
1633 // isel mechanism requires their node ids to be reused.
1634 if (!N.hasOneUse() || !Shift.hasOneUse())
1635 return true;
1636
1637 // Verify that the shift amount is something we can fold.
1638 unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1639 if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
1640 return true;
1641
1642 MVT VT = N.getSimpleValueType();
1643 SDLoc DL(N);
1644 if (FoundAnyExtend) {
1645 SDValue NewX = DAG.getNode(ISD::ANY_EXTEND, DL, VT, X);
1646 insertDAGNode(DAG, N, NewX);
1647 X = NewX;
1648 }
1649
1650 SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
1651 SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
1652 SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));
1653
1654 // Insert the new nodes into the topological ordering. We must do this in
1655 // a valid topological ordering as nothing is going to go back and re-sort
1656 // these nodes. We continually insert before 'N' in sequence as this is
1657 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1658 // hierarchy left to express.
1659 insertDAGNode(DAG, N, NewMask);
1660 insertDAGNode(DAG, N, NewAnd);
1661 insertDAGNode(DAG, N, NewShift);
1662 DAG.ReplaceAllUsesWith(N, NewShift);
1663 DAG.RemoveDeadNode(N.getNode());
1664
1665 AM.Scale = 1 << ShiftAmt;
1666 AM.IndexReg = NewAnd;
1667 return false;
1668}
1669
1670// Implement some heroics to detect shifts of masked values where the mask can
1671// be replaced by extending the shift and undoing that in the addressing mode
1672// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
1673// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
1674// the addressing mode. This results in code such as:
1675//
1676// int f(short *y, int *lookup_table) {
1677// ...
1678// return *y + lookup_table[*y >> 11];
1679// }
1680//
1681// Turning into:
1682// movzwl (%rdi), %eax
1683// movl %eax, %ecx
1684// shrl $11, %ecx
1685// addl (%rsi,%rcx,4), %eax
1686//
1687// Instead of:
1688// movzwl (%rdi), %eax
1689// movl %eax, %ecx
1690// shrl $9, %ecx
1691 // andl $124, %ecx
1692// addl (%rsi,%rcx), %eax
1693//
1694// Note that this function assumes the mask is provided as a mask *after* the
1695// value is shifted. The input chain may or may not match that, but computing
1696// such a mask is trivial.
1697static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
1698 uint64_t Mask,
1699 SDValue Shift, SDValue X,
1700 X86ISelAddressMode &AM) {
1701 if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
1702 !isa<ConstantSDNode>(Shift.getOperand(1)))
1703 return true;
1704
1705 unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1706 unsigned MaskLZ = countLeadingZeros(Mask);
1707 unsigned MaskTZ = countTrailingZeros(Mask);
1708
1709 // The amount of shift we're trying to fit into the addressing mode is taken
1710 // from the trailing zeros of the mask.
1711 unsigned AMShiftAmt = MaskTZ;
1712
1713 // There is nothing we can do here unless the mask is removing some bits.
1714 // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
1715 if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;
1716
1717 // We also need to ensure that the mask is a contiguous run of bits.
1718 if (countTrailingOnes(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
1719
1720 // Scale the leading zero count down based on the actual size of the value.
1721 // Also scale it down based on the size of the shift.
1722 unsigned ScaleDown = (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
1723 if (MaskLZ < ScaleDown)
1724 return true;
1725 MaskLZ -= ScaleDown;
1726
1727 // The final check is to ensure that any masked out high bits of X are
1728 // already known to be zero. Otherwise, the mask has a semantic impact
1729 // other than masking out a couple of low bits. Unfortunately, because of
1730 // the mask, zero extensions will be removed from operands in some cases.
1731 // This code works extra hard to look through extensions because we can
1732 // replace them with zero extensions cheaply if necessary.
1733 bool ReplacingAnyExtend = false;
1734 if (X.getOpcode() == ISD::ANY_EXTEND) {
1735 unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
1736 X.getOperand(0).getSimpleValueType().getSizeInBits();
1737 // Assume that we'll replace the any-extend with a zero-extend, and
1738 // narrow the search to the extended value.
1739 X = X.getOperand(0);
1740 MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
1741 ReplacingAnyExtend = true;
1742 }
1743 APInt MaskedHighBits =
1744 APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
1745 KnownBits Known = DAG.computeKnownBits(X);
1746 if (MaskedHighBits != Known.Zero) return true;
1747
1748 // We've identified a pattern that can be transformed into a single shift
1749 // and an addressing mode. Make it so.
1750 MVT VT = N.getSimpleValueType();
1751 if (ReplacingAnyExtend) {
1752 assert(X.getValueType() != VT);
1753 // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
1754 SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
1755 insertDAGNode(DAG, N, NewX);
1756 X = NewX;
1757 }
1758 SDLoc DL(N);
1759 SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
1760 SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
1761 SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
1762 SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);
1763
1764 // Insert the new nodes into the topological ordering. We must do this in
1765 // a valid topological ordering as nothing is going to go back and re-sort
1766 // these nodes. We continually insert before 'N' in sequence as this is
1767 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1768 // hierarchy left to express.
1769 insertDAGNode(DAG, N, NewSRLAmt);
1770 insertDAGNode(DAG, N, NewSRL);
1771 insertDAGNode(DAG, N, NewSHLAmt);
1772 insertDAGNode(DAG, N, NewSHL);
1773 DAG.ReplaceAllUsesWith(N, NewSHL);
1774 DAG.RemoveDeadNode(N.getNode());
1775
1776 AM.Scale = 1 << AMShiftAmt;
1777 AM.IndexReg = NewSRL;
1778 return false;
1779}
1780
1781// Transform "(X >> SHIFT) & (MASK << C1)" to
1782// "((X >> (SHIFT + C1)) & (MASK)) << C1". Everything before the SHL will be
1783// matched to a BEXTR later. Returns false if the simplification is performed.
1784static bool foldMaskedShiftToBEXTR(SelectionDAG &DAG, SDValue N,
1785 uint64_t Mask,
1786 SDValue Shift, SDValue X,
1787 X86ISelAddressMode &AM,
1788 const X86Subtarget &Subtarget) {
1789 if (Shift.getOpcode() != ISD::SRL ||
1790 !isa<ConstantSDNode>(Shift.getOperand(1)) ||
1791 !Shift.hasOneUse() || !N.hasOneUse())
1792 return true;
1793
1794 // Only do this if BEXTR will be matched by matchBEXTRFromAndImm.
1795 if (!Subtarget.hasTBM() &&
1796 !(Subtarget.hasBMI() && Subtarget.hasFastBEXTR()))
1797 return true;
1798
1799 // We need to ensure that the mask is a contiguous run of bits.
1800 if (!isShiftedMask_64(Mask)) return true;
1801
1802 unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1803
1804 // The amount of shift we're trying to fit into the addressing mode is taken
1805 // from the trailing zeros of the mask.
1806 unsigned AMShiftAmt = countTrailingZeros(Mask);
1807
1808 // There is nothing we can do here unless the mask is removing some bits.
1809 // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
1810 if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;
1811
1812 MVT VT = N.getSimpleValueType();
1813 SDLoc DL(N);
1814 SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
1815 SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
1816 SDValue NewMask = DAG.getConstant(Mask >> AMShiftAmt, DL, VT);
1817 SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, NewSRL, NewMask);
1818 SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
1819 SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewAnd, NewSHLAmt);
1820
1821 // Insert the new nodes into the topological ordering. We must do this in
1822 // a valid topological ordering as nothing is going to go back and re-sort
1823 // these nodes. We continually insert before 'N' in sequence as this is
1824 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1825 // hierarchy left to express.
1826 insertDAGNode(DAG, N, NewSRLAmt);
1827 insertDAGNode(DAG, N, NewSRL);
1828 insertDAGNode(DAG, N, NewMask);
1829 insertDAGNode(DAG, N, NewAnd);
1830 insertDAGNode(DAG, N, NewSHLAmt);
1831 insertDAGNode(DAG, N, NewSHL);
1832 DAG.ReplaceAllUsesWith(N, NewSHL);
1833 DAG.RemoveDeadNode(N.getNode());
1834
1835 AM.Scale = 1 << AMShiftAmt;
1836 AM.IndexReg = NewAnd;
1837 return false;
1838}
1839
1840bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
1841 unsigned Depth) {
1842 SDLoc dl(N);
1843 LLVM_DEBUG({
1844 dbgs() << "MatchAddress: ";
1845 AM.dump(CurDAG);
1846 });
1847 // Limit recursion.
1848 if (Depth > 5)
1849 return matchAddressBase(N, AM);
1850
1851 // If this is already a %rip relative address, we can only merge immediates
1852 // into it. Instead of handling this in every case, we handle it here.
1853 // RIP relative addressing: %rip + 32-bit displacement!
1854 if (AM.isRIPRelative()) {
1855 // FIXME: JumpTable and ExternalSymbol address currently don't like
1856 // displacements. It isn't very important, but this should be fixed for
1857 // consistency.
1858 if (!(AM.ES || AM.MCSym) && AM.JT != -1)
1859 return true;
1860
1861 if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
1862 if (!foldOffsetIntoAddress(Cst->getSExtValue(), AM))
1863 return false;
1864 return true;
1865 }
1866
1867 switch (N.getOpcode()) {
1868 default: break;
1869 case ISD::LOCAL_RECOVER: {
1870 if (!AM.hasSymbolicDisplacement() && AM.Disp == 0)
1871 if (const auto *ESNode = dyn_cast<MCSymbolSDNode>(N.getOperand(0))) {
1872 // Use the symbol and don't prefix it.
1873 AM.MCSym = ESNode->getMCSymbol();
1874 return false;
1875 }
1876 break;
1877 }
1878 case ISD::Constant: {
1879 uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
1880 if (!foldOffsetIntoAddress(Val, AM))
1881 return false;
1882 break;
1883 }
1884
1885 case X86ISD::Wrapper:
1886 case X86ISD::WrapperRIP:
1887 if (!matchWrapper(N, AM))
1888 return false;
1889 break;
1890
1891 case ISD::LOAD:
1892 if (!matchLoadInAddress(cast<LoadSDNode>(N), AM))
1893 return false;
1894 break;
1895
1896 case ISD::FrameIndex:
1897 if (AM.BaseType == X86ISelAddressMode::RegBase &&
1898 AM.Base_Reg.getNode() == nullptr &&
1899 (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
1900 AM.BaseType = X86ISelAddressMode::FrameIndexBase;
1901 AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
1902 return false;
1903 }
1904 break;
1905
1906 case ISD::SHL:
1907 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
1908 break;
1909
1910 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1911 unsigned Val = CN->getZExtValue();
1912 // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
1913 // that the base operand remains free for further matching. If
1914 // the base doesn't end up getting used, a post-processing step
1915 // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
1916 if (Val == 1 || Val == 2 || Val == 3) {
1917 AM.Scale = 1 << Val;
1918 SDValue ShVal = N.getOperand(0);
1919
1920 // Okay, we know that we have a scale by now. However, if the scaled
1921 // value is an add of something and a constant, we can fold the
1922 // constant into the disp field here.
1923 if (CurDAG->isBaseWithConstantOffset(ShVal)) {
1924 AM.IndexReg = ShVal.getOperand(0);
1925 ConstantSDNode *AddVal = cast<ConstantSDNode>(ShVal.getOperand(1));
1926 uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
1927 if (!foldOffsetIntoAddress(Disp, AM))
1928 return false;
1929 }
1930
1931 AM.IndexReg = ShVal;
1932 return false;
1933 }
1934 }
1935 break;
1936
1937 case ISD::SRL: {
1938 // Scale must not be used already.
1939 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
1940
1941 // We only handle up to 64-bit values here as those are what matter for
1942 // addressing mode optimizations.
1943 assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
1944 "Unexpected value size!");
1945
1946 SDValue And = N.getOperand(0);
1947 if (And.getOpcode() != ISD::AND) break;
1948 SDValue X = And.getOperand(0);
1949
1950 // The mask used for the transform is expected to be post-shift, but we
1951 // found the shift first so just apply the shift to the mask before passing
1952 // it down.
1953 if (!isa<ConstantSDNode>(N.getOperand(1)) ||
1954 !isa<ConstantSDNode>(And.getOperand(1)))
1955 break;
1956 uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);
1957
1958 // Try to fold the mask and shift into the scale, and return false if we
1959 // succeed.
1960 if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
1961 return false;
1962 break;
1963 }
1964
1965 case ISD::SMUL_LOHI:
1966 case ISD::UMUL_LOHI:
1967 // A mul_lohi where we need the low part can be folded as a plain multiply.
1968 if (N.getResNo() != 0) break;
1969 LLVM_FALLTHROUGH;
1970 case ISD::MUL:
1971 case X86ISD::MUL_IMM:
1972 // X*[3,5,9] -> X+X*[2,4,8]
1973 if (AM.BaseType == X86ISelAddressMode::RegBase &&
1974 AM.Base_Reg.getNode() == nullptr &&
1975 AM.IndexReg.getNode() == nullptr) {
1976 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
1977 if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
1978 CN->getZExtValue() == 9) {
1979 AM.Scale = unsigned(CN->getZExtValue())-1;
1980
1981 SDValue MulVal = N.getOperand(0);
1982 SDValue Reg;
1983
1984 // Okay, we know that we have a scale by now. However, if the scaled
1985 // value is an add of something and a constant, we can fold the
1986 // constant into the disp field here.
1987 if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
1988 isa<ConstantSDNode>(MulVal.getOperand(1))) {
1989 Reg = MulVal.getOperand(0);
1990 ConstantSDNode *AddVal =
1991 cast<ConstantSDNode>(MulVal.getOperand(1));
1992 uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
1993 if (foldOffsetIntoAddress(Disp, AM))
1994 Reg = N.getOperand(0);
1995 } else {
1996 Reg = N.getOperand(0);
1997 }
1998
1999 AM.IndexReg = AM.Base_Reg = Reg;
2000 return false;
2001 }
2002 }
2003 break;
2004
2005 case ISD::SUB: {
2006 // Given A-B, if A can be completely folded into the address while the
2007 // index field is still unused, use -B as the index. This is a win if A
2008 // has multiple parts that can be folded into the address. It also saves
2009 // a mov if the base register has other uses, since it avoids a
2010 // two-address sub instruction; however, it costs an additional mov if
2011 // the index register has other uses.
2012
2013 // Add an artificial use to this node so that we can keep track of
2014 // it if it gets CSE'd with a different node.
2015 HandleSDNode Handle(N);
2016
2017 // Test if the LHS of the sub can be folded.
2018 X86ISelAddressMode Backup = AM;
2019 if (matchAddressRecursively(N.getOperand(0), AM, Depth+1)) {
2020 N = Handle.getValue();
2021 AM = Backup;
2022 break;
2023 }
2024 N = Handle.getValue();
2025 // Test if the index field is free for use.
2026 if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
2027 AM = Backup;
2028 break;
2029 }
2030
2031 int Cost = 0;
2032 SDValue RHS = N.getOperand(1);
2033 // If the RHS involves a register with multiple uses, this
2034 // transformation incurs an extra mov, due to the neg instruction
2035 // clobbering its operand.
2036 if (!RHS.getNode()->hasOneUse() ||
2037 RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
2038 RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
2039 RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
2040 (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
2041 RHS.getOperand(0).getValueType() == MVT::i32))
2042 ++Cost;
2043 // If the base is a register with multiple uses, this
2044 // transformation may save a mov.
2045 if ((AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode() &&
2046 !AM.Base_Reg.getNode()->hasOneUse()) ||
2047 AM.BaseType == X86ISelAddressMode::FrameIndexBase)
2048 --Cost;
2049 // If the folded LHS was interesting, this transformation saves
2050 // address arithmetic.
2051 if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
2052 ((AM.Disp != 0) && (Backup.Disp == 0)) +
2053 (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
2054 --Cost;
2055 // If it doesn't look like it may be an overall win, don't do it.
2056 if (Cost >= 0) {
2057 AM = Backup;
2058 break;
2059 }
2060
2061 // Ok, the transformation is legal and appears profitable. Go for it.
2062 // Negation will be emitted later to avoid creating dangling nodes if this
2063 // was an unprofitable LEA.
2064 AM.IndexReg = RHS;
2065 AM.NegateIndex = true;
2066 AM.Scale = 1;
2067 return false;
2068 }
2069
2070 case ISD::ADD:
2071 if (!matchAdd(N, AM, Depth))
2072 return false;
2073 break;
2074
2075 case ISD::OR:
2076 // We want to look through a transform in InstCombine and DAGCombiner that
2077 // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'.
2078 // Example: (or (and x, 1), (shl y, 3)) --> (add (and x, 1), (shl y, 3))
2079 // An 'lea' can then be used to match the shift (multiply) and add:
2080 // and $1, %esi
2081 // lea (%rsi, %rdi, 8), %rax
2082 if (CurDAG->haveNoCommonBitsSet(N.getOperand(0), N.getOperand(1)) &&
2083 !matchAdd(N, AM, Depth))
2084 return false;
2085 break;
2086
2087 case ISD::AND: {
2088 // Perform some heroic transforms on an and of a constant-count shift
2089 // with a constant to enable use of the scaled offset field.
2090
2091 // Scale must not be used already.
2092 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
2093
2094 // We only handle up to 64-bit values here as those are what matter for
2095 // addressing mode optimizations.
2096 assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
2097 "Unexpected value size!");
2098
2099 if (!isa<ConstantSDNode>(N.getOperand(1)))
2100 break;
2101
2102 if (N.getOperand(0).getOpcode() == ISD::SRL) {
2103 SDValue Shift = N.getOperand(0);
2104 SDValue X = Shift.getOperand(0);
2105
2106 uint64_t Mask = N.getConstantOperandVal(1);
2107
2108 // Try to fold the mask and shift into an extract and scale.
2109 if (!foldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
2110 return false;
2111
2112 // Try to fold the mask and shift directly into the scale.
2113 if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
2114 return false;
2115
2116 // Try to fold the mask and shift into BEXTR and scale.
2117 if (!foldMaskedShiftToBEXTR(*CurDAG, N, Mask, Shift, X, AM, *Subtarget))
2118 return false;
2119 }
2120
2121 // Try to swap the mask and shift to place shifts which can be done as
2122 // a scale on the outside of the mask.
2123 if (!foldMaskedShiftToScaledMask(*CurDAG, N, AM))
2124 return false;
2125
2126 break;
2127 }
2128 case ISD::ZERO_EXTEND: {
2129 // Try to widen a zexted shift left to the same size as its use, so we can
2130 // match the shift as a scale factor.
2131 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
2132 break;
2133 if (N.getOperand(0).getOpcode() != ISD::SHL || !N.getOperand(0).hasOneUse())
2134 break;
2135
2136 // Give up if the shift is not a valid scale factor [1,2,3].
2137 SDValue Shl = N.getOperand(0);
2138 auto *ShAmtC = dyn_cast<ConstantSDNode>(Shl.getOperand(1));
2139 if (!ShAmtC || ShAmtC->getZExtValue() > 3)
2140 break;
2141
2142 // The narrow shift must only shift out zero bits (it must be 'nuw').
2143 // That makes it safe to widen to the destination type.
2144 APInt HighZeros = APInt::getHighBitsSet(Shl.getValueSizeInBits(),
2145 ShAmtC->getZExtValue());
2146 if (!CurDAG->MaskedValueIsZero(Shl.getOperand(0), HighZeros))
2147 break;
2148
2149 // zext (shl nuw i8 %x, C) to i32 --> shl (zext i8 %x to i32), (zext C)
2150 MVT VT = N.getSimpleValueType();
2151 SDLoc DL(N);
2152 SDValue Zext = CurDAG->getNode(ISD::ZERO_EXTEND, DL, VT, Shl.getOperand(0));
2153 SDValue NewShl = CurDAG->getNode(ISD::SHL, DL, VT, Zext, Shl.getOperand(1));
2154
2155 // Convert the shift to scale factor.
2156 AM.Scale = 1 << ShAmtC->getZExtValue();
2157 AM.IndexReg = Zext;
2158
2159 insertDAGNode(*CurDAG, N, Zext);
2160 insertDAGNode(*CurDAG, N, NewShl);
2161 CurDAG->ReplaceAllUsesWith(N, NewShl);
2162 CurDAG->RemoveDeadNode(N.getNode());
2163 return false;
2164 }
2165 }
2166
2167 return matchAddressBase(N, AM);
2168}
2169
2170/// Helper for MatchAddress. Add the specified node to the
2171/// specified addressing mode without any further recursion.
2172bool X86DAGToDAGISel::matchAddressBase(SDValue N, X86ISelAddressMode &AM) {
2173 // Is the base register already occupied?
2174 if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
2175 // If so, check to see if the scale index register is set.
2176 if (!AM.IndexReg.getNode()) {
2177 AM.IndexReg = N;
2178 AM.Scale = 1;
2179 return false;
2180 }
2181
2182 // Otherwise, we cannot select it.
2183 return true;
2184 }
2185
2186 // Default, generate it as a register.
2187 AM.BaseType = X86ISelAddressMode::RegBase;
2188 AM.Base_Reg = N;
2189 return false;
2190}
2191
2192/// Helper for selectVectorAddr. Handles things that can be folded into a
2193 /// gather/scatter address. The index register and scale should have already
2194/// been handled.
2195bool X86DAGToDAGISel::matchVectorAddress(SDValue N, X86ISelAddressMode &AM) {
2196 // TODO: Support other operations.
2197 switch (N.getOpcode()) {
2198 case ISD::Constant: {
2199 uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
2200 if (!foldOffsetIntoAddress(Val, AM))
2201 return false;
2202 break;
2203 }
2204 case X86ISD::Wrapper:
2205 if (!matchWrapper(N, AM))
2206 return false;
2207 break;
2208 }
2209
2210 return matchAddressBase(N, AM);
2211}
2212
2213bool X86DAGToDAGISel::selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
2214 SDValue &Scale, SDValue &Index,
2215 SDValue &Disp, SDValue &Segment) {
2216 X86ISelAddressMode AM;
2217 auto *Mgs = cast<X86MaskedGatherScatterSDNode>(Parent);
2218 AM.IndexReg = Mgs->getIndex();
2219 AM.Scale = cast<ConstantSDNode>(Mgs->getScale())->getZExtValue();
2220
2221 unsigned AddrSpace = cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
2222 // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
2223 if (AddrSpace == 256)
2224 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
2225 if (AddrSpace == 257)
2226 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
2227 if (AddrSpace == 258)
2228 AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
2229
2230 SDLoc DL(N);
2231 MVT VT = N.getSimpleValueType();
2232
2233 // Try to match into the base and displacement fields.
2234 if (matchVectorAddress(N, AM))
2235 return false;
2236
2237 getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2238 return true;
2239}
2240
2241 /// Returns true if it is able to pattern match an addressing mode.
2242 /// On success, it returns by reference the operands which make up the
2243 /// maximal addressing mode it can match.
2244///
2245/// Parent is the parent node of the addr operand that is being matched. It
2246/// is always a load, store, atomic node, or null. It is only null when
2247/// checking memory operands for inline asm nodes.
2248bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
2249 SDValue &Scale, SDValue &Index,
2250 SDValue &Disp, SDValue &Segment) {
2251 X86ISelAddressMode AM;
2252
2253 if (Parent &&
2254 // This list of opcodes are all the nodes that have an "addr:$ptr" operand
2255 // that are not a MemSDNode, and thus don't have proper addrspace info.
2256 Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
2257 Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
2258 Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
2259 Parent->getOpcode() != X86ISD::ENQCMD && // Fixme
2260 Parent->getOpcode() != X86ISD::ENQCMDS && // Fixme
2261 Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
2262 Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
2263 unsigned AddrSpace =
2264 cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
2265 // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
2266 if (AddrSpace == 256)
2267 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
2268 if (AddrSpace == 257)
2269 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
2270 if (AddrSpace == 258)
2271 AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
2272 }
2273
2274 // Save the DL and VT before calling matchAddress, it can invalidate N.
2275 SDLoc DL(N);
2276 MVT VT = N.getSimpleValueType();
2277
2278 if (matchAddress(N, AM))
2279 return false;
2280
2281 getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2282 return true;
2283}
2284
2285// We can only fold a load if all nodes between it and the root node have a
2286// single use. If there are additional uses, we could end up duplicating the
2287// load.
2288static bool hasSingleUsesFromRoot(SDNode *Root, SDNode *User) {
2289 while (User != Root) {
2290 if (!User->hasOneUse())
2291 return false;
2292 User = *User->use_begin();
2293 }
2294
2295 return true;
2296}
2297
2298/// Match a scalar SSE load. In particular, we want to match a load whose top
2299/// elements are either undef or zeros. The load flavor is derived from the
2300/// type of N, which is either v4f32 or v2f64.
2301///
2302/// We also return:
2303/// PatternChainNode: this is the matched node that has a chain input and
2304/// output.
2305bool X86DAGToDAGISel::selectScalarSSELoad(SDNode *Root, SDNode *Parent,
2306 SDValue N, SDValue &Base,
2307 SDValue &Scale, SDValue &Index,
2308 SDValue &Disp, SDValue &Segment,
2309 SDValue &PatternNodeWithChain) {
2310 if (!hasSingleUsesFromRoot(Root, Parent))
2311 return false;
2312
2313 // We can allow a full vector load here since narrowing a load is ok unless
2314 // it's volatile or atomic.
2315 if (ISD::isNON_EXTLoad(N.getNode())) {
2316 LoadSDNode *LD = cast<LoadSDNode>(N);
2317 if (LD->isSimple() &&
2318 IsProfitableToFold(N, LD, Root) &&
2319 IsLegalToFold(N, Parent, Root, OptLevel)) {
2320 PatternNodeWithChain = N;
2321 return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
2322 Segment);
2323 }
2324 }
2325
2326 // We can also match the special zero extended load opcode.
2327 if (N.getOpcode() == X86ISD::VZEXT_LOAD) {
2328 PatternNodeWithChain = N;
2329 if (IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
2330 IsLegalToFold(PatternNodeWithChain, Parent, Root, OptLevel)) {
2331 auto *MI = cast<MemIntrinsicSDNode>(PatternNodeWithChain);
2332 return selectAddr(MI, MI->getBasePtr(), Base, Scale, Index, Disp,
2333 Segment);
2334 }
2335 }
2336
2337 // Need to make sure that the SCALAR_TO_VECTOR and load are both only used
2338 // once. Otherwise the load might get duplicated and the chain output of the
2339 // duplicate load will not be observed by all dependencies.
2340 if (N.getOpcode() == ISD::SCALAR_TO_VECTOR && N.getNode()->hasOneUse()) {
2341 PatternNodeWithChain = N.getOperand(0);
2342 if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
2343 IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
2344 IsLegalToFold(PatternNodeWithChain, N.getNode(), Root, OptLevel)) {
2345 LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
2346 return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
2347 Segment);
2348 }
2349 }
2350
2351 return false;
2352}
2353
2354
2355bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
2356 if (const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2357 uint64_t ImmVal = CN->getZExtValue();
2358 if (!isUInt<32>(ImmVal))
2359 return false;
2360
2361 Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i64);
2362 return true;
2363 }
2364
2365 // In static codegen with small code model, we can get the address of a label
2366 // into a register with 'movl'.
2367 if (N->getOpcode() != X86ISD::Wrapper)
2368 return false;
2369
2370 N = N.getOperand(0);
2371
2372 // At least GNU as does not accept 'movl' for TPOFF relocations.
2373 // FIXME: We could use 'movl' when we know we are targeting MC.
2374 if (N->getOpcode() == ISD::TargetGlobalTLSAddress)
2375 return false;
2376
2377 Imm = N;
2378 if (N->getOpcode() != ISD::TargetGlobalAddress)
2379 return TM.getCodeModel() == CodeModel::Small;
2380
2381 Optional<ConstantRange> CR =
2382 cast<GlobalAddressSDNode>(N)->getGlobal()->getAbsoluteSymbolRange();
2383 if (!CR)
2384 return TM.getCodeModel() == CodeModel::Small;
2385
2386 return CR->getUnsignedMax().ult(1ull << 32);
2387}
2388
2389bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
2390 SDValue &Scale, SDValue &Index,
2391 SDValue &Disp, SDValue &Segment) {
2392 // Save the debug loc before calling selectLEAAddr, in case it invalidates N.
2393 SDLoc DL(N);
2394
2395 if (!selectLEAAddr(N, Base, Scale, Index, Disp, Segment))
2396 return false;
2397
2398 RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
2399 if (RN && RN->getReg() == 0)
2400 Base = CurDAG->getRegister(0, MVT::i64);
2401 else if (Base.getValueType() == MVT::i32 && !isa<FrameIndexSDNode>(Base)) {
2402 // Base could already be %rip, particularly in the x32 ABI.
2403 SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
2404 MVT::i64), 0);
2405 Base = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
2406 Base);
2407 }
2408
2409 RN = dyn_cast<RegisterSDNode>(Index);
2410 if (RN && RN->getReg() == 0)
2411 Index = CurDAG->getRegister(0, MVT::i64);
2412 else {
2413 assert(Index.getValueType() == MVT::i32 &&
2414 "Expect to be extending 32-bit registers for use in LEA");
2415 SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
2416 MVT::i64), 0);
2417 Index = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
2418 Index);
2419 }
2420
2421 return true;
2422}
2423
2424/// Calls SelectAddr and determines if the maximal addressing
2425/// mode it matches can be cost effectively emitted as an LEA instruction.
2426bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
2427 SDValue &Base, SDValue &Scale,
2428 SDValue &Index, SDValue &Disp,
2429 SDValue &Segment) {
2430 X86ISelAddressMode AM;
2431
2432 // Save the DL and VT before calling matchAddress, it can invalidate N.
2433 SDLoc DL(N);
2434 MVT VT = N.getSimpleValueType();
2435
2436 // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
2437 // segments.
2438 SDValue Copy = AM.Segment;
2439 SDValue T = CurDAG->getRegister(0, MVT::i32);
2440 AM.Segment = T;
2441 if (matchAddress(N, AM))
2442 return false;
2443 assert(T == AM.Segment);
2444 AM.Segment = Copy;
2445
2446 unsigned Complexity = 0;
2447 if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode())
2448 Complexity = 1;
2449 else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
2450 Complexity = 4;
2451
2452 if (AM.IndexReg.getNode())
2453 Complexity++;
2454
2455 // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg or
2456 // a simple shift.
2457 if (AM.Scale > 1)
2458 Complexity++;
2459
2460 // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
2461 // to a LEA. This is determined with some experimentation but is by no means
2462 // optimal (especially for code size consideration). LEA is nice because of
2463 // its three-address nature. Tweak the cost function again when we can run
2464 // convertToThreeAddress() at register allocation time.
2465 if (AM.hasSymbolicDisplacement()) {
2466 // For X86-64, always use LEA to materialize RIP-relative addresses.
2467 if (Subtarget->is64Bit())
2468 Complexity = 4;
2469 else
2470 Complexity += 2;
2471 }
2472
2473 // Heuristic: try harder to form an LEA from ADD if the operands set flags.
2474 // Unlike ADD, LEA does not affect flags, so we will be less likely to require
2475 // duplicating flag-producing instructions later in the pipeline.
2476 if (N.getOpcode() == ISD::ADD) {
2477 auto isMathWithFlags = [](SDValue V) {
2478 switch (V.getOpcode()) {
2479 case X86ISD::ADD:
2480 case X86ISD::SUB:
2481 case X86ISD::ADC:
2482 case X86ISD::SBB:
2483 /* TODO: These opcodes can be added safely, but we may want to justify
2484 their inclusion for different reasons (better for reg-alloc).
2485 case X86ISD::SMUL:
2486 case X86ISD::UMUL:
2487 case X86ISD::OR:
2488 case X86ISD::XOR:
2489 case X86ISD::AND:
2490 */
2491 // Value 1 is the flag output of the node - verify it's not dead.
2492 return !SDValue(V.getNode(), 1).use_empty();
2493 default:
2494 return false;
2495 }
2496 };
2497 // TODO: This could be an 'or' rather than 'and' to make the transform more
2498 // likely to happen. We might want to factor in whether there's a
2499 // load folding opportunity for the math op that disappears with LEA.
2500 if (isMathWithFlags(N.getOperand(0)) && isMathWithFlags(N.getOperand(1)))
2501 Complexity++;
2502 }
2503
2504 if (AM.Disp)
2505 Complexity++;
2506
2507 // If it isn't worth using an LEA, reject it.
2508 if (Complexity <= 2)
2509 return false;
2510
2511 getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2512 return true;
2513}
2514
2515/// This is only run on TargetGlobalTLSAddress nodes.
2516bool X86DAGToDAGISel::selectTLSADDRAddr(SDValue N, SDValue &Base,
2517 SDValue &Scale, SDValue &Index,
2518 SDValue &Disp, SDValue &Segment) {
2519 assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
2520 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
2521
2522 X86ISelAddressMode AM;
2523 AM.GV = GA->getGlobal();
2524 AM.Disp += GA->getOffset();
2525 AM.SymbolFlags = GA->getTargetFlags();
2526
2527 MVT VT = N.getSimpleValueType();
2528 if (VT == MVT::i32) {
2529 AM.Scale = 1;
2530 AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
2531 }
2532
2533 getAddressOperands(AM, SDLoc(N), VT, Base, Scale, Index, Disp, Segment);
2534 return true;
2535}
2536
2537bool X86DAGToDAGISel::selectRelocImm(SDValue N, SDValue &Op) {
2538 if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
2539 Op = CurDAG->getTargetConstant(CN->getAPIntValue(), SDLoc(CN),
2540 N.getValueType());
2541 return true;
2542 }
2543
2544 // Keep track of the original value type and whether this value was
2545 // truncated. If we see a truncation from pointer type to VT that truncates
2546 // bits that are known to be zero, we can use a narrow reference.
2547 EVT VT = N.getValueType();
2548 bool WasTruncated = false;
2549 if (N.getOpcode() == ISD::TRUNCATE) {
2550 WasTruncated = true;
2551 N = N.getOperand(0);
2552 }
2553
2554 if (N.getOpcode() != X86ISD::Wrapper)
2555 return false;
2556
2557 // We can only use non-GlobalValues as immediates if they were not truncated,
2558 // as we do not have any range information. If we have a GlobalValue and the
2559 // address was not truncated, we can select it as an operand directly.
2560 unsigned Opc = N.getOperand(0)->getOpcode();
2561 if (Opc != ISD::TargetGlobalAddress || !WasTruncated) {
2562 Op = N.getOperand(0);
2563 // We can only select the operand directly if we didn't have to look past a
2564 // truncate.
2565 return !WasTruncated;
2566 }
2567
2568 // Check that the global's range fits into VT.
2569 auto *GA = cast<GlobalAddressSDNode>(N.getOperand(0));
2570 Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
2571 if (!CR || CR->getUnsignedMax().uge(1ull << VT.getSizeInBits()))
2572 return false;
2573
2574 // Okay, we can use a narrow reference.
2575 Op = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(N), VT,
2576 GA->getOffset(), GA->getTargetFlags());
2577 return true;
2578}
2579
2580bool X86DAGToDAGISel::tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
2581 SDValue &Base, SDValue &Scale,
2582 SDValue &Index, SDValue &Disp,
2583 SDValue &Segment) {
2584 if (!ISD::isNON_EXTLoad(N.getNode()) ||
       [Analyzer step 30: Calling 'isNON_EXTLoad']
       [Analyzer step 35: Returning from 'isNON_EXTLoad']
2585 !IsProfitableToFold(N, P, Root) ||
       [Analyzer step 36: Passing null pointer value via 2nd parameter 'U']
       [Analyzer step 37: Calling 'X86DAGToDAGISel::IsProfitableToFold']
2586 !IsLegalToFold(N, P, Root, OptLevel))
2587 return false;
2588
2589 return selectAddr(N.getNode(),
2590 N.getOperand(1), Base, Scale, Index, Disp, Segment);
2591}
2592
2593/// Return an SDNode that returns the value of the global base register.
2594/// Output instructions required to initialize the global base register,
2595/// if necessary.
2596SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
2597 unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
2598 auto &DL = MF->getDataLayout();
2599 return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
2600}
2601
2602bool X86DAGToDAGISel::isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const {
2603 if (N->getOpcode() == ISD::TRUNCATE)
2604 N = N->getOperand(0).getNode();
2605 if (N->getOpcode() != X86ISD::Wrapper)
2606 return false;
2607
2608 auto *GA = dyn_cast<GlobalAddressSDNode>(N->getOperand(0));
2609 if (!GA)
2610 return false;
2611
2612 Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
2613 return CR && CR->getSignedMin().sge(-1ull << Width) &&
2614 CR->getSignedMax().slt(1ull << Width);
2615}
2616
2617static X86::CondCode getCondFromNode(SDNode *N) {
2618 assert(N->isMachineOpcode() && "Unexpected node");
2619 X86::CondCode CC = X86::COND_INVALID;
2620 unsigned Opc = N->getMachineOpcode();
2621 if (Opc == X86::JCC_1)
2622 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(1));
2623 else if (Opc == X86::SETCCr)
2624 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(0));
2625 else if (Opc == X86::SETCCm)
2626 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(5));
2627 else if (Opc == X86::CMOV16rr || Opc == X86::CMOV32rr ||
2628 Opc == X86::CMOV64rr)
2629 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(2));
2630 else if (Opc == X86::CMOV16rm || Opc == X86::CMOV32rm ||
2631 Opc == X86::CMOV64rm)
2632 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(6));
2633
2634 return CC;
2635}
2636
2637/// Test whether the given X86ISD::CMP node has any users that use a flag
2638/// other than ZF.
2639bool X86DAGToDAGISel::onlyUsesZeroFlag(SDValue Flags) const {
2640 // Examine each user of the node.
2641 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
2642 UI != UE; ++UI) {
2643 // Only check things that use the flags.
2644 if (UI.getUse().getResNo() != Flags.getResNo())
2645 continue;
2646 // Only examine CopyToReg uses that copy to EFLAGS.
2647 if (UI->getOpcode() != ISD::CopyToReg ||
2648 cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
2649 return false;
2650 // Examine each user of the CopyToReg use.
2651 for (SDNode::use_iterator FlagUI = UI->use_begin(),
2652 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
2653 // Only examine the Flag result.
2654 if (FlagUI.getUse().getResNo() != 1) continue;
2655 // Anything unusual: assume conservatively.
2656 if (!FlagUI->isMachineOpcode()) return false;
2657 // Examine the condition code of the user.
2658 X86::CondCode CC = getCondFromNode(*FlagUI);
2659
2660 switch (CC) {
2661 // Comparisons which only use the zero flag.
2662 case X86::COND_E: case X86::COND_NE:
2663 continue;
2664 // Anything else: assume conservatively.
2665 default:
2666 return false;
2667 }
2668 }
2669 }
2670 return true;
2671}
2672
2673/// Test whether the given X86ISD::CMP node has any uses which require the SF
2674/// flag to be accurate.
2675bool X86DAGToDAGISel::hasNoSignFlagUses(SDValue Flags) const {
2676 // Examine each user of the node.
2677 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
2678 UI != UE; ++UI) {
2679 // Only check things that use the flags.
2680 if (UI.getUse().getResNo() != Flags.getResNo())
2681 continue;
2682 // Only examine CopyToReg uses that copy to EFLAGS.
2683 if (UI->getOpcode() != ISD::CopyToReg ||
2684 cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
2685 return false;
2686 // Examine each user of the CopyToReg use.
2687 for (SDNode::use_iterator FlagUI = UI->use_begin(),
2688 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
2689 // Only examine the Flag result.
2690 if (FlagUI.getUse().getResNo() != 1) continue;
2691 // Anything unusual: assume conservatively.
2692 if (!FlagUI->isMachineOpcode()) return false;
2693 // Examine the condition code of the user.
2694 X86::CondCode CC = getCondFromNode(*FlagUI);
2695
2696 switch (CC) {
2697 // Comparisons which don't examine the SF flag.
2698 case X86::COND_A: case X86::COND_AE:
2699 case X86::COND_B: case X86::COND_BE:
2700 case X86::COND_E: case X86::COND_NE:
2701 case X86::COND_O: case X86::COND_NO:
2702 case X86::COND_P: case X86::COND_NP:
2703 continue;
2704 // Anything else: assume conservatively.
2705 default:
2706 return false;
2707 }
2708 }
2709 }
2710 return true;
2711}
2712
2713static bool mayUseCarryFlag(X86::CondCode CC) {
2714 switch (CC) {
2715 // Comparisons which don't examine the CF flag.
2716 case X86::COND_O: case X86::COND_NO:
2717 case X86::COND_E: case X86::COND_NE:
2718 case X86::COND_S: case X86::COND_NS:
2719 case X86::COND_P: case X86::COND_NP:
2720 case X86::COND_L: case X86::COND_GE:
2721 case X86::COND_G: case X86::COND_LE:
2722 return false;
2723 // Anything else: assume conservatively.
2724 default:
2725 return true;
2726 }
2727}
2728
2729/// Test whether the given node which sets flags has any uses which require the
2730/// CF flag to be accurate.
2731 bool X86DAGToDAGISel::hasNoCarryFlagUses(SDValue Flags) const {
2732 // Examine each user of the node.
2733 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
2734 UI != UE; ++UI) {
2735 // Only check things that use the flags.
2736 if (UI.getUse().getResNo() != Flags.getResNo())
2737 continue;
2738
2739 unsigned UIOpc = UI->getOpcode();
2740
2741 if (UIOpc == ISD::CopyToReg) {
2742 // Only examine CopyToReg uses that copy to EFLAGS.
2743 if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
2744 return false;
2745 // Examine each user of the CopyToReg use.
2746 for (SDNode::use_iterator FlagUI = UI->use_begin(), FlagUE = UI->use_end();
2747 FlagUI != FlagUE; ++FlagUI) {
2748 // Only examine the Flag result.
2749 if (FlagUI.getUse().getResNo() != 1)
2750 continue;
2751 // Anything unusual: assume conservatively.
2752 if (!FlagUI->isMachineOpcode())
2753 return false;
2754 // Examine the condition code of the user.
2755 X86::CondCode CC = getCondFromNode(*FlagUI);
2756
2757 if (mayUseCarryFlag(CC))
2758 return false;
2759 }
2760
2761 // This CopyToReg is ok. Move on to the next user.
2762 continue;
2763 }
2764
2765 // This might be an unselected node. So look for the pre-isel opcodes that
2766 // use flags.
2767 unsigned CCOpNo;
2768 switch (UIOpc) {
2769 default:
2770 // Something unusual. Be conservative.
2771 return false;
2772 case X86ISD::SETCC: CCOpNo = 0; break;
2773 case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
2774 case X86ISD::CMOV: CCOpNo = 2; break;
2775 case X86ISD::BRCOND: CCOpNo = 2; break;
2776 }
2777
2778 X86::CondCode CC = (X86::CondCode)UI->getConstantOperandVal(CCOpNo);
2779 if (mayUseCarryFlag(CC))
2780 return false;
2781 }
2782 return true;
2783}
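
These flag-use helpers exist because several rewrites below change how EFLAGS are produced: on x86, INC and DEC leave CF untouched while ADD and SUB rewrite it, so ADD/SUB may only become INC/DEC when no user reads CF. A minimal standalone C++ sketch of that asymmetry (assumed x86 flag semantics, for illustration only; not LLVM code):

#include <cassert>
#include <cstdint>

struct Flags { bool CF; };

// ADD updates the carry flag from the unsigned overflow of the addition.
Flags add1(uint8_t &V, Flags F) {
  unsigned Wide = unsigned(V) + 1u;
  V = uint8_t(Wide);
  F.CF = Wide > 0xFF;
  return F;
}

// INC performs the same addition but leaves CF exactly as it was.
Flags inc(uint8_t &V, Flags F) {
  ++V;
  return F;
}

int main() {
  uint8_t A = 0xFF, B = 0xFF;
  Flags F{false};
  Flags FA = add1(A, F); // 0xFF + 1 wraps, so ADD sets CF
  Flags FI = inc(B, F);  // same wrap, but INC preserves the old CF
  assert(A == 0 && B == 0 && FA.CF && !FI.CF);
  return 0; // results agree, flags diverge: hence hasNoCarryFlagUses()
}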
2784
2785/// Check whether or not the chain ending in StoreNode is suitable for doing
2786/// the {load; op; store} to modify transformation.
2787static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
2788 SDValue StoredVal, SelectionDAG *CurDAG,
2789 unsigned LoadOpNo,
2790 LoadSDNode *&LoadNode,
2791 SDValue &InputChain) {
2792 // Is the stored value result 0 of the operation?
2793 if (StoredVal.getResNo() != 0) return false;
2794
2795 // Are there other uses of the operation other than the store?
2796 if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;
2797
2798 // Is the store non-extending and non-indexed?
2799 if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
2800 return false;
2801
2802 SDValue Load = StoredVal->getOperand(LoadOpNo);
2803 // Is the stored value a non-extending and non-indexed load?
2804 if (!ISD::isNormalLoad(Load.getNode())) return false;
2805
2806 // Return LoadNode by reference.
2807 LoadNode = cast<LoadSDNode>(Load);
2808
2809 // Is store the only read of the loaded value?
2810 if (!Load.hasOneUse())
2811 return false;
2812
2813 // Is the address of the store the same as the load?
2814 if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
2815 LoadNode->getOffset() != StoreNode->getOffset())
2816 return false;
2817
2818 bool FoundLoad = false;
2819 SmallVector<SDValue, 4> ChainOps;
2820 SmallVector<const SDNode *, 4> LoopWorklist;
2821 SmallPtrSet<const SDNode *, 16> Visited;
2822 const unsigned int Max = 1024;
2823
2824 // Visualization of Load-Op-Store fusion:
2825 // -------------------------
2826 // Legend:
2827 // *-lines = Chain operand dependencies.
2828 // |-lines = Normal operand dependencies.
2829 // Dependencies flow down and right. n-suffix references multiple nodes.
2830 //
2831 // C Xn C
2832 // * * *
2833 // * * *
2834 // Xn A-LD Yn TF Yn
2835 // * * \ | * |
2836 // * * \ | * |
2837 // * * \ | => A--LD_OP_ST
2838 // * * \| \
2839 // TF OP \
2840 // * | \ Zn
2841 // * | \
2842 // A-ST Zn
2843 //
2844
2845 // This merge induced dependences from: #1: Xn -> LD, OP, Zn
2846 // #2: Yn -> LD
2847 // #3: ST -> Zn
2848
2849 // Ensure the transform is safe by checking for the dual
2850 // dependencies to make sure we do not induce a loop.
2851
2852 // As LD is a predecessor to both OP and ST we can do this by checking:
2853 // a). if LD is a predecessor to a member of Xn or Yn.
2854 // b). if a Zn is a predecessor to ST.
2855
2856 // However, (b) can only occur through being a chain predecessor to
2857 // ST, which is the same as Zn being a member or predecessor of Xn,
2858 // which is a subset of LD being a predecessor of Xn. So it's
2859 // subsumed by check (a).
2860
2861 SDValue Chain = StoreNode->getChain();
2862
2863 // Gather X elements in ChainOps.
2864 if (Chain == Load.getValue(1)) {
2865 FoundLoad = true;
2866 ChainOps.push_back(Load.getOperand(0));
2867 } else if (Chain.getOpcode() == ISD::TokenFactor) {
2868 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
2869 SDValue Op = Chain.getOperand(i);
2870 if (Op == Load.getValue(1)) {
2871 FoundLoad = true;
2872 // Drop Load, but keep its chain. No cycle check necessary.
2873 ChainOps.push_back(Load.getOperand(0));
2874 continue;
2875 }
2876 LoopWorklist.push_back(Op.getNode());
2877 ChainOps.push_back(Op);
2878 }
2879 }
2880
2881 if (!FoundLoad)
2882 return false;
2883
2884 // Worklist is currently Xn. Add Yn to worklist.
2885 for (SDValue Op : StoredVal->ops())
2886 if (Op.getNode() != LoadNode)
2887 LoopWorklist.push_back(Op.getNode());
2888
2889 // Check (a) if Load is a predecessor to Xn + Yn
2890 if (SDNode::hasPredecessorHelper(Load.getNode(), Visited, LoopWorklist, Max,
2891 true))
2892 return false;
2893
2894 InputChain =
2895 CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ChainOps);
2896 return true;
2897}
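
For intuition, the fusion this predicate guards collapses separate load, operate, and store nodes into one read-modify-write memory operation. A small standalone C++ analogue of the before/after semantics (illustration only, not LLVM code):

#include <cassert>

int main() {
  int Mem = 41;
  // Before fusion: distinct LD, OP, and ST steps on the same address.
  int Loaded = Mem;        // LD
  int Result = Loaded + 1; // OP
  Mem = Result;            // ST
  // After fusion the same effect is one memory-operand instruction,
  // e.g. `inc dword ptr [Mem]`, legal only if no chain cycle is induced.
  assert(Mem == 42);
  return 0;
}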
2898
2899// Change a chain of {load; op; store} of the same value into a simple op
2900// through memory of that value, if the uses of the modified value and its
2901// address are suitable.
2902//
2903// The tablegen memory operand pattern is currently not able to match
2904// the case where the EFLAGS on the original operation are used.
2905//
2906// To move this to tablegen, we'll need to improve tablegen to allow flags to
2907// be transferred from a node in the pattern to the result node, probably with
2908// a new keyword. For example, we have this
2909// def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2910// [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2911// (implicit EFLAGS)]>;
2912// but maybe need something like this
2913// def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2914// [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2915// (transferrable EFLAGS)]>;
2916//
2917// Until then, we manually fold these and instruction select the operation
2918// here.
2919bool X86DAGToDAGISel::foldLoadStoreIntoMemOperand(SDNode *Node) {
2920 StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
2921 SDValue StoredVal = StoreNode->getOperand(1);
2922 unsigned Opc = StoredVal->getOpcode();
2923
2924 // Before we try to select anything, make sure this is memory operand size
2925 // and opcode we can handle. Note that this must match the code below that
2926 // actually lowers the opcodes.
2927 EVT MemVT = StoreNode->getMemoryVT();
2928 if (MemVT != MVT::i64 && MemVT != MVT::i32 && MemVT != MVT::i16 &&
2929 MemVT != MVT::i8)
2930 return false;
2931
2932 bool IsCommutable = false;
2933 bool IsNegate = false;
2934 switch (Opc) {
2935 default:
2936 return false;
2937 case X86ISD::SUB:
2938 IsNegate = isNullConstant(StoredVal.getOperand(0));
2939 break;
2940 case X86ISD::SBB:
2941 break;
2942 case X86ISD::ADD:
2943 case X86ISD::ADC:
2944 case X86ISD::AND:
2945 case X86ISD::OR:
2946 case X86ISD::XOR:
2947 IsCommutable = true;
2948 break;
2949 }
2950
2951 unsigned LoadOpNo = IsNegate ? 1 : 0;
2952 LoadSDNode *LoadNode = nullptr;
2953 SDValue InputChain;
2954 if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
2955 LoadNode, InputChain)) {
2956 if (!IsCommutable)
2957 return false;
2958
2959 // This operation is commutable, try the other operand.
2960 LoadOpNo = 1;
2961 if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
2962 LoadNode, InputChain))
2963 return false;
2964 }
2965
2966 SDValue Base, Scale, Index, Disp, Segment;
2967 if (!selectAddr(LoadNode, LoadNode->getBasePtr(), Base, Scale, Index, Disp,
2968 Segment))
2969 return false;
2970
2971 auto SelectOpcode = [&](unsigned Opc64, unsigned Opc32, unsigned Opc16,
2972 unsigned Opc8) {
2973 switch (MemVT.getSimpleVT().SimpleTy) {
2974 case MVT::i64:
2975 return Opc64;
2976 case MVT::i32:
2977 return Opc32;
2978 case MVT::i16:
2979 return Opc16;
2980 case MVT::i8:
2981 return Opc8;
2982 default:
2983 llvm_unreachable("Invalid size!");
2984 }
2985 };
2986
2987 MachineSDNode *Result;
2988 switch (Opc) {
2989 case X86ISD::SUB:
2990 // Handle negate.
2991 if (IsNegate) {
2992 unsigned NewOpc = SelectOpcode(X86::NEG64m, X86::NEG32m, X86::NEG16m,
2993 X86::NEG8m);
2994 const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
2995 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
2996 MVT::Other, Ops);
2997 break;
2998 }
2999 LLVM_FALLTHROUGH;
3000 case X86ISD::ADD:
3001 // Try to match inc/dec.
3002 if (!Subtarget->slowIncDec() || OptForSize) {
3003 bool IsOne = isOneConstant(StoredVal.getOperand(1));
3004 bool IsNegOne = isAllOnesConstant(StoredVal.getOperand(1));
3005 // An ADD/SUB with 1/-1 whose carry flag isn't used can use inc/dec.
3006 if ((IsOne || IsNegOne) && hasNoCarryFlagUses(StoredVal.getValue(1))) {
3007 unsigned NewOpc =
3008 ((Opc == X86ISD::ADD) == IsOne)
3009 ? SelectOpcode(X86::INC64m, X86::INC32m, X86::INC16m, X86::INC8m)
3010 : SelectOpcode(X86::DEC64m, X86::DEC32m, X86::DEC16m, X86::DEC8m);
3011 const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
3012 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
3013 MVT::Other, Ops);
3014 break;
3015 }
3016 }
3017 LLVM_FALLTHROUGH;
3018 case X86ISD::ADC:
3019 case X86ISD::SBB:
3020 case X86ISD::AND:
3021 case X86ISD::OR:
3022 case X86ISD::XOR: {
3023 auto SelectRegOpcode = [SelectOpcode](unsigned Opc) {
3024 switch (Opc) {
3025 case X86ISD::ADD:
3026 return SelectOpcode(X86::ADD64mr, X86::ADD32mr, X86::ADD16mr,
3027 X86::ADD8mr);
3028 case X86ISD::ADC:
3029 return SelectOpcode(X86::ADC64mr, X86::ADC32mr, X86::ADC16mr,
3030 X86::ADC8mr);
3031 case X86ISD::SUB:
3032 return SelectOpcode(X86::SUB64mr, X86::SUB32mr, X86::SUB16mr,
3033 X86::SUB8mr);
3034 case X86ISD::SBB:
3035 return SelectOpcode(X86::SBB64mr, X86::SBB32mr, X86::SBB16mr,
3036 X86::SBB8mr);
3037 case X86ISD::AND:
3038 return SelectOpcode(X86::AND64mr, X86::AND32mr, X86::AND16mr,
3039 X86::AND8mr);
3040 case X86ISD::OR:
3041 return SelectOpcode(X86::OR64mr, X86::OR32mr, X86::OR16mr, X86::OR8mr);
3042 case X86ISD::XOR:
3043 return SelectOpcode(X86::XOR64mr, X86::XOR32mr, X86::XOR16mr,
3044 X86::XOR8mr);
3045 default:
3046 llvm_unreachable("Invalid opcode!");
3047 }
3048 };
3049 auto SelectImm8Opcode = [SelectOpcode](unsigned Opc) {
3050 switch (Opc) {
3051 case X86ISD::ADD:
3052 return SelectOpcode(X86::ADD64mi8, X86::ADD32mi8, X86::ADD16mi8, 0);
3053 case X86ISD::ADC:
3054 return SelectOpcode(X86::ADC64mi8, X86::ADC32mi8, X86::ADC16mi8, 0);
3055 case X86ISD::SUB:
3056 return SelectOpcode(X86::SUB64mi8, X86::SUB32mi8, X86::SUB16mi8, 0);
3057 case X86ISD::SBB:
3058 return SelectOpcode(X86::SBB64mi8, X86::SBB32mi8, X86::SBB16mi8, 0);
3059 case X86ISD::AND:
3060 return SelectOpcode(X86::AND64mi8, X86::AND32mi8, X86::AND16mi8, 0);
3061 case X86ISD::OR:
3062 return SelectOpcode(X86::OR64mi8, X86::OR32mi8, X86::OR16mi8, 0);
3063 case X86ISD::XOR:
3064 return SelectOpcode(X86::XOR64mi8, X86::XOR32mi8, X86::XOR16mi8, 0);
3065 default:
3066 llvm_unreachable("Invalid opcode!");
3067 }
3068 };
3069 auto SelectImmOpcode = [SelectOpcode](unsigned Opc) {
3070 switch (Opc) {
3071 case X86ISD::ADD:
3072 return SelectOpcode(X86::ADD64mi32, X86::ADD32mi, X86::ADD16mi,
3073 X86::ADD8mi);
3074 case X86ISD::ADC:
3075 return SelectOpcode(X86::ADC64mi32, X86::ADC32mi, X86::ADC16mi,
3076 X86::ADC8mi);
3077 case X86ISD::SUB:
3078 return SelectOpcode(X86::SUB64mi32, X86::SUB32mi, X86::SUB16mi,
3079 X86::SUB8mi);
3080 case X86ISD::SBB:
3081 return SelectOpcode(X86::SBB64mi32, X86::SBB32mi, X86::SBB16mi,
3082 X86::SBB8mi);
3083 case X86ISD::AND:
3084 return SelectOpcode(X86::AND64mi32, X86::AND32mi, X86::AND16mi,
3085 X86::AND8mi);
3086 case X86ISD::OR:
3087 return SelectOpcode(X86::OR64mi32, X86::OR32mi, X86::OR16mi,
3088 X86::OR8mi);
3089 case X86ISD::XOR:
3090 return SelectOpcode(X86::XOR64mi32, X86::XOR32mi, X86::XOR16mi,
3091 X86::XOR8mi);
3092 default:
3093 llvm_unreachable("Invalid opcode!");
3094 }
3095 };
3096
3097 unsigned NewOpc = SelectRegOpcode(Opc);
3098 SDValue Operand = StoredVal->getOperand(1-LoadOpNo);
3099
3100 // See if the operand is a constant that we can fold into an immediate
3101 // operand.
3102 if (auto *OperandC = dyn_cast<ConstantSDNode>(Operand)) {
3103 int64_t OperandV = OperandC->getSExtValue();
3104
3105 // Check if we can shrink the operand enough to fit in an immediate (or
3106 // fit into a smaller immediate) by negating it and switching the
3107 // operation.
3108 if ((Opc == X86ISD::ADD || Opc == X86ISD::SUB) &&
3109 ((MemVT != MVT::i8 && !isInt<8>(OperandV) && isInt<8>(-OperandV)) ||
3110 (MemVT == MVT::i64 && !isInt<32>(OperandV) &&
3111 isInt<32>(-OperandV))) &&
3112 hasNoCarryFlagUses(StoredVal.getValue(1))) {
3113 OperandV = -OperandV;
3114 Opc = Opc == X86ISD::ADD ? X86ISD::SUB : X86ISD::ADD;
3115 }
3116
3117 // First try to fit this into an Imm8 operand. If it doesn't fit, then try
3118 // the larger immediate operand.
3119 if (MemVT != MVT::i8 && isInt<8>(OperandV)) {
3120 Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
3121 NewOpc = SelectImm8Opcode(Opc);
3122 } else if (MemVT != MVT::i64 || isInt<32>(OperandV)) {
3123 Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
3124 NewOpc = SelectImmOpcode(Opc);
3125 }
3126 }
3127
3128 if (Opc == X86ISD::ADC || Opc == X86ISD::SBB) {
3129 SDValue CopyTo =
3130 CurDAG->getCopyToReg(InputChain, SDLoc(Node), X86::EFLAGS,
3131 StoredVal.getOperand(2), SDValue());
3132
3133 const SDValue Ops[] = {Base, Scale, Index, Disp,
3134 Segment, Operand, CopyTo, CopyTo.getValue(1)};
3135 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
3136 Ops);
3137 } else {
3138 const SDValue Ops[] = {Base, Scale, Index, Disp,
3139 Segment, Operand, InputChain};
3140 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
3141 Ops);
3142 }
3143 break;
3144 }
3145 default:
3146 llvm_unreachable("Invalid opcode!");
3147 }
3148
3149 MachineMemOperand *MemOps[] = {StoreNode->getMemOperand(),
3150 LoadNode->getMemOperand()};
3151 CurDAG->setNodeMemRefs(Result, MemOps);
3152
3153 // Update Load Chain uses as well.
3154 ReplaceUses(SDValue(LoadNode, 1), SDValue(Result, 1));
3155 ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
3156 ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
3157 CurDAG->RemoveDeadNode(Node);
3158 return true;
3159}
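
As a sanity check on the INC/DEC selection above, the predicate (Opc == X86ISD::ADD) == IsOne at source line 3008 picks INC for add-by-1 and sub-by-negative-1, and DEC for the other two combinations. A tiny sketch of just that predicate (the names here are illustrative, not LLVM's):

#include <cassert>

enum Op { ADD, SUB };

// INC when the opcode and the unit operand "agree", DEC otherwise.
bool selectsInc(Op O, bool IsOne) { return (O == ADD) == IsOne; }

int main() {
  assert(selectsInc(ADD, true));   // x + 1    -> inc
  assert(!selectsInc(ADD, false)); // x + (-1) -> dec
  assert(!selectsInc(SUB, true));  // x - 1    -> dec
  assert(selectsInc(SUB, false));  // x - (-1) -> inc
  return 0;
}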
3160
3161// See if this is an X & Mask that we can match to BEXTR/BZHI.
3162// Where Mask is one of the following patterns:
3163// a) x & (1 << nbits) - 1
3164// b) x & ~(-1 << nbits)
3165// c) x & (-1 >> (32 - y))
3166// d) x << (32 - y) >> (32 - y)
3167bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
3168 assert(
3169 (Node->getOpcode() == ISD::AND || Node->getOpcode() == ISD::SRL) &&
3170 "Should be either an and-mask, or right-shift after clearing high bits.");
3171
3172 // BEXTR is a BMI instruction, BZHI is a BMI2 instruction. We need at least one.
3173 if (!Subtarget->hasBMI() && !Subtarget->hasBMI2())
3174 return false;
3175
3176 MVT NVT = Node->getSimpleValueType(0);
3177
3178 // Only supported for 32 and 64 bits.
3179 if (NVT != MVT::i32 && NVT != MVT::i64)
3180 return false;
3181
3182 SDValue NBits;
3183
3184 // If we have BMI2's BZHI, we are ok with multi-use patterns.
3185 // Else, if we only have BMI1's BEXTR, we require one-use.
3186 const bool CanHaveExtraUses = Subtarget->hasBMI2();
3187 auto checkUses = [CanHaveExtraUses](SDValue Op, unsigned NUses) {
3188 return CanHaveExtraUses ||
3189 Op.getNode()->hasNUsesOfValue(NUses, Op.getResNo());
3190 };
3191 auto checkOneUse = [checkUses](SDValue Op) { return checkUses(Op, 1); };
3192 auto checkTwoUse = [checkUses](SDValue Op) { return checkUses(Op, 2); };
3193
3194 auto peekThroughOneUseTruncation = [checkOneUse](SDValue V) {
3195 if (V->getOpcode() == ISD::TRUNCATE && checkOneUse(V)) {
3196 assert(V.getSimpleValueType() == MVT::i32 &&
3197 V.getOperand(0).getSimpleValueType() == MVT::i64 &&
3198 "Expected i64 -> i32 truncation");
3199 V = V.getOperand(0);
3200 }
3201 return V;
3202 };
3203
3204 // a) x & ((1 << nbits) + (-1))
3205 auto matchPatternA = [checkOneUse, peekThroughOneUseTruncation,
3206 &NBits](SDValue Mask) -> bool {
3207 // Match `add`. Must only have one use!
3208 if (Mask->getOpcode() != ISD::ADD || !checkOneUse(Mask))
3209 return false;
3210 // We should be adding an all-ones constant (i.e. subtracting one).
3211 if (!isAllOnesConstant(Mask->getOperand(1)))
3212 return false;
3213 // Match `1 << nbits`. Might be truncated. Must only have one use!
3214 SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
3215 if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
3216 return false;
3217 if (!isOneConstant(M0->getOperand(0)))
3218 return false;
3219 NBits = M0->getOperand(1);
3220 return true;
3221 };
3222
3223 auto isAllOnes = [this, peekThroughOneUseTruncation, NVT](SDValue V) {
3224 V = peekThroughOneUseTruncation(V);
3225 return CurDAG->MaskedValueIsAllOnes(
3226 V, APInt::getLowBitsSet(V.getSimpleValueType().getSizeInBits(),
3227 NVT.getSizeInBits()));
3228 };
3229
3230 // b) x & ~(-1 << nbits)
3231 auto matchPatternB = [checkOneUse, isAllOnes, peekThroughOneUseTruncation,
3232 &NBits](SDValue Mask) -> bool {
3233 // Match `~()`. Must only have one use!
3234 if (Mask.getOpcode() != ISD::XOR || !checkOneUse(Mask))
3235 return false;
3236 // The -1 only has to be all-ones for the final Node's NVT.
3237 if (!isAllOnes(Mask->getOperand(1)))
3238 return false;
3239 // Match `-1 << nbits`. Might be truncated. Must only have one use!
3240 SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
3241 if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
3242 return false;
3243 // The -1 only has to be all-ones for the final Node's NVT.
3244 if (!isAllOnes(M0->getOperand(0)))
3245 return false;
3246 NBits = M0->getOperand(1);
3247 return true;
3248 };
3249
3250 // Match potentially-truncated (bitwidth - y)
3251 auto matchShiftAmt = [checkOneUse, &NBits](SDValue ShiftAmt,
3252 unsigned Bitwidth) {
3253 // Skip over a truncate of the shift amount.
3254 if (ShiftAmt.getOpcode() == ISD::TRUNCATE) {
3255 ShiftAmt = ShiftAmt.getOperand(0);
3256 // The trunc should have been the only user of the real shift amount.
3257 if (!checkOneUse(ShiftAmt))
3258 return false;
3259 }
3260 // Match the shift amount as: (bitwidth - y). It should go away, too.
3261 if (ShiftAmt.getOpcode() != ISD::SUB)
3262 return false;
3263 auto V0 = dyn_cast<ConstantSDNode>(ShiftAmt.getOperand(0));
3264 if (!V0 || V0->getZExtValue() != Bitwidth)
3265 return false;
3266 NBits = ShiftAmt.getOperand(1);
3267 return true;
3268 };
3269
3270 // c) x & (-1 >> (32 - y))
3271 auto matchPatternC = [checkOneUse, peekThroughOneUseTruncation,
3272 matchShiftAmt](SDValue Mask) -> bool {
3273 // The mask itself may be truncated.
3274 Mask = peekThroughOneUseTruncation(Mask);
3275 unsigned Bitwidth = Mask.getSimpleValueType().getSizeInBits();
3276 // Match `l>>`. Must only have one use!
3277 if (Mask.getOpcode() != ISD::SRL || !checkOneUse(Mask))
3278 return false;
3279 // We should be shifting truly all-ones constant.
3280 if (!isAllOnesConstant(Mask.getOperand(0)))
3281 return false;
3282 SDValue M1 = Mask.getOperand(1);
3283 // The shift amount should not be used externally.
3284 if (!checkOneUse(M1))
3285 return false;
3286 return matchShiftAmt(M1, Bitwidth);
3287 };
3288
3289 SDValue X;
3290
3291 // d) x << (32 - y) >> (32 - y)
3292 auto matchPatternD = [checkOneUse, checkTwoUse, matchShiftAmt,
3293 &X](SDNode *Node) -> bool {
3294 if (Node->getOpcode() != ISD::SRL)
3295 return false;
3296 SDValue N0 = Node->getOperand(0);
3297 if (N0->getOpcode() != ISD::SHL || !checkOneUse(N0))
3298 return false;
3299 unsigned Bitwidth = N0.getSimpleValueType().getSizeInBits();
3300 SDValue N1 = Node->getOperand(1);
3301 SDValue N01 = N0->getOperand(1);
3302 // Both of the shifts must be by the exact same value.
3303 // There should not be any uses of the shift amount outside of the pattern.
3304 if (N1 != N01 || !checkTwoUse(N1))
3305 return false;
3306 if (!matchShiftAmt(N1, Bitwidth))
3307 return false;
3308 X = N0->getOperand(0);
3309 return true;
3310 };
3311
3312 auto matchLowBitMask = [matchPatternA, matchPatternB,
3313 matchPatternC](SDValue Mask) -> bool {
3314 return matchPatternA(Mask) || matchPatternB(Mask) || matchPatternC(Mask);
3315 };
3316
3317 if (Node->getOpcode() == ISD::AND) {
3318 X = Node->getOperand(0);
3319 SDValue Mask = Node->getOperand(1);
3320
3321 if (matchLowBitMask(Mask)) {
3322 // Great.
3323 } else {
3324 std::swap(X, Mask);
3325 if (!matchLowBitMask(Mask))
3326 return false;
3327 }
3328 } else if (!matchPatternD(Node))
3329 return false;
3330
3331 SDLoc DL(Node);
3332
3333 // Truncate the shift amount.
3334 NBits = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NBits);
3335 insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3336
3337 // Insert 8-bit NBits into lowest 8 bits of 32-bit register.
3338 // All the other bits are undefined, we do not care about them.
3339 SDValue ImplDef = SDValue(
3340 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i32), 0);
3341 insertDAGNode(*CurDAG, SDValue(Node, 0), ImplDef);
3342
3343 SDValue SRIdxVal = CurDAG->getTargetConstant(X86::sub_8bit, DL, MVT::i32);
3344 insertDAGNode(*CurDAG, SDValue(Node, 0), SRIdxVal);
3345 NBits = SDValue(
3346 CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::i32, ImplDef,
3347 NBits, SRIdxVal), 0);
3348 insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3349
3350 if (Subtarget->hasBMI2()) {
3351 // Great, just emit the BZHI.
3352 if (NVT != MVT::i32) {
3353 // But have to place the bit count into the wide-enough register first.
3354 NBits = CurDAG->getNode(ISD::ANY_EXTEND, DL, NVT, NBits);
3355 insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3356 }
3357
3358 SDValue Extract = CurDAG->getNode(X86ISD::BZHI, DL, NVT, X, NBits);
3359 ReplaceNode(Node, Extract.getNode());
3360 SelectCode(Extract.getNode());
3361 return true;
3362 }
3363
3364 // Else, if we do *NOT* have BMI2, let's find out if 'X' is
3365 // *logically* shifted (potentially with a one-use trunc in between),
3366 // and the truncation was the only use of the shift,
3367 // and if so look past the one-use truncation.
3368 {
3369 SDValue RealX = peekThroughOneUseTruncation(X);
3370 // FIXME: only if the shift is one-use?
3371 if (RealX != X && RealX.getOpcode() == ISD::SRL)
3372 X = RealX;
3373 }
3374
3375 MVT XVT = X.getSimpleValueType();
3376
3377 // Else, emitting BEXTR requires one more step.
3378 // The 'control' of BEXTR has the pattern of:
3379 // [15...8 bit][ 7...0 bit] location
3380 // [ bit count][ shift] name
3381 // I.e. 0b00000010'00000001 means (x >> 1) & 0b11
3382
3383 // Shift NBits left by 8 bits, thus producing 'control'.
3384 // This makes the low 8 bits zero.
3385 SDValue C8 = CurDAG->getConstant(8, DL, MVT::i8);
3386 SDValue Control = CurDAG->getNode(ISD::SHL, DL, MVT::i32, NBits, C8);
3387 insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3388
3389 // If the 'X' is *logically* shifted, we can fold that shift into 'control'.
3390 // FIXME: only if the shift is one-use?
3391 if (X.getOpcode() == ISD::SRL) {
3392 SDValue ShiftAmt = X.getOperand(1);
3393 X = X.getOperand(0);
3394
3395 assert(ShiftAmt.getValueType() == MVT::i8 &&
3396 "Expected shift amount to be i8");
3397
3398 // Now, *zero*-extend the shift amount. The bits 8...15 *must* be zero!
3399 // We could zext to i16 in some form, but we intentionally don't do that.
3400 SDValue OrigShiftAmt = ShiftAmt;
3401 ShiftAmt = CurDAG->getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShiftAmt);
3402 insertDAGNode(*CurDAG, OrigShiftAmt, ShiftAmt);
3403
3404 // And now 'or' these low 8 bits of shift amount into the 'control'.
3405 Control = CurDAG->getNode(ISD::OR, DL, MVT::i32, Control, ShiftAmt);
3406 insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3407 }
3408
3409 // But have to place the 'control' into the wide-enough register first.
3410 if (XVT != MVT::i32) {
3411 Control = CurDAG->getNode(ISD::ANY_EXTEND, DL, XVT, Control);
3412 insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3413 }
3414
3415 // And finally, form the BEXTR itself.
3416 SDValue Extract = CurDAG->getNode(X86ISD::BEXTR, DL, XVT, X, Control);
3417
3418 // The 'X' was originally truncated. Do that now.
3419 if (XVT != NVT) {
3420 insertDAGNode(*CurDAG, SDValue(Node, 0), Extract);
3421 Extract = CurDAG->getNode(ISD::TRUNCATE, DL, NVT, Extract);
3422 }
3423
3424 ReplaceNode(Node, Extract.getNode());
3425 SelectCode(Extract.getNode());
3426
3427 return true;
3428}
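
All four mask shapes (a)-(d) that matchBitExtract recognizes compute the same low-bits extraction, which is what lets them all lower to BEXTR/BZHI. A standalone C++ check of that equivalence, assuming 32-bit operands and 0 < n < 32:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t x = 0xDEADBEEF;
  for (unsigned n = 1; n < 32; ++n) {
    uint32_t a = x & ((1u << n) - 1);         // a) x & (1 << nbits) - 1
    uint32_t b = x & ~(~0u << n);             // b) x & ~(-1 << nbits)
    uint32_t c = x & (~0u >> (32 - n));       // c) x & (-1 >> (32 - y))
    uint32_t d = (x << (32 - n)) >> (32 - n); // d) x << (32-y) >> (32-y)
    assert(a == b && b == c && c == d);       // all keep the low n bits
  }
  return 0;
}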
3429
3430// See if this is an (X >> C1) & C2 that we can match to BEXTR/BEXTRI.
3431MachineSDNode *X86DAGToDAGISel::matchBEXTRFromAndImm(SDNode *Node) {
3432 MVT NVT = Node->getSimpleValueType(0);
3433 SDLoc dl(Node);
3434
3435 SDValue N0 = Node->getOperand(0);
3436 SDValue N1 = Node->getOperand(1);
3437
3438 // If we have TBM we can use an immediate for the control. If we have BMI
3439 // we should only do this if the BEXTR instruction is implemented well.
3440 // Otherwise moving the control into a register makes this more costly.
3441 // TODO: Maybe load folding, greater than 32-bit masks, or a guarantee of LICM
3442 // hoisting the move immediate would make it worthwhile with a less optimal
3443 // BEXTR?
3444 if (!Subtarget->hasTBM() &&
3445 !(Subtarget->hasBMI() && Subtarget->hasFastBEXTR()))
3446 return nullptr;
3447
3448 // Must have a shift right.
3449 if (N0->getOpcode() != ISD::SRL && N0->getOpcode() != ISD::SRA)
3450 return nullptr;
3451
3452 // Shift can't have additional users.
3453 if (!N0->hasOneUse())
3454 return nullptr;
3455
3456 // Only supported for 32 and 64 bits.
3457 if (NVT != MVT::i32 && NVT != MVT::i64)
3458 return nullptr;
3459
3460 // Shift amount and RHS of and must be constant.
3461 ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(N1);
3462 ConstantSDNode *ShiftCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
3463 if (!MaskCst || !ShiftCst)
3464 return nullptr;
3465
3466 // And RHS must be a mask.
3467 uint64_t Mask = MaskCst->getZExtValue();
3468 if (!isMask_64(Mask))
3469 return nullptr;
3470
3471 uint64_t Shift = ShiftCst->getZExtValue();
3472 uint64_t MaskSize = countPopulation(Mask);
3473
3474 // Don't interfere with something that can be handled by extracting AH.
3475 // TODO: If we are able to fold a load, BEXTR might still be better than AH.
3476 if (Shift == 8 && MaskSize == 8)
3477 return nullptr;
3478
3479 // Make sure we are only using bits that were in the original value, not
3480 // shifted in.
3481 if (Shift + MaskSize > NVT.getSizeInBits())
3482 return nullptr;
3483
3484 SDValue New = CurDAG->getTargetConstant(Shift | (MaskSize << 8), dl, NVT);
3485 unsigned ROpc = NVT == MVT::i64 ? X86::BEXTRI64ri : X86::BEXTRI32ri;
3486 unsigned MOpc = NVT == MVT::i64 ? X86::BEXTRI64mi : X86::BEXTRI32mi;
3487
3488 // BMI requires the immediate to be placed in a register.
3489 if (!Subtarget->hasTBM()) {
3490 ROpc = NVT == MVT::i64 ? X86::BEXTR64rr : X86::BEXTR32rr;
3491 MOpc = NVT == MVT::i64 ? X86::BEXTR64rm : X86::BEXTR32rm;
3492 unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
3493 New = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, New), 0);
3494 }
3495
3496 MachineSDNode *NewNode;
3497 SDValue Input = N0->getOperand(0);
3498 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3499 if (tryFoldLoad(Node, N0.getNode(), Input, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3500 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, New, Input.getOperand(0) };
3501 SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
3502 NewNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3503 // Update the chain.
3504 ReplaceUses(Input.getValue(1), SDValue(NewNode, 2));
3505 // Record the mem-refs
3506 CurDAG->setNodeMemRefs(NewNode, {cast<LoadSDNode>(Input)->getMemOperand()});
3507 } else {
3508 NewNode = CurDAG->getMachineNode(ROpc, dl, NVT, MVT::i32, Input, New);
3509 }
3510
3511 return NewNode;
3512}
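
The immediate built above, Shift | (MaskSize << 8), packs the start position into bits 7..0 and the field length into bits 15..8, matching the BEXTR control layout sketched earlier. A minimal C++ model of that architectural semantics (an illustrative sketch, not LLVM code):

#include <cassert>
#include <cstdint>

// Software model of 32-bit BEXTR: extract Len bits of Src starting at Start.
uint32_t bextr32(uint32_t Src, uint32_t Control) {
  unsigned Start = Control & 0xFF;
  unsigned Len = (Control >> 8) & 0xFF;
  if (Start >= 32)
    return 0;
  uint64_t Shifted = uint64_t(Src) >> Start;
  if (Len >= 32)
    return uint32_t(Shifted);
  return uint32_t(Shifted & ((1ULL << Len) - 1));
}

int main() {
  uint32_t Control = 4 | (8u << 8);             // Shift = 4, MaskSize = 8
  assert(bextr32(0xABCD1234, Control) == 0x23); // bits 11..4 of the source
  return 0;
}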
3513
3514// Emit a PCMPISTR(I/M) instruction.
3515MachineSDNode *X86DAGToDAGISel::emitPCMPISTR(unsigned ROpc, unsigned MOpc,
3516 bool MayFoldLoad, const SDLoc &dl,
3517 MVT VT, SDNode *Node) {
3518 SDValue N0 = Node->getOperand(0);
3519 SDValue N1 = Node->getOperand(1);
3520 SDValue Imm = Node->getOperand(2);
3521 const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
3522 Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
3523
3524 // Try to fold a load. No need to check alignment.
3525 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3526 if (MayFoldLoad && tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3527 SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
3528 N1.getOperand(0) };
3529 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other);
3530 MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3531 // Update the chain.
3532 ReplaceUses(N1.getValue(1), SDValue(CNode, 2));
3533 // Record the mem-refs
3534 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
3535 return CNode;
3536 }
3537
3538 SDValue Ops[] = { N0, N1, Imm };
3539 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32);
3540 MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
3541 return CNode;
3542}
3543
3544// Emit a PCMPESTR(I/M) instruction. Also return the Glue result in case we need
3545// to emit a second instruction after this one. This is needed since we have two
3546// copyToReg nodes glued before this and we need to continue that glue through.
3547MachineSDNode *X86DAGToDAGISel::emitPCMPESTR(unsigned ROpc, unsigned MOpc,
3548 bool MayFoldLoad, const SDLoc &dl,
3549 MVT VT, SDNode *Node,
3550 SDValue &InFlag) {
3551 SDValue N0 = Node->getOperand(0);
3552 SDValue N2 = Node->getOperand(2);
3553 SDValue Imm = Node->getOperand(4);
3554 const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
3555 Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
3556
3557 // Try to fold a load. No need to check alignment.
3558 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3559 if (MayFoldLoad && tryFoldLoad(Node, N2, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3560 SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
3561 N2.getOperand(0), InFlag };
3562 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other, MVT::Glue);
3563 MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3564 InFlag = SDValue(CNode, 3);
3565 // Update the chain.
3566 ReplaceUses(N2.getValue(1), SDValue(CNode, 2));
3567 // Record the mem-refs
3568 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N2)->getMemOperand()});
3569 return CNode;
3570 }
3571
3572 SDValue Ops[] = { N0, N2, Imm, InFlag };
3573 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Glue);
3574 MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
3575 InFlag = SDValue(CNode, 2);
3576 return CNode;
3577}
3578
3579bool X86DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
3580 EVT VT = N->getValueType(0);
3581
3582 // Only handle scalar shifts.
3583 if (VT.isVector())
3584 return false;
3585
3586 // Narrower shifts only mask to 5 bits in hardware.
3587 unsigned Size = VT == MVT::i64 ? 64 : 32;
3588
3589 SDValue OrigShiftAmt = N->getOperand(1);
3590 SDValue ShiftAmt = OrigShiftAmt;
3591 SDLoc DL(N);
3592
3593 // Skip over a truncate of the shift amount.
3594 if (ShiftAmt->getOpcode() == ISD::TRUNCATE)
3595 ShiftAmt = ShiftAmt->getOperand(0);
3596
3597 // This function is called after X86DAGToDAGISel::matchBitExtract(),
3598 // so we are not afraid that we might mess up BZHI/BEXTR pattern.
3599
3600 SDValue NewShiftAmt;
3601 if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB) {
3602 SDValue Add0 = ShiftAmt->getOperand(0);
3603 SDValue Add1 = ShiftAmt->getOperand(1);
3604 // If we are shifting by X+/-N where N == 0 mod Size, then just shift by X
3605 // to avoid the ADD/SUB.
3606 if (isa<ConstantSDNode>(Add1) &&
3607 cast<ConstantSDNode>(Add1)->getZExtValue() % Size == 0) {
3608 NewShiftAmt = Add0;
3609 // If we are shifting by N-X where N == 0 mod Size, then just shift by -X to
3610 // generate a NEG instead of a SUB of a constant.
3611 } else if (ShiftAmt->getOpcode() == ISD::SUB &&
3612 isa<ConstantSDNode>(Add0) &&
3613 cast<ConstantSDNode>(Add0)->getZExtValue() != 0 &&
3614 cast<ConstantSDNode>(Add0)->getZExtValue() % Size == 0) {
3615 // Insert a negate op.
3616 // TODO: This isn't guaranteed to replace the sub if there is a logic cone
3617 // that uses it that's not a shift.
3618 EVT SubVT = ShiftAmt.getValueType();
3619 SDValue Zero = CurDAG->getConstant(0, DL, SubVT);
3620 SDValue Neg = CurDAG->getNode(ISD::SUB, DL, SubVT, Zero, Add1);
3621 NewShiftAmt = Neg;
3622
3623 // Insert these operands into a valid topological order so they can
3624 // get selected independently.
3625 insertDAGNode(*CurDAG, OrigShiftAmt, Zero);
3626 insertDAGNode(*CurDAG, OrigShiftAmt, Neg);
3627 } else
3628 return false;
3629 } else
3630 return false;
3631
3632 if (NewShiftAmt.getValueType() != MVT::i8) {
3633 // Need to truncate the shift amount.
3634 NewShiftAmt = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NewShiftAmt);
3635 // Add to a correct topological ordering.
3636 insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
3637 }
3638
3639 // Insert a new mask to keep the shift amount legal. This should be removed
3640 // by isel patterns.
3641 NewShiftAmt = CurDAG->getNode(ISD::AND, DL, MVT::i8, NewShiftAmt,
3642 CurDAG->getConstant(Size - 1, DL, MVT::i8));
3643 // Place in a correct topological ordering.
3644 insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
3645
3646 SDNode *UpdatedNode = CurDAG->UpdateNodeOperands(N, N->getOperand(0),
3647 NewShiftAmt);
3648 if (UpdatedNode != N) {
3649 // If we found an existing node, we should replace ourselves with that node
3650 // and wait for it to be selected after its other users.
3651 ReplaceNode(N, UpdatedNode);
3652 return true;
3653 }
3654
3655 // If the original shift amount is now dead, delete it so that we don't run
3656 // it through isel.
3657 if (OrigShiftAmt.getNode()->use_empty())
3658 CurDAG->RemoveDeadNode(OrigShiftAmt.getNode());
3659
3660 // Now that we've optimized the shift amount, defer to normal isel to get
3661 // load folding and legacy vs BMI2 selection without repeating it here.
3662 SelectCode(N);
3663 return true;
3664}
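
tryShiftAmountMod leans on the hardware masking of shift counts (5 bits for 32-bit shifts, 6 bits for 64-bit). A standalone C++ check of the two identities used above, with the masking written out explicitly since oversized shifts are undefined behavior in C++:

#include <cassert>
#include <cstdint>

// 32-bit shift with the 5-bit count masking x86 applies in hardware.
uint32_t shl32(uint32_t V, unsigned Amt) { return V << (Amt & 31); }

int main() {
  uint32_t V = 0x12345678;
  for (unsigned X = 0; X < 64; ++X) {
    // Shifting by X + 32 equals shifting by X, so the ADD can be dropped.
    assert(shl32(V, X + 32) == shl32(V, X));
    // Shifting by 32 - X equals shifting by -X, so the SUB can become NEG.
    assert(((32u - X) & 31) == ((0u - X) & 31));
  }
  return 0;
}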
3665
3666bool X86DAGToDAGISel::tryShrinkShlLogicImm(SDNode *N) {
3667 MVT NVT = N->getSimpleValueType(0);
3668 unsigned Opcode = N->getOpcode();
3669 SDLoc dl(N);
3670
3671 // For operations of the form (x << C1) op C2, check if we can use a smaller
3672 // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
3673 SDValue Shift = N->getOperand(0);
3674 SDValue N1 = N->getOperand(1);
3675
3676 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
3677 if (!Cst)
3678 return false;
3679
3680 int64_t Val = Cst->getSExtValue();
3681
3682 // If we have an any_extend feeding the AND, look through it to see if there
3683 // is a shift behind it. But only if the AND doesn't use the extended bits.
3684 // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
3685 bool FoundAnyExtend = false;
3686 if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
3687 Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
3688 isUInt<32>(Val)) {
3689 FoundAnyExtend = true;
3690 Shift = Shift.getOperand(0);
3691 }
3692
3693 if (Shift.getOpcode() != ISD::SHL || !Shift.hasOneUse())
3694 return false;
3695
3696 // i8 is unshrinkable, i16 should be promoted to i32.
3697 if (NVT != MVT::i32 && NVT != MVT::i64)
3698 return false;
3699
3700 ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
3701 if (!ShlCst)
3702 return false;
3703
3704 uint64_t ShAmt = ShlCst->getZExtValue();
3705
3706 // Make sure that we don't change the operation by removing bits.
3707 // This only matters for OR and XOR, AND is unaffected.
3708 uint64_t RemovedBitsMask = (1ULL << ShAmt) - 1;
3709 if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
3710 return false;
3711
3712 // Check the minimum bitwidth for the new constant.
3713 // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
3714 auto CanShrinkImmediate = [&](int64_t &ShiftedVal) {
3715 if (Opcode == ISD::AND) {
3716 // AND32ri is the same as AND64ri32 with zext imm.
3717 // Try this before sign extended immediates below.
3718 ShiftedVal = (uint64_t)Val >> ShAmt;
3719 if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
3720 return true;
3721 // Also swap order when the AND can become MOVZX.
3722 if (ShiftedVal == UINT8_MAX || ShiftedVal == UINT16_MAX)
3723 return true;
3724 }
3725 ShiftedVal = Val >> ShAmt;
3726 if ((!isInt<8>(Val) && isInt<8>(ShiftedVal)) ||
3727 (!isInt<32>(Val) && isInt<32>(ShiftedVal)))
3728 return true;
3729 if (Opcode != ISD::AND) {
3730 // MOV32ri+OR64r/XOR64r is cheaper than MOV64ri64+OR64rr/XOR64rr
3731 ShiftedVal = (uint64_t)Val >> ShAmt;
3732 if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
3733 return true;
3734 }
3735 return false;
3736 };
3737
3738 int64_t ShiftedVal;
3739 if (!CanShrinkImmediate(ShiftedVal))
3740 return false;
3741
3742 // Ok, we can reorder to get a smaller immediate.
3743
3744 // But, it's possible the original immediate allowed an AND to become MOVZX.
3745 // We check this late so as to delay the MaskedValueIsZero call as long as
3746 // possible.
3747 if (Opcode == ISD::AND) {
3748 // Find the smallest zext this could possibly be.
3749 unsigned ZExtWidth = Cst->getAPIntValue().getActiveBits();
3750 ZExtWidth = PowerOf2Ceil(std::max(ZExtWidth, 8U));
3751
3752 // Figure out which bits need to be zero to achieve that mask.
3753 APInt NeededMask = APInt::getLowBitsSet(NVT.getSizeInBits(),
3754 ZExtWidth);
3755 NeededMask &= ~Cst->getAPIntValue();
3756
3757 if (CurDAG->MaskedValueIsZero(N->getOperand(0), NeededMask))
3758 return false;
3759 }
3760
3761 SDValue X = Shift.getOperand(0);
3762 if (FoundAnyExtend) {
3763 SDValue NewX = CurDAG->getNode(ISD::ANY_EXTEND, dl, NVT, X);
3764 insertDAGNode(*CurDAG, SDValue(N, 0), NewX);
3765 X = NewX;
3766 }
3767
3768 SDValue NewCst = CurDAG->getConstant(ShiftedVal, dl, NVT);
3769 insertDAGNode(*CurDAG, SDValue(N, 0), NewCst);
3770 SDValue NewBinOp = CurDAG->getNode(Opcode, dl, NVT, X, NewCst);
3771 insertDAGNode(*CurDAG, SDValue(N, 0), NewBinOp);
3772 SDValue NewSHL = CurDAG->getNode(ISD::SHL, dl, NVT, NewBinOp,
3773 Shift.getOperand(1));
3774 ReplaceNode(N, NewSHL.getNode());
3775 SelectCode(NewSHL.getNode());
3776 return true;
3777}
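
A worked instance of the reassociation above, taking OR with a shift of 8: the wide constant 0xFF00 shrinks to the imm8 constant 0xFF once it is applied before the shift, which is legal because OR touches none of the bits the shift would discard. A standalone C++ check:

#include <cassert>
#include <cstdint>

int main() {
  for (uint64_t x = 0; x < 1000; ++x)
    // (x << 8) | 0xFF00  ==  ((x | 0xFF) << 8), with a smaller immediate.
    assert(((x << 8) | 0xFF00) == ((x | 0xFF) << 8));
  return 0;
}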
3778
3779/// Convert vector increment or decrement to sub/add with an all-ones constant:
3780/// add X, <1, 1...> --> sub X, <-1, -1...>
3781/// sub X, <1, 1...> --> add X, <-1, -1...>
3782/// The all-ones vector constant can be materialized using a pcmpeq instruction
3783/// that is commonly recognized as an idiom (has no register dependency), so
3784/// that's better/smaller than loading a splat 1 constant.
3785bool X86DAGToDAGISel::combineIncDecVector(SDNode *Node) {
3786 assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB) &&
3787 "Unexpected opcode for increment/decrement transform");
3788
3789 EVT VT = Node->getValueType(0);
3790 assert(VT.isVector() && "Should only be called for vectors.");
3791
3792 SDValue X = Node->getOperand(0);
3793 SDValue OneVec = Node->getOperand(1);
3794
3795 APInt SplatVal;
3796 if (!X86::isConstantSplat(OneVec, SplatVal) || !SplatVal.isOneValue())
3797 return false;
3798
3799 SDLoc DL(Node);
3800 SDValue OneConstant, AllOnesVec;
3801
3802 APInt Ones = APInt::getAllOnesValue(32);
3803 assert(VT.getSizeInBits() % 32 == 0 &&
3804 "Expected bit count to be a multiple of 32");
3805 OneConstant = CurDAG->getConstant(Ones, DL, MVT::i32);
3806 insertDAGNode(*CurDAG, X, OneConstant);
3807
3808 unsigned NumElts = VT.getSizeInBits() / 32;
3809 assert(NumElts > 0 && "Expected to get non-empty vector.");
3810 AllOnesVec = CurDAG->getSplatBuildVector(MVT::getVectorVT(MVT::i32, NumElts),
3811 DL, OneConstant);
3812 insertDAGNode(*CurDAG, X, AllOnesVec);
3813
3814 AllOnesVec = CurDAG->getBitcast(VT, AllOnesVec);
3815 insertDAGNode(*CurDAG, X, AllOnesVec);
3816
3817 unsigned NewOpcode = Node->getOpcode() == ISD::ADD ? ISD::SUB : ISD::ADD;
3818 SDValue NewNode = CurDAG->getNode(NewOpcode, DL, VT, X, AllOnesVec);
3819
3820 ReplaceNode(Node, NewNode.getNode());
3821 SelectCode(NewNode.getNode());
3822 return true;
3823}
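
The per-lane identity behind this rewrite is simply that adding one is subtracting negative one; the profit is that the all-ones vector on the right materializes for free via pcmpeq. A scalar C++ statement of the identity:

#include <cassert>
#include <cstdint>

int main() {
  for (int32_t X = -5; X <= 5; ++X) {
    assert(X + 1 == X - (-1)); // add X, <1,...> == sub X, <-1,...> per lane
    assert(X - 1 == X + (-1)); // sub X, <1,...> == add X, <-1,...> per lane
  }
  return 0;
}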
3824
3825/// If the high bits of an 'and' operand are known zero, try setting the
3826/// high bits of an 'and' constant operand to produce a smaller encoding by
3827/// creating a small, sign-extended negative immediate rather than a large
3828/// positive one. This reverses a transform in SimplifyDemandedBits that
3829/// shrinks mask constants by clearing bits. There is also a possibility that
3830/// the 'and' mask can be made -1, so the 'and' itself is unnecessary. In that
3831/// case, just replace the 'and'. Return 'true' if the node is replaced.
3832bool X86DAGToDAGISel::shrinkAndImmediate(SDNode *And) {
3833 // i8 is unshrinkable, i16 should be promoted to i32, and vector ops don't
3834 // have immediate operands.
3835 MVT VT = And->getSimpleValueType(0);
3836 if (VT != MVT::i32 && VT != MVT::i64)
3837 return false;
3838
3839 auto *And1C = dyn_cast<ConstantSDNode>(And->getOperand(1));
3840 if (!And1C)
3841 return false;
3842
3843 // Bail out if the mask constant is already negative. It can't shrink any more.
3844 // If the upper 32 bits of a 64 bit mask are all zeros, we have special isel
3845 // patterns to use a 32-bit and instead of a 64-bit and by relying on the
3846 // implicit zeroing of 32 bit ops. So we should check if the lower 32 bits
3847 // are negative too.
3848 APInt MaskVal = And1C->getAPIntValue();
3849 unsigned MaskLZ = MaskVal.countLeadingZeros();
3850 if (!MaskLZ || (VT == MVT::i64 && MaskLZ == 32))
3851 return false;
3852
3853 // Don't extend into the upper 32 bits of a 64 bit mask.
3854 if (VT == MVT::i64 && MaskLZ >= 32) {
3855 MaskLZ -= 32;
3856 MaskVal = MaskVal.trunc(32);
3857 }
3858
3859 SDValue And0 = And->getOperand(0);
3860 APInt HighZeros = APInt::getHighBitsSet(MaskVal.getBitWidth(), MaskLZ);
3861 APInt NegMaskVal = MaskVal | HighZeros;
3862
3863 // If a negative constant would not allow a smaller encoding, there's no need
3864 // to continue. Only change the constant when we know it's a win.
3865 unsigned MinWidth = NegMaskVal.getMinSignedBits();
3866 if (MinWidth > 32 || (MinWidth > 8 && MaskVal.getMinSignedBits() <= 32))
3867 return false;
3868
3869 // Extend masks if we truncated above.
3870 if (VT == MVT::i64 && MaskVal.getBitWidth() < 64) {
3871 NegMaskVal = NegMaskVal.zext(64);
3872 HighZeros = HighZeros.zext(64);
3873 }
3874
3875 // The variable operand must be all zeros in the top bits to allow using the
3876 // new, negative constant as the mask.
3877 if (!CurDAG->MaskedValueIsZero(And0, HighZeros))
3878 return false;
3879
3880 // Check if the mask is -1. In that case, this is an unnecessary instruction
3881 // that escaped earlier analysis.
3882 if (NegMaskVal.isAllOnesValue()) {
3883 ReplaceNode(And, And0.getNode());
3884 return true;
3885 }
3886
3887 // A negative mask allows a smaller encoding. Create a new 'and' node.
3888 SDValue NewMask = CurDAG->getConstant(NegMaskVal, SDLoc(And), VT);
3889 SDValue NewAnd = CurDAG->getNode(ISD::AND, SDLoc(And), VT, And0, NewMask);
3890 ReplaceNode(And, NewAnd.getNode());
3891 SelectCode(NewAnd.getNode());
3892 return true;
3893}
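
A concrete instance of the mask widening above: when the other operand's top bits are known zero, the positive mask 0x0FFFFFF0 (a 4-byte immediate) can be replaced by the sign-extended -16, i.e. 0xFFFFFFF0 (an imm8), without changing the result. A standalone C++ check under that assumption:

#include <cassert>
#include <cstdint>

int main() {
  // Assume analysis proved the top 4 bits of Val are zero.
  uint32_t Val = 0x0ABCDE5;
  assert((Val >> 28) == 0);
  // Setting the mask's known-zero high bits to 1 cannot change the result,
  // but turns the constant into cheap sign-extended imm8 material.
  assert((Val & 0x0FFFFFF0u) == (Val & 0xFFFFFFF0u));
  return 0;
}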
3894
3895static unsigned getVPTESTMOpc(MVT TestVT, bool IsTestN, bool FoldedLoad,
3896 bool FoldedBCast, bool Masked) {
3897 if (Masked) {
3898 if (FoldedLoad) {
3899 switch (TestVT.SimpleTy) {
3900 default: llvm_unreachable("Unexpected VT!");
3901 case MVT::v16i8:
3902 return IsTestN ? X86::VPTESTNMBZ128rmk : X86::VPTESTMBZ128rmk;
3903 case MVT::v8i16:
3904 return IsTestN ? X86::VPTESTNMWZ128rmk : X86::VPTESTMWZ128rmk;
3905 case MVT::v4i32:
3906 return IsTestN ? X86::VPTESTNMDZ128rmk : X86::VPTESTMDZ128rmk;
3907 case MVT::v2i64:
3908 return IsTestN ? X86::VPTESTNMQZ128rmk : X86::VPTESTMQZ128rmk;
3909 case MVT::v32i8:
3910 return IsTestN ? X86::VPTESTNMBZ256rmk : X86::VPTESTMBZ256rmk;
3911 case MVT::v16i16:
3912 return IsTestN ? X86::VPTESTNMWZ256rmk : X86::VPTESTMWZ256rmk;
3913 case MVT::v8i32:
3914 return IsTestN ? X86::VPTESTNMDZ256rmk : X86::VPTESTMDZ256rmk;
3915 case MVT::v4i64:
3916 return IsTestN ? X86::VPTESTNMQZ256rmk : X86::VPTESTMQZ256rmk;
3917 case MVT::v64i8:
3918 return IsTestN ? X86::VPTESTNMBZrmk : X86::VPTESTMBZrmk;
3919 case MVT::v32i16:
3920 return IsTestN ? X86::VPTESTNMWZrmk : X86::VPTESTMWZrmk;
3921 case MVT::v16i32:
3922 return IsTestN ? X86::VPTESTNMDZrmk : X86::VPTESTMDZrmk;
3923 case MVT::v8i64:
3924 return IsTestN ? X86::VPTESTNMQZrmk : X86::VPTESTMQZrmk;
3925 }
3926 }
3927
3928 if (FoldedBCast) {
3929 switch (TestVT.SimpleTy) {
3930 default: llvm_unreachable("Unexpected VT!");
3931 case MVT::v4i32:
3932 return IsTestN ? X86::VPTESTNMDZ128rmbk : X86::VPTESTMDZ128rmbk;
3933 case MVT::v2i64:
3934 return IsTestN ? X86::VPTESTNMQZ128rmbk : X86::VPTESTMQZ128rmbk;
3935 case MVT::v8i32:
3936 return IsTestN ? X86::VPTESTNMDZ256rmbk : X86::VPTESTMDZ256rmbk;
3937 case MVT::v4i64:
3938 return IsTestN ? X86::VPTESTNMQZ256rmbk : X86::VPTESTMQZ256rmbk;
3939 case MVT::v16i32:
3940 return IsTestN ? X86::VPTESTNMDZrmbk : X86::VPTESTMDZrmbk;
3941 case MVT::v8i64:
3942 return IsTestN ? X86::VPTESTNMQZrmbk : X86::VPTESTMQZrmbk;
3943 }
3944 }
3945
3946 switch (TestVT.SimpleTy) {
3947 default: llvm_unreachable("Unexpected VT!");
3948 case MVT::v16i8:
3949 return IsTestN ? X86::VPTESTNMBZ128rrk : X86::VPTESTMBZ128rrk;
3950 case MVT::v8i16:
3951 return IsTestN ? X86::VPTESTNMWZ128rrk : X86::VPTESTMWZ128rrk;
3952 case MVT::v4i32:
3953 return IsTestN ? X86::VPTESTNMDZ128rrk : X86::VPTESTMDZ128rrk;
3954 case MVT::v2i64:
3955 return IsTestN ? X86::VPTESTNMQZ128rrk : X86::VPTESTMQZ128rrk;
3956 case MVT::v32i8:
3957 return IsTestN ? X86::VPTESTNMBZ256rrk : X86::VPTESTMBZ256rrk;
3958 case MVT::v16i16:
3959 return IsTestN ? X86::VPTESTNMWZ256rrk : X86::VPTESTMWZ256rrk;
3960 case MVT::v8i32:
3961 return IsTestN ? X86::VPTESTNMDZ256rrk : X86::VPTESTMDZ256rrk;
3962 case MVT::v4i64:
3963 return IsTestN ? X86::VPTESTNMQZ256rrk : X86::VPTESTMQZ256rrk;
3964 case MVT::v64i8:
3965 return IsTestN ? X86::VPTESTNMBZrrk : X86::VPTESTMBZrrk;
3966 case MVT::v32i16:
3967 return IsTestN ? X86::VPTESTNMWZrrk : X86::VPTESTMWZrrk;
3968 case MVT::v16i32:
3969 return IsTestN ? X86::VPTESTNMDZrrk : X86::VPTESTMDZrrk;
3970 case MVT::v8i64:
3971 return IsTestN ? X86::VPTESTNMQZrrk : X86::VPTESTMQZrrk;
3972 }
3973 }
3974
3975 if (FoldedLoad) {
3976 switch (TestVT.SimpleTy) {
3977 default: llvm_unreachable("Unexpected VT!");
3978 case MVT::v16i8:
3979 return IsTestN ? X86::VPTESTNMBZ128rm : X86::VPTESTMBZ128rm;
3980 case MVT::v8i16:
3981 return IsTestN ? X86::VPTESTNMWZ128rm : X86::VPTESTMWZ128rm;
3982 case MVT::v4i32:
3983 return IsTestN ? X86::VPTESTNMDZ128rm : X86::VPTESTMDZ128rm;
3984 case MVT::v2i64:
3985 return IsTestN ? X86::VPTESTNMQZ128rm : X86::VPTESTMQZ128rm;
3986 case MVT::v32i8:
3987 return IsTestN ? X86::VPTESTNMBZ256rm : X86::VPTESTMBZ256rm;
3988 case MVT::v16i16:
3989 return IsTestN ? X86::VPTESTNMWZ256rm : X86::VPTESTMWZ256rm;
3990 case MVT::v8i32:
3991 return IsTestN ? X86::VPTESTNMDZ256rm : X86::VPTESTMDZ256rm;
3992 case MVT::v4i64:
3993 return IsTestN ? X86::VPTESTNMQZ256rm : X86::VPTESTMQZ256rm;
3994 case MVT::v64i8:
3995 return IsTestN ? X86::VPTESTNMBZrm : X86::VPTESTMBZrm;
3996 case MVT::v32i16:
3997 return IsTestN ? X86::VPTESTNMWZrm : X86::VPTESTMWZrm;
3998 case MVT::v16i32:
3999 return IsTestN ? X86::VPTESTNMDZrm : X86::VPTESTMDZrm;
4000 case MVT::v8i64:
4001 return IsTestN ? X86::VPTESTNMQZrm : X86::VPTESTMQZrm;
4002 }
4003 }
4004
4005 if (FoldedBCast) {
4006 switch (TestVT.SimpleTy) {
4007 default: llvm_unreachable("Unexpected VT!");
4008 case MVT::v4i32:
4009 return IsTestN ? X86::VPTESTNMDZ128rmb : X86::VPTESTMDZ128rmb;
4010 case MVT::v2i64:
4011 return IsTestN ? X86::VPTESTNMQZ128rmb : X86::VPTESTMQZ128rmb;
4012 case MVT::v8i32:
4013 return IsTestN ? X86::VPTESTNMDZ256rmb : X86::VPTESTMDZ256rmb;
4014 case MVT::v4i64:
4015 return IsTestN ? X86::VPTESTNMQZ256rmb : X86::VPTESTMQZ256rmb;
4016 case MVT::v16i32:
4017 return IsTestN ? X86::VPTESTNMDZrmb : X86::VPTESTMDZrmb;
4018 case MVT::v8i64:
4019 return IsTestN ? X86::VPTESTNMQZrmb : X86::VPTESTMQZrmb;
4020 }
4021 }
4022
4023 switch (TestVT.SimpleTy) {
4024 default: llvm_unreachable("Unexpected VT!");
4025 case MVT::v16i8:
4026 return IsTestN ? X86::VPTESTNMBZ128rr : X86::VPTESTMBZ128rr;
4027 case MVT::v8i16:
4028 return IsTestN ? X86::VPTESTNMWZ128rr : X86::VPTESTMWZ128rr;
4029 case MVT::v4i32:
4030 return IsTestN ? X86::VPTESTNMDZ128rr : X86::VPTESTMDZ128rr;
4031 case MVT::v2i64:
4032 return IsTestN ? X86::VPTESTNMQZ128rr : X86::VPTESTMQZ128rr;
4033 case MVT::v32i8:
4034 return IsTestN ? X86::VPTESTNMBZ256rr : X86::VPTESTMBZ256rr;
4035 case MVT::v16i16:
4036 return IsTestN ? X86::VPTESTNMWZ256rr : X86::VPTESTMWZ256rr;
4037 case MVT::v8i32:
4038 return IsTestN ? X86::VPTESTNMDZ256rr : X86::VPTESTMDZ256rr;
4039 case MVT::v4i64:
4040 return IsTestN ? X86::VPTESTNMQZ256rr : X86::VPTESTMQZ256rr;
4041 case MVT::v64i8:
4042 return IsTestN ? X86::VPTESTNMBZrr : X86::VPTESTMBZrr;
4043 case MVT::v32i16:
4044 return IsTestN ? X86::VPTESTNMWZrr : X86::VPTESTMWZrr;
4045 case MVT::v16i32:
4046 return IsTestN ? X86::VPTESTNMDZrr : X86::VPTESTMDZrr;
4047 case MVT::v8i64:
4048 return IsTestN ? X86::VPTESTNMQZrr : X86::VPTESTMQZrr;
4049 }
4050}
4051
4052// Try to create VPTESTM instruction. If InMask is not null, it will be used
4053// to form a masked operation.
4054bool X86DAGToDAGISel::tryVPTESTM(SDNode *Root, SDValue Setcc,
4055 SDValue InMask) {
4056 assert(Subtarget->hasAVX512() && "Expected AVX512!");
1
'?' condition is true
4057 assert(Setcc.getSimpleValueType().getVectorElementType() == MVT::i1 &&
4058 "Unexpected VT!");
2
'?' condition is true
4059
4060 // Look for equal and not equal compares.
4061 ISD::CondCode CC = cast<CondCodeSDNode>(Setcc.getOperand(2))->get();
4062 if (CC != ISD::SETEQ && CC != ISD::SETNE)
3
Assuming 'CC' is equal to SETEQ
4063 return false;
4064
4065 // See if we're comparing against zero. This should have been canonicalized
4066 // to RHS during lowering.
4067 if (!ISD::isBuildVectorAllZeros(Setcc.getOperand(1).getNode()))
4
Assuming the condition is false
5
Taking false branch
4068 return false;
4069
4070 SDValue N0 = Setcc.getOperand(0);
4071
4072 MVT CmpVT = N0.getSimpleValueType();
4073 MVT CmpSVT = CmpVT.getVectorElementType();
4074
4075 // Start with both operands the same. We'll try to refine this.
4076 SDValue Src0 = N0;
4077 SDValue Src1 = N0;
4078
4079 {
4080 // Look through single use bitcasts.
4081 SDValue N0Temp = N0;
4082 if (N0Temp.getOpcode() == ISD::BITCAST && N0Temp.hasOneUse())
6
Assuming the condition is false
4083 N0Temp = N0.getOperand(0);
4084
4085 // Look for single use AND.
4086 if (N0Temp.getOpcode() == ISD::AND && N0Temp.hasOneUse()) {
7
Assuming the condition is true
8
Assuming the condition is true
9
Taking true branch
4087 Src0 = N0Temp.getOperand(0);
4088 Src1 = N0Temp.getOperand(1);
4089 }
4090 }
4091
4092 // Without VLX we need to widen the load.
4093 bool Widen = !Subtarget->hasVLX() && !CmpVT.is512BitVector();
10
Assuming the condition is false
4094
4095 // We can only fold loads if the sources are unique.
4096 bool CanFoldLoads = Src0 != Src1;
4097
4098 // Try to fold loads unless we need to widen.
4099 bool FoldedLoad = false;
4100 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Load;
4101   if (!Widen && CanFoldLoads) {
10.1
'Widen' is false
10.2
'CanFoldLoads' is true
11
Taking true branch
4102 Load = Src1;
4103 FoldedLoad = tryFoldLoad(Root, N0.getNode(), Load, Tmp0, Tmp1, Tmp2, Tmp3,
4104 Tmp4);
4105     if (!FoldedLoad) {
11.1
'FoldedLoad' is false
12
Taking true branch
4106       // AND is commutative.
4107 Load = Src0;
4108 FoldedLoad = tryFoldLoad(Root, N0.getNode(), Load, Tmp0, Tmp1, Tmp2,
4109 Tmp3, Tmp4);
4110       if (FoldedLoad)
12.1
'FoldedLoad' is false
13
Taking false branch
4111 std::swap(Src0, Src1);
4112 }
4113 }
4114
4115 auto findBroadcastedOp = [](SDValue Src, MVT CmpSVT, SDNode *&Parent) {
4116 // Look through single use bitcasts.
4117 if (Src.getOpcode() == ISD::BITCAST && Src.hasOneUse())
17
Assuming the condition is false
4118 Src = Src.getOperand(0);
4119
4120 if (Src.getOpcode() == X86ISD::VBROADCAST && Src.hasOneUse()) {
18
Assuming the condition is true
19
Assuming the condition is true
20
Taking true branch
4121 Parent = Src.getNode();
4122 Src = Src.getOperand(0);
4123 if (Src.getSimpleValueType() == CmpSVT)
21
Taking true branch
4124 return Src;
22
Returning without writing to 'Parent'
4125 }
4126
4127 return SDValue();
4128 };
4129
4130   // If we didn't fold a load, try to match a broadcast. There is no widening
4131   // limitation for this, but only 32- and 64-bit element types are supported.
4132 bool FoldedBCast = false;
4133   if (!FoldedLoad && CanFoldLoads &&
13.1
'FoldedLoad' is false
13.2
'CanFoldLoads' is true
14
Taking true branch
4134 (CmpSVT == MVT::i32 || CmpSVT == MVT::i64)) {
4135 SDNode *ParentNode = nullptr;
15
'ParentNode' initialized to a null pointer value
4136 if ((Load = findBroadcastedOp(Src1, CmpSVT, ParentNode))) {
16
Calling 'operator()'
23
Returning from 'operator()'
24
Calling 'SDValue::operator bool'
26
Returning from 'SDValue::operator bool'
27
Taking true branch
4137 FoldedBCast = tryFoldLoad(Root, ParentNode, Load, Tmp0,
28
Passing null pointer value via 2nd parameter 'P'
29
Calling 'X86DAGToDAGISel::tryFoldLoad'
4138 Tmp1, Tmp2, Tmp3, Tmp4);
4139 }
4140
4141 // Try the other operand.
4142 if (!FoldedBCast) {
4143 if ((Load = findBroadcastedOp(Src0, CmpSVT, ParentNode))) {
4144 FoldedBCast = tryFoldLoad(Root, ParentNode, Load, Tmp0,
4145 Tmp1, Tmp2, Tmp3, Tmp4);
4146 if (FoldedBCast)
4147 std::swap(Src0, Src1);
4148 }
4149 }
4150 }
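The analyzer's claim in the path above (steps 15 and 28) is that 'ParentNode' is still null when it reaches tryFoldLoad, even though findBroadcastedOp assigns 'Parent' before returning a non-empty SDValue. Whether or not the path is feasible, guarding on the out-parameter makes the invariant explicit. A minimal sketch of that idea only, assuming the surrounding LLVM types; this is not the upstream fix:

    // Hypothetical hardening: call tryFoldLoad only once findBroadcastedOp
    // has demonstrably written ParentNode.
    SDNode *ParentNode = nullptr;
    if ((Load = findBroadcastedOp(Src1, CmpSVT, ParentNode)) && ParentNode)
      FoldedBCast = tryFoldLoad(Root, ParentNode, Load, Tmp0,
                                Tmp1, Tmp2, Tmp3, Tmp4);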
4151
4152 auto getMaskRC = [](MVT MaskVT) {
4153 switch (MaskVT.SimpleTy) {
4154     default: llvm_unreachable("Unexpected VT!");
4155 case MVT::v2i1: return X86::VK2RegClassID;
4156 case MVT::v4i1: return X86::VK4RegClassID;
4157 case MVT::v8i1: return X86::VK8RegClassID;
4158 case MVT::v16i1: return X86::VK16RegClassID;
4159 case MVT::v32i1: return X86::VK32RegClassID;
4160 case MVT::v64i1: return X86::VK64RegClassID;
4161 }
4162 };
4163
4164 bool IsMasked = InMask.getNode() != nullptr;
4165
4166 SDLoc dl(Root);
4167
4168 MVT ResVT = Setcc.getSimpleValueType();
4169 MVT MaskVT = ResVT;
4170 if (Widen) {
4171 // Widen the inputs using insert_subreg or copy_to_regclass.
4172 unsigned Scale = CmpVT.is128BitVector() ? 4 : 2;
4173 unsigned SubReg = CmpVT.is128BitVector() ? X86::sub_xmm : X86::sub_ymm;
4174 unsigned NumElts = CmpVT.getVectorNumElements() * Scale;
4175 CmpVT = MVT::getVectorVT(CmpSVT, NumElts);
4176 MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
4177 SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, dl,
4178 CmpVT), 0);
4179 Src0 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src0);
4180
4181     assert(!FoldedLoad && "Shouldn't have folded the load");
4182 if (!FoldedBCast)
4183 Src1 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src1);
4184
4185 if (IsMasked) {
4186 // Widen the mask.
4187 unsigned RegClass = getMaskRC(MaskVT);
4188 SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
4189 InMask = SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
4190 dl, MaskVT, InMask, RC), 0);
4191 }
4192 }
4193
4194 bool IsTestN = CC == ISD::SETEQ;
4195 unsigned Opc = getVPTESTMOpc(CmpVT, IsTestN, FoldedLoad, FoldedBCast,
4196 IsMasked);
4197
4198 MachineSDNode *CNode;
4199 if (FoldedLoad || FoldedBCast) {
4200 SDVTList VTs = CurDAG->getVTList(MaskVT, MVT::Other);
4201
4202 if (IsMasked) {
4203 SDValue Ops[] = { InMask, Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
4204 Load.getOperand(0) };
4205 CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4206 } else {
4207 SDValue Ops[] = { Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
4208 Load.getOperand(0) };
4209 CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4210 }
4211
4212 // Update the chain.
4213 ReplaceUses(Load.getValue(1), SDValue(CNode, 1));
4214 // Record the mem-refs
4215 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(Load)->getMemOperand()});
4216 } else {
4217 if (IsMasked)
4218 CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, InMask, Src0, Src1);
4219 else
4220 CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, Src0, Src1);
4221 }
4222
4223 // If we widened, we need to shrink the mask VT.
4224 if (Widen) {
4225 unsigned RegClass = getMaskRC(ResVT);
4226 SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
4227 CNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
4228 dl, ResVT, SDValue(CNode, 0), RC);
4229 }
4230
4231 ReplaceUses(SDValue(Root, 0), SDValue(CNode, 0));
4232 CurDAG->RemoveDeadNode(Root);
4233 return true;
4234}
4235
4236void X86DAGToDAGISel::Select(SDNode *Node) {
4237 MVT NVT = Node->getSimpleValueType(0);
4238 unsigned Opcode = Node->getOpcode();
4239 SDLoc dl(Node);
4240
4241 if (Node->isMachineOpcode()) {
4242     LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
4243 Node->setNodeId(-1);
4244 return; // Already selected.
4245 }
4246
4247 switch (Opcode) {
4248 default: break;
4249 case ISD::INTRINSIC_VOID: {
4250 unsigned IntNo = Node->getConstantOperandVal(1);
4251 switch (IntNo) {
4252 default: break;
4253 case Intrinsic::x86_sse3_monitor:
4254 case Intrinsic::x86_monitorx:
4255 case Intrinsic::x86_clzero: {
4256 bool Use64BitPtr = Node->getOperand(2).getValueType() == MVT::i64;
4257
4258 unsigned Opc = 0;
4259 switch (IntNo) {
4260       default: llvm_unreachable("Unexpected intrinsic!");
4261 case Intrinsic::x86_sse3_monitor:
4262 if (!Subtarget->hasSSE3())
4263 break;
4264 Opc = Use64BitPtr ? X86::MONITOR64rrr : X86::MONITOR32rrr;
4265 break;
4266 case Intrinsic::x86_monitorx:
4267 if (!Subtarget->hasMWAITX())
4268 break;
4269 Opc = Use64BitPtr ? X86::MONITORX64rrr : X86::MONITORX32rrr;
4270 break;
4271 case Intrinsic::x86_clzero:
4272 if (!Subtarget->hasCLZERO())
4273 break;
4274 Opc = Use64BitPtr ? X86::CLZERO64r : X86::CLZERO32r;
4275 break;
4276 }
4277
4278 if (Opc) {
4279 unsigned PtrReg = Use64BitPtr ? X86::RAX : X86::EAX;
4280 SDValue Chain = CurDAG->getCopyToReg(Node->getOperand(0), dl, PtrReg,
4281 Node->getOperand(2), SDValue());
4282 SDValue InFlag = Chain.getValue(1);
4283
4284 if (IntNo == Intrinsic::x86_sse3_monitor ||
4285 IntNo == Intrinsic::x86_monitorx) {
4286 // Copy the other two operands to ECX and EDX.
4287 Chain = CurDAG->getCopyToReg(Chain, dl, X86::ECX, Node->getOperand(3),
4288 InFlag);
4289 InFlag = Chain.getValue(1);
4290 Chain = CurDAG->getCopyToReg(Chain, dl, X86::EDX, Node->getOperand(4),
4291 InFlag);
4292 InFlag = Chain.getValue(1);
4293 }
4294
4295 MachineSDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
4296 { Chain, InFlag});
4297 ReplaceNode(Node, CNode);
4298 return;
4299 }
4300 }
4301 }
4302
4303 break;
4304 }
4305 case ISD::BRIND: {
4306 if (Subtarget->isTargetNaCl())
4307       // NaCl has its own pass where jmp %r32 instructions are converted to
4308       // jmp %r64. We leave the instruction alone.
4309 break;
4310 if (Subtarget->isTarget64BitILP32()) {
4311 // Converts a 32-bit register to a 64-bit, zero-extended version of
4312 // it. This is needed because x86-64 can do many things, but jmp %r32
4313 // ain't one of them.
4314 const SDValue &Target = Node->getOperand(1);
4315       assert(Target.getSimpleValueType() == llvm::MVT::i32);
4316 SDValue ZextTarget = CurDAG->getZExtOrTrunc(Target, dl, EVT(MVT::i64));
4317 SDValue Brind = CurDAG->getNode(ISD::BRIND, dl, MVT::Other,
4318 Node->getOperand(0), ZextTarget);
4319 ReplaceNode(Node, Brind.getNode());
4320 SelectCode(ZextTarget.getNode());
4321 SelectCode(Brind.getNode());
4322 return;
4323 }
4324 break;
4325 }
4326 case X86ISD::GlobalBaseReg:
4327 ReplaceNode(Node, getGlobalBaseReg());
4328 return;
4329
4330 case ISD::BITCAST:
4331 // Just drop all 128/256/512-bit bitcasts.
4332 if (NVT.is512BitVector() || NVT.is256BitVector() || NVT.is128BitVector() ||
4333 NVT == MVT::f128) {
4334 ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
4335 CurDAG->RemoveDeadNode(Node);
4336 return;
4337 }
4338 break;
4339
4340 case ISD::VSELECT: {
4341     // Replace VSELECT with non-mask conditions with BLENDV.
4342 if (Node->getOperand(0).getValueType().getVectorElementType() == MVT::i1)
4343 break;
4344
4345     assert(Subtarget->hasSSE41() && "Expected SSE4.1 support!");
4346 SDValue Blendv = CurDAG->getNode(
4347 X86ISD::BLENDV, SDLoc(Node), Node->getValueType(0), Node->getOperand(0),
4348 Node->getOperand(1), Node->getOperand(2));
4349 ReplaceNode(Node, Blendv.getNode());
4350 SelectCode(Blendv.getNode());
4351 // We already called ReplaceUses.
4352 return;
4353 }
4354
4355 case ISD::SRL:
4356 if (matchBitExtract(Node))
4357 return;
4358     LLVM_FALLTHROUGH;
4359 case ISD::SRA:
4360 case ISD::SHL:
4361 if (tryShiftAmountMod(Node))
4362 return;
4363 break;
4364
4365 case ISD::AND:
4366 if (NVT.isVector() && NVT.getVectorElementType() == MVT::i1) {
4367 // Try to form a masked VPTESTM. Operands can be in either order.
4368 SDValue N0 = Node->getOperand(0);
4369 SDValue N1 = Node->getOperand(1);
4370 if (N0.getOpcode() == ISD::SETCC && N0.hasOneUse() &&
4371 tryVPTESTM(Node, N0, N1))
4372 return;
4373 if (N1.getOpcode() == ISD::SETCC && N1.hasOneUse() &&
4374 tryVPTESTM(Node, N1, N0))
4375 return;
4376 }
4377
4378 if (MachineSDNode *NewNode = matchBEXTRFromAndImm(Node)) {
4379 ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
4380 CurDAG->RemoveDeadNode(Node);
4381 return;
4382 }
4383 if (matchBitExtract(Node))
4384 return;
4385 if (AndImmShrink && shrinkAndImmediate(Node))
4386 return;
4387
4388     LLVM_FALLTHROUGH;
4389 case ISD::OR:
4390 case ISD::XOR:
4391 if (tryShrinkShlLogicImm(Node))
4392 return;
4393
4394     LLVM_FALLTHROUGH;
4395 case ISD::ADD:
4396 case ISD::SUB: {
4397 if ((Opcode == ISD::ADD || Opcode == ISD::SUB) && NVT.isVector() &&
4398 combineIncDecVector(Node))
4399 return;
4400
4401     // Try to avoid folding immediates with multiple uses for optsize.
4402     // This code tries to select the register form directly to avoid going
4403     // through the isel table, which might fold the immediate. We can't change
4404     // the add/sub/and/or/xor-with-immediate patterns in the tablegen files
4405     // to check the immediate use count without making the patterns
4406     // unavailable to the fast-isel table.
4407 if (!OptForSize)
4408 break;
4409
4410 // Only handle i8/i16/i32/i64.
4411 if (NVT != MVT::i8 && NVT != MVT::i16 && NVT != MVT::i32 && NVT != MVT::i64)
4412 break;
4413
4414 SDValue N0 = Node->getOperand(0);
4415 SDValue N1 = Node->getOperand(1);
4416
4417 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
4418 if (!Cst)
4419 break;
4420
4421 int64_t Val = Cst->getSExtValue();
4422
4423     // Make sure it's an immediate that is considered foldable.
4424 // FIXME: Handle unsigned 32 bit immediates for 64-bit AND.
4425 if (!isInt<8>(Val) && !isInt<32>(Val))
4426 break;
4427
4428 // If this can match to INC/DEC, let it go.
4429 if (Opcode == ISD::ADD && (Val == 1 || Val == -1))
4430 break;
4431
4432 // Check if we should avoid folding this immediate.
4433 if (!shouldAvoidImmediateInstFormsForSize(N1.getNode()))
4434 break;
4435
4436 // We should not fold the immediate. So we need a register form instead.
4437 unsigned ROpc, MOpc;
4438 switch (NVT.SimpleTy) {
4439     default: llvm_unreachable("Unexpected VT!");
4440 case MVT::i8:
4441 switch (Opcode) {
4442       default: llvm_unreachable("Unexpected opcode!");
4443 case ISD::ADD: ROpc = X86::ADD8rr; MOpc = X86::ADD8rm; break;
4444 case ISD::SUB: ROpc = X86::SUB8rr; MOpc = X86::SUB8rm; break;
4445 case ISD::AND: ROpc = X86::AND8rr; MOpc = X86::AND8rm; break;
4446 case ISD::OR: ROpc = X86::OR8rr; MOpc = X86::OR8rm; break;
4447 case ISD::XOR: ROpc = X86::XOR8rr; MOpc = X86::XOR8rm; break;
4448 }
4449 break;
4450 case MVT::i16:
4451 switch (Opcode) {
4452       default: llvm_unreachable("Unexpected opcode!");
4453 case ISD::ADD: ROpc = X86::ADD16rr; MOpc = X86::ADD16rm; break;
4454 case ISD::SUB: ROpc = X86::SUB16rr; MOpc = X86::SUB16rm; break;
4455 case ISD::AND: ROpc = X86::AND16rr; MOpc = X86::AND16rm; break;
4456 case ISD::OR: ROpc = X86::OR16rr; MOpc = X86::OR16rm; break;
4457 case ISD::XOR: ROpc = X86::XOR16rr; MOpc = X86::XOR16rm; break;
4458 }
4459 break;
4460 case MVT::i32:
4461 switch (Opcode) {
4462       default: llvm_unreachable("Unexpected opcode!");
4463 case ISD::ADD: ROpc = X86::ADD32rr; MOpc = X86::ADD32rm; break;
4464 case ISD::SUB: ROpc = X86::SUB32rr; MOpc = X86::SUB32rm; break;
4465 case ISD::AND: ROpc = X86::AND32rr; MOpc = X86::AND32rm; break;
4466 case ISD::OR: ROpc = X86::OR32rr; MOpc = X86::OR32rm; break;
4467 case ISD::XOR: ROpc = X86::XOR32rr; MOpc = X86::XOR32rm; break;
4468 }
4469 break;
4470 case MVT::i64:
4471 switch (Opcode) {
4472       default: llvm_unreachable("Unexpected opcode!");
4473 case ISD::ADD: ROpc = X86::ADD64rr; MOpc = X86::ADD64rm; break;
4474 case ISD::SUB: ROpc = X86::SUB64rr; MOpc = X86::SUB64rm; break;
4475 case ISD::AND: ROpc = X86::AND64rr; MOpc = X86::AND64rm; break;
4476 case ISD::OR: ROpc = X86::OR64rr; MOpc = X86::OR64rm; break;
4477 case ISD::XOR: ROpc = X86::XOR64rr; MOpc = X86::XOR64rm; break;
4478 }
4479 break;
4480 }
4481
4482     // OK, this is an AND/OR/XOR/ADD/SUB with a constant.
4483
4484     // If this is not a subtract, we can still try to fold a load.
4485 if (Opcode != ISD::SUB) {
4486 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4487 if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4488 SDValue Ops[] = { N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
4489 SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
4490 MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4491 // Update the chain.
4492 ReplaceUses(N0.getValue(1), SDValue(CNode, 2));
4493 // Record the mem-refs
4494 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N0)->getMemOperand()});
4495 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
4496 CurDAG->RemoveDeadNode(Node);
4497 return;
4498 }
4499 }
4500
4501 CurDAG->SelectNodeTo(Node, ROpc, NVT, MVT::i32, N0, N1);
4502 return;
4503 }
4504
4505 case X86ISD::SMUL:
4506 // i16/i32/i64 are handled with isel patterns.
4507 if (NVT != MVT::i8)
4508 break;
4509     LLVM_FALLTHROUGH;
4510 case X86ISD::UMUL: {
4511 SDValue N0 = Node->getOperand(0);
4512 SDValue N1 = Node->getOperand(1);
4513
4514 unsigned LoReg, ROpc, MOpc;
4515 switch (NVT.SimpleTy) {
4516     default: llvm_unreachable("Unsupported VT!");
4517 case MVT::i8:
4518 LoReg = X86::AL;
4519 ROpc = Opcode == X86ISD::SMUL ? X86::IMUL8r : X86::MUL8r;
4520 MOpc = Opcode == X86ISD::SMUL ? X86::IMUL8m : X86::MUL8m;
4521 break;
4522 case MVT::i16:
4523 LoReg = X86::AX;
4524 ROpc = X86::MUL16r;
4525 MOpc = X86::MUL16m;
4526 break;
4527 case MVT::i32:
4528 LoReg = X86::EAX;
4529 ROpc = X86::MUL32r;
4530 MOpc = X86::MUL32m;
4531 break;
4532 case MVT::i64:
4533 LoReg = X86::RAX;
4534 ROpc = X86::MUL64r;
4535 MOpc = X86::MUL64m;
4536 break;
4537 }
4538
4539 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4540 bool FoldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4541     // Multiply is commutative.
4542 if (!FoldedLoad) {
4543 FoldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4544 if (FoldedLoad)
4545 std::swap(N0, N1);
4546 }
4547
4548 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
4549 N0, SDValue()).getValue(1);
4550
4551 MachineSDNode *CNode;
4552 if (FoldedLoad) {
4553 // i16/i32/i64 use an instruction that produces a low and high result even
4554 // though only the low result is used.
4555 SDVTList VTs;
4556 if (NVT == MVT::i8)
4557 VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
4558 else
4559 VTs = CurDAG->getVTList(NVT, NVT, MVT::i32, MVT::Other);
4560
4561 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
4562 InFlag };
4563 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4564
4565 // Update the chain.
4566 ReplaceUses(N1.getValue(1), SDValue(CNode, NVT == MVT::i8 ? 2 : 3));
4567 // Record the mem-refs
4568 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
4569 } else {
4570 // i16/i32/i64 use an instruction that produces a low and high result even
4571 // though only the low result is used.
4572 SDVTList VTs;
4573 if (NVT == MVT::i8)
4574 VTs = CurDAG->getVTList(NVT, MVT::i32);
4575 else
4576 VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
4577
4578 CNode = CurDAG->getMachineNode(ROpc, dl, VTs, {N1, InFlag});
4579 }
4580
4581 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
4582 ReplaceUses(SDValue(Node, 1), SDValue(CNode, NVT == MVT::i8 ? 1 : 2));
4583 CurDAG->RemoveDeadNode(Node);
4584 return;
4585 }
4586
4587 case ISD::SMUL_LOHI:
4588 case ISD::UMUL_LOHI: {
4589 SDValue N0 = Node->getOperand(0);
4590 SDValue N1 = Node->getOperand(1);
4591
4592 unsigned Opc, MOpc;
4593 bool isSigned = Opcode == ISD::SMUL_LOHI;
4594 if (!isSigned) {
4595 switch (NVT.SimpleTy) {
4596       default: llvm_unreachable("Unsupported VT!");
4597 case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
4598 case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
4599 }
4600 } else {
4601 switch (NVT.SimpleTy) {
4602       default: llvm_unreachable("Unsupported VT!");
4603 case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
4604 case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
4605 }
4606 }
4607
4608 unsigned SrcReg, LoReg, HiReg;
4609 switch (Opc) {
4610     default: llvm_unreachable("Unknown MUL opcode!");
4611 case X86::IMUL32r:
4612 case X86::MUL32r:
4613 SrcReg = LoReg = X86::EAX; HiReg = X86::EDX;
4614 break;
4615 case X86::IMUL64r:
4616 case X86::MUL64r:
4617 SrcReg = LoReg = X86::RAX; HiReg = X86::RDX;
4618 break;
4619 }
4620
4621 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4622 bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4623     // Multiply is commutative.
4624 if (!foldedLoad) {
4625 foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4626 if (foldedLoad)
4627 std::swap(N0, N1);
4628 }
4629
4630 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
4631 N0, SDValue()).getValue(1);
4632 if (foldedLoad) {
4633 SDValue Chain;
4634 MachineSDNode *CNode = nullptr;
4635 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
4636 InFlag };
4637 SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
4638 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4639 Chain = SDValue(CNode, 0);
4640 InFlag = SDValue(CNode, 1);
4641
4642 // Update the chain.
4643 ReplaceUses(N1.getValue(1), Chain);
4644 // Record the mem-refs
4645 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
4646 } else {
4647 SDValue Ops[] = { N1, InFlag };
4648 SDVTList VTs = CurDAG->getVTList(MVT::Glue);
4649 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4650 InFlag = SDValue(CNode, 0);
4651 }
4652
4653 // Copy the low half of the result, if it is needed.
4654 if (!SDValue(Node, 0).use_empty()) {
4655       assert(LoReg && "Register for low half is not defined!");
4656 SDValue ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg,
4657 NVT, InFlag);
4658 InFlag = ResLo.getValue(2);
4659 ReplaceUses(SDValue(Node, 0), ResLo);
4660       LLVM_DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG);
4661                  dbgs() << '\n');
4662 }
4663 // Copy the high half of the result, if it is needed.
4664 if (!SDValue(Node, 1).use_empty()) {
4665       assert(HiReg && "Register for high half is not defined!");
4666 SDValue ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg,
4667 NVT, InFlag);
4668 InFlag = ResHi.getValue(2);
4669 ReplaceUses(SDValue(Node, 1), ResHi);
4670       LLVM_DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG);
4671                  dbgs() << '\n');
4672 }
4673
4674 CurDAG->RemoveDeadNode(Node);
4675 return;
4676 }
4677
4678 case ISD::SDIVREM:
4679 case ISD::UDIVREM: {
4680 SDValue N0 = Node->getOperand(0);
4681 SDValue N1 = Node->getOperand(1);
4682
4683 unsigned Opc, MOpc;
4684 bool isSigned = Opcode == ISD::SDIVREM;
4685 if (!isSigned) {
4686 switch (NVT.SimpleTy) {
4687       default: llvm_unreachable("Unsupported VT!");
4688 case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
4689 case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
4690 case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
4691 case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
4692 }
4693 } else {
4694 switch (NVT.SimpleTy) {
4695       default: llvm_unreachable("Unsupported VT!");
4696 case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
4697 case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
4698 case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
4699 case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
4700 }
4701 }
4702
4703 unsigned LoReg, HiReg, ClrReg;
4704 unsigned SExtOpcode;
4705 switch (NVT.SimpleTy) {
4706     default: llvm_unreachable("Unsupported VT!");
4707 case MVT::i8:
4708 LoReg = X86::AL; ClrReg = HiReg = X86::AH;
4709 SExtOpcode = 0; // Not used.
4710 break;
4711 case MVT::i16:
4712 LoReg = X86::AX; HiReg = X86::DX;
4713 ClrReg = X86::DX;
4714 SExtOpcode = X86::CWD;
4715 break;
4716 case MVT::i32:
4717 LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
4718 SExtOpcode = X86::CDQ;
4719 break;
4720 case MVT::i64:
4721 LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
4722 SExtOpcode = X86::CQO;
4723 break;
4724 }
4725
4726 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4727 bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4728 bool signBitIsZero = CurDAG->SignBitIsZero(N0);
4729
4730 SDValue InFlag;
4731 if (NVT == MVT::i8) {
4732 // Special case for div8, just use a move with zero extension to AX to
4733 // clear the upper 8 bits (AH).
4734 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain;
4735 MachineSDNode *Move;
4736 if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4737 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
4738 unsigned Opc = (isSigned && !signBitIsZero) ? X86::MOVSX16rm8
4739 : X86::MOVZX16rm8;
4740 Move = CurDAG->getMachineNode(Opc, dl, MVT::i16, MVT::Other, Ops);
4741 Chain = SDValue(Move, 1);
4742 ReplaceUses(N0.getValue(1), Chain);
4743 // Record the mem-refs
4744 CurDAG->setNodeMemRefs(Move, {cast<LoadSDNode>(N0)->getMemOperand()});
4745 } else {
4746 unsigned Opc = (isSigned && !signBitIsZero) ? X86::MOVSX16rr8
4747 : X86::MOVZX16rr8;
4748 Move = CurDAG->getMachineNode(Opc, dl, MVT::i16, N0);
4749 Chain = CurDAG->getEntryNode();
4750 }
4751 Chain = CurDAG->getCopyToReg(Chain, dl, X86::AX, SDValue(Move, 0),
4752 SDValue());
4753 InFlag = Chain.getValue(1);
4754 } else {
4755 InFlag =
4756 CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
4757 LoReg, N0, SDValue()).getValue(1);
4758 if (isSigned && !signBitIsZero) {
4759 // Sign extend the low part into the high part.
4760 InFlag =
4761 SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
4762 } else {
4763 // Zero out the high part, effectively zero extending the input.
4764 SDValue ClrNode = SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, NVT), 0);
4765 switch (NVT.SimpleTy) {
4766 case MVT::i16:
4767 ClrNode =
4768 SDValue(CurDAG->getMachineNode(
4769 TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
4770 CurDAG->getTargetConstant(X86::sub_16bit, dl,
4771 MVT::i32)),
4772 0);
4773 break;
4774 case MVT::i32:
4775 break;
4776 case MVT::i64:
4777 ClrNode =
4778 SDValue(CurDAG->getMachineNode(
4779 TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
4780 CurDAG->getTargetConstant(0, dl, MVT::i64), ClrNode,
4781 CurDAG->getTargetConstant(X86::sub_32bit, dl,
4782 MVT::i32)),
4783 0);
4784 break;
4785 default:
4786           llvm_unreachable("Unexpected division source");
4787 }
4788
4789 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
4790 ClrNode, InFlag).getValue(1);
4791 }
4792 }
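The div8 special case above relies on x86's DIV r/m8 semantics: the 16-bit dividend lives in AX, the quotient lands in AL, and the remainder in AH, which is why the input is first zero- or sign-extended into AX. A standalone sketch of that behavior (illustration only, not LLVM code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint8_t Dividend = 200, Divisor = 7;
      uint16_t AX = Dividend;      // MOVZX: clears the upper 8 bits (AH)
      uint8_t AL = AX / Divisor;   // DIV r/m8 leaves the quotient in AL...
      uint8_t AH = AX % Divisor;   // ...and the remainder in AH
      printf("quotient=%u remainder=%u\n", (unsigned)AL, (unsigned)AH); // 28, 4
      return 0;
    }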
4793
4794 if (foldedLoad) {
4795 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
4796 InFlag };
4797 MachineSDNode *CNode =
4798 CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops);
4799 InFlag = SDValue(CNode, 1);
4800 // Update the chain.
4801 ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
4802 // Record the mem-refs
4803 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
4804 } else {
4805 InFlag =
4806 SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
4807 }
4808
4809 // Prevent use of AH in a REX instruction by explicitly copying it to
4810 // an ABCD_L register.
4811 //
4812 // The current assumption of the register allocator is that isel
4813 // won't generate explicit references to the GR8_ABCD_H registers. If
4814 // the allocator and/or the backend get enhanced to be more robust in
4815 // that regard, this can be, and should be, removed.
4816 if (HiReg == X86::AH && !SDValue(Node, 1).use_empty()) {
4817 SDValue AHCopy = CurDAG->getRegister(X86::AH, MVT::i8);
4818 unsigned AHExtOpcode =
4819 isSigned ? X86::MOVSX32rr8_NOREX : X86::MOVZX32rr8_NOREX;
4820
4821 SDNode *RNode = CurDAG->getMachineNode(AHExtOpcode, dl, MVT::i32,
4822 MVT::Glue, AHCopy, InFlag);
4823 SDValue Result(RNode, 0);
4824 InFlag = SDValue(RNode, 1);
4825
4826 Result =
4827 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result);
4828
4829 ReplaceUses(SDValue(Node, 1), Result);
4830       LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
4831                  dbgs() << '\n');
4832 }
4833 // Copy the division (low) result, if it is needed.
4834 if (!SDValue(Node, 0).use_empty()) {
4835 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
4836 LoReg, NVT, InFlag);
4837 InFlag = Result.getValue(2);
4838 ReplaceUses(SDValue(Node, 0), Result);
4839       LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
4840                  dbgs() << '\n');
4841 }
4842 // Copy the remainder (high) result, if it is needed.
4843 if (!SDValue(Node, 1).use_empty()) {
4844 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
4845 HiReg, NVT, InFlag);
4846 InFlag = Result.getValue(2);
4847 ReplaceUses(SDValue(Node, 1), Result);
4848       LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
4849                  dbgs() << '\n');
4850 }
4851 CurDAG->RemoveDeadNode(Node);
4852 return;
4853 }
4854
4855 case X86ISD::CMP: {
4856 SDValue N0 = Node->getOperand(0);
4857 SDValue N1 = Node->getOperand(1);
4858
4859 // Optimizations for TEST compares.
4860 if (!isNullConstant(N1))
4861 break;
4862
4863 // Save the original VT of the compare.
4864 MVT CmpVT = N0.getSimpleValueType();
4865
4866 // If we are comparing (and (shr X, C, Mask) with 0, emit a BEXTR followed
4867 // by a test instruction. The test should be removed later by
4868 // analyzeCompare if we are using only the zero flag.
4869 // TODO: Should we check the users and use the BEXTR flags directly?
4870 if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
4871 if (MachineSDNode *NewNode = matchBEXTRFromAndImm(N0.getNode())) {
4872 unsigned TestOpc = CmpVT == MVT::i64 ? X86::TEST64rr
4873 : X86::TEST32rr;
4874 SDValue BEXTR = SDValue(NewNode, 0);
4875 NewNode = CurDAG->getMachineNode(TestOpc, dl, MVT::i32, BEXTR, BEXTR);
4876 ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
4877 CurDAG->RemoveDeadNode(Node);
4878 return;
4879 }
4880 }
4881
4882 // We can peek through truncates, but we need to be careful below.
4883 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse())
4884 N0 = N0.getOperand(0);
4885
4886 // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
4887 // use a smaller encoding.
4888 // Look past the truncate if CMP is the only use of it.
4889 if (N0.getOpcode() == ISD::AND &&
4890 N0.getNode()->hasOneUse() &&
4891 N0.getValueType() != MVT::i8) {
4892 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
4893 if (!C) break;
4894 uint64_t Mask = C->getZExtValue();
4895
4896       // Check if we can replace AND+IMM64 with a shift. This is possible for
4897       // masks like 0xFF000000 or 0x00FFFFFF, provided we care only about the
4898       // zero flag.
4899 if (CmpVT == MVT::i64 && !isInt<32>(Mask) &&
4900 onlyUsesZeroFlag(SDValue(Node, 0))) {
4901 if (isMask_64(~Mask)) {
4902 unsigned TrailingZeros = countTrailingZeros(Mask);
4903 SDValue Imm = CurDAG->getTargetConstant(TrailingZeros, dl, MVT::i64);
4904 SDValue Shift =
4905 SDValue(CurDAG->getMachineNode(X86::SHR64ri, dl, MVT::i64, MVT::i32,
4906 N0.getOperand(0), Imm), 0);
4907 MachineSDNode *Test = CurDAG->getMachineNode(X86::TEST64rr, dl,
4908 MVT::i32, Shift, Shift);
4909 ReplaceNode(Node, Test);
4910 return;
4911 }
4912 if (isMask_64(Mask)) {
4913 unsigned LeadingZeros = countLeadingZeros(Mask);
4914 SDValue Imm = CurDAG->getTargetConstant(LeadingZeros, dl, MVT::i64);
4915 SDValue Shift =
4916 SDValue(CurDAG->getMachineNode(X86::SHL64ri, dl, MVT::i64, MVT::i32,
4917 N0.getOperand(0), Imm), 0);
4918 MachineSDNode *Test = CurDAG->getMachineNode(X86::TEST64rr, dl,
4919 MVT::i32, Shift, Shift);
4920 ReplaceNode(Node, Test);
4921 return;
4922 }
4923 }
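To make the two shift forms above concrete: for an all-ones-above mask the AND is equivalent to a right shift by countTrailingZeros(Mask) followed by a test, and for a low-ones mask to a left shift by countLeadingZeros(Mask). A standalone sketch of the mask arithmetic, using GCC/Clang builtins in place of the LLVM helpers (illustration only, not LLVM code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // isMask_64(~Mask): ones above a run of low zeros.
      // (X & Mask) == 0  <=>  (X >> ctz(Mask)) == 0.
      uint64_t HiMask = 0xFFFFFFFF00000000ULL;
      printf("shr $%d, then test\n", __builtin_ctzll(HiMask)); // shr $32
      // isMask_64(Mask): low ones only.
      // (X & Mask) == 0  <=>  (X << clz(Mask)) == 0.
      uint64_t LoMask = 0x0000000000FFFFFFULL;
      printf("shl $%d, then test\n", __builtin_clzll(LoMask)); // shl $40
      return 0;
    }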
4924
4925 MVT VT;
4926 int SubRegOp;
4927 unsigned ROpc, MOpc;
4928
4929 // For each of these checks we need to be careful if the sign flag is
4930 // being used. It is only safe to use the sign flag in two conditions,
4931 // either the sign bit in the shrunken mask is zero or the final test
4932 // size is equal to the original compare size.
4933
4934 if (isUInt<8>(Mask) &&
4935 (!(Mask & 0x80) || CmpVT == MVT::i8 ||
4936 hasNoSignFlagUses(SDValue(Node, 0)))) {
4937 // For example, convert "testl %eax, $8" to "testb %al, $8"
4938 VT = MVT::i8;
4939 SubRegOp = X86::sub_8bit;
4940 ROpc = X86::TEST8ri;
4941 MOpc = X86::TEST8mi;
4942 } else if (OptForMinSize && isUInt<16>(Mask) &&
4943 (!(Mask & 0x8000) || CmpVT == MVT::i16 ||
4944 hasNoSignFlagUses(SDValue(Node, 0)))) {
4945 // For example, "testl %eax, $32776" to "testw %ax, $32776".
4946 // NOTE: We only want to form TESTW instructions if optimizing for
4947 // min size. Otherwise we only save one byte and possibly get a length
4948 // changing prefix penalty in the decoders.
4949 VT = MVT::i16;
4950 SubRegOp = X86::sub_16bit;
4951 ROpc = X86::TEST16ri;
4952 MOpc = X86::TEST16mi;
4953 } else if (isUInt<32>(Mask) && N0.getValueType() != MVT::i16 &&
4954 ((!(Mask & 0x80000000) &&
4955 // Without minsize 16-bit Cmps can get here so we need to
4956 // be sure we calculate the correct sign flag if needed.
4957 (CmpVT != MVT::i16 || !(Mask & 0x8000))) ||
4958 CmpVT == MVT::i32 ||
4959 hasNoSignFlagUses(SDValue(Node, 0)))) {
4960 // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
4961         // NOTE: We only want to run this transform if N0 is 32 or 64 bits.
4962         // Otherwise, we find ourselves in a position where we have to do
4963 // promotion. If previous passes did not promote the and, we assume
4964 // they had a good reason not to and do not promote here.
4965 VT = MVT::i32;
4966 SubRegOp = X86::sub_32bit;
4967 ROpc = X86::TEST32ri;
4968 MOpc = X86::TEST32mi;
4969 } else {
4970 // No eligible transformation was found.
4971 break;
4972 }
4973
4974 SDValue Imm = CurDAG->getTargetConstant(Mask, dl, VT);
4975 SDValue Reg = N0.getOperand(0);
4976
4977 // Emit a testl or testw.
4978 MachineSDNode *NewNode;
4979 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4980 if (tryFoldLoad(Node, N0.getNode(), Reg, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4981 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
4982 Reg.getOperand(0) };
4983 NewNode = CurDAG->getMachineNode(MOpc, dl, MVT::i32, MVT::Other, Ops);
4984 // Update the chain.
4985 ReplaceUses(Reg.getValue(1), SDValue(NewNode, 1));
4986 // Record the mem-refs
4987 CurDAG->setNodeMemRefs(NewNode,
4988 {cast<LoadSDNode>(Reg)->getMemOperand()});
4989 } else {
4990 // Extract the subregister if necessary.
4991 if (N0.getValueType() != VT)
4992 Reg = CurDAG->getTargetExtractSubreg(SubRegOp, dl, VT, Reg);
4993
4994 NewNode = CurDAG->getMachineNode(ROpc, dl, MVT::i32, Reg, Imm);
4995 }
4996 // Replace CMP with TEST.
4997 ReplaceNode(Node, NewNode);
4998 return;
4999 }
5000 break;
5001 }
5002 case X86ISD::PCMPISTR: {
5003 if (!Subtarget->hasSSE42())
5004 break;
5005
5006 bool NeedIndex = !SDValue(Node, 0).use_empty();
5007 bool NeedMask = !SDValue(Node, 1).use_empty();
5008 // We can't fold a load if we are going to make two instructions.
5009 bool MayFoldLoad = !NeedIndex || !NeedMask;
5010
5011 MachineSDNode *CNode;
5012 if (NeedMask) {
5013 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrr : X86::PCMPISTRMrr;
5014 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrm : X86::PCMPISTRMrm;
5015 CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node);
5016 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0));
5017 }
5018 if (NeedIndex || !NeedMask) {
5019 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrr : X86::PCMPISTRIrr;
5020 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrm : X86::PCMPISTRIrm;
5021 CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node);
5022 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
5023 }
5024
5025 // Connect the flag usage to the last instruction created.
5026 ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1));
5027 CurDAG->RemoveDeadNode(Node);
5028 return;
5029 }
5030 case X86ISD::PCMPESTR: {
5031 if (!Subtarget->hasSSE42())
5032 break;
5033
5034 // Copy the two implicit register inputs.
5035 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EAX,
5036 Node->getOperand(1),
5037 SDValue()).getValue(1);
5038 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EDX,
5039 Node->getOperand(3), InFlag).getValue(1);
5040
5041 bool NeedIndex = !SDValue(Node, 0).use_empty();
5042 bool NeedMask = !SDValue(Node, 1).use_empty();
5043 // We can't fold a load if we are going to make two instructions.
5044 bool MayFoldLoad = !NeedIndex || !NeedMask;
5045
5046 MachineSDNode *CNode;
5047 if (NeedMask) {
5048 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrr : X86::PCMPESTRMrr;
5049 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrm : X86::PCMPESTRMrm;
5050 CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node,
5051 InFlag);
5052 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0));
5053 }
5054 if (NeedIndex || !NeedMask) {
5055 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrr : X86::PCMPESTRIrr;
5056 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrm : X86::PCMPESTRIrm;
5057 CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node, InFlag);
5058 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
5059 }
5060 // Connect the flag usage to the last instruction created.
5061 ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1));
5062 CurDAG->RemoveDeadNode(Node);
5063 return;
5064 }
5065
5066 case ISD::SETCC: {
5067 if (NVT.isVector() && tryVPTESTM(Node, SDValue(Node, 0), SDValue()))
5068 return;
5069
5070 break;
5071 }
5072
5073 case ISD::STORE:
5074 if (foldLoadStoreIntoMemOperand(Node))
5075 return;
5076 break;
5077 case ISD::FCEIL:
5078 case ISD::FFLOOR:
5079 case ISD::FTRUNC:
5080 case ISD::FNEARBYINT:
5081 case ISD::FRINT: {
5082     // Replace fp rounding nodes with their X86-specific equivalents so we
5083     // don't need 2 sets of patterns.
5084 // FIXME: This can only happen when the nodes started as STRICT_* and have
5085 // been mutated into their non-STRICT equivalents. Eventually this
5086 // mutation will be removed and we should switch the STRICT_ nodes to a
5087 // strict version of RNDSCALE in PreProcessISelDAG.
5088 unsigned Imm;
5089 switch (Node->getOpcode()) {
5090     default: llvm_unreachable("Unexpected opcode!");
5091 case ISD::FCEIL: Imm = 0xA; break;
5092 case ISD::FFLOOR: Imm = 0x9; break;
5093 case ISD::FTRUNC: Imm = 0xB; break;
5094 case ISD::FNEARBYINT: Imm = 0xC; break;
5095 case ISD::FRINT: Imm = 0x4; break;
5096 }
5097 SDLoc dl(Node);
5098 SDValue Res = CurDAG->getNode(X86ISD::VRNDSCALE, dl,
5099 Node->getValueType(0),
5100 Node->getOperand(0),
5101 CurDAG->getConstant(Imm, dl, MVT::i8));
5102 ReplaceNode(Node, Res.getNode());
5103 SelectCode(Res.getNode());
5104 return;
5105 }
5106 }
5107
5108 SelectCode(Node);
5109}
5110
5111bool X86DAGToDAGISel::
5112SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
5113 std::vector<SDValue> &OutOps) {
5114 SDValue Op0, Op1, Op2, Op3, Op4;
5115 switch (ConstraintID) {
5116 default:
5117     llvm_unreachable("Unexpected asm memory constraint");
5118 case InlineAsm::Constraint_i:
5119 // FIXME: It seems strange that 'i' is needed here since it's supposed to
5120 // be an immediate and not a memory constraint.
5121     LLVM_FALLTHROUGH;
5122 case InlineAsm::Constraint_o: // offsetable ??
5123 case InlineAsm::Constraint_v: // not offsetable ??
5124 case InlineAsm::Constraint_m: // memory
5125 case InlineAsm::Constraint_X:
5126 if (!selectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4))
5127 return true;
5128 break;
5129 }
5130
5131 OutOps.push_back(Op0);
5132 OutOps.push_back(Op1);
5133 OutOps.push_back(Op2);
5134 OutOps.push_back(Op3);
5135 OutOps.push_back(Op4);
5136 return false;
5137}
5138
5139/// This pass converts a legalized DAG into an X86-specific DAG,
5140/// ready for instruction scheduling.
5141FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
5142 CodeGenOpt::Level OptLevel) {
5143 return new X86DAGToDAGISel(TM, OptLevel);
5144}

/build/llvm-toolchain-snapshot-10~svn372087/include/llvm/CodeGen/SelectionDAGNodes.h

1//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SDNode class and derived classes, which are used to
10// represent the nodes and operations present in a SelectionDAG. These nodes
11// and operations are machine code level operations, with some similarities to
12// the GCC RTL representation.
13//
14// Clients should include the SelectionDAG.h file instead of this file directly.
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19#define LLVM_CODEGEN_SELECTIONDAGNODES_H
20
21#include "llvm/ADT/APFloat.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/FoldingSet.h"
25#include "llvm/ADT/GraphTraits.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/ilist_node.h"
29#include "llvm/ADT/iterator.h"
30#include "llvm/ADT/iterator_range.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/ValueTypes.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/DebugLoc.h"
36#include "llvm/IR/Instruction.h"
37#include "llvm/IR/Instructions.h"
38#include "llvm/IR/Metadata.h"
39#include "llvm/IR/Operator.h"
40#include "llvm/Support/AlignOf.h"
41#include "llvm/Support/AtomicOrdering.h"
42#include "llvm/Support/Casting.h"
43#include "llvm/Support/ErrorHandling.h"
44#include "llvm/Support/MachineValueType.h"
45#include <algorithm>
46#include <cassert>
47#include <climits>
48#include <cstddef>
49#include <cstdint>
50#include <cstring>
51#include <iterator>
52#include <string>
53#include <tuple>
54
55namespace llvm {
56
57class APInt;
58class Constant;
59template <typename T> struct DenseMapInfo;
60class GlobalValue;
61class MachineBasicBlock;
62class MachineConstantPoolValue;
63class MCSymbol;
64class raw_ostream;
65class SDNode;
66class SelectionDAG;
67class Type;
68class Value;
69
70void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
71 bool force = false);
72
73/// This represents a list of ValueType's that has been intern'd by
74/// a SelectionDAG. Instances of this simple value class are returned by
75/// SelectionDAG::getVTList(...).
76///
77struct SDVTList {
78 const EVT *VTs;
79 unsigned int NumVTs;
80};
81
82namespace ISD {
83
84 /// Node predicates
85
86 /// If N is a BUILD_VECTOR node whose elements are all the same constant or
87 /// undefined, return true and return the constant value in \p SplatValue.
88 bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
89
90 /// Return true if the specified node is a BUILD_VECTOR where all of the
91 /// elements are ~0 or undef.
92 bool isBuildVectorAllOnes(const SDNode *N);
93
94 /// Return true if the specified node is a BUILD_VECTOR where all of the
95 /// elements are 0 or undef.
96 bool isBuildVectorAllZeros(const SDNode *N);
97
98 /// Return true if the specified node is a BUILD_VECTOR node of all
99 /// ConstantSDNode or undef.
100 bool isBuildVectorOfConstantSDNodes(const SDNode *N);
101
102 /// Return true if the specified node is a BUILD_VECTOR node of all
103 /// ConstantFPSDNode or undef.
104 bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
105
106 /// Return true if the node has at least one operand and all operands of the
107 /// specified node are ISD::UNDEF.
108 bool allOperandsUndef(const SDNode *N);
109
110} // end namespace ISD
111
112//===----------------------------------------------------------------------===//
113/// Unlike LLVM values, Selection DAG nodes may return multiple
114/// values as the result of a computation. Many nodes return multiple values,
115/// from loads (which define a token and a return value) to ADDC (which returns
116/// a result and a carry value), to calls (which may return an arbitrary number
117/// of values).
118///
119/// As such, each use of a SelectionDAG computation must indicate the node that
120/// computes it as well as which return value to use from that node. This pair
121/// of information is represented with the SDValue value type.
122///
123class SDValue {
124 friend struct DenseMapInfo<SDValue>;
125
126 SDNode *Node = nullptr; // The node defining the value we are using.
127 unsigned ResNo = 0; // Which return value of the node we are using.
128
129public:
130 SDValue() = default;
131 SDValue(SDNode *node, unsigned resno);
132
133 /// get the index which selects a specific result in the SDNode
134 unsigned getResNo() const { return ResNo; }
135
136 /// get the SDNode which holds the desired result
137 SDNode *getNode() const { return Node; }
138
139 /// set the SDNode
140 void setNode(SDNode *N) { Node = N; }
141
142 inline SDNode *operator->() const { return Node; }
143
144 bool operator==(const SDValue &O) const {
145 return Node == O.Node && ResNo == O.ResNo;
146 }
147 bool operator!=(const SDValue &O) const {
148 return !operator==(O);
149 }
150 bool operator<(const SDValue &O) const {
151 return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
152 }
153 explicit operator bool() const {
154 return Node != nullptr;
25
Returning the value 1, which participates in a condition later
155 }
156
157 SDValue getValue(unsigned R) const {
158 return SDValue(Node, R);
159 }
160
161 /// Return true if this node is an operand of N.
162 bool isOperandOf(const SDNode *N) const;
163
164 /// Return the ValueType of the referenced return value.
165 inline EVT getValueType() const;
166
167 /// Return the simple ValueType of the referenced return value.
168 MVT getSimpleValueType() const {
169 return getValueType().getSimpleVT();
170 }
171
172 /// Returns the size of the value in bits.
173 unsigned getValueSizeInBits() const {
174 return getValueType().getSizeInBits();
175 }
176
177 unsigned getScalarValueSizeInBits() const {
178 return getValueType().getScalarType().getSizeInBits();
179 }
180
181 // Forwarding methods - These forward to the corresponding methods in SDNode.
182 inline unsigned getOpcode() const;
183 inline unsigned getNumOperands() const;
184 inline const SDValue &getOperand(unsigned i) const;
185 inline uint64_t getConstantOperandVal(unsigned i) const;
186 inline const APInt &getConstantOperandAPInt(unsigned i) const;
187 inline bool isTargetMemoryOpcode() const;
188 inline bool isTargetOpcode() const;
189 inline bool isMachineOpcode() const;
190 inline bool isUndef() const;
191 inline unsigned getMachineOpcode() const;
192 inline const DebugLoc &getDebugLoc() const;
193 inline void dump() const;
194 inline void dump(const SelectionDAG *G) const;
195 inline void dumpr() const;
196 inline void dumpr(const SelectionDAG *G) const;
197
198 /// Return true if this operand (which must be a chain) reaches the
199 /// specified operand without crossing any side-effecting instructions.
200 /// In practice, this looks through token factors and non-volatile loads.
201  /// In order to remain efficient, this only looks a couple of nodes in;
202  /// it does not do an exhaustive search.
203 bool reachesChainWithoutSideEffects(SDValue Dest,
204 unsigned Depth = 2) const;
205
206 /// Return true if there are no nodes using value ResNo of Node.
207 inline bool use_empty() const;
208
209 /// Return true if there is exactly one node using value ResNo of Node.
210 inline bool hasOneUse() const;
211};
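Steps 24-26 in the path above hinge on this explicit operator bool: it tests only whether Node is non-null, independent of ResNo. A minimal stand-in sketch of that contract (hypothetical types, not LLVM code):

    #include <cassert>

    struct Node {};                  // hypothetical stand-in for SDNode
    struct Value {                   // mirrors SDValue's Node/ResNo pair
      Node *N;
      unsigned ResNo;
      explicit operator bool() const { return N != nullptr; }
    };

    int main() {
      Node Broadcast;
      Value Empty{nullptr, 0};
      Value Found{&Broadcast, 1};
      assert(!Empty && Found);       // the same test as step 24 above
      return 0;
    }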
212
213template<> struct DenseMapInfo<SDValue> {
214 static inline SDValue getEmptyKey() {
215 SDValue V;
216 V.ResNo = -1U;
217 return V;
218 }
219
220 static inline SDValue getTombstoneKey() {
221 SDValue V;
222 V.ResNo = -2U;
223 return V;
224 }
225
226 static unsigned getHashValue(const SDValue &Val) {
227 return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
228 (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
229 }
230
231 static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
232 return LHS == RHS;
233 }
234};
235
236/// Allow casting operators to work directly on
237/// SDValues as if they were SDNode*'s.
238template<> struct simplify_type<SDValue> {
239 using SimpleType = SDNode *;
240
241 static SimpleType getSimplifiedValue(SDValue &Val) {
242 return Val.getNode();
243 }
244};
245template<> struct simplify_type<const SDValue> {
246 using SimpleType = /*const*/ SDNode *;
247
248 static SimpleType getSimplifiedValue(const SDValue &Val) {
249 return Val.getNode();
250 }
251};
252
253/// Represents a use of a SDNode. This class holds an SDValue,
254/// which records the SDNode being used and the result number, a
255/// pointer to the SDNode using the value, and Next and Prev pointers,
256/// which link together all the uses of an SDNode.
257///
258class SDUse {
259 /// Val - The value being used.
260 SDValue Val;
261 /// User - The user of this value.
262 SDNode *User = nullptr;
263 /// Prev, Next - Pointers to the uses list of the SDNode referred by
264 /// this operand.
265 SDUse **Prev = nullptr;
266 SDUse *Next = nullptr;
267
268public:
269 SDUse() = default;
270 SDUse(const SDUse &U) = delete;
271 SDUse &operator=(const SDUse &) = delete;
272
273 /// Normally SDUse will just implicitly convert to an SDValue that it holds.
274 operator const SDValue&() const { return Val; }
275
276 /// If implicit conversion to SDValue doesn't work, the get() method returns
277 /// the SDValue.
278 const SDValue &get() const { return Val; }
279
280 /// This returns the SDNode that contains this Use.
281 SDNode *getUser() { return User; }
282
283 /// Get the next SDUse in the use list.
284 SDUse *getNext() const { return Next; }
285
286 /// Convenience function for get().getNode().
287 SDNode *getNode() const { return Val.getNode(); }
288 /// Convenience function for get().getResNo().
289 unsigned getResNo() const { return Val.getResNo(); }
290 /// Convenience function for get().getValueType().
291 EVT getValueType() const { return Val.getValueType(); }
292
293 /// Convenience function for get().operator==
294 bool operator==(const SDValue &V) const {
295 return Val == V;
296 }
297
298 /// Convenience function for get().operator!=
299 bool operator!=(const SDValue &V) const {
300 return Val != V;
301 }
302
303 /// Convenience function for get().operator<
304 bool operator<(const SDValue &V) const {
305 return Val < V;
306 }
307
308private:
309 friend class SelectionDAG;
310 friend class SDNode;
311 // TODO: unfriend HandleSDNode once we fix its operand handling.
312 friend class HandleSDNode;
313
314 void setUser(SDNode *p) { User = p; }
315
316 /// Remove this use from its existing use list, assign it the
317 /// given value, and add it to the new value's node's use list.
318 inline void set(const SDValue &V);
319 /// Like set, but only supports initializing a newly-allocated
320 /// SDUse with a non-null value.
321 inline void setInitial(const SDValue &V);
322 /// Like set, but only sets the Node portion of the value,
323 /// leaving the ResNo portion unmodified.
324 inline void setNode(SDNode *N);
325
326 void addToList(SDUse **List) {
327 Next = *List;
328 if (Next) Next->Prev = &Next;
329 Prev = List;
330 *List = this;
331 }
332
333 void removeFromList() {
334 *Prev = Next;
335 if (Next) Next->Prev = Prev;
336 }
337};
338
339/// simplify_type specializations - Allow casting operators to work directly on
340/// SDValues as if they were SDNode*'s.
341template<> struct simplify_type<SDUse> {
342 using SimpleType = SDNode *;
343
344 static SimpleType getSimplifiedValue(SDUse &Val) {
345 return Val.getNode();
346 }
347};
348
349/// These are IR-level optimization flags that may be propagated to SDNodes.
350/// TODO: This data structure should be shared by the IR optimizer and the
351/// backend.
352struct SDNodeFlags {
353private:
354 // This bit is used to determine if the flags are in a defined state.
355 // Flag bits can only be masked out during intersection if the masking flags
356 // are defined.
357 bool AnyDefined : 1;
358
359 bool NoUnsignedWrap : 1;
360 bool NoSignedWrap : 1;
361 bool Exact : 1;
362 bool NoNaNs : 1;
363 bool NoInfs : 1;
364 bool NoSignedZeros : 1;
365 bool AllowReciprocal : 1;
366 bool VectorReduction : 1;
367 bool AllowContract : 1;
368 bool ApproximateFuncs : 1;
369 bool AllowReassociation : 1;
370
371 // We assume instructions do not raise floating-point exceptions by default,
372 // and only those marked explicitly may do so. We could choose to represent
373 // this via a positive "FPExcept" flags like on the MI level, but having a
374 // negative "NoFPExcept" flag here (that defaults to true) makes the flag
375 // intersection logic more straightforward.
376 bool NoFPExcept : 1;
377
378public:
379 /// Default constructor turns off all optimization flags.
380 SDNodeFlags()
381 : AnyDefined(false), NoUnsignedWrap(false), NoSignedWrap(false),
382 Exact(false), NoNaNs(false), NoInfs(false),
383 NoSignedZeros(false), AllowReciprocal(false), VectorReduction(false),
384 AllowContract(false), ApproximateFuncs(false),
385 AllowReassociation(false), NoFPExcept(true) {}
386
387 /// Propagate the fast-math-flags from an IR FPMathOperator.
388 void copyFMF(const FPMathOperator &FPMO) {
389 setNoNaNs(FPMO.hasNoNaNs());
390 setNoInfs(FPMO.hasNoInfs());
391 setNoSignedZeros(FPMO.hasNoSignedZeros());
392 setAllowReciprocal(FPMO.hasAllowReciprocal());
393 setAllowContract(FPMO.hasAllowContract());
394 setApproximateFuncs(FPMO.hasApproxFunc());
395 setAllowReassociation(FPMO.hasAllowReassoc());
396 }
397
398 /// Sets the state of the flags to the defined state.
399 void setDefined() { AnyDefined = true; }
400 /// Returns true if the flags are in a defined state.
401 bool isDefined() const { return AnyDefined; }
402
403 // These are mutators for each flag.
404 void setNoUnsignedWrap(bool b) {
405 setDefined();
406 NoUnsignedWrap = b;
407 }
408 void setNoSignedWrap(bool b) {
409 setDefined();
410 NoSignedWrap = b;
411 }
412 void setExact(bool b) {
413 setDefined();
414 Exact = b;
415 }
416 void setNoNaNs(bool b) {
417 setDefined();
418 NoNaNs = b;
419 }
420 void setNoInfs(bool b) {
421 setDefined();
422 NoInfs = b;
423 }
424 void setNoSignedZeros(bool b) {
425 setDefined();
426 NoSignedZeros = b;
427 }
428 void setAllowReciprocal(bool b) {
429 setDefined();
430 AllowReciprocal = b;
431 }
432 void setVectorReduction(bool b) {
433 setDefined();
434 VectorReduction = b;
435 }
436 void setAllowContract(bool b) {
437 setDefined();
438 AllowContract = b;
439 }
440 void setApproximateFuncs(bool b) {
441 setDefined();
442 ApproximateFuncs = b;
443 }
444 void setAllowReassociation(bool b) {
445 setDefined();
446 AllowReassociation = b;
447 }
448 void setFPExcept(bool b) {
449 setDefined();
450 NoFPExcept = !b;
451 }
452
453 // These are accessors for each flag.
454 bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
455 bool hasNoSignedWrap() const { return NoSignedWrap; }
456 bool hasExact() const { return Exact; }
457 bool hasNoNaNs() const { return NoNaNs; }
458 bool hasNoInfs() const { return NoInfs; }
459 bool hasNoSignedZeros() const { return NoSignedZeros; }
460 bool hasAllowReciprocal() const { return AllowReciprocal; }
461 bool hasVectorReduction() const { return VectorReduction; }
462 bool hasAllowContract() const { return AllowContract; }
463 bool hasApproximateFuncs() const { return ApproximateFuncs; }
464 bool hasAllowReassociation() const { return AllowReassociation; }
465 bool hasFPExcept() const { return !NoFPExcept; }
466
467 bool isFast() const {
468 return NoSignedZeros && AllowReciprocal && NoNaNs && NoInfs && NoFPExcept &&
469 AllowContract && ApproximateFuncs && AllowReassociation;
470 }
471
472 /// Clear any flags in this flag set that aren't also set in Flags.
473 /// If the given Flags are undefined then don't do anything.
474 void intersectWith(const SDNodeFlags Flags) {
475 if (!Flags.isDefined())
476 return;
477 NoUnsignedWrap &= Flags.NoUnsignedWrap;
478 NoSignedWrap &= Flags.NoSignedWrap;
479 Exact &= Flags.Exact;
480 NoNaNs &= Flags.NoNaNs;
481 NoInfs &= Flags.NoInfs;
482 NoSignedZeros &= Flags.NoSignedZeros;
483 AllowReciprocal &= Flags.AllowReciprocal;
484 VectorReduction &= Flags.VectorReduction;
485 AllowContract &= Flags.AllowContract;
486 ApproximateFuncs &= Flags.ApproximateFuncs;
487 AllowReassociation &= Flags.AllowReassociation;
488 NoFPExcept &= Flags.NoFPExcept;
489 }
490};
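// Illustrative sketch, not part of SelectionDAGNodes.h: intersectWith AND's
// each flag, so combining two nodes' flags keeps only the optimizations both
// permit, while an undefined right-hand side leaves the left side untouched.
// The helper name is hypothetical.
inline SDNodeFlags commonFlagsSketch(SDNodeFlags A, const SDNodeFlags &B) {
  A.intersectWith(B); // no-op when !B.isDefined()
  return A;           // e.g. NoNaNs survives only if both sides had it set
}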
491
492/// Represents one node in the SelectionDAG.
493///
494class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
495private:
496 /// The operation that this node performs.
497 int16_t NodeType;
498
499protected:
500 // We define a set of mini-helper classes to help us interpret the bits in our
501 // SubclassData. These are designed to fit within a uint16_t so they pack
502 // with NodeType.
503
504#if defined(_AIX) && (!defined(__GNUC__) || defined(__ibmxl__))
505// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
506// and give the `pack` pragma push semantics.
507#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")
508#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")
509#else
510#define BEGIN_TWO_BYTE_PACK()
511#define END_TWO_BYTE_PACK()
512#endif
513
514BEGIN_TWO_BYTE_PACK()
515 class SDNodeBitfields {
516 friend class SDNode;
517 friend class MemIntrinsicSDNode;
518 friend class MemSDNode;
519 friend class SelectionDAG;
520
521 uint16_t HasDebugValue : 1;
522 uint16_t IsMemIntrinsic : 1;
523 uint16_t IsDivergent : 1;
524 };
525 enum { NumSDNodeBits = 3 };
526
527 class ConstantSDNodeBitfields {
528 friend class ConstantSDNode;
529
530 uint16_t : NumSDNodeBits;
531
532 uint16_t IsOpaque : 1;
533 };
534
535 class MemSDNodeBitfields {
536 friend class MemSDNode;
537 friend class MemIntrinsicSDNode;
538 friend class AtomicSDNode;
539
540 uint16_t : NumSDNodeBits;
541
542 uint16_t IsVolatile : 1;
543 uint16_t IsNonTemporal : 1;
544 uint16_t IsDereferenceable : 1;
545 uint16_t IsInvariant : 1;
546 };
547 enum { NumMemSDNodeBits = NumSDNodeBits + 4 };
548
549 class LSBaseSDNodeBitfields {
550 friend class LSBaseSDNode;
551 friend class MaskedGatherScatterSDNode;
552
553 uint16_t : NumMemSDNodeBits;
554
555 // This storage is shared between disparate class hierarchies to hold an
556 // enumeration specific to the class hierarchy in use.
557 // LSBaseSDNode => enum ISD::MemIndexedMode
558 // MaskedGatherScatterSDNode => enum ISD::MemIndexType
559 uint16_t AddressingMode : 3;
560 };
561 enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };
562
563 class LoadSDNodeBitfields {
564 friend class LoadSDNode;
565 friend class MaskedLoadSDNode;
566
567 uint16_t : NumLSBaseSDNodeBits;
568
569 uint16_t ExtTy : 2; // enum ISD::LoadExtType
570 uint16_t IsExpanding : 1;
571 };
572
573 class StoreSDNodeBitfields {
574 friend class StoreSDNode;
575 friend class MaskedStoreSDNode;
576
577 uint16_t : NumLSBaseSDNodeBits;
578
579 uint16_t IsTruncating : 1;
580 uint16_t IsCompressing : 1;
581 };
582
583 union {
584 char RawSDNodeBits[sizeof(uint16_t)];
585 SDNodeBitfields SDNodeBits;
586 ConstantSDNodeBitfields ConstantSDNodeBits;
587 MemSDNodeBitfields MemSDNodeBits;
588 LSBaseSDNodeBitfields LSBaseSDNodeBits;
589 LoadSDNodeBitfields LoadSDNodeBits;
590 StoreSDNodeBitfields StoreSDNodeBits;
591 };
592END_TWO_BYTE_PACK()
593#undef BEGIN_TWO_BYTE_PACK
594#undef END_TWO_BYTE_PACK
595
596 // RawSDNodeBits must cover the entirety of the union. This means that all of
597 // the union's members must have size <= RawSDNodeBits. We write the RHS as
598 // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
599 static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
600 static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
601 static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
602 static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
603 static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
604 static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
605
606private:
607 friend class SelectionDAG;
608 // TODO: unfriend HandleSDNode once we fix its operand handling.
609 friend class HandleSDNode;
610
611 /// Unique id per SDNode in the DAG.
612 int NodeId = -1;
613
614 /// The values that are used by this operation.
615 SDUse *OperandList = nullptr;
616
617  /// The types of the values this node defines. SDNodes may
618 /// define multiple values simultaneously.
619 const EVT *ValueList;
620
621 /// List of uses for this SDNode.
622 SDUse *UseList = nullptr;
623
624 /// The number of entries in the Operand/Value list.
625 unsigned short NumOperands = 0;
626 unsigned short NumValues;
627
628 // The ordering of the SDNodes. It roughly corresponds to the ordering of the
629 // original LLVM instructions.
630 // This is used for turning off scheduling, because we'll forgo
631 // the normal scheduling algorithms and output the instructions according to
632 // this ordering.
633 unsigned IROrder;
634
635 /// Source line information.
636 DebugLoc debugLoc;
637
638 /// Return a pointer to the specified value type.
639 static const EVT *getValueTypeList(EVT VT);
640
641 SDNodeFlags Flags;
642
643public:
644 /// Unique and persistent id per SDNode in the DAG.
645 /// Used for debug printing.
646 uint16_t PersistentId;
647
648 //===--------------------------------------------------------------------===//
649 // Accessors
650 //
651
652 /// Return the SelectionDAG opcode value for this node. For
653 /// pre-isel nodes (those for which isMachineOpcode returns false), these
654 /// are the opcode values in the ISD and <target>ISD namespaces. For
655 /// post-isel opcodes, see getMachineOpcode.
656 unsigned getOpcode() const { return (unsigned short)NodeType; }
657
658 /// Test if this node has a target-specific opcode (in the
659 /// \<target\>ISD namespace).
660 bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
661
662 /// Test if this node has a target-specific
663 /// memory-referencing opcode (in the \<target\>ISD namespace and
664 /// greater than FIRST_TARGET_MEMORY_OPCODE).
665 bool isTargetMemoryOpcode() const {
666 return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
667 }
668
669  /// Return true if this node is an UNDEF node.
670 bool isUndef() const { return NodeType == ISD::UNDEF; }
671
672 /// Test if this node is a memory intrinsic (with valid pointer information).
673 /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
674 /// non-memory intrinsics (with chains) that are not really instances of
675 /// MemSDNode. For such nodes, we need some extra state to determine the
676 /// proper classof relationship.
677 bool isMemIntrinsic() const {
678 return (NodeType == ISD::INTRINSIC_W_CHAIN ||
679 NodeType == ISD::INTRINSIC_VOID) &&
680 SDNodeBits.IsMemIntrinsic;
681 }
682
683 /// Test if this node is a strict floating point pseudo-op.
684 bool isStrictFPOpcode() {
685 switch (NodeType) {
686 default:
687 return false;
688 case ISD::STRICT_FADD:
689 case ISD::STRICT_FSUB:
690 case ISD::STRICT_FMUL:
691 case ISD::STRICT_FDIV:
692 case ISD::STRICT_FREM:
693 case ISD::STRICT_FMA:
694 case ISD::STRICT_FSQRT:
695 case ISD::STRICT_FPOW:
696 case ISD::STRICT_FPOWI:
697 case ISD::STRICT_FSIN:
698 case ISD::STRICT_FCOS:
699 case ISD::STRICT_FEXP:
700 case ISD::STRICT_FEXP2:
701 case ISD::STRICT_FLOG:
702 case ISD::STRICT_FLOG10:
703 case ISD::STRICT_FLOG2:
704 case ISD::STRICT_FRINT:
705 case ISD::STRICT_FNEARBYINT:
706 case ISD::STRICT_FMAXNUM:
707 case ISD::STRICT_FMINNUM:
708 case ISD::STRICT_FCEIL:
709 case ISD::STRICT_FFLOOR:
710 case ISD::STRICT_FROUND:
711 case ISD::STRICT_FTRUNC:
712 case ISD::STRICT_FP_TO_SINT:
713 case ISD::STRICT_FP_TO_UINT:
714 case ISD::STRICT_FP_ROUND:
715 case ISD::STRICT_FP_EXTEND:
716 return true;
717 }
718 }
719
720 /// Test if this node has a post-isel opcode, directly
721 /// corresponding to a MachineInstr opcode.
722 bool isMachineOpcode() const { return NodeType < 0; }
723
724 /// This may only be called if isMachineOpcode returns
725 /// true. It returns the MachineInstr opcode value that the node's opcode
726 /// corresponds to.
727 unsigned getMachineOpcode() const {
728    assert(isMachineOpcode() && "Not a MachineInstr opcode!");
729 return ~NodeType;
730 }
731
732 bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
733 void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }
734
735 bool isDivergent() const { return SDNodeBits.IsDivergent; }
736
737 /// Return true if there are no uses of this node.
738 bool use_empty() const { return UseList == nullptr; }
739
740 /// Return true if there is exactly one use of this node.
741 bool hasOneUse() const {
742 return !use_empty() && std::next(use_begin()) == use_end();
743 }
744
745 /// Return the number of uses of this node. This method takes
746 /// time proportional to the number of uses.
747 size_t use_size() const { return std::distance(use_begin(), use_end()); }
748
749 /// Return the unique node id.
750 int getNodeId() const { return NodeId; }
751
752 /// Set unique node id.
753 void setNodeId(int Id) { NodeId = Id; }
754
755 /// Return the node ordering.
756 unsigned getIROrder() const { return IROrder; }
757
758 /// Set the node ordering.
759 void setIROrder(unsigned Order) { IROrder = Order; }
760
761 /// Return the source location info.
762 const DebugLoc &getDebugLoc() const { return debugLoc; }
763
764 /// Set source location info. Try to avoid this, putting
765 /// it in the constructor is preferable.
766 void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
767
768 /// This class provides iterator support for SDUse
769 /// operands that use a specific SDNode.
770 class use_iterator
771 : public std::iterator<std::forward_iterator_tag, SDUse, ptrdiff_t> {
772 friend class SDNode;
773
774 SDUse *Op = nullptr;
775
776 explicit use_iterator(SDUse *op) : Op(op) {}
777
778 public:
779 using reference = std::iterator<std::forward_iterator_tag,
780 SDUse, ptrdiff_t>::reference;
781 using pointer = std::iterator<std::forward_iterator_tag,
782 SDUse, ptrdiff_t>::pointer;
783
784 use_iterator() = default;
785 use_iterator(const use_iterator &I) : Op(I.Op) {}
786
787 bool operator==(const use_iterator &x) const {
788 return Op == x.Op;
789 }
790 bool operator!=(const use_iterator &x) const {
791 return !operator==(x);
792 }
793
794 /// Return true if this iterator is at the end of uses list.
795 bool atEnd() const { return Op == nullptr; }
796
797 // Iterator traversal: forward iteration only.
798 use_iterator &operator++() { // Preincrement
799 assert(Op && "Cannot increment end iterator!")((Op && "Cannot increment end iterator!") ? static_cast
<void> (0) : __assert_fail ("Op && \"Cannot increment end iterator!\""
, "/build/llvm-toolchain-snapshot-10~svn372087/include/llvm/CodeGen/SelectionDAGNodes.h"
, 799, __PRETTY_FUNCTION__))
;
800 Op = Op->getNext();
801 return *this;
802 }
803
804 use_iterator operator++(int) { // Postincrement
805 use_iterator tmp = *this; ++*this; return tmp;
806 }
807
808 /// Retrieve a pointer to the current user node.
809 SDNode *operator*() const {
810 assert(Op && "Cannot dereference end iterator!")((Op && "Cannot dereference end iterator!") ? static_cast
<void> (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-10~svn372087/include/llvm/CodeGen/SelectionDAGNodes.h"
, 810, __PRETTY_FUNCTION__))
;
811 return Op->getUser();
812 }
813
814 SDNode *operator->() const { return operator*(); }
815
816 SDUse &getUse() const { return *Op; }
817
818 /// Retrieve the operand # of this use in its user.
819 unsigned getOperandNo() const {
820 assert(Op && "Cannot dereference end iterator!")((Op && "Cannot dereference end iterator!") ? static_cast
<void> (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-10~svn372087/include/llvm/CodeGen/SelectionDAGNodes.h"
, 820, __PRETTY_FUNCTION__))
;
821 return (unsigned)(Op - Op->getUser()->OperandList);
822 }
823 };
824
825 /// Provide iteration support to walk over all uses of an SDNode.
826 use_iterator use_begin() const {
827 return use_iterator(UseList);
828 }
829
830 static use_iterator use_end() { return use_iterator(nullptr); }
831
832 inline iterator_range<use_iterator> uses() {
833 return make_range(use_begin(), use_end());
834 }
835 inline iterator_range<use_iterator> uses() const {
836 return make_range(use_begin(), use_end());
837 }
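  // Illustrative sketch, comment-only and not part of SelectionDAGNodes.h:
  // a typical walk over the uses of a node N (a hypothetical SDNode*) with
  // the iterator API above.
  //
  //   for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
  //        UI != UE; ++UI) {
  //     SDNode *User = *UI;                // the node that consumes a result
  //     unsigned OpNo = UI.getOperandNo(); // which operand of User uses N
  //     SDValue Used = UI.getUse().get();  // the exact SDValue being used
  //   }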
838
839 /// Return true if there are exactly NUSES uses of the indicated value.
840 /// This method ignores uses of other values defined by this operation.
841 bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;
842
843 /// Return true if there are any use of the indicated value.
844 /// This method ignores uses of other values defined by this operation.
845 bool hasAnyUseOfValue(unsigned Value) const;
846
847 /// Return true if this node is the only use of N.
848 bool isOnlyUserOf(const SDNode *N) const;
849
850 /// Return true if this node is an operand of N.
851 bool isOperandOf(const SDNode *N) const;
852
853 /// Return true if this node is a predecessor of N.
854 /// NOTE: Implemented on top of hasPredecessor and every bit as
855 /// expensive. Use carefully.
856 bool isPredecessorOf(const SDNode *N) const {
857 return N->hasPredecessor(this);
858 }
859
860 /// Return true if N is a predecessor of this node.
861 /// N is either an operand of this node, or can be reached by recursively
862 /// traversing up the operands.
863 /// NOTE: This is an expensive method. Use it carefully.
864 bool hasPredecessor(const SDNode *N) const;
865
866  /// Returns true if N is a predecessor of any node in Worklist. This
867  /// helper keeps the Visited and Worklist sets external so that union
868  /// searches can be performed in parallel, results can be cached across
869  /// queries, and nodes can be added to Worklist incrementally. The search
870  /// stops early once N is found, but a later call can resume it. Remember
871  /// to clear Visited and Worklist if the DAG changes. MaxSteps gives a
872  /// maximum number of nodes to visit before giving up. The TopologicalPrune
873  /// flag signals that positive NodeIds are topologically ordered (operands
874  /// have strictly smaller node ids), so the search can be pruned using this.
875 static bool hasPredecessorHelper(const SDNode *N,
876 SmallPtrSetImpl<const SDNode *> &Visited,
877 SmallVectorImpl<const SDNode *> &Worklist,
878 unsigned int MaxSteps = 0,
879 bool TopologicalPrune = false) {
880 SmallVector<const SDNode *, 8> DeferredNodes;
881 if (Visited.count(N))
882 return true;
883
884    // Node Ids are assigned in three places: as a topological
885    // ordering (> 0), during legalization (values set to 0), and
886    // for new nodes (set to -1). If N has a topological id then we
887    // know that all nodes with smaller ids cannot be its
888    // successors and we need not check them. Filter out all nodes
889    // that can't match; we add them back to the worklist before exit
890    // in case of multiple calls. Note that during selection the topological id
891    // may be violated if a node's predecessor is selected before it. We mark
892    // this at selection by negating the ids of unselected successors and
893    // restricting topological pruning to positive ids.
894
895 int NId = N->getNodeId();
896 // If we Invalidated the Id, reconstruct original NId.
897 if (NId < -1)
898 NId = -(NId + 1);
899
900 bool Found = false;
901 while (!Worklist.empty()) {
902 const SDNode *M = Worklist.pop_back_val();
903 int MId = M->getNodeId();
904 if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
905 (MId > 0) && (MId < NId)) {
906 DeferredNodes.push_back(M);
907 continue;
908 }
909 for (const SDValue &OpV : M->op_values()) {
910 SDNode *Op = OpV.getNode();
911 if (Visited.insert(Op).second)
912 Worklist.push_back(Op);
913 if (Op == N)
914 Found = true;
915 }
916 if (Found)
917 break;
918 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
919 break;
920 }
921 // Push deferred nodes back on worklist.
922 Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
923 // If we bailed early, conservatively return found.
924 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
925 return true;
926 return Found;
927 }
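  // Illustrative sketch, comment-only and not part of SelectionDAGNodes.h:
  // calling hasPredecessorHelper with caller-owned sets so repeated queries
  // can share Visited state. N and Root are hypothetical nodes; if the
  // MaxSteps budget is exhausted, the helper conservatively answers true.
  //
  //   SmallPtrSet<const SDNode *, 16> Visited;
  //   SmallVector<const SDNode *, 16> Worklist;
  //   Worklist.push_back(Root);
  //   bool Reaches =
  //       SDNode::hasPredecessorHelper(N, Visited, Worklist, /*MaxSteps=*/8192);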
928
929 /// Return true if all the users of N are contained in Nodes.
930 /// NOTE: Requires at least one match, but doesn't require them all.
931 static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);
932
933 /// Return the number of values used by this operation.
934 unsigned getNumOperands() const { return NumOperands; }
935
936 /// Return the maximum number of operands that a SDNode can hold.
937 static constexpr size_t getMaxNumOperands() {
938 return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
939 }
940
941 /// Helper method returns the integer value of a ConstantSDNode operand.
942 inline uint64_t getConstantOperandVal(unsigned Num) const;
943
944 /// Helper method returns the APInt of a ConstantSDNode operand.
945 inline const APInt &getConstantOperandAPInt(unsigned Num) const;
946
947 const SDValue &getOperand(unsigned Num) const {
948    assert(Num < NumOperands && "Invalid child # of SDNode!");
949 return OperandList[Num];
950 }
951
952 using op_iterator = SDUse *;
953
954 op_iterator op_begin() const { return OperandList; }
955 op_iterator op_end() const { return OperandList+NumOperands; }
956 ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
957
958 /// Iterator for directly iterating over the operand SDValue's.
959 struct value_op_iterator
960 : iterator_adaptor_base<value_op_iterator, op_iterator,
961 std::random_access_iterator_tag, SDValue,
962 ptrdiff_t, value_op_iterator *,
963 value_op_iterator *> {
964 explicit value_op_iterator(SDUse *U = nullptr)
965 : iterator_adaptor_base(U) {}
966
967 const SDValue &operator*() const { return I->get(); }
968 };
969
970 iterator_range<value_op_iterator> op_values() const {
971 return make_range(value_op_iterator(op_begin()),
972 value_op_iterator(op_end()));
973 }
974
975 SDVTList getVTList() const {
976 SDVTList X = { ValueList, NumValues };
977 return X;
978 }
979
980 /// If this node has a glue operand, return the node
981 /// to which the glue operand points. Otherwise return NULL.
982 SDNode *getGluedNode() const {
983 if (getNumOperands() != 0 &&
984 getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
985 return getOperand(getNumOperands()-1).getNode();
986 return nullptr;
987 }
988
989 /// If this node has a glue value with a user, return
990 /// the user (there is at most one). Otherwise return NULL.
991 SDNode *getGluedUser() const {
992 for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
993 if (UI.getUse().get().getValueType() == MVT::Glue)
994 return *UI;
995 return nullptr;
996 }
997
998 const SDNodeFlags getFlags() const { return Flags; }
999 void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }
1000 bool isFast() { return Flags.isFast(); }
1001
1002 /// Clear any flags in this node that aren't also set in Flags.
1003 /// If Flags is not in a defined state then this has no effect.
1004 void intersectFlagsWith(const SDNodeFlags Flags);
1005
1006 /// Return the number of values defined/returned by this operator.
1007 unsigned getNumValues() const { return NumValues; }
1008
1009 /// Return the type of a specified result.
1010 EVT getValueType(unsigned ResNo) const {
1011    assert(ResNo < NumValues && "Illegal result number!");
1012 return ValueList[ResNo];
1013 }
1014
1015 /// Return the type of a specified result as a simple type.
1016 MVT getSimpleValueType(unsigned ResNo) const {
1017 return getValueType(ResNo).getSimpleVT();
1018 }
1019
1020 /// Returns MVT::getSizeInBits(getValueType(ResNo)).
1021 unsigned getValueSizeInBits(unsigned ResNo) const {
1022 return getValueType(ResNo).getSizeInBits();
1023 }
1024
1025 using value_iterator = const EVT *;
1026
1027 value_iterator value_begin() const { return ValueList; }
1028 value_iterator value_end() const { return ValueList+NumValues; }
1029
1030 /// Return the opcode of this operation for printing.
1031 std::string getOperationName(const SelectionDAG *G = nullptr) const;
1032 static const char* getIndexedModeName(ISD::MemIndexedMode AM);
1033 void print_types(raw_ostream &OS, const SelectionDAG *G) const;
1034 void print_details(raw_ostream &OS, const SelectionDAG *G) const;
1035 void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
1036 void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
1037
1038 /// Print a SelectionDAG node and all children down to
1039 /// the leaves. The given SelectionDAG allows target-specific nodes
1040 /// to be printed in human-readable form. Unlike printr, this will
1041 /// print the whole DAG, including children that appear multiple
1042 /// times.
1043 ///
1044 void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
1045
1046 /// Print a SelectionDAG node and children up to
1047 /// depth "depth." The given SelectionDAG allows target-specific
1048 /// nodes to be printed in human-readable form. Unlike printr, this
1049 /// will print children that appear multiple times wherever they are
1050 /// used.
1051 ///
1052 void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
1053 unsigned depth = 100) const;
1054
1055 /// Dump this node, for debugging.
1056 void dump() const;
1057
1058 /// Dump (recursively) this node and its use-def subgraph.
1059 void dumpr() const;
1060
1061 /// Dump this node, for debugging.
1062 /// The given SelectionDAG allows target-specific nodes to be printed
1063 /// in human-readable form.
1064 void dump(const SelectionDAG *G) const;
1065
1066 /// Dump (recursively) this node and its use-def subgraph.
1067 /// The given SelectionDAG allows target-specific nodes to be printed
1068 /// in human-readable form.
1069 void dumpr(const SelectionDAG *G) const;
1070
1071 /// printrFull to dbgs(). The given SelectionDAG allows
1072 /// target-specific nodes to be printed in human-readable form.
1073 /// Unlike dumpr, this will print the whole DAG, including children
1074 /// that appear multiple times.
1075 void dumprFull(const SelectionDAG *G = nullptr) const;
1076
1077 /// printrWithDepth to dbgs(). The given
1078 /// SelectionDAG allows target-specific nodes to be printed in
1079 /// human-readable form. Unlike dumpr, this will print children
1080 /// that appear multiple times wherever they are used.
1081 ///
1082 void dumprWithDepth(const SelectionDAG *G = nullptr,
1083 unsigned depth = 100) const;
1084
1085 /// Gather unique data for the node.
1086 void Profile(FoldingSetNodeID &ID) const;
1087
1088 /// This method should only be used by the SDUse class.
1089 void addUse(SDUse &U) { U.addToList(&UseList); }
1090
1091protected:
1092 static SDVTList getSDVTList(EVT VT) {
1093 SDVTList Ret = { getValueTypeList(VT), 1 };
1094 return Ret;
1095 }
1096
1097 /// Create an SDNode.
1098 ///
1099 /// SDNodes are created without any operands, and never own the operand
1100 /// storage. To add operands, see SelectionDAG::createOperands.
1101 SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
1102 : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
1103 IROrder(Order), debugLoc(std::move(dl)) {
1104 memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
1105    assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
1106    assert(NumValues == VTs.NumVTs &&
1107           "NumValues wasn't wide enough for its operands!");
1108 }
1109
1110 /// Release the operands and set this node to have zero operands.
1111 void DropOperands();
1112};
1113
1114/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
1115/// into SDNode creation functions.
1116/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
1117/// from the original Instruction, and IROrder is the ordinal position of
1118/// the instruction.
1119/// When an SDNode is created after the DAG has been built, both DebugLoc and
1120/// the IROrder are propagated from the original SDNode.
1121/// So the SDLoc class provides two constructors besides the default one: one
1122/// to be used by the DAGBuilder, the other to be used by others.
1123class SDLoc {
1124private:
1125 DebugLoc DL;
1126 int IROrder = 0;
1127
1128public:
1129 SDLoc() = default;
1130 SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
1131 SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
1132 SDLoc(const Instruction *I, int Order) : IROrder(Order) {
1133    assert(Order >= 0 && "bad IROrder");
1134 if (I)
1135 DL = I->getDebugLoc();
1136 }
1137
1138 unsigned getIROrder() const { return IROrder; }
1139 const DebugLoc &getDebugLoc() const { return DL; }
1140};
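// Illustrative sketch, not part of SelectionDAGNodes.h: SDLoc is normally
// built from the node being replaced so that a replacement node inherits
// both the DebugLoc and the IROrder. The helper name is hypothetical.
inline SDLoc locationOfSketch(const SDNode *N) {
  SDLoc DL(N); // copies N's DebugLoc and IROrder
  return DL;   // pass this to node-creation calls when rebuilding N
}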
1141
1142// Define inline functions from the SDValue class.
1143
1144inline SDValue::SDValue(SDNode *node, unsigned resno)
1145 : Node(node), ResNo(resno) {
1146 // Explicitly check for !ResNo to avoid use-after-free, because there are
1147 // callers that use SDValue(N, 0) with a deleted N to indicate successful
1148 // combines.
1149  assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&
1150         "Invalid result number for the given node!");
1151  assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.");
1152}
1153
1154inline unsigned SDValue::getOpcode() const {
1155 return Node->getOpcode();
1156}
1157
1158inline EVT SDValue::getValueType() const {
1159 return Node->getValueType(ResNo);
1160}
1161
1162inline unsigned SDValue::getNumOperands() const {
1163 return Node->getNumOperands();
1164}
1165
1166inline const SDValue &SDValue::getOperand(unsigned i) const {
1167 return Node->getOperand(i);
1168}
1169
1170inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
1171 return Node->getConstantOperandVal(i);
1172}
1173
1174inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
1175 return Node->getConstantOperandAPInt(i);
1176}
1177
1178inline bool SDValue::isTargetOpcode() const {
1179 return Node->isTargetOpcode();
1180}
1181
1182inline bool SDValue::isTargetMemoryOpcode() const {
1183 return Node->isTargetMemoryOpcode();
1184}
1185
1186inline bool SDValue::isMachineOpcode() const {
1187 return Node->isMachineOpcode();
1188}
1189
1190inline unsigned SDValue::getMachineOpcode() const {
1191 return Node->getMachineOpcode();
1192}
1193
1194inline bool SDValue::isUndef() const {
1195 return Node->isUndef();
1196}
1197
1198inline bool SDValue::use_empty() const {
1199 return !Node->hasAnyUseOfValue(ResNo);
1200}
1201
1202inline bool SDValue::hasOneUse() const {
1203 return Node->hasNUsesOfValue(1, ResNo);
1204}
1205
1206inline const DebugLoc &SDValue::getDebugLoc() const {
1207 return Node->getDebugLoc();
1208}
1209
1210inline void SDValue::dump() const {
1211 return Node->dump();
1212}
1213
1214inline void SDValue::dump(const SelectionDAG *G) const {
1215 return Node->dump(G);
1216}
1217
1218inline void SDValue::dumpr() const {
1219 return Node->dumpr();
1220}
1221
1222inline void SDValue::dumpr(const SelectionDAG *G) const {
1223 return Node->dumpr(G);
1224}
1225
1226// Define inline functions from the SDUse class.
1227
1228inline void SDUse::set(const SDValue &V) {
1229 if (Val.getNode()) removeFromList();
1230 Val = V;
1231 if (V.getNode()) V.getNode()->addUse(*this);
1232}
1233
1234inline void SDUse::setInitial(const SDValue &V) {
1235 Val = V;
1236 V.getNode()->addUse(*this);
1237}
1238
1239inline void SDUse::setNode(SDNode *N) {
1240 if (Val.getNode()) removeFromList();
1241 Val.setNode(N);
1242 if (N) N->addUse(*this);
1243}
1244
1245/// This class is used to form a handle around another node that
1246/// is persistent and is updated across invocations of replaceAllUsesWith on its
1247/// operand. This node should be directly created by end-users and not added to
1248/// the AllNodes list.
1249class HandleSDNode : public SDNode {
1250 SDUse Op;
1251
1252public:
1253 explicit HandleSDNode(SDValue X)
1254 : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
1255 // HandleSDNodes are never inserted into the DAG, so they won't be
1256 // auto-numbered. Use ID 65535 as a sentinel.
1257 PersistentId = 0xffff;
1258
1259 // Manually set up the operand list. This node type is special in that it's
1260 // always stack allocated and SelectionDAG does not manage its operands.
1261 // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
1262 // be so special.
1263 Op.setUser(this);
1264 Op.setInitial(X);
1265 NumOperands = 1;
1266 OperandList = &Op;
1267 }
1268 ~HandleSDNode();
1269
1270 const SDValue &getValue() const { return Op; }
1271};
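// Illustrative sketch, not part of SelectionDAGNodes.h: a HandleSDNode is
// stack-allocated around DAG mutations so the held value survives
// replaceAllUsesWith; the mutation itself is elided here and the helper
// name is hypothetical.
inline SDValue trackAcrossRAUWSketch(SDValue V) {
  HandleSDNode Handle(V);   // registers a use, so V's node stays tracked
  // ... a combine that may RAUW V's node would run here ...
  return Handle.getValue(); // the possibly-updated value
}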
1272
1273class AddrSpaceCastSDNode : public SDNode {
1274private:
1275 unsigned SrcAddrSpace;
1276 unsigned DestAddrSpace;
1277
1278public:
1279 AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
1280 unsigned SrcAS, unsigned DestAS);
1281
1282 unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
1283 unsigned getDestAddressSpace() const { return DestAddrSpace; }
1284
1285 static bool classof(const SDNode *N) {
1286 return N->getOpcode() == ISD::ADDRSPACECAST;
1287 }
1288};
1289
1290/// This is an abstract virtual class for memory operations.
1291class MemSDNode : public SDNode {
1292private:
1293 // VT of in-memory value.
1294 EVT MemoryVT;
1295
1296protected:
1297 /// Memory reference information.
1298 MachineMemOperand *MMO;
1299
1300public:
1301 MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
1302 EVT memvt, MachineMemOperand *MMO);
1303
1304 bool readMem() const { return MMO->isLoad(); }
1305 bool writeMem() const { return MMO->isStore(); }
1306
1307 /// Returns alignment and volatility of the memory access
1308 unsigned getOriginalAlignment() const {
1309 return MMO->getBaseAlignment();
1310 }
1311 unsigned getAlignment() const {
1312 return MMO->getAlignment();
1313 }
1314
1315 /// Return the SubclassData value, without HasDebugValue. This contains an
1316 /// encoding of the volatile flag, as well as bits used by subclasses. This
1317 /// function should only be used to compute a FoldingSetNodeID value.
1318  /// The HasDebugValue bit is masked out because the CSE map needs to match
1319  /// nodes with debug info against nodes without debug info. The same applies
1320  /// to the isDivergent bit.
1321 unsigned getRawSubclassData() const {
1322 uint16_t Data;
1323 union {
1324 char RawSDNodeBits[sizeof(uint16_t)];
1325 SDNodeBitfields SDNodeBits;
1326 };
1327 memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
1328 SDNodeBits.HasDebugValue = 0;
1329 SDNodeBits.IsDivergent = false;
1330 memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
1331 return Data;
1332 }
1333
1334 bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
1335 bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
1336 bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
1337 bool isInvariant() const { return MemSDNodeBits.IsInvariant; }
1338
1339 // Returns the offset from the location of the access.
1340 int64_t getSrcValueOffset() const { return MMO->getOffset(); }
1341
1342 /// Returns the AA info that describes the dereference.
1343 AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }
1344
1345 /// Returns the Ranges that describes the dereference.
1346 const MDNode *getRanges() const { return MMO->getRanges(); }
1347
1348 /// Returns the synchronization scope ID for this memory operation.
1349 SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }
1350
1351 /// Return the atomic ordering requirements for this memory operation. For
1352 /// cmpxchg atomic operations, return the atomic ordering requirements when
1353 /// store occurs.
1354 AtomicOrdering getOrdering() const { return MMO->getOrdering(); }
1355
1356 /// Return true if the memory operation ordering is Unordered or higher.
1357 bool isAtomic() const { return MMO->isAtomic(); }
1358
1359 /// Returns true if the memory operation doesn't imply any ordering
1360 /// constraints on surrounding memory operations beyond the normal memory
1361 /// aliasing rules.
1362 bool isUnordered() const { return MMO->isUnordered(); }
1363
1364  /// Returns true if the memory operation is neither atomic nor volatile.
1365 bool isSimple() const { return !isAtomic() && !isVolatile(); }
1366
1367 /// Return the type of the in-memory value.
1368 EVT getMemoryVT() const { return MemoryVT; }
1369
1370 /// Return a MachineMemOperand object describing the memory
1371 /// reference performed by operation.
1372 MachineMemOperand *getMemOperand() const { return MMO; }
1373
1374 const MachinePointerInfo &getPointerInfo() const {
1375 return MMO->getPointerInfo();
1376 }
1377
1378 /// Return the address space for the associated pointer
1379 unsigned getAddressSpace() const {
1380 return getPointerInfo().getAddrSpace();
1381 }
1382
1383 /// Update this MemSDNode's MachineMemOperand information
1384 /// to reflect the alignment of NewMMO, if it has a greater alignment.
1385 /// This must only be used when the new alignment applies to all users of
1386 /// this MachineMemOperand.
1387 void refineAlignment(const MachineMemOperand *NewMMO) {
1388 MMO->refineAlignment(NewMMO);
1389 }
1390
1391 const SDValue &getChain() const { return getOperand(0); }
1392 const SDValue &getBasePtr() const {
1393 return getOperand(getOpcode() == ISD::STORE ? 2 : 1);
1394 }
1395
1396 // Methods to support isa and dyn_cast
1397 static bool classof(const SDNode *N) {
1398 // For some targets, we lower some target intrinsics to a MemIntrinsicNode
1399 // with either an intrinsic or a target opcode.
1400 return N->getOpcode() == ISD::LOAD ||
1401 N->getOpcode() == ISD::STORE ||
1402 N->getOpcode() == ISD::PREFETCH ||
1403 N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1404 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1405 N->getOpcode() == ISD::ATOMIC_SWAP ||
1406 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1407 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1408 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1409 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1410 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1411 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1412 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1413 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1414 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1415 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1416 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1417 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1418 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1419 N->getOpcode() == ISD::ATOMIC_LOAD ||
1420 N->getOpcode() == ISD::ATOMIC_STORE ||
1421 N->getOpcode() == ISD::MLOAD ||
1422 N->getOpcode() == ISD::MSTORE ||
1423 N->getOpcode() == ISD::MGATHER ||
1424 N->getOpcode() == ISD::MSCATTER ||
1425 N->isMemIntrinsic() ||
1426 N->isTargetMemoryOpcode();
1427 }
1428};
1429
1430/// This is an SDNode representing atomic operations.
1431class AtomicSDNode : public MemSDNode {
1432public:
1433 AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
1434 EVT MemVT, MachineMemOperand *MMO)
1435 : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
1436    assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) ||
1437            MMO->isAtomic()) && "then why are we using an AtomicSDNode?");
1438 }
1439
1440 const SDValue &getBasePtr() const { return getOperand(1); }
1441 const SDValue &getVal() const { return getOperand(2); }
1442
1443 /// Returns true if this SDNode represents cmpxchg atomic operation, false
1444 /// otherwise.
1445 bool isCompareAndSwap() const {
1446 unsigned Op = getOpcode();
1447 return Op == ISD::ATOMIC_CMP_SWAP ||
1448 Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
1449 }
1450
1451 /// For cmpxchg atomic operations, return the atomic ordering requirements
1452 /// when store does not occur.
1453 AtomicOrdering getFailureOrdering() const {
1454    assert(isCompareAndSwap() && "Must be cmpxchg operation");
1455 return MMO->getFailureOrdering();
1456 }
1457
1458 // Methods to support isa and dyn_cast
1459 static bool classof(const SDNode *N) {
1460 return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1461 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1462 N->getOpcode() == ISD::ATOMIC_SWAP ||
1463 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1464 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1465 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1466 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1467 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1468 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1469 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1470 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1471 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1472 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1473 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1474 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1475 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1476 N->getOpcode() == ISD::ATOMIC_LOAD ||
1477 N->getOpcode() == ISD::ATOMIC_STORE;
1478 }
1479};
1480
1481/// This SDNode is used for target intrinsics that touch
1482/// memory and need an associated MachineMemOperand. Its opcode may be
1483/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
1484/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
1485class MemIntrinsicSDNode : public MemSDNode {
1486public:
1487 MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
1488 SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
1489 : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
1490 SDNodeBits.IsMemIntrinsic = true;
1491 }
1492
1493 // Methods to support isa and dyn_cast
1494 static bool classof(const SDNode *N) {
1495    // We lower some target intrinsics to their target opcode early,
1496    // so a node with a target opcode can be of this class.
1497 return N->isMemIntrinsic() ||
1498 N->getOpcode() == ISD::PREFETCH ||
1499 N->isTargetMemoryOpcode();
1500 }
1501};
1502
1503/// This SDNode is used to implement the code generator
1504/// support for the llvm IR shufflevector instruction. It combines elements
1505/// from two input vectors into a new input vector, with the selection and
1506/// ordering of elements determined by an array of integers, referred to as
1507/// the shuffle mask. For input vectors of width N, mask indices of 0..N-1
1508/// refer to elements from the LHS input, and indices from N to 2N-1 to the RHS.
1509/// An index of -1 is treated as undef, such that the code generator may put
1510/// any value in the corresponding element of the result.
1511class ShuffleVectorSDNode : public SDNode {
1512 // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
1513 // is freed when the SelectionDAG object is destroyed.
1514 const int *Mask;
1515
1516protected:
1517 friend class SelectionDAG;
1518
1519 ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
1520 : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}
1521
1522public:
1523 ArrayRef<int> getMask() const {
1524 EVT VT = getValueType(0);
1525 return makeArrayRef(Mask, VT.getVectorNumElements());
1526 }
1527
1528 int getMaskElt(unsigned Idx) const {
1529    assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!");
1530 return Mask[Idx];
1531 }
1532
1533 bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
1534
1535 int getSplatIndex() const {
1536    assert(isSplat() && "Cannot get splat index for non-splat!");
1537 EVT VT = getValueType(0);
1538 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1539 if (Mask[i] >= 0)
1540 return Mask[i];
1541
1542 // We can choose any index value here and be correct because all elements
1543 // are undefined. Return 0 for better potential for callers to simplify.
1544 return 0;
1545 }
1546
1547 static bool isSplatMask(const int *Mask, EVT VT);
1548
1549 /// Change values in a shuffle permute mask assuming
1550 /// the two vector operands have swapped position.
1551 static void commuteMask(MutableArrayRef<int> Mask) {
1552 unsigned NumElems = Mask.size();
1553 for (unsigned i = 0; i != NumElems; ++i) {
1554 int idx = Mask[i];
1555 if (idx < 0)
1556 continue;
1557 else if (idx < (int)NumElems)
1558 Mask[i] = idx + NumElems;
1559 else
1560 Mask[i] = idx - NumElems;
1561 }
1562 }
1563
1564 static bool classof(const SDNode *N) {
1565 return N->getOpcode() == ISD::VECTOR_SHUFFLE;
1566 }
1567};
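// Illustrative sketch, not part of SelectionDAGNodes.h: commuteMask in
// action. For a 4-element shuffle, swapping the two inputs maps LHS indices
// 0..3 to RHS indices 4..7 and vice versa; -1 (undef) entries are untouched.
inline void commuteMaskSketch() {
  int Mask[4] = {0, 5, -1, 2};            // mixes LHS, RHS and undef
  ShuffleVectorSDNode::commuteMask(Mask); // Mask becomes {4, 1, -1, 6}
}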
1568
1569class ConstantSDNode : public SDNode {
1570 friend class SelectionDAG;
1571
1572 const ConstantInt *Value;
1573
1574 ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT)
1575 : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(),
1576 getSDVTList(VT)),
1577 Value(val) {
1578 ConstantSDNodeBits.IsOpaque = isOpaque;
1579 }
1580
1581public:
1582 const ConstantInt *getConstantIntValue() const { return Value; }
1583 const APInt &getAPIntValue() const { return Value->getValue(); }
1584 uint64_t getZExtValue() const { return Value->getZExtValue(); }
1585 int64_t getSExtValue() const { return Value->getSExtValue(); }
1586  uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) {
1587 return Value->getLimitedValue(Limit);
1588 }
1589
1590 bool isOne() const { return Value->isOne(); }
1591 bool isNullValue() const { return Value->isZero(); }
1592 bool isAllOnesValue() const { return Value->isMinusOne(); }
1593
1594 bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }
1595
1596 static bool classof(const SDNode *N) {
1597 return N->getOpcode() == ISD::Constant ||
1598 N->getOpcode() == ISD::TargetConstant;
1599 }
1600};
1601
1602uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
1603 return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
1604}
1605
1606const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const {
1607 return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
1608}
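// Illustrative sketch, not part of SelectionDAGNodes.h: the two inline
// helpers above use cast<ConstantSDNode>, so they assert if the operand is
// not a constant. A dyn_cast-based probe is the tolerant form; the helper
// name is hypothetical.
inline bool matchConstantOperandSketch(const SDNode *N, unsigned OpNo,
                                       uint64_t Expected) {
  if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(OpNo)))
    return C->getZExtValue() == Expected; // operand is a constant integer
  return false;                           // not a ConstantSDNode
}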
1609
1610class ConstantFPSDNode : public SDNode {
1611 friend class SelectionDAG;
1612
1613 const ConstantFP *Value;
1614
1615 ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT)
1616 : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0,
1617 DebugLoc(), getSDVTList(VT)),
1618 Value(val) {}
1619
1620public:
1621 const APFloat& getValueAPF() const { return Value->getValueAPF(); }
1622 const ConstantFP *getConstantFPValue() const { return Value; }
1623
1624 /// Return true if the value is positive or negative zero.
1625 bool isZero() const { return Value->isZero(); }
1626
1627 /// Return true if the value is a NaN.
1628 bool isNaN() const { return Value->isNaN(); }
1629
1630 /// Return true if the value is an infinity
1631 bool isInfinity() const { return Value->isInfinity(); }
1632
1633 /// Return true if the value is negative.
1634 bool isNegative() const { return Value->isNegative(); }
1635
1636 /// We don't rely on operator== working on double values, as
1637 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
1638 /// As such, this method can be used to do an exact bit-for-bit comparison of
1639 /// two floating point values.
1640
1641 /// We leave the version with the double argument here because it's just so
1642 /// convenient to write "2.0" and the like. Without this function we'd
1643 /// have to duplicate its logic everywhere it's called.
1644 bool isExactlyValue(double V) const {
1645 return Value->getValueAPF().isExactlyValue(V);
1646 }
1647 bool isExactlyValue(const APFloat& V) const;
1648
1649 static bool isValueValidForType(EVT VT, const APFloat& Val);
1650
1651 static bool classof(const SDNode *N) {
1652 return N->getOpcode() == ISD::ConstantFP ||
1653 N->getOpcode() == ISD::TargetConstantFP;
1654 }
1655};
1656
1657/// Returns true if \p V is a constant integer zero.
1658bool isNullConstant(SDValue V);
1659
1660/// Returns true if \p V is an FP constant with a value of positive zero.
1661bool isNullFPConstant(SDValue V);
1662
1663/// Returns true if \p V is an integer constant with all bits set.
1664bool isAllOnesConstant(SDValue V);
1665
1666/// Returns true if \p V is a constant integer one.
1667bool isOneConstant(SDValue V);
1668
1669/// Return the non-bitcasted source operand of \p V if it exists.
1670/// If \p V is not a bitcasted value, it is returned as-is.
1671SDValue peekThroughBitcasts(SDValue V);
1672
1673/// Return the non-bitcasted and one-use source operand of \p V if it exists.
1674/// If \p V is not a bitcasted one-use value, it is returned as-is.
1675SDValue peekThroughOneUseBitcasts(SDValue V);
1676
1677/// Return the non-extracted vector source operand of \p V if it exists.
1678/// If \p V is not an extracted subvector, it is returned as-is.
1679SDValue peekThroughExtractSubvectors(SDValue V);
1680
1681/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
1682/// constant is canonicalized to be operand 1.
1683bool isBitwiseNot(SDValue V, bool AllowUndefs = false);
1684
1685/// Returns the SDNode if it is a constant splat BuildVector or constant int.
1686ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false,
1687 bool AllowTruncation = false);
1688
1689/// Returns the SDNode if it is a demanded constant splat BuildVector or
1690/// constant int.
1691ConstantSDNode *isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
1692 bool AllowUndefs = false,
1693 bool AllowTruncation = false);
1694
1695/// Returns the SDNode if it is a constant splat BuildVector or constant float.
1696ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false);
1697
1698/// Returns the SDNode if it is a demanded constant splat BuildVector or
1699/// constant float.
1700ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, const APInt &DemandedElts,
1701 bool AllowUndefs = false);
1702
1703/// Return true if the value is a constant 0 integer or a splatted vector of
1704/// a constant 0 integer (with no undefs by default).
1705/// Build vector implicit truncation is not an issue for null values.
1706bool isNullOrNullSplat(SDValue V, bool AllowUndefs = false);
1707
1708/// Return true if the value is a constant 1 integer or a splatted vector of a
1709/// constant 1 integer (with no undefs).
1710/// Does not permit build vector implicit truncation.
1711bool isOneOrOneSplat(SDValue V);
1712
1713/// Return true if the value is a constant -1 integer or a splatted vector of a
1714/// constant -1 integer (with no undefs).
1715/// Does not permit build vector implicit truncation.
1716bool isAllOnesOrAllOnesSplat(SDValue V);
1717
1718class GlobalAddressSDNode : public SDNode {
1719 friend class SelectionDAG;
1720
1721 const GlobalValue *TheGlobal;
1722 int64_t Offset;
1723 unsigned TargetFlags;
1724
1725 GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
1726 const GlobalValue *GA, EVT VT, int64_t o,
1727 unsigned TF);
1728
1729public:
1730 const GlobalValue *getGlobal() const { return TheGlobal; }
1731 int64_t getOffset() const { return Offset; }
1732 unsigned getTargetFlags() const { return TargetFlags; }
1733 // Return the address space this GlobalAddress belongs to.
1734 unsigned getAddressSpace() const;
1735
1736 static bool classof(const SDNode *N) {
1737 return N->getOpcode() == ISD::GlobalAddress ||
1738 N->getOpcode() == ISD::TargetGlobalAddress ||
1739 N->getOpcode() == ISD::GlobalTLSAddress ||
1740 N->getOpcode() == ISD::TargetGlobalTLSAddress;
1741 }
1742};
1743
1744class FrameIndexSDNode : public SDNode {
1745 friend class SelectionDAG;
1746
1747 int FI;
1748
1749 FrameIndexSDNode(int fi, EVT VT, bool isTarg)
1750 : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
1751 0, DebugLoc(), getSDVTList(VT)), FI(fi) {
1752 }
1753
1754public:
1755 int getIndex() const { return FI; }
1756
1757 static bool classof(const SDNode *N) {
1758 return N->getOpcode() == ISD::FrameIndex ||
1759 N->getOpcode() == ISD::TargetFrameIndex;
1760 }
1761};
1762
1763/// This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate
1764/// the offset and size that are started/ended in the underlying FrameIndex.
1765class LifetimeSDNode : public SDNode {
1766 friend class SelectionDAG;
1767 int64_t Size;
1768 int64_t Offset; // -1 if offset is unknown.
1769
1770 LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
1771 SDVTList VTs, int64_t Size, int64_t Offset)
1772 : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {}
1773public:
1774 int64_t getFrameIndex() const {
1775 return cast<FrameIndexSDNode>(getOperand(1))->getIndex();
1776 }
1777
1778 bool hasOffset() const { return Offset >= 0; }
1779 int64_t getOffset() const {
1780 assert(hasOffset() && "offset is unknown");
1781 return Offset;
1782 }
1783 int64_t getSize() const {
1784 assert(hasOffset() && "offset is unknown");
1785 return Size;
1786 }
1787
1788 // Methods to support isa and dyn_cast
1789 static bool classof(const SDNode *N) {
1790 return N->getOpcode() == ISD::LIFETIME_START ||
1791 N->getOpcode() == ISD::LIFETIME_END;
1792 }
1793};
1794
1795class JumpTableSDNode : public SDNode {
1796 friend class SelectionDAG;
1797
1798 int JTI;
1799 unsigned TargetFlags;
1800
1801 JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned TF)
1802 : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
1803 0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
1804 }
1805
1806public:
1807 int getIndex() const { return JTI; }
1808 unsigned getTargetFlags() const { return TargetFlags; }
1809
1810 static bool classof(const SDNode *N) {
1811 return N->getOpcode() == ISD::JumpTable ||
1812 N->getOpcode() == ISD::TargetJumpTable;
1813 }
1814};
1815
1816class ConstantPoolSDNode : public SDNode {
1817 friend class SelectionDAG;
1818
1819 union {
1820 const Constant *ConstVal;
1821 MachineConstantPoolValue *MachineCPVal;
1822 } Val;
1823 int Offset; // It's a MachineConstantPoolValue if top bit is set.
1824 unsigned Alignment; // Minimum alignment requirement of CP (not log2 value).
1825 unsigned TargetFlags;
1826
1827 ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
1828 unsigned Align, unsigned TF)
1829 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1830 DebugLoc(), getSDVTList(VT)), Offset(o), Alignment(Align),
1831 TargetFlags(TF) {
1832 assert(Offset >= 0 && "Offset is too large");
1833 Val.ConstVal = c;
1834 }
1835
1836 ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v,
1837 EVT VT, int o, unsigned Align, unsigned TF)
1838 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1839 DebugLoc(), getSDVTList(VT)), Offset(o), Alignment(Align),
1840 TargetFlags(TF) {
1841 assert(Offset >= 0 && "Offset is too large");
1842 Val.MachineCPVal = v;
1843 Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1);
1844 }
1845
1846public:
1847 bool isMachineConstantPoolEntry() const {
1848 return Offset < 0;
1849 }
1850
1851 const Constant *getConstVal() const {
1852 assert(!isMachineConstantPoolEntry() && "Wrong constantpool type");
1853 return Val.ConstVal;
1854 }
1855
1856 MachineConstantPoolValue *getMachineCPVal() const {
1857 assert(isMachineConstantPoolEntry() && "Wrong constantpool type");
1858 return Val.MachineCPVal;
1859 }
1860
1861 int getOffset() const {
1862 return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
1863 }
1864
1865 // Return the alignment of this constant pool object, which is either 0 (for
1866 // default alignment) or the desired value.
1867 unsigned getAlignment() const { return Alignment; }
1868 unsigned getTargetFlags() const { return TargetFlags; }
1869
1870 Type *getType() const;
1871
1872 static bool classof(const SDNode *N) {
1873 return N->getOpcode() == ISD::ConstantPool ||
1874 N->getOpcode() == ISD::TargetConstantPool;
1875 }
1876};
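A worked illustration of the tag-bit scheme above (my example, assuming a 32-bit int/unsigned): setting the top bit makes Offset negative, which is exactly what isMachineConstantPoolEntry() tests, and getOffset() masks the bit back off:

  int Offset = 0x10;
  Offset |= 1 << (sizeof(unsigned) * CHAR_BIT - 1);               // bit 31 set; Offset < 0
  int Real = Offset & ~(1 << (sizeof(unsigned) * CHAR_BIT - 1));  // Real == 0x10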
1877
1878/// Completely target-dependent object reference.
1879class TargetIndexSDNode : public SDNode {
1880 friend class SelectionDAG;
1881
1882 unsigned TargetFlags;
1883 int Index;
1884 int64_t Offset;
1885
1886public:
1887 TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned TF)
1888 : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
1889 TargetFlags(TF), Index(Idx), Offset(Ofs) {}
1890
1891 unsigned getTargetFlags() const { return TargetFlags; }
1892 int getIndex() const { return Index; }
1893 int64_t getOffset() const { return Offset; }
1894
1895 static bool classof(const SDNode *N) {
1896 return N->getOpcode() == ISD::TargetIndex;
1897 }
1898};
1899
1900class BasicBlockSDNode : public SDNode {
1901 friend class SelectionDAG;
1902
1903 MachineBasicBlock *MBB;
1904
1905 /// Debug info is meaningful and potentially useful here, but we create
1906 /// blocks out of order when they're jumped to, which makes it a bit
1907 /// harder. Let's see if we need it first.
1908 explicit BasicBlockSDNode(MachineBasicBlock *mbb)
1909 : SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb)
1910 {}
1911
1912public:
1913 MachineBasicBlock *getBasicBlock() const { return MBB; }
1914
1915 static bool classof(const SDNode *N) {
1916 return N->getOpcode() == ISD::BasicBlock;
1917 }
1918};
1919
1920/// A "pseudo-class" with methods for operating on BUILD_VECTORs.
1921class BuildVectorSDNode : public SDNode {
1922public:
1923 // These are constructed as SDNodes and then cast to BuildVectorSDNodes.
1924 explicit BuildVectorSDNode() = delete;
1925
1926 /// Check if this is a constant splat, and if so, find the
1927 /// smallest element size that splats the vector. If MinSplatBits is
1928 /// nonzero, the element size must be at least that large. Note that the
1929 /// splat element may be the entire vector (i.e., a one element vector).
1930 /// Returns the splat element value in SplatValue. Any undefined bits in
1931 /// that value are zero, and the corresponding bits in the SplatUndef mask
1932 /// are set. The SplatBitSize value is set to the splat element size in
1933 /// bits. HasAnyUndefs is set to true if any bits in the vector are
1934 /// undefined. isBigEndian describes the endianness of the target.
1935 bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
1936 unsigned &SplatBitSize, bool &HasAnyUndefs,
1937 unsigned MinSplatBits = 0,
1938 bool isBigEndian = false) const;
1939
1940 /// Returns the demanded splatted value or a null value if this is not a
1941 /// splat.
1942 ///
1943 /// The DemandedElts mask indicates the elements that must be in the splat.
1944 /// If passed a non-null UndefElements bitvector, it will resize it to match
1945 /// the vector width and set the bits where elements are undef.
1946 SDValue getSplatValue(const APInt &DemandedElts,
1947 BitVector *UndefElements = nullptr) const;
1948
1949 /// Returns the splatted value or a null value if this is not a splat.
1950 ///
1951 /// If passed a non-null UndefElements bitvector, it will resize it to match
1952 /// the vector width and set the bits where elements are undef.
1953 SDValue getSplatValue(BitVector *UndefElements = nullptr) const;
1954
1955 /// Returns the demanded splatted constant or null if this is not a constant
1956 /// splat.
1957 ///
1958 /// The DemandedElts mask indicates the elements that must be in the splat.
1959 /// If passed a non-null UndefElements bitvector, it will resize it to match
1960 /// the vector width and set the bits where elements are undef.
1961 ConstantSDNode *
1962 getConstantSplatNode(const APInt &DemandedElts,
1963 BitVector *UndefElements = nullptr) const;
1964
1965 /// Returns the splatted constant or null if this is not a constant
1966 /// splat.
1967 ///
1968 /// If passed a non-null UndefElements bitvector, it will resize it to match
1969 /// the vector width and set the bits where elements are undef.
1970 ConstantSDNode *
1971 getConstantSplatNode(BitVector *UndefElements = nullptr) const;
1972
1973 /// Returns the demanded splatted constant FP or null if this is not a
1974 /// constant FP splat.
1975 ///
1976 /// The DemandedElts mask indicates the elements that must be in the splat.
1977 /// If passed a non-null UndefElements bitvector, it will resize it to match
1978 /// the vector width and set the bits where elements are undef.
1979 ConstantFPSDNode *
1980 getConstantFPSplatNode(const APInt &DemandedElts,
1981 BitVector *UndefElements = nullptr) const;
1982
1983 /// Returns the splatted constant FP or null if this is not a constant
1984 /// FP splat.
1985 ///
1986 /// If passed a non-null UndefElements bitvector, it will resize it to match
1987 /// the vector width and set the bits where elements are undef.
1988 ConstantFPSDNode *
1989 getConstantFPSplatNode(BitVector *UndefElements = nullptr) const;
1990
1991 /// If this is a constant FP splat and the splatted constant FP is an
1992/// exact power of 2, return the log base 2 integer value. Otherwise,
1993 /// return -1.
1994 ///
1995 /// The BitWidth specifies the necessary bit precision.
1996 int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
1997 uint32_t BitWidth) const;
1998
1999 bool isConstant() const;
2000
2001 static bool classof(const SDNode *N) {
2002 return N->getOpcode() == ISD::BUILD_VECTOR;
2003 }
2004};
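A brief sketch (assuming 'V' is an SDValue that may wrap a BUILD_VECTOR) of the usual splat-query pattern on this class:

  if (auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode())) {
    BitVector Undefs;
    if (ConstantSDNode *Splat = BV->getConstantSplatNode(&Undefs)) {
      // Every defined element equals Splat; Undefs marks the undef lanes.
    }
  }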
2005
2006/// An SDNode that holds an arbitrary LLVM IR Value. This is
2007/// used when the SelectionDAG needs to make a simple reference to something
2008/// in the LLVM IR representation.
2009///
2010class SrcValueSDNode : public SDNode {
2011 friend class SelectionDAG;
2012
2013 const Value *V;
2014
2015 /// Create a SrcValue for a general value.
2016 explicit SrcValueSDNode(const Value *v)
2017 : SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}
2018
2019public:
2020 /// Return the contained Value.
2021 const Value *getValue() const { return V; }
2022
2023 static bool classof(const SDNode *N) {
2024 return N->getOpcode() == ISD::SRCVALUE;
2025 }
2026};
2027
2028class MDNodeSDNode : public SDNode {
2029 friend class SelectionDAG;
2030
2031 const MDNode *MD;
2032
2033 explicit MDNodeSDNode(const MDNode *md)
2034 : SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md)
2035 {}
2036
2037public:
2038 const MDNode *getMD() const { return MD; }
2039
2040 static bool classof(const SDNode *N) {
2041 return N->getOpcode() == ISD::MDNODE_SDNODE;
2042 }
2043};
2044
2045class RegisterSDNode : public SDNode {
2046 friend class SelectionDAG;
2047
2048 unsigned Reg;
2049
2050 RegisterSDNode(unsigned reg, EVT VT)
2051 : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {}
2052
2053public:
2054 unsigned getReg() const { return Reg; }
2055
2056 static bool classof(const SDNode *N) {
2057 return N->getOpcode() == ISD::Register;
2058 }
2059};
2060
2061class RegisterMaskSDNode : public SDNode {
2062 friend class SelectionDAG;
2063
2064 // The memory for RegMask is not owned by the node.
2065 const uint32_t *RegMask;
2066
2067 RegisterMaskSDNode(const uint32_t *mask)
2068 : SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)),
2069 RegMask(mask) {}
2070
2071public:
2072 const uint32_t *getRegMask() const { return RegMask; }
2073
2074 static bool classof(const SDNode *N) {
2075 return N->getOpcode() == ISD::RegisterMask;
2076 }
2077};
2078
2079class BlockAddressSDNode : public SDNode {
2080 friend class SelectionDAG;
2081
2082 const BlockAddress *BA;
2083 int64_t Offset;
2084 unsigned TargetFlags;
2085
2086 BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
2087 int64_t o, unsigned Flags)
2088 : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
2089 BA(ba), Offset(o), TargetFlags(Flags) {}
2090
2091public:
2092 const BlockAddress *getBlockAddress() const { return BA; }
2093 int64_t getOffset() const { return Offset; }
2094 unsigned getTargetFlags() const { return TargetFlags; }
2095
2096 static bool classof(const SDNode *N) {
2097 return N->getOpcode() == ISD::BlockAddress ||
2098 N->getOpcode() == ISD::TargetBlockAddress;
2099 }
2100};
2101
2102class LabelSDNode : public SDNode {
2103 friend class SelectionDAG;
2104
2105 MCSymbol *Label;
2106
2107 LabelSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, MCSymbol *L)
2108 : SDNode(Opcode, Order, dl, getSDVTList(MVT::Other)), Label(L) {
2109 assert(LabelSDNode::classof(this) && "not a label opcode");
2110 }
2111
2112public:
2113 MCSymbol *getLabel() const { return Label; }
2114
2115 static bool classof(const SDNode *N) {
2116 return N->getOpcode() == ISD::EH_LABEL ||
2117 N->getOpcode() == ISD::ANNOTATION_LABEL;
2118 }
2119};
2120
2121class ExternalSymbolSDNode : public SDNode {
2122 friend class SelectionDAG;
2123
2124 const char *Symbol;
2125 unsigned TargetFlags;
2126
2127 ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned TF, EVT VT)
2128 : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol, 0,
2129 DebugLoc(), getSDVTList(VT)),
2130 Symbol(Sym), TargetFlags(TF) {}
2131
2132public:
2133 const char *getSymbol() const { return Symbol; }
2134 unsigned getTargetFlags() const { return TargetFlags; }
2135
2136 static bool classof(const SDNode *N) {
2137 return N->getOpcode() == ISD::ExternalSymbol ||
2138 N->getOpcode() == ISD::TargetExternalSymbol;
2139 }
2140};
2141
2142class MCSymbolSDNode : public SDNode {
2143 friend class SelectionDAG;
2144
2145 MCSymbol *Symbol;
2146
2147 MCSymbolSDNode(MCSymbol *Symbol, EVT VT)
2148 : SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {}
2149
2150public:
2151 MCSymbol *getMCSymbol() const { return Symbol; }
2152
2153 static bool classof(const SDNode *N) {
2154 return N->getOpcode() == ISD::MCSymbol;
2155 }
2156};
2157
2158class CondCodeSDNode : public SDNode {
2159 friend class SelectionDAG;
2160
2161 ISD::CondCode Condition;
2162
2163 explicit CondCodeSDNode(ISD::CondCode Cond)
2164 : SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2165 Condition(Cond) {}
2166
2167public:
2168 ISD::CondCode get() const { return Condition; }
2169
2170 static bool classof(const SDNode *N) {
2171 return N->getOpcode() == ISD::CONDCODE;
2172 }
2173};
2174
2175/// This class is used to represent EVT's, which are used
2176/// to parameterize some operations.
2177class VTSDNode : public SDNode {
2178 friend class SelectionDAG;
2179
2180 EVT ValueType;
2181
2182 explicit VTSDNode(EVT VT)
2183 : SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2184 ValueType(VT) {}
2185
2186public:
2187 EVT getVT() const { return ValueType; }
2188
2189 static bool classof(const SDNode *N) {
2190 return N->getOpcode() == ISD::VALUETYPE;
2191 }
2192};
2193
2194/// Base class for LoadSDNode and StoreSDNode
2195class LSBaseSDNode : public MemSDNode {
2196public:
2197 LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
2198 SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
2199 MachineMemOperand *MMO)
2200 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2201 LSBaseSDNodeBits.AddressingMode = AM;
2202 assert(getAddressingMode() == AM && "Value truncated");
2203 }
2204
2205 const SDValue &getOffset() const {
2206 return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
2207 }
2208
2209 /// Return the addressing mode for this load or store:
2210 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2211 ISD::MemIndexedMode getAddressingMode() const {
2212 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2213 }
2214
2215 /// Return true if this is a pre/post inc/dec load/store.
2216 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2217
2218 /// Return true if this is NOT a pre/post inc/dec load/store.
2219 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2220
2221 static bool classof(const SDNode *N) {
2222 return N->getOpcode() == ISD::LOAD ||
2223 N->getOpcode() == ISD::STORE;
2224 }
2225};
2226
2227/// This class is used to represent ISD::LOAD nodes.
2228class LoadSDNode : public LSBaseSDNode {
2229 friend class SelectionDAG;
2230
2231 LoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2232 ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
2233 MachineMemOperand *MMO)
2234 : LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) {
2235 LoadSDNodeBits.ExtTy = ETy;
2236 assert(readMem() && "Load MachineMemOperand is not a load!");
2237 assert(!writeMem() && "Load MachineMemOperand is a store!");
2238 }
2239
2240public:
2241 /// Return whether this is a plain node,
2242 /// or one of the varieties of value-extending loads.
2243 ISD::LoadExtType getExtensionType() const {
2244 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2245 }
2246
2247 const SDValue &getBasePtr() const { return getOperand(1); }
2248 const SDValue &getOffset() const { return getOperand(2); }
2249
2250 static bool classof(const SDNode *N) {
2251 return N->getOpcode() == ISD::LOAD;
2252 }
2253};
2254
2255/// This class is used to represent ISD::STORE nodes.
2256class StoreSDNode : public LSBaseSDNode {
2257 friend class SelectionDAG;
2258
2259 StoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2260 ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
2261 MachineMemOperand *MMO)
2262 : LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) {
2263 StoreSDNodeBits.IsTruncating = isTrunc;
2264 assert(!readMem() && "Store MachineMemOperand is a load!");
2265 assert(writeMem() && "Store MachineMemOperand is not a store!");
2266 }
2267
2268public:
2269 /// Return true if the op does a truncation before store.
2270 /// For integers this is the same as doing a TRUNCATE and storing the result.
2271 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2272 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2273 void setTruncatingStore(bool Truncating) {
2274 StoreSDNodeBits.IsTruncating = Truncating;
2275 }
2276
2277 const SDValue &getValue() const { return getOperand(1); }
2278 const SDValue &getBasePtr() const { return getOperand(2); }
2279 const SDValue &getOffset() const { return getOperand(3); }
2280
2281 static bool classof(const SDNode *N) {
2282 return N->getOpcode() == ISD::STORE;
2283 }
2284};
2285
2286/// This base class is used to represent MLOAD and MSTORE nodes
2287class MaskedLoadStoreSDNode : public MemSDNode {
2288public:
2289 friend class SelectionDAG;
2290
2291 MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order,
2292 const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2293 MachineMemOperand *MMO)
2294 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {}
2295
2296 // MaskedLoadSDNode (Chain, ptr, mask, passthru)
2297 // MaskedStoreSDNode (Chain, data, ptr, mask)
2298 // Mask is a vector of i1 elements
2299 const SDValue &getBasePtr() const {
2300 return getOperand(getOpcode() == ISD::MLOAD ? 1 : 2);
2301 }
2302 const SDValue &getMask() const {
2303 return getOperand(getOpcode() == ISD::MLOAD ? 2 : 3);
2304 }
2305
2306 static bool classof(const SDNode *N) {
2307 return N->getOpcode() == ISD::MLOAD ||
2308 N->getOpcode() == ISD::MSTORE;
2309 }
2310};
2311
2312/// This class is used to represent an MLOAD node
2313class MaskedLoadSDNode : public MaskedLoadStoreSDNode {
2314public:
2315 friend class SelectionDAG;
2316
2317 MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2318 ISD::LoadExtType ETy, bool IsExpanding, EVT MemVT,
2319 MachineMemOperand *MMO)
2320 : MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, VTs, MemVT, MMO) {
2321 LoadSDNodeBits.ExtTy = ETy;
2322 LoadSDNodeBits.IsExpanding = IsExpanding;
2323 }
2324
2325 ISD::LoadExtType getExtensionType() const {
2326 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2327 }
2328
2329 const SDValue &getBasePtr() const { return getOperand(1); }
2330 const SDValue &getMask() const { return getOperand(2); }
2331 const SDValue &getPassThru() const { return getOperand(3); }
2332
2333 static bool classof(const SDNode *N) {
2334 return N->getOpcode() == ISD::MLOAD;
2335 }
2336
2337 bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
2338};
2339
2340/// This class is used to represent an MSTORE node
2341class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
2342public:
2343 friend class SelectionDAG;
2344
2345 MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2346 bool isTrunc, bool isCompressing, EVT MemVT,
2347 MachineMemOperand *MMO)
2348 : MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, MemVT, MMO) {
2349 StoreSDNodeBits.IsTruncating = isTrunc;
2350 StoreSDNodeBits.IsCompressing = isCompressing;
2351 }
2352
2353 /// Return true if the op does a truncation before store.
2354 /// For integers this is the same as doing a TRUNCATE and storing the result.
2355 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2356 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2357
2358 /// Returns true if the op does a compression to the vector before storing.
2359 /// The node contiguously stores the active elements (integers or floats)
2360 /// in src (those with their respective bit set in writemask k) to unaligned
2361 /// memory at base_addr.
2362 bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
2363
2364 const SDValue &getValue() const { return getOperand(1); }
2365 const SDValue &getBasePtr() const { return getOperand(2); }
2366 const SDValue &getMask() const { return getOperand(3); }
2367
2368 static bool classof(const SDNode *N) {
2369 return N->getOpcode() == ISD::MSTORE;
2370 }
2371};
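To make the compressing-store semantics above concrete (an illustrative example, not from the header): with source vector <a, b, c, d> and mask <1, 0, 1, 0>, a compressing store writes <a, c> contiguously starting at base_addr, whereas an ordinary masked store would write a and c at their original element offsets.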
2372
2373/// This is a base class used to represent
2374/// MGATHER and MSCATTER nodes
2375///
2376class MaskedGatherScatterSDNode : public MemSDNode {
2377public:
2378 friend class SelectionDAG;
2379
2380 MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
2381 const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2382 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2383 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2384 LSBaseSDNodeBits.AddressingMode = IndexType;
2385 assert(getIndexType() == IndexType && "Value truncated");
2386 }
2387
2388 /// How is Index applied to BasePtr when computing addresses.
2389 ISD::MemIndexType getIndexType() const {
2390 return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
2391 }
2392 bool isIndexScaled() const {
2393 return (getIndexType() == ISD::SIGNED_SCALED) ||
2394 (getIndexType() == ISD::UNSIGNED_SCALED);
2395 }
2396 bool isIndexSigned() const {
2397 return (getIndexType() == ISD::SIGNED_SCALED) ||
2398 (getIndexType() == ISD::SIGNED_UNSCALED);
2399 }
2400
2401 // In both nodes the mask is Op2 and the base address is Op3:
2402 // MaskedGatherSDNode (Chain, passthru, mask, base, index, scale)
2403 // MaskedScatterSDNode (Chain, value, mask, base, index, scale)
2404 // Mask is a vector of i1 elements
2405 const SDValue &getBasePtr() const { return getOperand(3); }
2406 const SDValue &getIndex() const { return getOperand(4); }
2407 const SDValue &getMask() const { return getOperand(2); }
2408 const SDValue &getScale() const { return getOperand(5); }
2409
2410 static bool classof(const SDNode *N) {
2411 return N->getOpcode() == ISD::MGATHER ||
2412 N->getOpcode() == ISD::MSCATTER;
2413 }
2414};
2415
2416/// This class is used to represent an MGATHER node
2417///
2418class MaskedGatherSDNode : public MaskedGatherScatterSDNode {
2419public:
2420 friend class SelectionDAG;
2421
2422 MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2423 EVT MemVT, MachineMemOperand *MMO,
2424 ISD::MemIndexType IndexType)
2425 : MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO,
2426 IndexType) {}
2427
2428 const SDValue &getPassThru() const { return getOperand(1); }
2429
2430 static bool classof(const SDNode *N) {
2431 return N->getOpcode() == ISD::MGATHER;
2432 }
2433};
2434
2435/// This class is used to represent an MSCATTER node
2436///
2437class MaskedScatterSDNode : public MaskedGatherScatterSDNode {
2438public:
2439 friend class SelectionDAG;
2440
2441 MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2442 EVT MemVT, MachineMemOperand *MMO,
2443 ISD::MemIndexType IndexType)
2444 : MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO,
2445 IndexType) {}
2446
2447 const SDValue &getValue() const { return getOperand(1); }
2448
2449 static bool classof(const SDNode *N) {
2450 return N->getOpcode() == ISD::MSCATTER;
2451 }
2452};
2453
2454/// An SDNode that represents everything that will be needed
2455/// to construct a MachineInstr. These nodes are created during the
2456/// instruction selection proper phase.
2457///
2458/// Note that the only supported way to set the `memoperands` is by calling the
2459/// `SelectionDAG::setNodeMemRefs` function as the memory management happens
2460/// inside the DAG rather than in the node.
2461class MachineSDNode : public SDNode {
2462private:
2463 friend class SelectionDAG;
2464
2465 MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs)
2466 : SDNode(Opc, Order, DL, VTs) {}
2467
2468 // We use a pointer union between a single `MachineMemOperand` pointer and
2469 // a pointer to an array of `MachineMemOperand` pointers. This is null when
2470 // the number of these is zero, the single pointer variant is used when the
2471 // number is one, and the array is used for larger numbers.
2472 //
2473 // The array is allocated via the `SelectionDAG`'s allocator and so will
2474 // always live until the DAG is cleaned up and doesn't require ownership here.
2475 //
2476 // We can't use something simpler like `TinyPtrVector` here because `SDNode`
2477 // subclasses aren't managed in a conforming C++ manner. See the comments on
2478 // `SelectionDAG::MorphNodeTo`, which detail what goes on, but the
2479 // constraint here is that these don't manage memory with their constructor or
2480 // destructor and can be initialized to a good state even if they start off
2481 // uninitialized.
2482 PointerUnion<MachineMemOperand *, MachineMemOperand **> MemRefs = {};
2483
2484 // Note that this could be folded into the above `MemRefs` member if doing so
2485 // is advantageous at some point. We don't need to store this in most cases.
2486 // However, at the moment this doesn't appear to make the allocation any
2487 // smaller and makes the code somewhat simpler to read.
2488 int NumMemRefs = 0;
2489
2490public:
2491 using mmo_iterator = ArrayRef<MachineMemOperand *>::const_iterator;
2492
2493 ArrayRef<MachineMemOperand *> memoperands() const {
2494 // Special case the common cases.
2495 if (NumMemRefs == 0)
2496 return {};
2497 if (NumMemRefs == 1)
2498 return makeArrayRef(MemRefs.getAddrOfPtr1(), 1);
2499
2500 // Otherwise we have an actual array.
2501 return makeArrayRef(MemRefs.get<MachineMemOperand **>(), NumMemRefs);
2502 }
2503 mmo_iterator memoperands_begin() const { return memoperands().begin(); }
2504 mmo_iterator memoperands_end() const { return memoperands().end(); }
2505 bool memoperands_empty() const { return memoperands().empty(); }
2506
2507 /// Clear out the memory reference descriptor list.
2508 void clearMemRefs() {
2509 MemRefs = nullptr;
2510 NumMemRefs = 0;
2511 }
2512
2513 static bool classof(const SDNode *N) {
2514 return N->isMachineOpcode();
2515 }
2516};
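A small sketch of the same one-vs-many encoding (my example; 'MMO' is an assumed MachineMemOperand* in scope; this mirrors how memoperands() reads the union above):

  // Pu holds either one MachineMemOperand* or a pointer to an array of them.
  PointerUnion<MachineMemOperand *, MachineMemOperand **> Pu;
  Pu = MMO;  // single-pointer variant active
  // Expose it as a one-element array, as memoperands() does for NumMemRefs == 1:
  ArrayRef<MachineMemOperand *> View = makeArrayRef(Pu.getAddrOfPtr1(), 1);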
2517
2518class SDNodeIterator : public std::iterator<std::forward_iterator_tag,
2519 SDNode, ptrdiff_t> {
2520 const SDNode *Node;
2521 unsigned Operand;
2522
2523 SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
2524
2525public:
2526 bool operator==(const SDNodeIterator& x) const {
2527 return Operand == x.Operand;
2528 }
2529 bool operator!=(const SDNodeIterator& x) const { return !operator==(x); }
2530
2531 pointer operator*() const {
2532 return Node->getOperand(Operand).getNode();
2533 }
2534 pointer operator->() const { return operator*(); }
2535
2536 SDNodeIterator& operator++() { // Preincrement
2537 ++Operand;
2538 return *this;
2539 }
2540 SDNodeIterator operator++(int) { // Postincrement
2541 SDNodeIterator tmp = *this; ++*this; return tmp;
2542 }
2543 size_t operator-(SDNodeIterator Other) const {
2544 assert(Node == Other.Node &&
2545 "Cannot compare iterators of two different nodes!");
2546 return Operand - Other.Operand;
2547 }
2548
2549 static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); }
2550 static SDNodeIterator end (const SDNode *N) {
2551 return SDNodeIterator(N, N->getNumOperands());
2552 }
2553
2554 unsigned getOperand() const { return Operand; }
2555 const SDNode *getNode() const { return Node; }
2556};
2557
2558template <> struct GraphTraits<SDNode*> {
2559 using NodeRef = SDNode *;
2560 using ChildIteratorType = SDNodeIterator;
2561
2562 static NodeRef getEntryNode(SDNode *N) { return N; }
2563
2564 static ChildIteratorType child_begin(NodeRef N) {
2565 return SDNodeIterator::begin(N);
2566 }
2567
2568 static ChildIteratorType child_end(NodeRef N) {
2569 return SDNodeIterator::end(N);
2570 }
2571};
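With this specialization in place, LLVM's generic graph utilities can walk a node's operand tree. A tiny sketch (assuming "llvm/ADT/DepthFirstIterator.h" is included and 'Root' is an SDNode*):

  for (SDNode *N : depth_first(Root)) {
    // Visits Root first, then (transitively) its operand nodes.
  }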
2572
2573/// A representation of the largest SDNode, for use in sizeof().
2574///
2575/// This needs to be a union because the largest node differs on 32 bit systems
2576/// with 4 and 8 byte pointer alignment, respectively.
2577using LargestSDNode = AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
2578 BlockAddressSDNode,
2579 GlobalAddressSDNode>;
2580
2581/// The SDNode class with the greatest alignment requirement.
2582using MostAlignedSDNode = GlobalAddressSDNode;
2583
2584namespace ISD {
2585
2586 /// Returns true if the specified node is a non-extending and unindexed load.
2587 inline bool isNormalLoad(const SDNode *N) {
2588 const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N);
2589 return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD &&
2590 Ld->getAddressingMode() == ISD::UNINDEXED;
2591 }
2592
2593 /// Returns true if the specified node is a non-extending load.
2594 inline bool isNON_EXTLoad(const SDNode *N) {
2595 return isa<LoadSDNode>(N) &&
31. Assuming 'N' is a 'LoadSDNode'
34. Returning the value 1, which participates in a condition later
2596 cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
32. 'N' is a 'LoadSDNode'
33. Assuming the condition is true
2597 }
2598
2599 /// Returns true if the specified node is a EXTLOAD.
2600 inline bool isEXTLoad(const SDNode *N) {
2601 return isa<LoadSDNode>(N) &&
2602 cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
2603 }
2604
2605 /// Returns true if the specified node is a SEXTLOAD.
2606 inline bool isSEXTLoad(const SDNode *N) {
2607 return isa<LoadSDNode>(N) &&
2608 cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
2609 }
2610
2611 /// Returns true if the specified node is a ZEXTLOAD.
2612 inline bool isZEXTLoad(const SDNode *N) {
2613 return isa<LoadSDNode>(N) &&
2614 cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
2615 }
2616
2617 /// Returns true if the specified node is an unindexed load.
2618 inline bool isUNINDEXEDLoad(const SDNode *N) {
2619 return isa<LoadSDNode>(N) &&
2620 cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
2621 }
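A short sketch of how these load predicates are used during combining (assuming 'N' is a candidate SDNode):

  if (ISD::isNormalLoad(N)) {
    // Plain load: no extension and no pre/post increment, so it is
    // safe to treat it as a simple chain + pointer access.
    auto *Ld = cast<LoadSDNode>(N);
    SDValue Chain = Ld->getChain();
    SDValue Ptr = Ld->getBasePtr();
    // ... e.g. fold Ld into a memory operand
  }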
2622
2623 /// Returns true if the specified node is a non-truncating
2624 /// and unindexed store.
2625 inline bool isNormalStore(const SDNode *N) {
2626 const StoreSDNode *St = dyn_cast<StoreSDNode>(N);
2627 return St && !St->isTruncatingStore() &&
2628 St->getAddressingMode() == ISD::UNINDEXED;
2629 }
2630
2631 /// Returns true if the specified node is a non-truncating store.
2632 inline bool isNON_TRUNCStore(const SDNode *N) {
2633 return isa<StoreSDNode>(N) && !cast<StoreSDNode>(N)->isTruncatingStore();
2634 }
2635
2636 /// Returns true if the specified node is a truncating store.
2637 inline bool isTRUNCStore(const SDNode *N) {
2638 return isa<StoreSDNode>(N) && cast<StoreSDNode>(N)->isTruncatingStore();
2639 }
2640
2641 /// Returns true if the specified node is an unindexed store.
2642 inline bool isUNINDEXEDStore(const SDNode *N) {
2643 return isa<StoreSDNode>(N) &&
2644 cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
2645 }
2646
2647 /// Attempt to match a unary predicate against a scalar/splat constant or
2648 /// every element of a constant BUILD_VECTOR.
2649/// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
2650 bool matchUnaryPredicate(SDValue Op,
2651 std::function<bool(ConstantSDNode *)> Match,
2652 bool AllowUndefs = false);
2653
2654 /// Attempt to match a binary predicate against a pair of scalar/splat
2655 /// constants or every element of a pair of constant BUILD_VECTORs.
2656/// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
2657 /// If AllowTypeMismatch is true then RetType + ArgTypes don't need to match.
2658 bool matchBinaryPredicate(
2659 SDValue LHS, SDValue RHS,
2660 std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
2661 bool AllowUndefs = false, bool AllowTypeMismatch = false);
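An illustrative call (my example, relying only on the declared semantics above): match "every element is a power of two" against a scalar constant or constant BUILD_VECTOR operand 'Op':

  bool AllPow2 = ISD::matchUnaryPredicate(Op, [](ConstantSDNode *C) {
    return C && C->getAPIntValue().isPowerOf2();
  });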
2662} // end namespace ISD
2663
2664} // end namespace llvm
2665
2666#endif // LLVM_CODEGEN_SELECTIONDAGNODES_H

/build/llvm-toolchain-snapshot-10~svn372087/include/llvm/Support/Casting.h

1//===- llvm/Support/Casting.h - Allow flexible, checked, casts --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the isa<X>(), cast<X>(), dyn_cast<X>(), cast_or_null<X>(),
10// and dyn_cast_or_null<X>() templates.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_SUPPORT_CASTING_H
15#define LLVM_SUPPORT_CASTING_H
16
17#include "llvm/Support/Compiler.h"
18#include "llvm/Support/type_traits.h"
19#include <cassert>
20#include <memory>
21#include <type_traits>
22
23namespace llvm {
24
25//===----------------------------------------------------------------------===//
26// isa<x> Support Templates
27//===----------------------------------------------------------------------===//
28
29// Define a template that can be specialized by smart pointers to reflect the
30// fact that they are automatically dereferenced, and are not involved with the
31// template selection process... the default implementation is a noop.
32//
33template<typename From> struct simplify_type {
34 using SimpleType = From; // The real type this represents...
35
36 // An accessor to get the real value...
37 static SimpleType &getSimplifiedValue(From &Val) { return Val; }
38};
39
40template<typename From> struct simplify_type<const From> {
41 using NonConstSimpleType = typename simplify_type<From>::SimpleType;
42 using SimpleType =
43 typename add_const_past_pointer<NonConstSimpleType>::type;
44 using RetType =
45 typename add_lvalue_reference_if_not_pointer<SimpleType>::type;
46
47 static RetType getSimplifiedValue(const From& Val) {
48 return simplify_type<From>::getSimplifiedValue(const_cast<From&>(Val));
49 }
50};
51
52// The core of the implementation of isa<X> is here; To and From should be
53// the names of classes. This template can be specialized to customize the
54// implementation of isa<> without rewriting it from scratch.
55template <typename To, typename From, typename Enabler = void>
56struct isa_impl {
57 static inline bool doit(const From &Val) {
58 return To::classof(&Val);
59 }
60};
61
62/// Always allow upcasts, and perform no dynamic check for them.
63template <typename To, typename From>
64struct isa_impl<
65 To, From, typename std::enable_if<std::is_base_of<To, From>::value>::type> {
66 static inline bool doit(const From &) { return true; }
67};
68
69template <typename To, typename From> struct isa_impl_cl {
70 static inline bool doit(const From &Val) {
71 return isa_impl<To, From>::doit(Val);
72 }
73};
74
75template <typename To, typename From> struct isa_impl_cl<To, const From> {
76 static inline bool doit(const From &Val) {
77 return isa_impl<To, From>::doit(Val);
78 }
79};
80
81template <typename To, typename From>
82struct isa_impl_cl<To, const std::unique_ptr<From>> {
83 static inline bool doit(const std::unique_ptr<From> &Val) {
84 assert(Val && "isa<> used on a null pointer")((Val && "isa<> used on a null pointer") ? static_cast
<void> (0) : __assert_fail ("Val && \"isa<> used on a null pointer\""
, "/build/llvm-toolchain-snapshot-10~svn372087/include/llvm/Support/Casting.h"
, 84, __PRETTY_FUNCTION__))
;
85 return isa_impl_cl<To, From>::doit(*Val);
86 }
87};
88
89template <typename To, typename From> struct isa_impl_cl<To, From*> {
90 static inline bool doit(const From *Val) {
91 assert(Val && "isa<> used on a null pointer")((Val && "isa<> used on a null pointer") ? static_cast
<void> (0) : __assert_fail ("Val && \"isa<> used on a null pointer\""
, "/build/llvm-toolchain-snapshot-10~svn372087/include/llvm/Support/Casting.h"
, 91, __PRETTY_FUNCTION__))
;
92 return isa_impl<To, From>::doit(*Val);
93 }
94};
95
96template <typename To, typename From> struct isa_impl_cl<To, From*const> {
97 static inline bool doit(const From *Val) {
98 assert(Val && "isa<> used on a null pointer")((Val && "isa<> used on a null pointer") ? static_cast
<void> (0) : __assert_fail ("Val && \"isa<> used on a null pointer\""
, "/build/llvm-toolchain-snapshot-10~svn372087/include/llvm/Support/Casting.h"
, 98, __PRETTY_FUNCTION__))
;
99 return isa_impl<To, From>::doit(*Val);
100 }
101};
102
103template <typename To, typename From> struct isa_impl_cl<To, const From*> {
104 static inline bool doit(const From *Val) {
105 assert(Val && "isa<> used on a null pointer")((Val && "isa<> used on a null pointer") ? static_cast
<void> (0) : __assert_fail ("Val && \"isa<> used on a null pointer\""
, "/build/llvm-toolchain-snapshot-10~svn372087/include/llvm/Support/Casting.h"
, 105, __PRETTY_FUNCTION__))
;
106 return isa_impl<To, From>::doit(*Val);
107 }
108};
109
110template <typename To, typename From> struct isa_impl_cl<To, const From*const> {
111 static inline bool doit(const From *Val) {
112 assert(Val && "isa<> used on a null pointer")((Val && "isa<> used on a null pointer") ? static_cast
<void> (0) : __assert_fail ("Val && \"isa<> used on a null pointer\""
, "/build/llvm-toolchain-snapshot-10~svn372087/include/llvm/Support/Casting.h"
, 112, __PRETTY_FUNCTION__))
;
113 return isa_impl<To, From>::doit(*Val);
114 }
115};
116
117template<typename To, typename From, typename SimpleFrom>
118struct isa_impl_wrap {
119 // When From != SimplifiedType, we can simplify the type some more by using
120 // the simplify_type template.
121 static bool doit(const From &Val) {
122 return isa_impl_wrap<To, SimpleFrom,
123 typename simplify_type<SimpleFrom>::SimpleType>::doit(
124 simplify_type<const From>::getSimplifiedValue(Val));
125 }
126};
127
128template<typename To, typename FromTy>
129struct isa_impl_wrap<To, FromTy, FromTy> {
130 // When From == SimpleType, we are as simple as we are going to get.
131 static bool doit(const FromTy &Val) {
132 return isa_impl_cl<To,FromTy>::doit(Val);
133 }
134};
135
136// isa<X> - Return true if the parameter to the template is an instance of the
137// template type argument. Used like this:
138//
139// if (isa<Type>(myVal)) { ... }
140//
141template <class X, class Y> LLVM_NODISCARD inline bool isa(const Y &Val) {
142 return isa_impl_wrap<X, const Y,
143 typename simplify_type<const Y>::SimpleType>::doit(Val);
144}
145
146// isa_and_nonnull<X> - Functionally identical to isa, except that a null value
147// is accepted.
148//
149template <class X, class Y>
150LLVM_NODISCARD inline bool isa_and_nonnull(const Y &Val) {
151 if (!Val)
152 return false;
153 return isa<X>(Val);
154}
155
156//===----------------------------------------------------------------------===//
157// cast<x> Support Templates
158//===----------------------------------------------------------------------===//
159
160template<class To, class From> struct cast_retty;
161
162// Calculate what type the 'cast' function should return, based on a requested
163// type of To and a source type of From.
164template<class To, class From> struct cast_retty_impl {
165 using ret_type = To &; // Normal case, return Ty&
166};
167template<class To, class From> struct cast_retty_impl<To, const From> {
168 using ret_type = const To &; // Normal case, return const Ty&
169};
170
171template<class To, class From> struct cast_retty_impl<To, From*> {
172 using ret_type = To *; // Pointer arg case, return Ty*
173};
174
175template<class To, class From> struct cast_retty_impl<To, const From*> {
176 using ret_type = const To *; // Constant pointer arg case, return const Ty*
177};
178
179template<class To, class From> struct cast_retty_impl<To, const From*const> {
180 using ret_type = const To *; // Constant pointer arg case, return const Ty*
181};
182
183template <class To, class From>
184struct cast_retty_impl<To, std::unique_ptr<From>> {
185private:
186 using PointerType = typename cast_retty_impl<To, From *>::ret_type;
187 using ResultType = typename std::remove_pointer<PointerType>::type;
188
189public:
190 using ret_type = std::unique_ptr<ResultType>;
191};
192
193template<class To, class From, class SimpleFrom>
194struct cast_retty_wrap {
195 // When the simplified type and the from type are not the same, use the type
196 // simplifier to reduce the type, then reuse cast_retty_impl to get the
197 // resultant type.
198 using ret_type = typename cast_retty<To, SimpleFrom>::ret_type;
199};
200
201template<class To, class FromTy>
202struct cast_retty_wrap<To, FromTy, FromTy> {
203 // When the simplified type is equal to the from type, use it directly.
204 using ret_type = typename cast_retty_impl<To,FromTy>::ret_type;
205};
206
207template<class To, class From>
208struct cast_retty {
209 using ret_type = typename cast_retty_wrap<
210 To, From, typename simplify_type<From>::SimpleType>::ret_type;
211};
212
213// Ensure the non-simple values are converted using the simplify_type template
214// that may be specialized by smart pointers...
215//
216template<class To, class From, class SimpleFrom> struct cast_convert_val {
217 // This is not a simple type, use the template to simplify it...
218 static typename cast_retty<To, From>::ret_type doit(From &Val) {
219 return cast_convert_val<To, SimpleFrom,
48. Calling 'cast_convert_val::doit'
50. Returning from 'cast_convert_val::doit'
220 typename simplify_type<SimpleFrom>::SimpleType>::doit(
221 simplify_type<From>::getSimplifiedValue(Val));
222 }
223};
224
225template<class To, class FromTy> struct cast_convert_val<To,FromTy,FromTy> {
226 // This _is_ a simple type, just cast it.
227 static typename cast_retty<To, FromTy>::ret_type doit(const FromTy &Val) {
228 typename cast_retty<To, FromTy>::ret_type Res2
229 = (typename cast_retty<To, FromTy>::ret_type)const_cast<FromTy&>(Val);
230 return Res2;
49. Returning without writing to 'Val->.MemSDNodeBits.IsNonTemporal', which participates in a condition later
231 }
232};
233
234template <class X> struct is_simple_type {
235 static const bool value =
236 std::is_same<X, typename simplify_type<X>::SimpleType>::value;
237};
238
239// cast<X> - Return the argument parameter cast to the specified type. This
240// casting operator asserts that the type is correct, so it does not return null
241// on failure. It does not allow a null argument (use cast_or_null for that).
242// It is typically used like this:
243//
244// cast<Instruction>(myVal)->getParent()
245//
246template <class X, class Y>
247inline typename std::enable_if<!is_simple_type<Y>::value,
248 typename cast_retty<X, const Y>::ret_type>::type
249cast(const Y &Val) {
250 assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
251 return cast_convert_val<
252 X, const Y, typename simplify_type<const Y>::SimpleType>::doit(Val);
253}
254
255template <class X, class Y>
256inline typename cast_retty<X, Y>::ret_type cast(Y &Val) {
257 assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
45. Assuming 'Val' is a 'LoadSDNode'
46. '?' condition is true
258 return cast_convert_val<X, Y,
47. Calling 'cast_convert_val::doit'
51. Returning from 'cast_convert_val::doit'
259 typename simplify_type<Y>::SimpleType>::doit(Val);
260}
261
262template <class X, class Y>
263inline typename cast_retty<X, Y *>::ret_type cast(Y *Val) {
264 assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
265 return cast_convert_val<X, Y*,
266 typename simplify_type<Y*>::SimpleType>::doit(Val);
267}
268
269template <class X, class Y>
270inline typename cast_retty<X, std::unique_ptr<Y>>::ret_type
271cast(std::unique_ptr<Y> &&Val) {
272 assert(isa<X>(Val.get()) && "cast<Ty>() argument of incompatible type!");
273 using ret_type = typename cast_retty<X, std::unique_ptr<Y>>::ret_type;
274 return ret_type(
275 cast_convert_val<X, Y *, typename simplify_type<Y *>::SimpleType>::doit(
276 Val.release()));
277}
278
279// cast_or_null<X> - Functionally identical to cast, except that a null value is
280// accepted.
281//
282template <class X, class Y>
283LLVM_NODISCARD inline
284 typename std::enable_if<!is_simple_type<Y>::value,
285 typename cast_retty<X, const Y>::ret_type>::type
286 cast_or_null(const Y &Val) {
287 if (!Val)
288 return nullptr;
289 assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
290 return cast<X>(Val);
291}
292
293template <class X, class Y>
294LLVM_NODISCARD inline
295 typename std::enable_if<!is_simple_type<Y>::value,
296 typename cast_retty<X, Y>::ret_type>::type
297 cast_or_null(Y &Val) {
298 if (!Val)
299 return nullptr;
300 assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
301 return cast<X>(Val);
302}
303
304template <class X, class Y>
305LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type
306cast_or_null(Y *Val) {
307 if (!Val) return nullptr;
308 assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
309 return cast<X>(Val);
310}
311
312template <class X, class Y>
313inline typename cast_retty<X, std::unique_ptr<Y>>::ret_type
314cast_or_null(std::unique_ptr<Y> &&Val) {
315 if (!Val)
316 return nullptr;
317 return cast<X>(std::move(Val));
318}
319
320// dyn_cast<X> - Return the argument parameter cast to the specified type. This
321// casting operator returns null if the argument is of the wrong type, so it can
322// be used to test for a type as well as cast if successful. This should be
323// used in the context of an if statement like this:
324//
325// if (const Instruction *I = dyn_cast<Instruction>(myVal)) { ... }
326//
327
328template <class X, class Y>
329LLVM_NODISCARD inline
330 typename std::enable_if<!is_simple_type<Y>::value,
331 typename cast_retty<X, const Y>::ret_type>::type
332 dyn_cast(const Y &Val) {
333 return isa<X>(Val) ? cast<X>(Val) : nullptr;
334}
335
336template <class X, class Y>
337LLVM_NODISCARD inline typename cast_retty<X, Y>::ret_type dyn_cast(Y &Val) {
338 return isa<X>(Val) ? cast<X>(Val) : nullptr;
339}
340
341template <class X, class Y>
342LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type dyn_cast(Y *Val) {
343 return isa<X>(Val) ? cast<X>(Val) : nullptr;
344}
345
346// dyn_cast_or_null<X> - Functionally identical to dyn_cast, except that a null
347// value is accepted.
348//
349template <class X, class Y>
350LLVM_NODISCARD inline
351 typename std::enable_if<!is_simple_type<Y>::value,
352 typename cast_retty<X, const Y>::ret_type>::type
353 dyn_cast_or_null(const Y &Val) {
354 return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
355}
356
357template <class X, class Y>
358LLVM_NODISCARD inline
359 typename std::enable_if<!is_simple_type<Y>::value,
360 typename cast_retty<X, Y>::ret_type>::type
361 dyn_cast_or_null(Y &Val) {
362 return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
363}
364
365template <class X, class Y>
366LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type
367dyn_cast_or_null(Y *Val) {
368 return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
369}
370
371// unique_dyn_cast<X> - Given a unique_ptr<Y>, try to return a unique_ptr<X>,
372// taking ownership of the input pointer iff isa<X>(Val) is true. If the
373// cast is successful, From refers to nullptr on exit and the casted value
374// is returned. If the cast is unsuccessful, the function returns nullptr
375// and From is unchanged.
376template <class X, class Y>
377LLVM_NODISCARD inline auto unique_dyn_cast(std::unique_ptr<Y> &Val)
378 -> decltype(cast<X>(Val)) {
379 if (!isa<X>(Val))
380 return nullptr;
381 return cast<X>(std::move(Val));
382}
383
384template <class X, class Y>
385LLVM_NODISCARD inline auto unique_dyn_cast(std::unique_ptr<Y> &&Val)
386 -> decltype(cast<X>(Val)) {
387 return unique_dyn_cast<X, Y>(Val);
388}
389
390// unique_dyn_cast_or_null<X> - Functionally identical to unique_dyn_cast,
391// except that a null value is accepted.
392template <class X, class Y>
393LLVM_NODISCARD inline auto unique_dyn_cast_or_null(std::unique_ptr<Y> &Val)
394 -> decltype(cast<X>(Val)) {
395 if (!Val)
396 return nullptr;
397 return unique_dyn_cast<X, Y>(Val);
398}
399
400template <class X, class Y>
401LLVM_NODISCARD inline auto unique_dyn_cast_or_null(std::unique_ptr<Y> &&Val)
402 -> decltype(cast<X>(Val)) {
403 return unique_dyn_cast_or_null<X, Y>(Val);
404}
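A self-contained sketch (hypothetical Shape/Circle hierarchy, not from this header) showing the classof() hook all of these templates dispatch on:

  struct Shape {
    enum Kind { SK_Circle, SK_Square };
    Kind K;
    Shape(Kind K) : K(K) {}
  };
  struct Circle : Shape {
    Circle() : Shape(SK_Circle) {}
    static bool classof(const Shape *S) { return S->K == SK_Circle; }
  };

  void demo(Shape *S) {
    if (isa<Circle>(S)) { /* type test only, no cast */ }
    if (auto *C = dyn_cast<Circle>(S)) { /* C is non-null only on match */ }
    Circle *C2 = cast<Circle>(S);  // asserts on mismatch; never returns null
  }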
405
406} // end namespace llvm
407
408#endif // LLVM_SUPPORT_CASTING_H