Bug Summary

File: lib/Target/X86/X86ISelDAGToDAG.cpp
Warning: line 581, column 13
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name X86ISelDAGToDAG.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~svn372087/build-llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86 -I /build/llvm-toolchain-snapshot-10~svn372087/build-llvm/include -I /build/llvm-toolchain-snapshot-10~svn372087/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu 
-internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~svn372087/build-llvm/lib/Target/X86 -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~svn372087=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-09-17-145504-7198-1 -x c++ /build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp

/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp

1//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines a DAG pattern matching instruction selector for X86,
10// converting from a legalized dag to a X86 dag.
11//
12//===----------------------------------------------------------------------===//
13
14#include "X86.h"
15#include "X86MachineFunctionInfo.h"
16#include "X86RegisterInfo.h"
17#include "X86Subtarget.h"
18#include "X86TargetMachine.h"
19#include "llvm/ADT/Statistic.h"
20#include "llvm/CodeGen/MachineFrameInfo.h"
21#include "llvm/CodeGen/MachineFunction.h"
22#include "llvm/CodeGen/SelectionDAGISel.h"
23#include "llvm/Config/llvm-config.h"
24#include "llvm/IR/ConstantRange.h"
25#include "llvm/IR/Function.h"
26#include "llvm/IR/Instructions.h"
27#include "llvm/IR/Intrinsics.h"
28#include "llvm/IR/Type.h"
29#include "llvm/Support/Debug.h"
30#include "llvm/Support/ErrorHandling.h"
31#include "llvm/Support/KnownBits.h"
32#include "llvm/Support/MathExtras.h"
33#include "llvm/Support/raw_ostream.h"
34#include "llvm/Target/TargetMachine.h"
35#include "llvm/Target/TargetOptions.h"
36#include <stdint.h>
37using namespace llvm;
38
39#define DEBUG_TYPE"x86-isel" "x86-isel"
40
41STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor")static llvm::Statistic NumLoadMoved = {"x86-isel", "NumLoadMoved"
, "Number of loads moved below TokenFactor", {0}, {false}}
;
42
43static cl::opt<bool> AndImmShrink("x86-and-imm-shrink", cl::init(true),
44 cl::desc("Enable setting constant bits to reduce size of mask immediates"),
45 cl::Hidden);
46
47//===----------------------------------------------------------------------===//
48// Pattern Matcher Implementation
49//===----------------------------------------------------------------------===//
50
51namespace {
52 /// This corresponds to X86AddressMode, but uses SDValue's instead of register
53 /// numbers for the leaves of the matched tree.
54 struct X86ISelAddressMode {
55 enum {
56 RegBase,
57 FrameIndexBase
58 } BaseType;
59
60 // This is really a union, discriminated by BaseType!
61 SDValue Base_Reg;
62 int Base_FrameIndex;
63
64 unsigned Scale;
65 SDValue IndexReg;
66 int32_t Disp;
67 SDValue Segment;
68 const GlobalValue *GV;
69 const Constant *CP;
70 const BlockAddress *BlockAddr;
71 const char *ES;
72 MCSymbol *MCSym;
73 int JT;
74 unsigned Align; // CP alignment.
75 unsigned char SymbolFlags; // X86II::MO_*
76 bool NegateIndex = false;
77
78 X86ISelAddressMode()
79 : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
80 Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
81 MCSym(nullptr), JT(-1), Align(0), SymbolFlags(X86II::MO_NO_FLAG) {}
82
83 bool hasSymbolicDisplacement() const {
84 return GV != nullptr || CP != nullptr || ES != nullptr ||
85 MCSym != nullptr || JT != -1 || BlockAddr != nullptr;
86 }
87
88 bool hasBaseOrIndexReg() const {
89 return BaseType == FrameIndexBase ||
90 IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
91 }
92
93 /// Return true if this addressing mode is already RIP-relative.
94 bool isRIPRelative() const {
95 if (BaseType != RegBase) return false;
96 if (RegisterSDNode *RegNode =
97 dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
98 return RegNode->getReg() == X86::RIP;
99 return false;
100 }
101
102 void setBaseReg(SDValue Reg) {
103 BaseType = RegBase;
104 Base_Reg = Reg;
105 }
106
107#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
108 void dump(SelectionDAG *DAG = nullptr) {
109 dbgs() << "X86ISelAddressMode " << this << '\n';
110 dbgs() << "Base_Reg ";
111 if (Base_Reg.getNode())
112 Base_Reg.getNode()->dump(DAG);
113 else
114 dbgs() << "nul\n";
115 if (BaseType == FrameIndexBase)
116 dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n';
117 dbgs() << " Scale " << Scale << '\n'
118 << "IndexReg ";
119 if (NegateIndex)
120 dbgs() << "negate ";
121 if (IndexReg.getNode())
122 IndexReg.getNode()->dump(DAG);
123 else
124 dbgs() << "nul\n";
125 dbgs() << " Disp " << Disp << '\n'
126 << "GV ";
127 if (GV)
128 GV->dump();
129 else
130 dbgs() << "nul";
131 dbgs() << " CP ";
132 if (CP)
133 CP->dump();
134 else
135 dbgs() << "nul";
136 dbgs() << '\n'
137 << "ES ";
138 if (ES)
139 dbgs() << ES;
140 else
141 dbgs() << "nul";
142 dbgs() << " MCSym ";
143 if (MCSym)
144 dbgs() << MCSym;
145 else
146 dbgs() << "nul";
147 dbgs() << " JT" << JT << " Align" << Align << '\n';
148 }
149#endif
150 };
151}
152
153namespace {
154 //===--------------------------------------------------------------------===//
155 /// ISel - X86-specific code to select X86 machine instructions for
156 /// SelectionDAG operations.
157 ///
158 class X86DAGToDAGISel final : public SelectionDAGISel {
159 /// Keep a pointer to the X86Subtarget around so that we can
160 /// make the right decision when generating code for different targets.
161 const X86Subtarget *Subtarget;
162
163 /// If true, selector should try to optimize for code size instead of
164 /// performance.
165 bool OptForSize;
166
167 /// If true, selector should try to optimize for minimum code size.
168 bool OptForMinSize;
169
170 /// Disable direct TLS access through segment registers.
171 bool IndirectTlsSegRefs;
172
173 public:
174 explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
175 : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr), OptForSize(false),
176 OptForMinSize(false), IndirectTlsSegRefs(false) {}
177
178 StringRef getPassName() const override {
179 return "X86 DAG->DAG Instruction Selection";
180 }
181
182 bool runOnMachineFunction(MachineFunction &MF) override {
183 // Reset the subtarget each time through.
184 Subtarget = &MF.getSubtarget<X86Subtarget>();
185 IndirectTlsSegRefs = MF.getFunction().hasFnAttribute(
186 "indirect-tls-seg-refs");
187
188 // OptFor[Min]Size are used in pattern predicates that isel is matching.
189 OptForSize = MF.getFunction().hasOptSize();
190 OptForMinSize = MF.getFunction().hasMinSize();
191 assert((!OptForMinSize || OptForSize) &&(((!OptForMinSize || OptForSize) && "OptForMinSize implies OptForSize"
) ? static_cast<void> (0) : __assert_fail ("(!OptForMinSize || OptForSize) && \"OptForMinSize implies OptForSize\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 192, __PRETTY_FUNCTION__))
192 "OptForMinSize implies OptForSize")(((!OptForMinSize || OptForSize) && "OptForMinSize implies OptForSize"
) ? static_cast<void> (0) : __assert_fail ("(!OptForMinSize || OptForSize) && \"OptForMinSize implies OptForSize\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 192, __PRETTY_FUNCTION__))
;
193
194 SelectionDAGISel::runOnMachineFunction(MF);
195 return true;
196 }
197
198 void EmitFunctionEntryCode() override;
199
200 bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;
201
202 void PreprocessISelDAG() override;
203 void PostprocessISelDAG() override;
204
205// Include the pieces autogenerated from the target description.
206#include "X86GenDAGISel.inc"
207
208 private:
209 void Select(SDNode *N) override;
210
211 bool foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
212 bool matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
213 bool matchWrapper(SDValue N, X86ISelAddressMode &AM);
214 bool matchAddress(SDValue N, X86ISelAddressMode &AM);
215 bool matchVectorAddress(SDValue N, X86ISelAddressMode &AM);
216 bool matchAdd(SDValue &N, X86ISelAddressMode &AM, unsigned Depth);
217 bool matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
218 unsigned Depth);
219 bool matchAddressBase(SDValue N, X86ISelAddressMode &AM);
220 bool selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
221 SDValue &Scale, SDValue &Index, SDValue &Disp,
222 SDValue &Segment);
223 bool selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
224 SDValue &Scale, SDValue &Index, SDValue &Disp,
225 SDValue &Segment);
226 bool selectMOV64Imm32(SDValue N, SDValue &Imm);
227 bool selectLEAAddr(SDValue N, SDValue &Base,
228 SDValue &Scale, SDValue &Index, SDValue &Disp,
229 SDValue &Segment);
230 bool selectLEA64_32Addr(SDValue N, SDValue &Base,
231 SDValue &Scale, SDValue &Index, SDValue &Disp,
232 SDValue &Segment);
233 bool selectTLSADDRAddr(SDValue N, SDValue &Base,
234 SDValue &Scale, SDValue &Index, SDValue &Disp,
235 SDValue &Segment);
236 bool selectScalarSSELoad(SDNode *Root, SDNode *Parent, SDValue N,
237 SDValue &Base, SDValue &Scale,
238 SDValue &Index, SDValue &Disp,
239 SDValue &Segment,
240 SDValue &NodeWithChain);
241 bool selectRelocImm(SDValue N, SDValue &Op);
242
243 bool tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
244 SDValue &Base, SDValue &Scale,
245 SDValue &Index, SDValue &Disp,
246 SDValue &Segment);
247
248 // Convenience method where P is also root.
249 bool tryFoldLoad(SDNode *P, SDValue N,
250 SDValue &Base, SDValue &Scale,
251 SDValue &Index, SDValue &Disp,
252 SDValue &Segment) {
253 return tryFoldLoad(P, P, N, Base, Scale, Index, Disp, Segment);
254 }
255
256 /// Implement addressing mode selection for inline asm expressions.
257 bool SelectInlineAsmMemoryOperand(const SDValue &Op,
258 unsigned ConstraintID,
259 std::vector<SDValue> &OutOps) override;
260
261 void emitSpecialCodeForMain();
262
263 inline void getAddressOperands(X86ISelAddressMode &AM, const SDLoc &DL,
264 MVT VT, SDValue &Base, SDValue &Scale,
265 SDValue &Index, SDValue &Disp,
266 SDValue &Segment) {
267 if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
268 Base = CurDAG->getTargetFrameIndex(
269 AM.Base_FrameIndex, TLI->getPointerTy(CurDAG->getDataLayout()));
270 else if (AM.Base_Reg.getNode())
271 Base = AM.Base_Reg;
272 else
273 Base = CurDAG->getRegister(0, VT);
274
275 Scale = getI8Imm(AM.Scale, DL);
276
277 // Negate the index if needed.
278 if (AM.NegateIndex) {
279 unsigned NegOpc = VT == MVT::i64 ? X86::NEG64r : X86::NEG32r;
280 SDValue Neg = SDValue(CurDAG->getMachineNode(NegOpc, DL, VT, MVT::i32,
281 AM.IndexReg), 0);
282 AM.IndexReg = Neg;
283 }
284
285 if (AM.IndexReg.getNode())
286 Index = AM.IndexReg;
287 else
288 Index = CurDAG->getRegister(0, VT);
289
290 // These are 32-bit even in 64-bit mode since RIP-relative offset
291 // is 32-bit.
292 if (AM.GV)
293 Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
294 MVT::i32, AM.Disp,
295 AM.SymbolFlags);
296 else if (AM.CP)
297 Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
298 AM.Align, AM.Disp, AM.SymbolFlags);
299 else if (AM.ES) {
300 assert(!AM.Disp && "Non-zero displacement is ignored with ES.")((!AM.Disp && "Non-zero displacement is ignored with ES."
) ? static_cast<void> (0) : __assert_fail ("!AM.Disp && \"Non-zero displacement is ignored with ES.\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 300, __PRETTY_FUNCTION__))
;
301 Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
302 } else if (AM.MCSym) {
303 assert(!AM.Disp && "Non-zero displacement is ignored with MCSym.")((!AM.Disp && "Non-zero displacement is ignored with MCSym."
) ? static_cast<void> (0) : __assert_fail ("!AM.Disp && \"Non-zero displacement is ignored with MCSym.\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 303, __PRETTY_FUNCTION__))
;
304 assert(AM.SymbolFlags == 0 && "oo")((AM.SymbolFlags == 0 && "oo") ? static_cast<void>
(0) : __assert_fail ("AM.SymbolFlags == 0 && \"oo\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 304, __PRETTY_FUNCTION__))
;
305 Disp = CurDAG->getMCSymbol(AM.MCSym, MVT::i32);
306 } else if (AM.JT != -1) {
307 assert(!AM.Disp && "Non-zero displacement is ignored with JT.")((!AM.Disp && "Non-zero displacement is ignored with JT."
) ? static_cast<void> (0) : __assert_fail ("!AM.Disp && \"Non-zero displacement is ignored with JT.\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 307, __PRETTY_FUNCTION__))
;
308 Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
309 } else if (AM.BlockAddr)
310 Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
311 AM.SymbolFlags);
312 else
313 Disp = CurDAG->getTargetConstant(AM.Disp, DL, MVT::i32);
314
315 if (AM.Segment.getNode())
316 Segment = AM.Segment;
317 else
318 Segment = CurDAG->getRegister(0, MVT::i16);
319 }
320
321 // Utility function to determine whether we should avoid selecting
322 // immediate forms of instructions for better code size or not.
323 // At a high level, we'd like to avoid such instructions when
324 // we have similar constants used within the same basic block
325 // that can be kept in a register.
326 //
327 bool shouldAvoidImmediateInstFormsForSize(SDNode *N) const {
328 uint32_t UseCount = 0;
329
330 // Do not want to hoist if we're not optimizing for size.
331 // TODO: We'd like to remove this restriction.
332 // See the comment in X86InstrInfo.td for more info.
333 if (!OptForSize)
334 return false;
335
336 // Walk all the users of the immediate.
337 for (SDNode::use_iterator UI = N->use_begin(),
338 UE = N->use_end(); (UI != UE) && (UseCount < 2); ++UI) {
339
340 SDNode *User = *UI;
341
342 // This user is already selected. Count it as a legitimate use and
343 // move on.
344 if (User->isMachineOpcode()) {
345 UseCount++;
346 continue;
347 }
348
349 // We want to count stores of immediates as real uses.
350 if (User->getOpcode() == ISD::STORE &&
351 User->getOperand(1).getNode() == N) {
352 UseCount++;
353 continue;
354 }
355
356 // We don't currently match users that have > 2 operands (except
357 // for stores, which are handled above)
358 // Those instruction won't match in ISEL, for now, and would
359 // be counted incorrectly.
360 // This may change in the future as we add additional instruction
361 // types.
362 if (User->getNumOperands() != 2)
363 continue;
364
365 // If this can match to INC/DEC, don't count it as a use.
366 if (User->getOpcode() == ISD::ADD &&
367 (isOneConstant(SDValue(N, 0)) || isAllOnesConstant(SDValue(N, 0))))
368 continue;
369
370 // Immediates that are used for offsets as part of stack
371 // manipulation should be left alone. These are typically
372 // used to indicate SP offsets for argument passing and
373 // will get pulled into stores/pushes (implicitly).
374 if (User->getOpcode() == X86ISD::ADD ||
375 User->getOpcode() == ISD::ADD ||
376 User->getOpcode() == X86ISD::SUB ||
377 User->getOpcode() == ISD::SUB) {
378
379 // Find the other operand of the add/sub.
380 SDValue OtherOp = User->getOperand(0);
381 if (OtherOp.getNode() == N)
382 OtherOp = User->getOperand(1);
383
384 // Don't count if the other operand is SP.
385 RegisterSDNode *RegNode;
386 if (OtherOp->getOpcode() == ISD::CopyFromReg &&
387 (RegNode = dyn_cast_or_null<RegisterSDNode>(
388 OtherOp->getOperand(1).getNode())))
389 if ((RegNode->getReg() == X86::ESP) ||
390 (RegNode->getReg() == X86::RSP))
391 continue;
392 }
393
394 // ... otherwise, count this and move on.
395 UseCount++;
396 }
397
398 // If we have more than 1 use, then recommend for hoisting.
399 return (UseCount > 1);
400 }
401
402 /// Return a target constant with the specified value of type i8.
403 inline SDValue getI8Imm(unsigned Imm, const SDLoc &DL) {
404 return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
405 }
406
407 /// Return a target constant with the specified value, of type i32.
408 inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
409 return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
410 }
411
412 /// Return a target constant with the specified value, of type i64.
413 inline SDValue getI64Imm(uint64_t Imm, const SDLoc &DL) {
414 return CurDAG->getTargetConstant(Imm, DL, MVT::i64);
415 }
416
417 SDValue getExtractVEXTRACTImmediate(SDNode *N, unsigned VecWidth,
418 const SDLoc &DL) {
419 assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width")(((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width"
) ? static_cast<void> (0) : __assert_fail ("(VecWidth == 128 || VecWidth == 256) && \"Unexpected vector width\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 419, __PRETTY_FUNCTION__))
;
420 uint64_t Index = N->getConstantOperandVal(1);
421 MVT VecVT = N->getOperand(0).getSimpleValueType();
422 return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
423 }
424
425 SDValue getInsertVINSERTImmediate(SDNode *N, unsigned VecWidth,
426 const SDLoc &DL) {
427 assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width")(((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width"
) ? static_cast<void> (0) : __assert_fail ("(VecWidth == 128 || VecWidth == 256) && \"Unexpected vector width\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 427, __PRETTY_FUNCTION__))
;
428 uint64_t Index = N->getConstantOperandVal(2);
429 MVT VecVT = N->getSimpleValueType(0);
430 return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
431 }
432
433 // Helper to detect unneeded and instructions on shift amounts. Called
434 // from PatFrags in tablegen.
435 bool isUnneededShiftMask(SDNode *N, unsigned Width) const {
436 assert(N->getOpcode() == ISD::AND && "Unexpected opcode")((N->getOpcode() == ISD::AND && "Unexpected opcode"
) ? static_cast<void> (0) : __assert_fail ("N->getOpcode() == ISD::AND && \"Unexpected opcode\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 436, __PRETTY_FUNCTION__))
;
437 const APInt &Val = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
438
439 if (Val.countTrailingOnes() >= Width)
440 return true;
441
442 APInt Mask = Val | CurDAG->computeKnownBits(N->getOperand(0)).Zero;
443 return Mask.countTrailingOnes() >= Width;
444 }
445
446 /// Return an SDNode that returns the value of the global base register.
447 /// Output instructions required to initialize the global base register,
448 /// if necessary.
449 SDNode *getGlobalBaseReg();
450
451 /// Return a reference to the TargetMachine, casted to the target-specific
452 /// type.
453 const X86TargetMachine &getTargetMachine() const {
454 return static_cast<const X86TargetMachine &>(TM);
455 }
456
457 /// Return a reference to the TargetInstrInfo, casted to the target-specific
458 /// type.
459 const X86InstrInfo *getInstrInfo() const {
460 return Subtarget->getInstrInfo();
461 }
462
463 /// Address-mode matching performs shift-of-and to and-of-shift
464 /// reassociation in order to expose more scaled addressing
465 /// opportunities.
466 bool ComplexPatternFuncMutatesDAG() const override {
467 return true;
468 }
469
470 bool isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const;
471
472 /// Returns whether this is a relocatable immediate in the range
473 /// [-2^Width .. 2^Width-1].
474 template <unsigned Width> bool isSExtRelocImm(SDNode *N) const {
475 if (auto *CN = dyn_cast<ConstantSDNode>(N))
476 return isInt<Width>(CN->getSExtValue());
477 return isSExtAbsoluteSymbolRef(Width, N);
478 }
479
480 // Indicates we should prefer to use a non-temporal load for this load.
481 bool useNonTemporalLoad(LoadSDNode *N) const {
482 if (!N->isNonTemporal())
54
Assuming the condition is true
55
Taking true branch
483 return false;
56
Returning zero, which participates in a condition later
484
485 unsigned StoreSize = N->getMemoryVT().getStoreSize();
486
487 if (N->getAlignment() < StoreSize)
488 return false;
489
490 switch (StoreSize) {
491 default: llvm_unreachable("Unsupported store size")::llvm::llvm_unreachable_internal("Unsupported store size", "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 491)
;
492 case 4:
493 case 8:
494 return false;
495 case 16:
496 return Subtarget->hasSSE41();
497 case 32:
498 return Subtarget->hasAVX2();
499 case 64:
500 return Subtarget->hasAVX512();
501 }
502 }
503
504 bool foldLoadStoreIntoMemOperand(SDNode *Node);
505 MachineSDNode *matchBEXTRFromAndImm(SDNode *Node);
506 bool matchBitExtract(SDNode *Node);
507 bool shrinkAndImmediate(SDNode *N);
508 bool isMaskZeroExtended(SDNode *N) const;
509 bool tryShiftAmountMod(SDNode *N);
510 bool combineIncDecVector(SDNode *Node);
511 bool tryShrinkShlLogicImm(SDNode *N);
512 bool tryVPTESTM(SDNode *Root, SDValue Setcc, SDValue Mask);
513
514 MachineSDNode *emitPCMPISTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
515 const SDLoc &dl, MVT VT, SDNode *Node);
516 MachineSDNode *emitPCMPESTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
517 const SDLoc &dl, MVT VT, SDNode *Node,
518 SDValue &InFlag);
519
520 bool tryOptimizeRem8Extend(SDNode *N);
521
522 bool onlyUsesZeroFlag(SDValue Flags) const;
523 bool hasNoSignFlagUses(SDValue Flags) const;
524 bool hasNoCarryFlagUses(SDValue Flags) const;
525 };
526}
527
528
529// Returns true if this masked compare can be implemented legally with this
530// type.
531static bool isLegalMaskCompare(SDNode *N, const X86Subtarget *Subtarget) {
532 unsigned Opcode = N->getOpcode();
533 if (Opcode == X86ISD::CMPM || Opcode == ISD::SETCC ||
534 Opcode == X86ISD::CMPM_SAE || Opcode == X86ISD::VFPCLASS) {
535 // We can get 256-bit 8 element types here without VLX being enabled. When
536 // this happens we will use 512-bit operations and the mask will not be
537 // zero extended.
538 EVT OpVT = N->getOperand(0).getValueType();
539 if (OpVT.is256BitVector() || OpVT.is128BitVector())
540 return Subtarget->hasVLX();
541
542 return true;
543 }
544 // Scalar opcodes use 128 bit registers, but aren't subject to the VLX check.
545 if (Opcode == X86ISD::VFPCLASSS || Opcode == X86ISD::FSETCCM ||
546 Opcode == X86ISD::FSETCCM_SAE)
547 return true;
548
549 return false;
550}
551
552// Returns true if we can assume the writer of the mask has zero extended it
553// for us.
554bool X86DAGToDAGISel::isMaskZeroExtended(SDNode *N) const {
555 // If this is an AND, check if we have a compare on either side. As long as
556 // one side guarantees the mask is zero extended, the AND will preserve those
557 // zeros.
558 if (N->getOpcode() == ISD::AND)
559 return isLegalMaskCompare(N->getOperand(0).getNode(), Subtarget) ||
560 isLegalMaskCompare(N->getOperand(1).getNode(), Subtarget);
561
562 return isLegalMaskCompare(N, Subtarget);
563}
564
565bool
566X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
567 if (OptLevel == CodeGenOpt::None) return false;
38
Assuming field 'OptLevel' is not equal to None
39
Taking false branch
568
569 if (!N.hasOneUse())
40
Assuming the condition is false
41
Taking false branch
570 return false;
571
572 if (N.getOpcode() != ISD::LOAD)
42
Assuming the condition is false
43
Taking false branch
573 return true;
574
575 // Don't fold non-temporal loads if we have an instruction for them.
576 if (useNonTemporalLoad(cast<LoadSDNode>(N)))
44
Calling 'cast<llvm::LoadSDNode, llvm::SDValue>'
52
Returning from 'cast<llvm::LoadSDNode, llvm::SDValue>'
53
Calling 'X86DAGToDAGISel::useNonTemporalLoad'
57
Returning from 'X86DAGToDAGISel::useNonTemporalLoad'
58
Taking false branch
577 return false;
578
579 // If N is a load, do additional profitability checks.
580 if (U == Root) {
59
Assuming 'U' is equal to 'Root'
60
Taking true branch
581 switch (U->getOpcode()) {
61
Called C++ object pointer is null
582 default: break;
583 case X86ISD::ADD:
584 case X86ISD::ADC:
585 case X86ISD::SUB:
586 case X86ISD::SBB:
587 case X86ISD::AND:
588 case X86ISD::XOR:
589 case X86ISD::OR:
590 case ISD::ADD:
591 case ISD::ADDCARRY:
592 case ISD::AND:
593 case ISD::OR:
594 case ISD::XOR: {
595 SDValue Op1 = U->getOperand(1);
596
597 // If the other operand is a 8-bit immediate we should fold the immediate
598 // instead. This reduces code size.
599 // e.g.
600 // movl 4(%esp), %eax
601 // addl $4, %eax
602 // vs.
603 // movl $4, %eax
604 // addl 4(%esp), %eax
605 // The former is 2 bytes shorter. In case where the increment is 1, then
606 // the saving can be 4 bytes (by using incl %eax).
607 if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1)) {
608 if (Imm->getAPIntValue().isSignedIntN(8))
609 return false;
610
611 // If this is a 64-bit AND with an immediate that fits in 32-bits,
612 // prefer using the smaller and over folding the load. This is needed to
613 // make sure immediates created by shrinkAndImmediate are always folded.
614 // Ideally we would narrow the load during DAG combine and get the
615 // best of both worlds.
616 if (U->getOpcode() == ISD::AND &&
617 Imm->getAPIntValue().getBitWidth() == 64 &&
618 Imm->getAPIntValue().isIntN(32))
619 return false;
620
621 // If this really a zext_inreg that can be represented with a movzx
622 // instruction, prefer that.
623 // TODO: We could shrink the load and fold if it is non-volatile.
624 if (U->getOpcode() == ISD::AND &&
625 (Imm->getAPIntValue() == UINT8_MAX(255) ||
626 Imm->getAPIntValue() == UINT16_MAX(65535) ||
627 Imm->getAPIntValue() == UINT32_MAX(4294967295U)))
628 return false;
629
630 // ADD/SUB with can negate the immediate and use the opposite operation
631 // to fit 128 into a sign extended 8 bit immediate.
632 if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB) &&
633 (-Imm->getAPIntValue()).isSignedIntN(8))
634 return false;
635 }
636
637 // If the other operand is a TLS address, we should fold it instead.
638 // This produces
639 // movl %gs:0, %eax
640 // leal i@NTPOFF(%eax), %eax
641 // instead of
642 // movl $i@NTPOFF, %eax
643 // addl %gs:0, %eax
644 // if the block also has an access to a second TLS address this will save
645 // a load.
646 // FIXME: This is probably also true for non-TLS addresses.
647 if (Op1.getOpcode() == X86ISD::Wrapper) {
648 SDValue Val = Op1.getOperand(0);
649 if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
650 return false;
651 }
652
653 // Don't fold load if this matches the BTS/BTR/BTC patterns.
654 // BTS: (or X, (shl 1, n))
655 // BTR: (and X, (rotl -2, n))
656 // BTC: (xor X, (shl 1, n))
657 if (U->getOpcode() == ISD::OR || U->getOpcode() == ISD::XOR) {
658 if (U->getOperand(0).getOpcode() == ISD::SHL &&
659 isOneConstant(U->getOperand(0).getOperand(0)))
660 return false;
661
662 if (U->getOperand(1).getOpcode() == ISD::SHL &&
663 isOneConstant(U->getOperand(1).getOperand(0)))
664 return false;
665 }
666 if (U->getOpcode() == ISD::AND) {
667 SDValue U0 = U->getOperand(0);
668 SDValue U1 = U->getOperand(1);
669 if (U0.getOpcode() == ISD::ROTL) {
670 auto *C = dyn_cast<ConstantSDNode>(U0.getOperand(0));
671 if (C && C->getSExtValue() == -2)
672 return false;
673 }
674
675 if (U1.getOpcode() == ISD::ROTL) {
676 auto *C = dyn_cast<ConstantSDNode>(U1.getOperand(0));
677 if (C && C->getSExtValue() == -2)
678 return false;
679 }
680 }
681
682 break;
683 }
684 case ISD::SHL:
685 case ISD::SRA:
686 case ISD::SRL:
687 // Don't fold a load into a shift by immediate. The BMI2 instructions
688 // support folding a load, but not an immediate. The legacy instructions
689 // support folding an immediate, but can't fold a load. Folding an
690 // immediate is preferable to folding a load.
691 if (isa<ConstantSDNode>(U->getOperand(1)))
692 return false;
693
694 break;
695 }
696 }
697
698 // Prevent folding a load if this can implemented with an insert_subreg or
699 // a move that implicitly zeroes.
700 if (Root->getOpcode() == ISD::INSERT_SUBVECTOR &&
701 isNullConstant(Root->getOperand(2)) &&
702 (Root->getOperand(0).isUndef() ||
703 ISD::isBuildVectorAllZeros(Root->getOperand(0).getNode())))
704 return false;
705
706 return true;
707}
708
/// Replace the original chain operand of the call with
/// load's chain operand and move load below the call's chain operand.
///
/// On entry the callee load sits above OrigChain (the call's chain input);
/// on exit the load's chain input is rewired to the call's original chain,
/// OrigChain no longer references the load, and the call's chain operand is
/// the load's output chain — i.e. the load now sits directly below the call
/// so it can be folded into it.
static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    // OrigChain's chain input is the load itself: splice the load out by
    // using the load's own chain input instead.
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    // Rebuild the TokenFactor with the load's operand substituted for the
    // load, removing the load from the chain above the call.
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
    Ops.clear();
    Ops.push_back(NewChain);
  }
  // Re-attach OrigChain's remaining (non-chain) operands after the new chain.
  Ops.append(OrigChain->op_begin() + 1, OrigChain->op_end());
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
  // Move the load below: its chain input becomes the call's original chain.
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  // Finally make the call's chain operand the load's output chain (value 1),
  // keeping the call's other operands intact.
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  Ops.append(Call->op_begin() + 1, Call->op_end());
  CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
}
740
/// Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After MoveBelowOrigChain the load is moved between the call and
  // the chain, this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  // Only a simple (non-volatile, non-atomic), unindexed, non-extending load
  // can be folded into the call.
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      !LD->isSimple() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start. Walk up single-use chain links; give
  // up if any link has other users (moving the load past it is unsafe).
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  // Since we are not checking for AA here, conservatively abort if the chain
  // writes to memory. It's not safe to move the callee (a load) across a store.
  if (isa<MemSDNode>(Chain.getNode()) &&
      cast<MemSDNode>(Chain.getNode())->writeMem())
    return false;
  // Accept if the load feeds the chain directly...
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  // ...or through a TokenFactor, provided the load's output chain has no
  // other users than that TokenFactor.
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}
782
// Pre-isel pass over the DAG: canonicalize nodes into X86-specific forms so
// only one set of isel patterns is needed, try to move call-address loads
// below the call chain so they can be folded, and lower FP-stack <-> SSE
// conversions into store+load pairs through a stack temporary.
void X86DAGToDAGISel::PreprocessISelDAG() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.

    // If this is a target specific AND node with no flag usages, turn it back
    // into ISD::AND to enable test instruction matching.
    if (N->getOpcode() == X86ISD::AND && !N->hasAnyUseOfValue(1)) {
      SDValue Res = CurDAG->getNode(ISD::AND, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0), N->getOperand(1));
      // Back 'I' up to N so the RAUW below can't invalidate it, then advance
      // past N again before deleting it. (Same idiom used throughout below.)
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      CurDAG->DeleteNode(N);
      continue;
    }

    switch (N->getOpcode()) {
    case ISD::FP_TO_SINT:
    case ISD::FP_TO_UINT: {
      // Replace vector fp_to_s/uint with their X86 specific equivalent so we
      // don't need 2 sets of patterns.
      if (!N->getSimpleValueType(0).isVector())
        break;

      unsigned NewOpc;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::FP_TO_SINT: NewOpc = X86ISD::CVTTP2SI; break;
      case ISD::FP_TO_UINT: NewOpc = X86ISD::CVTTP2UI; break;
      }
      SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      CurDAG->DeleteNode(N);
      continue;
    }
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL: {
      // Replace vector shifts with their X86 specific equivalent so we don't
      // need 2 sets of patterns.
      if (!N->getValueType(0).isVector())
        break;

      unsigned NewOpc;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::SHL: NewOpc = X86ISD::VSHLV; break;
      case ISD::SRA: NewOpc = X86ISD::VSRAV; break;
      case ISD::SRL: NewOpc = X86ISD::VSRLV; break;
      }
      SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0), N->getOperand(1));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      CurDAG->DeleteNode(N);
      continue;
    }
    case ISD::ANY_EXTEND:
    case ISD::ANY_EXTEND_VECTOR_INREG: {
      // Replace vector any extend with the zero extend equivalents so we don't
      // need 2 sets of patterns. Ignore vXi1 extensions.
      if (!N->getValueType(0).isVector() ||
          N->getOperand(0).getScalarValueSizeInBits() == 1)
        break;

      unsigned NewOpc = N->getOpcode() == ISD::ANY_EXTEND
                            ? ISD::ZERO_EXTEND
                            : ISD::ZERO_EXTEND_VECTOR_INREG;

      SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      CurDAG->DeleteNode(N);
      continue;
    }
    case ISD::FCEIL:
    case ISD::FFLOOR:
    case ISD::FTRUNC:
    case ISD::FNEARBYINT:
    case ISD::FRINT: {
      // Replace fp rounding with their X86 specific equivalent so we don't
      // need 2 sets of patterns. The immediate is the VRNDSCALE rounding-mode
      // control byte selecting the corresponding rounding operation.
      unsigned Imm;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::FCEIL: Imm = 0xA; break;
      case ISD::FFLOOR: Imm = 0x9; break;
      case ISD::FTRUNC: Imm = 0xB; break;
      case ISD::FNEARBYINT: Imm = 0xC; break;
      case ISD::FRINT: Imm = 0x4; break;
      }
      SDLoc dl(N);
      SDValue Res = CurDAG->getNode(X86ISD::VRNDSCALE, dl,
                                    N->getValueType(0),
                                    N->getOperand(0),
                                    CurDAG->getConstant(Imm, dl, MVT::i8));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      CurDAG->DeleteNode(N);
      continue;
    }
    case X86ISD::FANDN:
    case X86ISD::FAND:
    case X86ISD::FOR:
    case X86ISD::FXOR: {
      // Widen scalar fp logic ops to vector to reduce isel patterns.
      // FIXME: Can we do this during lowering/combine.
      MVT VT = N->getSimpleValueType(0);
      if (VT.isVector() || VT == MVT::f128)
        break;

      MVT VecVT = VT == MVT::f64 ? MVT::v2f64 : MVT::v4f32;
      SDLoc dl(N);
      SDValue Op0 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
                                    N->getOperand(0));
      SDValue Op1 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
                                    N->getOperand(1));

      SDValue Res;
      if (Subtarget->hasSSE2()) {
        // With SSE2, prefer the integer logic ops: bitcast to the integer
        // vector type, do the op there, and bitcast back.
        EVT IntVT = EVT(VecVT).changeVectorElementTypeToInteger();
        Op0 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op0);
        Op1 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op1);
        unsigned Opc;
        switch (N->getOpcode()) {
        default: llvm_unreachable("Unexpected opcode!");
        case X86ISD::FANDN: Opc = X86ISD::ANDNP; break;
        case X86ISD::FAND: Opc = ISD::AND; break;
        case X86ISD::FOR: Opc = ISD::OR; break;
        case X86ISD::FXOR: Opc = ISD::XOR; break;
        }
        Res = CurDAG->getNode(Opc, dl, IntVT, Op0, Op1);
        Res = CurDAG->getNode(ISD::BITCAST, dl, VecVT, Res);
      } else {
        Res = CurDAG->getNode(N->getOpcode(), dl, VecVT, Op0, Op1);
      }
      Res = CurDAG->getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res,
                            CurDAG->getIntPtrConstant(0, dl));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      CurDAG->DeleteNode(N);
      continue;
    }
    }

    if (OptLevel != CodeGenOpt::None &&
        // Only do this when the target can fold the load into the call or
        // jmp.
        !Subtarget->useRetpolineIndirectCalls() &&
        ((N->getOpcode() == X86ISD::CALL && !Subtarget->slowTwoMemOps()) ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          (Subtarget->is64Bit() ||
           !getTargetMachine().isPositionIndependent())))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      moveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack to be store and
    // load to the stack. This is a gross hack. We would like to simply mark
    // these as being illegal, but when we do that, legalize produces these when
    // it expands calls, then expands these in the same legalize pass. We would
    // like dag combine to be able to hack on these between the call expansion
    // and the node legalization. As such this pass basically does "really
    // late" legalization of these inline with the X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    switch (N->getOpcode()) {
    default: continue;
    case ISD::FP_ROUND:
    case ISD::FP_EXTEND:
    {
      MVT SrcVT = N->getOperand(0).getSimpleValueType();
      MVT DstVT = N->getSimpleValueType(0);

      // If any of the sources are vectors, no fp stack involved.
      if (SrcVT.isVector() || DstVT.isVector())
        continue;

      // If the source and destination are SSE registers, then this is a legal
      // conversion that should not be lowered.
      const X86TargetLowering *X86Lowering =
          static_cast<const X86TargetLowering *>(TLI);
      bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
      bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
      if (SrcIsSSE && DstIsSSE)
        continue;

      if (!SrcIsSSE && !DstIsSSE) {
        // If this is an FPStack extension, it is a noop.
        if (N->getOpcode() == ISD::FP_EXTEND)
          continue;
        // If this is a value-preserving FPStack truncation, it is a noop.
        if (N->getConstantOperandVal(1))
          continue;
      }

      // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
      // FPStack has extload and truncstore. SSE can fold direct loads into other
      // operations. Based on this, decide what we want to do.
      MVT MemVT;
      if (N->getOpcode() == ISD::FP_ROUND)
        MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
      else
        MemVT = SrcIsSSE ? SrcVT : DstVT;

      SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
      SDLoc dl(N);

      // FIXME: optimize the case where the src/dest is a load or store?

      SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl, N->getOperand(0),
                                            MemTmp, MachinePointerInfo(), MemVT);
      SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                          MachinePointerInfo(), MemVT);

      // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
      // extload we created. This will cause general havok on the dag because
      // anything below the conversion could be folded into other existing nodes.
      // To avoid invalidating 'I', back it up to the convert node.
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
      break;
    }

    //The sequence of events for lowering STRICT_FP versions of these nodes requires
    //dealing with the chain differently, as there is already a preexisting chain.
    case ISD::STRICT_FP_ROUND:
    case ISD::STRICT_FP_EXTEND:
    {
      // Same lowering as above, but the value operand is operand 1 (operand 0
      // is the chain) and the truncation flag is operand 2.
      MVT SrcVT = N->getOperand(1).getSimpleValueType();
      MVT DstVT = N->getSimpleValueType(0);

      // If any of the sources are vectors, no fp stack involved.
      if (SrcVT.isVector() || DstVT.isVector())
        continue;

      // If the source and destination are SSE registers, then this is a legal
      // conversion that should not be lowered.
      const X86TargetLowering *X86Lowering =
          static_cast<const X86TargetLowering *>(TLI);
      bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
      bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
      if (SrcIsSSE && DstIsSSE)
        continue;

      if (!SrcIsSSE && !DstIsSSE) {
        // If this is an FPStack extension, it is a noop.
        if (N->getOpcode() == ISD::STRICT_FP_EXTEND)
          continue;
        // If this is a value-preserving FPStack truncation, it is a noop.
        if (N->getConstantOperandVal(2))
          continue;
      }

      // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
      // FPStack has extload and truncstore. SSE can fold direct loads into other
      // operations. Based on this, decide what we want to do.
      MVT MemVT;
      if (N->getOpcode() == ISD::STRICT_FP_ROUND)
        MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
      else
        MemVT = SrcIsSSE ? SrcVT : DstVT;

      SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
      SDLoc dl(N);

      // FIXME: optimize the case where the src/dest is a load or store?

      //Since the operation is StrictFP, use the preexisting chain.
      SDValue Store = CurDAG->getTruncStore(N->getOperand(0), dl, N->getOperand(1),
                                            MemTmp, MachinePointerInfo(), MemVT);
      SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                          MachinePointerInfo(), MemVT);

      // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
      // extload we created. This will cause general havok on the dag because
      // anything below the conversion could be folded into other existing nodes.
      // To avoid invalidating 'I', back it up to the convert node.
      --I;
      // Replace both results (value and chain) of the strict node.
      CurDAG->ReplaceAllUsesWith(N, Result.getNode());
      break;
    }
    }


    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }

  // The load+call transform above can leave some dead nodes in the graph. Make
  // sure we remove them. Its possible some of the other transforms do to so
  // just remove dead nodes unconditionally.
  CurDAG->RemoveDeadNodes();
}
1112
// Look for a redundant movzx/movsx that can occur after an 8-bit divrem.
// Matches (MOVZX32rr8/MOVSX32rr8/MOVSX64rr8 (EXTRACT_SUBREG sub_8bit
// (MOV[SZ]X32rr8_NOREX ...))) and removes the outer extend, since the inner
// _NOREX extend already produced the extended 32-bit value. Returns true if
// a replacement was made.
bool X86DAGToDAGISel::tryOptimizeRem8Extend(SDNode *N) {
  unsigned Opc = N->getMachineOpcode();
  if (Opc != X86::MOVZX32rr8 && Opc != X86::MOVSX32rr8 &&
      Opc != X86::MOVSX64rr8)
    return false;

  SDValue N0 = N->getOperand(0);

  // We need to be extracting the low byte (sub_8bit) of an extend.
  if (!N0.isMachineOpcode() ||
      N0.getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG ||
      N0.getConstantOperandVal(1) != X86::sub_8bit)
    return false;

  // We're looking for either a movsx or movzx to match the original opcode.
  // (MOVSX64rr8 pairs with the 32-bit sign extend; see below.)
  unsigned ExpectedOpc = Opc == X86::MOVZX32rr8 ? X86::MOVZX32rr8_NOREX
                                                : X86::MOVSX32rr8_NOREX;
  SDValue N00 = N0.getOperand(0);
  if (!N00.isMachineOpcode() || N00.getMachineOpcode() != ExpectedOpc)
    return false;

  if (Opc == X86::MOVSX64rr8) {
    // If we had a sign extend from 8 to 64 bits. We still need to go from 32
    // to 64.
    MachineSDNode *Extend = CurDAG->getMachineNode(X86::MOVSX64rr32, SDLoc(N),
                                                   MVT::i64, N00);
    ReplaceUses(N, Extend);
  } else {
    // Ok we can drop this extend and just use the original extend.
    ReplaceUses(N, N00.getNode());
  }

  return true;
}
1148
// Post-isel peephole pass over the selected (machine-opcode) DAG: remove
// redundant extends after 8-bit divrem, fold TEST+AND / KORTEST+KAND pairs,
// and drop vector moves inserted only to zero upper bits. Iterates the node
// list in reverse so replacements don't disturb nodes yet to be visited.
void X86DAGToDAGISel::PostprocessISelDAG() {
  // Skip peepholes at -O0.
  if (TM.getOptLevel() == CodeGenOpt::None)
    return;

  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    // Skip dead nodes and any non-machine opcodes.
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

    if (tryOptimizeRem8Extend(N)) {
      MadeChange = true;
      continue;
    }

    // Look for a TESTrr+ANDrr pattern where both operands of the test are
    // the same. Rewrite to remove the AND.
    unsigned Opc = N->getMachineOpcode();
    if ((Opc == X86::TEST8rr || Opc == X86::TEST16rr ||
         Opc == X86::TEST32rr || Opc == X86::TEST64rr) &&
        N->getOperand(0) == N->getOperand(1) &&
        N->isOnlyUserOf(N->getOperand(0).getNode()) &&
        N->getOperand(0).isMachineOpcode()) {
      SDValue And = N->getOperand(0);
      unsigned N0Opc = And.getMachineOpcode();
      // Register-register AND: TEST the AND's inputs directly.
      if (N0Opc == X86::AND8rr || N0Opc == X86::AND16rr ||
          N0Opc == X86::AND32rr || N0Opc == X86::AND64rr) {
        MachineSDNode *Test = CurDAG->getMachineNode(Opc, SDLoc(N),
                                                     MVT::i32,
                                                     And.getOperand(0),
                                                     And.getOperand(1));
        ReplaceUses(N, Test);
        MadeChange = true;
        continue;
      }
      // Register-memory AND: fold into a TESTmr (note the operand swap).
      if (N0Opc == X86::AND8rm || N0Opc == X86::AND16rm ||
          N0Opc == X86::AND32rm || N0Opc == X86::AND64rm) {
        unsigned NewOpc;
        switch (N0Opc) {
        case X86::AND8rm:  NewOpc = X86::TEST8mr; break;
        case X86::AND16rm: NewOpc = X86::TEST16mr; break;
        case X86::AND32rm: NewOpc = X86::TEST32mr; break;
        case X86::AND64rm: NewOpc = X86::TEST64mr; break;
        }

        // Need to swap the memory and register operand.
        SDValue Ops[] = { And.getOperand(1),
                          And.getOperand(2),
                          And.getOperand(3),
                          And.getOperand(4),
                          And.getOperand(5),
                          And.getOperand(0),
                          And.getOperand(6)  /* Chain */ };
        MachineSDNode *Test = CurDAG->getMachineNode(NewOpc, SDLoc(N),
                                                     MVT::i32, MVT::Other, Ops);
        ReplaceUses(N, Test);
        MadeChange = true;
        continue;
      }
    }

    // Look for a KAND+KORTEST and turn it into KTEST if only the zero flag is
    // used. We're doing this late so we can prefer to fold the AND into masked
    // comparisons. Doing that can be better for the live range of the mask
    // register.
    if ((Opc == X86::KORTESTBrr || Opc == X86::KORTESTWrr ||
         Opc == X86::KORTESTDrr || Opc == X86::KORTESTQrr) &&
        N->getOperand(0) == N->getOperand(1) &&
        N->isOnlyUserOf(N->getOperand(0).getNode()) &&
        N->getOperand(0).isMachineOpcode() &&
        onlyUsesZeroFlag(SDValue(N, 0))) {
      SDValue And = N->getOperand(0);
      unsigned N0Opc = And.getMachineOpcode();
      // KANDW is legal with AVX512F, but KTESTW requires AVX512DQ. The other
      // KAND instructions and KTEST use the same ISA feature.
      if (N0Opc == X86::KANDBrr ||
          (N0Opc == X86::KANDWrr && Subtarget->hasDQI()) ||
          N0Opc == X86::KANDDrr || N0Opc == X86::KANDQrr) {
        unsigned NewOpc;
        switch (Opc) {
        default: llvm_unreachable("Unexpected opcode!");
        case X86::KORTESTBrr: NewOpc = X86::KTESTBrr; break;
        case X86::KORTESTWrr: NewOpc = X86::KTESTWrr; break;
        case X86::KORTESTDrr: NewOpc = X86::KTESTDrr; break;
        case X86::KORTESTQrr: NewOpc = X86::KTESTQrr; break;
        }
        MachineSDNode *KTest = CurDAG->getMachineNode(NewOpc, SDLoc(N),
                                                      MVT::i32,
                                                      And.getOperand(0),
                                                      And.getOperand(1));
        ReplaceUses(N, KTest);
        MadeChange = true;
        continue;
      }
    }

    // Attempt to remove vectors moves that were inserted to zero upper bits.
    if (Opc != TargetOpcode::SUBREG_TO_REG)
      continue;

    unsigned SubRegIdx = N->getConstantOperandVal(2);
    if (SubRegIdx != X86::sub_xmm && SubRegIdx != X86::sub_ymm)
      continue;

    SDValue Move = N->getOperand(1);
    if (!Move.isMachineOpcode())
      continue;

    // Make sure its one of the move opcodes we recognize.
    switch (Move.getMachineOpcode()) {
    default:
      continue;
    case X86::VMOVAPDrr:       case X86::VMOVUPDrr:
    case X86::VMOVAPSrr:       case X86::VMOVUPSrr:
    case X86::VMOVDQArr:       case X86::VMOVDQUrr:
    case X86::VMOVAPDYrr:      case X86::VMOVUPDYrr:
    case X86::VMOVAPSYrr:      case X86::VMOVUPSYrr:
    case X86::VMOVDQAYrr:      case X86::VMOVDQUYrr:
    case X86::VMOVAPDZ128rr:   case X86::VMOVUPDZ128rr:
    case X86::VMOVAPSZ128rr:   case X86::VMOVUPSZ128rr:
    case X86::VMOVDQA32Z128rr: case X86::VMOVDQU32Z128rr:
    case X86::VMOVDQA64Z128rr: case X86::VMOVDQU64Z128rr:
    case X86::VMOVAPDZ256rr:   case X86::VMOVUPDZ256rr:
    case X86::VMOVAPSZ256rr:   case X86::VMOVUPSZ256rr:
    case X86::VMOVDQA32Z256rr: case X86::VMOVDQU32Z256rr:
    case X86::VMOVDQA64Z256rr: case X86::VMOVDQU64Z256rr:
      break;
    }

    SDValue In = Move.getOperand(0);
    if (!In.isMachineOpcode() ||
        In.getMachineOpcode() <= TargetOpcode::GENERIC_OP_END)
      continue;

    // Make sure the instruction has a VEX, XOP, or EVEX prefix. This covers
    // the SHA instructions which use a legacy encoding.
    uint64_t TSFlags = getInstrInfo()->get(In.getMachineOpcode()).TSFlags;
    if ((TSFlags & X86II::EncodingMask) != X86II::VEX &&
        (TSFlags & X86II::EncodingMask) != X86II::EVEX &&
        (TSFlags & X86II::EncodingMask) != X86II::XOP)
      continue;

    // Producing instruction is another vector instruction. We can drop the
    // move.
    CurDAG->UpdateNodeOperands(N, N->getOperand(0), In, N->getOperand(2));
    MadeChange = true;
  }

  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}
1304
1305
/// Emit any code that needs to be executed only in the main function.
/// On Cygwin/MinGW targets this inserts a call to "__main" at the current
/// DAG root so the C runtime can perform its initialization.
void X86DAGToDAGISel::emitSpecialCodeForMain() {
  if (Subtarget->isTargetCygMing()) {
    // __main takes no arguments and returns void.
    TargetLowering::ArgListTy Args;
    auto &DL = CurDAG->getDataLayout();

    TargetLowering::CallLoweringInfo CLI(*CurDAG);
    CLI.setChain(CurDAG->getRoot())
        .setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
                   CurDAG->getExternalSymbol("__main", TLI->getPointerTy(DL)),
                   std::move(Args));
    // NOTE: this local reference shadows the member pointer `TLI` used just
    // above; both refer to the target lowering object.
    const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    // Chain the rest of the function after the call.
    CurDAG->setRoot(Result.second);
  }
}
1322
1323void X86DAGToDAGISel::EmitFunctionEntryCode() {
1324 // If this is main, emit special code for main.
1325 const Function &F = MF->getFunction();
1326 if (F.hasExternalLinkage() && F.getName() == "main")
1327 emitSpecialCodeForMain();
1328}
1329
/// Return true if a displacement of \p Val can safely be combined with a
/// frame-index displacement.
///
/// On 64-bit platforms, we can run into an issue where a frame index
/// includes a displacement that, when added to the explicit displacement,
/// will overflow the displacement field. Assuming that the frame index
/// displacement fits into a 31-bit integer (which is only slightly more
/// aggressive than the current fundamental assumption that it fits into
/// a 32-bit integer), a 31-bit disp should always be safe.
static bool isDispSafeForFrameIndex(int64_t Val) {
  // Equivalent to isInt<31>(Val): -2^30 <= Val < 2^30.
  const int64_t Limit = int64_t(1) << 30;
  return -Limit <= Val && Val < Limit;
}
1339
1340bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
1341 X86ISelAddressMode &AM) {
1342 // If there's no offset to fold, we don't need to do any work.
1343 if (Offset == 0)
1344 return false;
1345
1346 // Cannot combine ExternalSymbol displacements with integer offsets.
1347 if (AM.ES || AM.MCSym)
1348 return true;
1349
1350 int64_t Val = AM.Disp + Offset;
1351 CodeModel::Model M = TM.getCodeModel();
1352 if (Subtarget->is64Bit()) {
1353 if (!X86::isOffsetSuitableForCodeModel(Val, M,
1354 AM.hasSymbolicDisplacement()))
1355 return true;
1356 // In addition to the checks required for a register base, check that
1357 // we do not try to use an unsafe Disp with a frame index.
1358 if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
1359 !isDispSafeForFrameIndex(Val))
1360 return true;
1361 }
1362 AM.Disp = Val;
1363 return false;
1364
1365}
1366
/// Try to match a load of address 0 in the GS (addrspace 256) or FS
/// (addrspace 257) segment into AM.Segment. Returns true if no match was
/// possible, false on success.
bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
        !IndirectTlsSegRefs &&
        (Subtarget->isTargetGlibc() || Subtarget->isTargetAndroid() ||
         Subtarget->isTargetFuchsia()))
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      // Address space 258 is not handled here, because it is not used to
      // address TLS areas.
      }

  return true;
}
1394
/// Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes into an addressing
/// mode. These wrap things that will resolve down into a symbol reference.
/// If no match is possible, this returns true, otherwise it returns false.
bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  bool IsRIPRelTLS = false;
  bool IsRIPRel = N.getOpcode() == X86ISD::WrapperRIP;
  if (IsRIPRel) {
    SDValue Val = N.getOperand(0);
    if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
      IsRIPRelTLS = true;
  }

  // We can't use an addressing mode in the 64-bit large code model.
  // Global TLS addressing is an exception. In the medium code model,
  // we use can use a mode when RIP wrappers are present.
  // That signifies access to globals that are known to be "near",
  // such as the GOT itself.
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit() &&
      ((M == CodeModel::Large && !IsRIPRelTLS) ||
       (M == CodeModel::Medium && !IsRIPRel)))
    return true;

  // Base and index reg must be 0 in order to use %rip as base.
  if (IsRIPRel && AM.hasBaseOrIndexReg())
    return true;

  // Make a local copy in case we can't do this fold.
  X86ISelAddressMode Backup = AM;

  // Dispatch on the wrapped symbol kind, recording it (and any constant
  // offset) into AM.
  int64_t Offset = 0;
  SDValue N0 = N.getOperand(0);
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
    AM.GV = G->getGlobal();
    AM.SymbolFlags = G->getTargetFlags();
    Offset = G->getOffset();
  } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
    AM.CP = CP->getConstVal();
    AM.Align = CP->getAlignment();
    AM.SymbolFlags = CP->getTargetFlags();
    Offset = CP->getOffset();
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
    AM.ES = S->getSymbol();
    AM.SymbolFlags = S->getTargetFlags();
  } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
    AM.MCSym = S->getMCSymbol();
  } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
    AM.JT = J->getIndex();
    AM.SymbolFlags = J->getTargetFlags();
  } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
    AM.BlockAddr = BA->getBlockAddress();
    AM.SymbolFlags = BA->getTargetFlags();
    Offset = BA->getOffset();
  } else
    llvm_unreachable("Unhandled symbol reference node.");

  // Roll back the whole addressing mode if the offset doesn't fold.
  if (foldOffsetIntoAddress(Offset, AM)) {
    AM = Backup;
    return true;
  }

  if (IsRIPRel)
    AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));

  // Commit the changes now that we know this fold is safe.
  return false;
}
1467
/// Add the specified node to the specified addressing mode, returning true if
/// it cannot be done. This just pattern matches for the addressing mode.
bool X86DAGToDAGISel::matchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (matchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == nullptr) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  switch (TM.getCodeModel()) {
  default: break;
  case CodeModel::Small:
  case CodeModel::Kernel:
    // Only possible when nothing else occupies the base register and the
    // displacement is purely symbolic with no extra flags.
    if (Subtarget->is64Bit() &&
        AM.Scale == 1 &&
        AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        AM.IndexReg.getNode() == nullptr &&
        AM.SymbolFlags == X86II::MO_NO_FLAG &&
        AM.hasSymbolicDisplacement())
      AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);
    break;
  }

  return false;
}
1503
/// Try to fold an ISD::ADD node \p N into addressing mode \p AM, first with
/// operands in the given order and then commuted; as a last resort place the
/// two operands directly in the base and index registers. Returns true if
/// nothing could be folded. May replace \p N if the node was CSE'd during
/// matching.
bool X86DAGToDAGISel::matchAdd(SDValue &N, X86ISelAddressMode &AM,
                               unsigned Depth) {
  // Add an artificial use to this node so that we can keep track of
  // it if it gets CSE'd with a different node.
  HandleSDNode Handle(N);

  X86ISelAddressMode Backup = AM;
  // Note: after the first recursive call, N may be stale; re-read the node
  // through Handle.getValue() for the second operand.
  if (!matchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
      !matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
    return false;
  AM = Backup;

  // Try again after commuting the operands.
  if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1) &&
      !matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
    return false;
  AM = Backup;

  // If we couldn't fold both operands into the address at the same time,
  // see if we can just put each operand into a register and fold at least
  // the add.
  if (AM.BaseType == X86ISelAddressMode::RegBase &&
      !AM.Base_Reg.getNode() &&
      !AM.IndexReg.getNode()) {
    N = Handle.getValue();
    AM.Base_Reg = N.getOperand(0);
    AM.IndexReg = N.getOperand(1);
    AM.Scale = 1;
    return false;
  }
  N = Handle.getValue();
  return true;
}
1537
// Insert a node into the DAG at least before the Pos node's position. This
// will reposition the node as needed, and will assign it a node ID that is <=
// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// is used.
static void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
  // Only move N if it has no ID yet or currently sits after Pos in the
  // topological order.
  if (N->getNodeId() == -1 ||
      (SelectionDAGISel::getUninvalidatedNodeId(N.getNode()) >
       SelectionDAGISel::getUninvalidatedNodeId(Pos.getNode()))) {
    DAG.RepositionNode(Pos->getIterator(), N.getNode());
    // Mark Node as invalid for pruning as after this it may be a successor to a
    // selected node but otherwise be in the same position of Pos.
    // Conservatively mark it with the same -abs(Id) to assure node id
    // invariant is preserved.
    N->setNodeId(Pos->getNodeId());
    SelectionDAGISel::InvalidateNodeId(N.getNode());
  }
}
1556
// Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
// safe. This allows us to convert the shift and and into an h-register
// extract and a scaled index. Returns false if the simplification is
// performed.
static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  // Only a one-use logical right shift by a constant qualifies.
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  // The scale must be 2, 4 or 8 (ScaleLog 1..3) and the mask must be exactly
  // 0xff shifted left by the same amount.
  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  // Build ((X >> 8) & 0xff) << ScaleLog.
  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue Eight = DAG.getConstant(8, DL, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, DL, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, DL, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  insertDAGNode(DAG, N, Eight);
  insertDAGNode(DAG, N, Srl);
  insertDAGNode(DAG, N, NewMask);
  insertDAGNode(DAG, N, And);
  insertDAGNode(DAG, N, ShlCount);
  insertDAGNode(DAG, N, Shl);
  DAG.ReplaceAllUsesWith(N, Shl);
  DAG.RemoveDeadNode(N.getNode());
  // The AND becomes the index; the trailing shift becomes the scale.
  AM.IndexReg = And;
  AM.Scale = (1 << ScaleLog);
  return false;
}
1601
1602// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
1603// allows us to fold the shift into this addressing mode. Returns false if the
1604// transform succeeded.
1605static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
1606 X86ISelAddressMode &AM) {
1607 SDValue Shift = N.getOperand(0);
1608
1609 // Use a signed mask so that shifting right will insert sign bits. These
1610 // bits will be removed when we shift the result left so it doesn't matter
1611 // what we use. This might allow a smaller immediate encoding.
1612 int64_t Mask = cast<ConstantSDNode>(N->getOperand(1))->getSExtValue();
1613
1614 // If we have an any_extend feeding the AND, look through it to see if there
1615 // is a shift behind it. But only if the AND doesn't use the extended bits.
1616 // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
1617 bool FoundAnyExtend = false;
1618 if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
1619 Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
1620 isUInt<32>(Mask)) {
1621 FoundAnyExtend = true;
1622 Shift = Shift.getOperand(0);
1623 }
1624
1625 if (Shift.getOpcode() != ISD::SHL ||
1626 !isa<ConstantSDNode>(Shift.getOperand(1)))
1627 return true;
1628
1629 SDValue X = Shift.getOperand(0);
1630
1631 // Not likely to be profitable if either the AND or SHIFT node has more
1632 // than one use (unless all uses are for address computation). Besides,
1633 // isel mechanism requires their node ids to be reused.
1634 if (!N.hasOneUse() || !Shift.hasOneUse())
1635 return true;
1636
1637 // Verify that the shift amount is something we can fold.
1638 unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1639 if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
1640 return true;
1641
1642 MVT VT = N.getSimpleValueType();
1643 SDLoc DL(N);
1644 if (FoundAnyExtend) {
1645 SDValue NewX = DAG.getNode(ISD::ANY_EXTEND, DL, VT, X);
1646 insertDAGNode(DAG, N, NewX);
1647 X = NewX;
1648 }
1649
1650 SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
1651 SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
1652 SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));
1653
1654 // Insert the new nodes into the topological ordering. We must do this in
1655 // a valid topological ordering as nothing is going to go back and re-sort
1656 // these nodes. We continually insert before 'N' in sequence as this is
1657 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1658 // hierarchy left to express.
1659 insertDAGNode(DAG, N, NewMask);
1660 insertDAGNode(DAG, N, NewAnd);
1661 insertDAGNode(DAG, N, NewShift);
1662 DAG.ReplaceAllUsesWith(N, NewShift);
1663 DAG.RemoveDeadNode(N.getNode());
1664
1665 AM.Scale = 1 << ShiftAmt;
1666 AM.IndexReg = NewAnd;
1667 return false;
1668}
1669
1670// Implement some heroics to detect shifts of masked values where the mask can
1671// be replaced by extending the shift and undoing that in the addressing mode
1672// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
1673// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
1674// the addressing mode. This results in code such as:
1675//
1676// int f(short *y, int *lookup_table) {
1677// ...
1678// return *y + lookup_table[*y >> 11];
1679// }
1680//
1681// Turning into:
1682// movzwl (%rdi), %eax
1683// movl %eax, %ecx
1684// shrl $11, %ecx
1685// addl (%rsi,%rcx,4), %eax
1686//
1687// Instead of:
1688// movzwl (%rdi), %eax
1689// movl %eax, %ecx
1690// shrl $9, %ecx
1691// andl $124, %rcx
1692// addl (%rsi,%rcx), %eax
1693//
1694// Note that this function assumes the mask is provided as a mask *after* the
1695// value is shifted. The input chain may or may not match that, but computing
1696// such a mask is trivial.
1697static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
1698 uint64_t Mask,
1699 SDValue Shift, SDValue X,
1700 X86ISelAddressMode &AM) {
1701 if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
1702 !isa<ConstantSDNode>(Shift.getOperand(1)))
1703 return true;
1704
1705 unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1706 unsigned MaskLZ = countLeadingZeros(Mask);
1707 unsigned MaskTZ = countTrailingZeros(Mask);
1708
1709 // The amount of shift we're trying to fit into the addressing mode is taken
1710 // from the trailing zeros of the mask.
1711 unsigned AMShiftAmt = MaskTZ;
1712
1713 // There is nothing we can do here unless the mask is removing some bits.
1714 // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
1715 if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;
1716
1717 // We also need to ensure that mask is a continuous run of bits.
1718 if (countTrailingOnes(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
1719
1720 // Scale the leading zero count down based on the actual size of the value.
1721 // Also scale it down based on the size of the shift.
1722 unsigned ScaleDown = (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
1723 if (MaskLZ < ScaleDown)
1724 return true;
1725 MaskLZ -= ScaleDown;
1726
1727 // The final check is to ensure that any masked out high bits of X are
1728 // already known to be zero. Otherwise, the mask has a semantic impact
1729 // other than masking out a couple of low bits. Unfortunately, because of
1730 // the mask, zero extensions will be removed from operands in some cases.
1731 // This code works extra hard to look through extensions because we can
1732 // replace them with zero extensions cheaply if necessary.
1733 bool ReplacingAnyExtend = false;
1734 if (X.getOpcode() == ISD::ANY_EXTEND) {
1735 unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
1736 X.getOperand(0).getSimpleValueType().getSizeInBits();
1737 // Assume that we'll replace the any-extend with a zero-extend, and
1738 // narrow the search to the extended value.
1739 X = X.getOperand(0);
1740 MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
1741 ReplacingAnyExtend = true;
1742 }
1743 APInt MaskedHighBits =
1744 APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
1745 KnownBits Known = DAG.computeKnownBits(X);
1746 if (MaskedHighBits != Known.Zero) return true;
1747
1748 // We've identified a pattern that can be transformed into a single shift
1749 // and an addressing mode. Make it so.
1750 MVT VT = N.getSimpleValueType();
1751 if (ReplacingAnyExtend) {
1752 assert(X.getValueType() != VT)((X.getValueType() != VT) ? static_cast<void> (0) : __assert_fail
("X.getValueType() != VT", "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 1752, __PRETTY_FUNCTION__))
;
1753 // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
1754 SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
1755 insertDAGNode(DAG, N, NewX);
1756 X = NewX;
1757 }
1758 SDLoc DL(N);
1759 SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
1760 SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
1761 SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
1762 SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);
1763
1764 // Insert the new nodes into the topological ordering. We must do this in
1765 // a valid topological ordering as nothing is going to go back and re-sort
1766 // these nodes. We continually insert before 'N' in sequence as this is
1767 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1768 // hierarchy left to express.
1769 insertDAGNode(DAG, N, NewSRLAmt);
1770 insertDAGNode(DAG, N, NewSRL);
1771 insertDAGNode(DAG, N, NewSHLAmt);
1772 insertDAGNode(DAG, N, NewSHL);
1773 DAG.ReplaceAllUsesWith(N, NewSHL);
1774 DAG.RemoveDeadNode(N.getNode());
1775
1776 AM.Scale = 1 << AMShiftAmt;
1777 AM.IndexReg = NewSRL;
1778 return false;
1779}
1780
1781// Transform "(X >> SHIFT) & (MASK << C1)" to
1782// "((X >> (SHIFT + C1)) & (MASK)) << C1". Everything before the SHL will be
1783// matched to a BEXTR later. Returns false if the simplification is performed.
1784static bool foldMaskedShiftToBEXTR(SelectionDAG &DAG, SDValue N,
1785 uint64_t Mask,
1786 SDValue Shift, SDValue X,
1787 X86ISelAddressMode &AM,
1788 const X86Subtarget &Subtarget) {
1789 if (Shift.getOpcode() != ISD::SRL ||
1790 !isa<ConstantSDNode>(Shift.getOperand(1)) ||
1791 !Shift.hasOneUse() || !N.hasOneUse())
1792 return true;
1793
1794 // Only do this if BEXTR will be matched by matchBEXTRFromAndImm.
1795 if (!Subtarget.hasTBM() &&
1796 !(Subtarget.hasBMI() && Subtarget.hasFastBEXTR()))
1797 return true;
1798
1799 // We need to ensure that mask is a continuous run of bits.
1800 if (!isShiftedMask_64(Mask)) return true;
1801
1802 unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1803
1804 // The amount of shift we're trying to fit into the addressing mode is taken
1805 // from the trailing zeros of the mask.
1806 unsigned AMShiftAmt = countTrailingZeros(Mask);
1807
1808 // There is nothing we can do here unless the mask is removing some bits.
1809 // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
1810 if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;
1811
1812 MVT VT = N.getSimpleValueType();
1813 SDLoc DL(N);
1814 SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
1815 SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
1816 SDValue NewMask = DAG.getConstant(Mask >> AMShiftAmt, DL, VT);
1817 SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, NewSRL, NewMask);
1818 SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
1819 SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewAnd, NewSHLAmt);
1820
1821 // Insert the new nodes into the topological ordering. We must do this in
1822 // a valid topological ordering as nothing is going to go back and re-sort
1823 // these nodes. We continually insert before 'N' in sequence as this is
1824 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1825 // hierarchy left to express.
1826 insertDAGNode(DAG, N, NewSRLAmt);
1827 insertDAGNode(DAG, N, NewSRL);
1828 insertDAGNode(DAG, N, NewMask);
1829 insertDAGNode(DAG, N, NewAnd);
1830 insertDAGNode(DAG, N, NewSHLAmt);
1831 insertDAGNode(DAG, N, NewSHL);
1832 DAG.ReplaceAllUsesWith(N, NewSHL);
1833 DAG.RemoveDeadNode(N.getNode());
1834
1835 AM.Scale = 1 << AMShiftAmt;
1836 AM.IndexReg = NewAnd;
1837 return false;
1838}
1839
/// Recursively analyze the expression rooted at N and try to absorb it into
/// the partially-built addressing mode AM (base + scale*index + disp +
/// segment). Returns false when N was folded into AM; returns true when no
/// folding (beyond the matchAddressBase fallback) was possible. Note the
/// inverted convention: false == success, shared by all match* helpers here.
bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  SDLoc dl(N);
  LLVM_DEBUG({
    dbgs() << "MatchAddress: ";
    AM.dump(CurDAG);
  });
  // Limit recursion.
  if (Depth > 5)
    return matchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it. Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements. It isn't very important, but this should be fixed for
    // consistency.
    if (!(AM.ES || AM.MCSym) && AM.JT != -1)
      return true;

    // Only a constant can still be merged (into the displacement).
    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!foldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::LOCAL_RECOVER: {
    // An MCSymbol operand can serve as the symbolic displacement, but only
    // if no displacement has been claimed yet.
    if (!AM.hasSymbolicDisplacement() && AM.Disp == 0)
      if (const auto *ESNode = dyn_cast<MCSymbolSDNode>(N.getOperand(0))) {
        // Use the symbol and don't prefix it.
        AM.MCSym = ESNode->getMCSymbol();
        return false;
      }
    break;
  }
  case ISD::Constant: {
    // Fold the immediate into the displacement field if it fits.
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!foldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!matchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!matchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    // A frame index can be the base, provided the base is still free and the
    // displacement stays encodable on 64-bit targets.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    // The scale slot must still be unclaimed.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
      break;

    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getOperand(0);
          ConstantSDNode *AddVal = cast<ConstantSDNode>(ShVal.getOperand(1));
          uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
          if (!foldOffsetIntoAddress(Disp, AM))
            return false;
        }

        // Offset didn't fold; use the whole shifted value as the index.
        AM.IndexReg = ShVal;
        return false;
      }
    }
    break;

  case ISD::SRL: {
    // Scale must not be used already.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
           "Unexpected value size!");

    SDValue And = N.getOperand(0);
    if (And.getOpcode() != ISD::AND) break;
    SDValue X = And.getOperand(0);

    // The mask used for the transform is expected to be post-shift, but we
    // found the shift first so just apply the shift to the mask before passing
    // it down.
    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
        !isa<ConstantSDNode>(And.getOperand(1)))
      break;
    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);

    // Try to fold the mask and shift into the scale, and return false if we
    // succeed.
    if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
      return false;
    break;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    LLVM_FALLTHROUGH;
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        AM.IndexReg.getNode() == nullptr) {
      if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          // Base gets one copy of X; index gets X scaled by (multiplier-1).
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getOperand(1))) {
            Reg = MulVal.getOperand(0);
            ConstantSDNode *AddVal =
                cast<ConstantSDNode>(MulVal.getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (foldOffsetIntoAddress(Disp, AM))
              Reg = N.getOperand(0);
          } else {
            Reg = N.getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address and
    // the index field with the index field unused, use -B as the index.
    // This is a win if a has multiple parts that can be folded into
    // the address. Also, this saves a mov if the base register has
    // other uses, since it avoids a two-address sub instruction, however
    // it costs an additional mov if the index register has other uses.

    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (matchAddressRecursively(N.getOperand(0), AM, Depth+1)) {
      N = Handle.getValue();
      AM = Backup;
      break;
    }
    N = Handle.getValue();
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = N.getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode() &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    // Negation will be emitted later to avoid creating dangling nodes if this
    // was an unprofitable LEA.
    AM.IndexReg = RHS;
    AM.NegateIndex = true;
    AM.Scale = 1;
    return false;
  }

  case ISD::ADD:
    if (!matchAdd(N, AM, Depth))
      return false;
    break;

  case ISD::OR:
    // We want to look through a transform in InstCombine and DAGCombiner that
    // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'.
    // Example: (or (and x, 1), (shl y, 3)) --> (add (and x, 1), (shl y, 3))
    // An 'lea' can then be used to match the shift (multiply) and add:
    // and $1, %esi
    // lea (%rsi, %rdi, 8), %rax
    if (CurDAG->haveNoCommonBitsSet(N.getOperand(0), N.getOperand(1)) &&
        !matchAdd(N, AM, Depth))
      return false;
    break;

  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
           "Unexpected value size!");

    if (!isa<ConstantSDNode>(N.getOperand(1)))
      break;

    if (N.getOperand(0).getOpcode() == ISD::SRL) {
      SDValue Shift = N.getOperand(0);
      SDValue X = Shift.getOperand(0);

      uint64_t Mask = N.getConstantOperandVal(1);

      // Try to fold the mask and shift into an extract and scale.
      if (!foldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
        return false;

      // Try to fold the mask and shift directly into the scale.
      if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
        return false;

      // Try to fold the mask and shift into BEXTR and scale.
      if (!foldMaskedShiftToBEXTR(*CurDAG, N, Mask, Shift, X, AM, *Subtarget))
        return false;
    }

    // Try to swap the mask and shift to place shifts which can be done as
    // a scale on the outside of the mask.
    if (!foldMaskedShiftToScaledMask(*CurDAG, N, AM))
      return false;

    break;
  }
  case ISD::ZERO_EXTEND: {
    // Try to widen a zexted shift left to the same size as its use, so we can
    // match the shift as a scale factor.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
      break;
    if (N.getOperand(0).getOpcode() != ISD::SHL || !N.getOperand(0).hasOneUse())
      break;

    // Give up if the shift is not a valid scale factor [1,2,3].
    SDValue Shl = N.getOperand(0);
    auto *ShAmtC = dyn_cast<ConstantSDNode>(Shl.getOperand(1));
    if (!ShAmtC || ShAmtC->getZExtValue() > 3)
      break;

    // The narrow shift must only shift out zero bits (it must be 'nuw').
    // That makes it safe to widen to the destination type.
    APInt HighZeros = APInt::getHighBitsSet(Shl.getValueSizeInBits(),
                                            ShAmtC->getZExtValue());
    if (!CurDAG->MaskedValueIsZero(Shl.getOperand(0), HighZeros))
      break;

    // zext (shl nuw i8 %x, C) to i32 --> shl (zext i8 %x to i32), (zext C)
    MVT VT = N.getSimpleValueType();
    SDLoc DL(N);
    SDValue Zext = CurDAG->getNode(ISD::ZERO_EXTEND, DL, VT, Shl.getOperand(0));
    SDValue NewShl = CurDAG->getNode(ISD::SHL, DL, VT, Zext, Shl.getOperand(1));

    // Convert the shift to scale factor.
    AM.Scale = 1 << ShAmtC->getZExtValue();
    AM.IndexReg = Zext;

    insertDAGNode(*CurDAG, N, Zext);
    insertDAGNode(*CurDAG, N, NewShl);
    CurDAG->ReplaceAllUsesWith(N, NewShl);
    CurDAG->RemoveDeadNode(N.getNode());
    return false;
  }
  }

  // No opcode-specific folding applied; fall back to claiming a base or
  // index register slot for N.
  return matchAddressBase(N, AM);
}
2169
2170/// Helper for MatchAddress. Add the specified node to the
2171/// specified addressing mode without any further recursion.
2172bool X86DAGToDAGISel::matchAddressBase(SDValue N, X86ISelAddressMode &AM) {
2173 // Is the base register already occupied?
2174 if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
2175 // If so, check to see if the scale index register is set.
2176 if (!AM.IndexReg.getNode()) {
2177 AM.IndexReg = N;
2178 AM.Scale = 1;
2179 return false;
2180 }
2181
2182 // Otherwise, we cannot select it.
2183 return true;
2184 }
2185
2186 // Default, generate it as a register.
2187 AM.BaseType = X86ISelAddressMode::RegBase;
2188 AM.Base_Reg = N;
2189 return false;
2190}
2191
2192/// Helper for selectVectorAddr. Handles things that can be folded into a
2193/// gather scatter address. The index register and scale should have already
2194/// been handled.
2195bool X86DAGToDAGISel::matchVectorAddress(SDValue N, X86ISelAddressMode &AM) {
2196 // TODO: Support other operations.
2197 switch (N.getOpcode()) {
2198 case ISD::Constant: {
2199 uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
2200 if (!foldOffsetIntoAddress(Val, AM))
2201 return false;
2202 break;
2203 }
2204 case X86ISD::Wrapper:
2205 if (!matchWrapper(N, AM))
2206 return false;
2207 break;
2208 }
2209
2210 return matchAddressBase(N, AM);
2211}
2212
2213bool X86DAGToDAGISel::selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
2214 SDValue &Scale, SDValue &Index,
2215 SDValue &Disp, SDValue &Segment) {
2216 X86ISelAddressMode AM;
2217 auto *Mgs = cast<X86MaskedGatherScatterSDNode>(Parent);
2218 AM.IndexReg = Mgs->getIndex();
2219 AM.Scale = cast<ConstantSDNode>(Mgs->getScale())->getZExtValue();
2220
2221 unsigned AddrSpace = cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
2222 // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
2223 if (AddrSpace == 256)
2224 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
2225 if (AddrSpace == 257)
2226 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
2227 if (AddrSpace == 258)
2228 AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
2229
2230 SDLoc DL(N);
2231 MVT VT = N.getSimpleValueType();
2232
2233 // Try to match into the base and displacement fields.
2234 if (matchVectorAddress(N, AM))
2235 return false;
2236
2237 getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2238 return true;
2239}
2240
2241/// Returns true if it is able to pattern match an addressing mode.
2242/// It returns the operands which make up the maximal addressing mode it can
2243/// match by reference.
2244///
2245/// Parent is the parent node of the addr operand that is being matched. It
2246/// is always a load, store, atomic node, or null. It is only null when
2247/// checking memory operands for inline asm nodes.
2248bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
2249 SDValue &Scale, SDValue &Index,
2250 SDValue &Disp, SDValue &Segment) {
2251 X86ISelAddressMode AM;
2252
2253 if (Parent &&
2254 // This list of opcodes are all the nodes that have an "addr:$ptr" operand
2255 // that are not a MemSDNode, and thus don't have proper addrspace info.
2256 Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
2257 Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
2258 Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
2259 Parent->getOpcode() != X86ISD::ENQCMD && // Fixme
2260 Parent->getOpcode() != X86ISD::ENQCMDS && // Fixme
2261 Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
2262 Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
2263 unsigned AddrSpace =
2264 cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
2265 // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
2266 if (AddrSpace == 256)
2267 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
2268 if (AddrSpace == 257)
2269 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
2270 if (AddrSpace == 258)
2271 AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
2272 }
2273
2274 // Save the DL and VT before calling matchAddress, it can invalidate N.
2275 SDLoc DL(N);
2276 MVT VT = N.getSimpleValueType();
2277
2278 if (matchAddress(N, AM))
2279 return false;
2280
2281 getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2282 return true;
2283}
2284
2285// We can only fold a load if all nodes between it and the root node have a
2286// single use. If there are additional uses, we could end up duplicating the
2287// load.
2288static bool hasSingleUsesFromRoot(SDNode *Root, SDNode *User) {
2289 while (User != Root) {
2290 if (!User->hasOneUse())
2291 return false;
2292 User = *User->use_begin();
2293 }
2294
2295 return true;
2296}
2297
2298/// Match a scalar SSE load. In particular, we want to match a load whose top
2299/// elements are either undef or zeros. The load flavor is derived from the
2300/// type of N, which is either v4f32 or v2f64.
2301///
2302/// We also return:
2303/// PatternChainNode: this is the matched node that has a chain input and
2304/// output.
bool X86DAGToDAGISel::selectScalarSSELoad(SDNode *Root, SDNode *Parent,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  // Folding is only safe if every node between Parent and Root has a single
  // use; otherwise the folded load could be duplicated.
  if (!hasSingleUsesFromRoot(Root, Parent))
    return false;

  // We can allow a full vector load here since narrowing a load is ok unless
  // it's volatile or atomic.
  if (ISD::isNON_EXTLoad(N.getNode())) {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    if (LD->isSimple() &&
        IsProfitableToFold(N, LD, Root) &&
        IsLegalToFold(N, Parent, Root, OptLevel)) {
      // Report the load itself as the chain-carrying node and select its
      // pointer operand into the address fields.
      PatternNodeWithChain = N;
      return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
                        Segment);
    }
  }

  // We can also match the special zero extended load opcode.
  if (N.getOpcode() == X86ISD::VZEXT_LOAD) {
    PatternNodeWithChain = N;
    if (IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
        IsLegalToFold(PatternNodeWithChain, Parent, Root, OptLevel)) {
      auto *MI = cast<MemIntrinsicSDNode>(PatternNodeWithChain);
      return selectAddr(MI, MI->getBasePtr(), Base, Scale, Index, Disp,
                        Segment);
    }
  }

  // Need to make sure that the SCALAR_TO_VECTOR and load are both only used
  // once. Otherwise the load might get duplicated and the chain output of the
  // duplicate load will not be observed by all dependencies.
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR && N.getNode()->hasOneUse()) {
    // Here the chain-carrying node is the load feeding SCALAR_TO_VECTOR,
    // not N itself.
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
        IsLegalToFold(PatternNodeWithChain, N.getNode(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
                        Segment);
    }
  }

  // No foldable scalar SSE load pattern found.
  return false;
}
2353
2354
2355bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
2356 if (const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2357 uint64_t ImmVal = CN->getZExtValue();
2358 if (!isUInt<32>(ImmVal))
2359 return false;
2360
2361 Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i64);
2362 return true;
2363 }
2364
2365 // In static codegen with small code model, we can get the address of a label
2366 // into a register with 'movl'
2367 if (N->getOpcode() != X86ISD::Wrapper)
2368 return false;
2369
2370 N = N.getOperand(0);
2371
2372 // At least GNU as does not accept 'movl' for TPOFF relocations.
2373 // FIXME: We could use 'movl' when we know we are targeting MC.
2374 if (N->getOpcode() == ISD::TargetGlobalTLSAddress)
2375 return false;
2376
2377 Imm = N;
2378 if (N->getOpcode() != ISD::TargetGlobalAddress)
2379 return TM.getCodeModel() == CodeModel::Small;
2380
2381 Optional<ConstantRange> CR =
2382 cast<GlobalAddressSDNode>(N)->getGlobal()->getAbsoluteSymbolRange();
2383 if (!CR)
2384 return TM.getCodeModel() == CodeModel::Small;
2385
2386 return CR->getUnsignedMax().ult(1ull << 32);
2387}
2388
2389bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
2390 SDValue &Scale, SDValue &Index,
2391 SDValue &Disp, SDValue &Segment) {
2392 // Save the debug loc before calling selectLEAAddr, in case it invalidates N.
2393 SDLoc DL(N);
2394
2395 if (!selectLEAAddr(N, Base, Scale, Index, Disp, Segment))
2396 return false;
2397
2398 RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
2399 if (RN && RN->getReg() == 0)
2400 Base = CurDAG->getRegister(0, MVT::i64);
2401 else if (Base.getValueType() == MVT::i32 && !isa<FrameIndexSDNode>(Base)) {
2402 // Base could already be %rip, particularly in the x32 ABI.
2403 SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
2404 MVT::i64), 0);
2405 Base = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
2406 Base);
2407 }
2408
2409 RN = dyn_cast<RegisterSDNode>(Index);
2410 if (RN && RN->getReg() == 0)
2411 Index = CurDAG->getRegister(0, MVT::i64);
2412 else {
2413 assert(Index.getValueType() == MVT::i32 &&((Index.getValueType() == MVT::i32 && "Expect to be extending 32-bit registers for use in LEA"
) ? static_cast<void> (0) : __assert_fail ("Index.getValueType() == MVT::i32 && \"Expect to be extending 32-bit registers for use in LEA\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 2414, __PRETTY_FUNCTION__))
2414 "Expect to be extending 32-bit registers for use in LEA")((Index.getValueType() == MVT::i32 && "Expect to be extending 32-bit registers for use in LEA"
) ? static_cast<void> (0) : __assert_fail ("Index.getValueType() == MVT::i32 && \"Expect to be extending 32-bit registers for use in LEA\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 2414, __PRETTY_FUNCTION__))
;
2415 SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
2416 MVT::i64), 0);
2417 Index = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
2418 Index);
2419 }
2420
2421 return true;
2422}
2423
2424/// Calls SelectAddr and determines if the maximal addressing
2425/// mode it matches can be cost effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp,
                                    SDValue &Segment) {
  X86ISelAddressMode AM;

  // Save the DL and VT before calling matchAddress, it can invalidate N.
  SDLoc DL(N);
  MVT VT = N.getSimpleValueType();

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (matchAddress(N, AM))
    return false;
  assert (T == AM.Segment);
  AM.Segment = Copy;

  // Score how much address computation the LEA would fold. Only complexity
  // > 2 (see the check at the end) justifies emitting an LEA.
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode())
    Complexity = 1;
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
  // a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, always use LEA to materialize RIP-relative addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  // Heuristic: try harder to form an LEA from ADD if the operands set flags.
  // Unlike ADD, LEA does not affect flags, so we will be less likely to require
  // duplicating flag-producing instructions later in the pipeline.
  if (N.getOpcode() == ISD::ADD) {
    auto isMathWithFlags = [](SDValue V) {
      switch (V.getOpcode()) {
      case X86ISD::ADD:
      case X86ISD::SUB:
      case X86ISD::ADC:
      case X86ISD::SBB:
      /* TODO: These opcodes can be added safely, but we may want to justify
               their inclusion for different reasons (better for reg-alloc).
      case X86ISD::SMUL:
      case X86ISD::UMUL:
      case X86ISD::OR:
      case X86ISD::XOR:
      case X86ISD::AND:
      */
        // Value 1 is the flag output of the node - verify it's not dead.
        return !SDValue(V.getNode(), 1).use_empty();
      default:
        return false;
      }
    };
    // TODO: This could be an 'or' rather than 'and' to make the transform more
    //       likely to happen. We might want to factor in whether there's a
    //       load folding opportunity for the math op that disappears with LEA.
    if (isMathWithFlags(N.getOperand(0)) && isMathWithFlags(N.getOperand(1)))
      Complexity++;
  }

  // A displacement also counts toward the folded work.
  if (AM.Disp)
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
  return true;
}
2514
2515/// This is only run on TargetGlobalTLSAddress nodes.
2516bool X86DAGToDAGISel::selectTLSADDRAddr(SDValue N, SDValue &Base,
2517 SDValue &Scale, SDValue &Index,
2518 SDValue &Disp, SDValue &Segment) {
2519 assert(N.getOpcode() == ISD::TargetGlobalTLSAddress)((N.getOpcode() == ISD::TargetGlobalTLSAddress) ? static_cast
<void> (0) : __assert_fail ("N.getOpcode() == ISD::TargetGlobalTLSAddress"
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 2519, __PRETTY_FUNCTION__))
;
2520 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
2521
2522 X86ISelAddressMode AM;
2523 AM.GV = GA->getGlobal();
2524 AM.Disp += GA->getOffset();
2525 AM.SymbolFlags = GA->getTargetFlags();
2526
2527 MVT VT = N.getSimpleValueType();
2528 if (VT == MVT::i32) {
2529 AM.Scale = 1;
2530 AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
2531 }
2532
2533 getAddressOperands(AM, SDLoc(N), VT, Base, Scale, Index, Disp, Segment);
2534 return true;
2535}
2536
2537bool X86DAGToDAGISel::selectRelocImm(SDValue N, SDValue &Op) {
2538 if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
2539 Op = CurDAG->getTargetConstant(CN->getAPIntValue(), SDLoc(CN),
2540 N.getValueType());
2541 return true;
2542 }
2543
2544 // Keep track of the original value type and whether this value was
2545 // truncated. If we see a truncation from pointer type to VT that truncates
2546 // bits that are known to be zero, we can use a narrow reference.
2547 EVT VT = N.getValueType();
2548 bool WasTruncated = false;
2549 if (N.getOpcode() == ISD::TRUNCATE) {
2550 WasTruncated = true;
2551 N = N.getOperand(0);
2552 }
2553
2554 if (N.getOpcode() != X86ISD::Wrapper)
2555 return false;
2556
2557 // We can only use non-GlobalValues as immediates if they were not truncated,
2558 // as we do not have any range information. If we have a GlobalValue and the
2559 // address was not truncated, we can select it as an operand directly.
2560 unsigned Opc = N.getOperand(0)->getOpcode();
2561 if (Opc != ISD::TargetGlobalAddress || !WasTruncated) {
2562 Op = N.getOperand(0);
2563 // We can only select the operand directly if we didn't have to look past a
2564 // truncate.
2565 return !WasTruncated;
2566 }
2567
2568 // Check that the global's range fits into VT.
2569 auto *GA = cast<GlobalAddressSDNode>(N.getOperand(0));
2570 Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
2571 if (!CR || CR->getUnsignedMax().uge(1ull << VT.getSizeInBits()))
2572 return false;
2573
2574 // Okay, we can use a narrow reference.
2575 Op = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(N), VT,
2576 GA->getOffset(), GA->getTargetFlags());
2577 return true;
2578}
2579
2580bool X86DAGToDAGISel::tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
2581 SDValue &Base, SDValue &Scale,
2582 SDValue &Index, SDValue &Disp,
2583 SDValue &Segment) {
2584 if (!ISD::isNON_EXTLoad(N.getNode()) ||
30
Calling 'isNON_EXTLoad'
35
Returning from 'isNON_EXTLoad'
2585 !IsProfitableToFold(N, P, Root) ||
36
Passing null pointer value via 2nd parameter 'U'
37
Calling 'X86DAGToDAGISel::IsProfitableToFold'
2586 !IsLegalToFold(N, P, Root, OptLevel))
2587 return false;
2588
2589 return selectAddr(N.getNode(),
2590 N.getOperand(1), Base, Scale, Index, Disp, Segment);
2591}
2592
2593/// Return an SDNode that returns the value of the global base register.
2594/// Output instructions required to initialize the global base register,
2595/// if necessary.
2596SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
2597 unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
2598 auto &DL = MF->getDataLayout();
2599 return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
2600}
2601
2602bool X86DAGToDAGISel::isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const {
2603 if (N->getOpcode() == ISD::TRUNCATE)
2604 N = N->getOperand(0).getNode();
2605 if (N->getOpcode() != X86ISD::Wrapper)
2606 return false;
2607
2608 auto *GA = dyn_cast<GlobalAddressSDNode>(N->getOperand(0));
2609 if (!GA)
2610 return false;
2611
2612 Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
2613 return CR && CR->getSignedMin().sge(-1ull << Width) &&
2614 CR->getSignedMax().slt(1ull << Width);
2615}
2616
2617static X86::CondCode getCondFromNode(SDNode *N) {
2618 assert(N->isMachineOpcode() && "Unexpected node")((N->isMachineOpcode() && "Unexpected node") ? static_cast
<void> (0) : __assert_fail ("N->isMachineOpcode() && \"Unexpected node\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 2618, __PRETTY_FUNCTION__))
;
2619 X86::CondCode CC = X86::COND_INVALID;
2620 unsigned Opc = N->getMachineOpcode();
2621 if (Opc == X86::JCC_1)
2622 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(1));
2623 else if (Opc == X86::SETCCr)
2624 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(0));
2625 else if (Opc == X86::SETCCm)
2626 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(5));
2627 else if (Opc == X86::CMOV16rr || Opc == X86::CMOV32rr ||
2628 Opc == X86::CMOV64rr)
2629 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(2));
2630 else if (Opc == X86::CMOV16rm || Opc == X86::CMOV32rm ||
2631 Opc == X86::CMOV64rm)
2632 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(6));
2633
2634 return CC;
2635}
2636
2637/// Test whether the given X86ISD::CMP node has any users that use a flag
2638/// other than ZF.
bool X86DAGToDAGISel::onlyUsesZeroFlag(SDValue Flags) const {
  // Examine each user of the node. The flag value may only reach consumers
  // through a CopyToReg into EFLAGS; any other kind of user is conservatively
  // treated as reading all flags.
  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
         UI != UE; ++UI) {
    // Only check things that use the flags.
    if (UI.getUse().getResNo() != Flags.getResNo())
      continue;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (UI->getOpcode() != ISD::CopyToReg ||
        cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result (result number 1 of the CopyToReg).
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the condition code of the user.
      X86::CondCode CC = getCondFromNode(*FlagUI);

      switch (CC) {
      // Comparisons which only use the zero flag.
      case X86::COND_E: case X86::COND_NE:
        continue;
      // Anything else: assume conservatively.
      default:
        return false;
      }
    }
  }
  // Every reachable consumer only reads ZF.
  return true;
}
2672
2673/// Test whether the given X86ISD::CMP node has any uses which require the SF
2674/// flag to be accurate.
bool X86DAGToDAGISel::hasNoSignFlagUses(SDValue Flags) const {
  // Examine each user of the node. The flag value may only reach consumers
  // through a CopyToReg into EFLAGS; any other kind of user is conservatively
  // treated as reading all flags (including SF).
  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
         UI != UE; ++UI) {
    // Only check things that use the flags.
    if (UI.getUse().getResNo() != Flags.getResNo())
      continue;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (UI->getOpcode() != ISD::CopyToReg ||
        cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result (result number 1 of the CopyToReg).
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the condition code of the user.
      X86::CondCode CC = getCondFromNode(*FlagUI);

      switch (CC) {
      // Comparisons which don't examine the SF flag.
      case X86::COND_A: case X86::COND_AE:
      case X86::COND_B: case X86::COND_BE:
      case X86::COND_E: case X86::COND_NE:
      case X86::COND_O: case X86::COND_NO:
      case X86::COND_P: case X86::COND_NP:
        continue;
      // Anything else: assume conservatively.
      default:
        return false;
      }
    }
  }
  // No reachable consumer reads SF.
  return true;
}
2712
2713static bool mayUseCarryFlag(X86::CondCode CC) {
2714 switch (CC) {
2715 // Comparisons which don't examine the CF flag.
2716 case X86::COND_O: case X86::COND_NO:
2717 case X86::COND_E: case X86::COND_NE:
2718 case X86::COND_S: case X86::COND_NS:
2719 case X86::COND_P: case X86::COND_NP:
2720 case X86::COND_L: case X86::COND_GE:
2721 case X86::COND_G: case X86::COND_LE:
2722 return false;
2723 // Anything else: assume conservatively.
2724 default:
2725 return true;
2726 }
2727}
2728
2729/// Test whether the given node which sets flags has any uses which require the
2730/// CF flag to be accurate.
bool X86DAGToDAGISel::hasNoCarryFlagUses(SDValue Flags) const {
  // Examine each user of the node. Flags may be consumed either through a
  // CopyToReg into EFLAGS (already-selected users) or directly by pre-isel
  // X86ISD nodes; both paths are checked below.
  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
         UI != UE; ++UI) {
    // Only check things that use the flags.
    if (UI.getUse().getResNo() != Flags.getResNo())
      continue;

    unsigned UIOpc = UI->getOpcode();

    if (UIOpc == ISD::CopyToReg) {
      // Only examine CopyToReg uses that copy to EFLAGS.
      if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
        return false;
      // Examine each user of the CopyToReg use.
      for (SDNode::use_iterator FlagUI = UI->use_begin(), FlagUE = UI->use_end();
           FlagUI != FlagUE; ++FlagUI) {
        // Only examine the Flag result (result number 1 of the CopyToReg).
        if (FlagUI.getUse().getResNo() != 1)
          continue;
        // Anything unusual: assume conservatively.
        if (!FlagUI->isMachineOpcode())
          return false;
        // Examine the condition code of the user.
        X86::CondCode CC = getCondFromNode(*FlagUI);

        if (mayUseCarryFlag(CC))
          return false;
      }

      // This CopyToReg is ok. Move on to the next user.
      continue;
    }

    // This might be an unselected node. So look for the pre-isel opcodes that
    // use flags.
    unsigned CCOpNo;
    switch (UIOpc) {
    default:
      // Something unusual. Be conservative.
      return false;
    case X86ISD::SETCC:       CCOpNo = 0; break;
    case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
    case X86ISD::CMOV:        CCOpNo = 2; break;
    case X86ISD::BRCOND:      CCOpNo = 2; break;
    }

    X86::CondCode CC = (X86::CondCode)UI->getConstantOperandVal(CCOpNo);
    if (mayUseCarryFlag(CC))
      return false;
  }
  // No reachable consumer reads CF.
  return true;
}
2784
2785/// Check whether or not the chain ending in StoreNode is suitable for doing
2786/// the {load; op; store} to modify transformation.
static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
                                        SDValue StoredVal, SelectionDAG *CurDAG,
                                        unsigned LoadOpNo,
                                        LoadSDNode *&LoadNode,
                                        SDValue &InputChain) {
  // Is the stored value result 0 of the operation?
  if (StoredVal.getResNo() != 0) return false;

  // Are there other uses of the operation other than the store?
  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;

  // Is the store non-extending and non-indexed?
  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
    return false;

  SDValue Load = StoredVal->getOperand(LoadOpNo);
  // Is the stored value a non-extending and non-indexed load?
  if (!ISD::isNormalLoad(Load.getNode())) return false;

  // Return LoadNode by reference.
  LoadNode = cast<LoadSDNode>(Load);

  // Is store the only read of the loaded value?
  if (!Load.hasOneUse())
    return false;

  // Is the address of the store the same as the load?
  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
      LoadNode->getOffset() != StoreNode->getOffset())
    return false;

  bool FoundLoad = false;
  SmallVector<SDValue, 4> ChainOps;
  SmallVector<const SDNode *, 4> LoopWorklist;
  SmallPtrSet<const SDNode *, 16> Visited;
  // Cap on how many predecessors hasPredecessorHelper may visit before the
  // search gives up (conservatively returning "has predecessor").
  const unsigned int Max = 1024;

  // Visualization of Load-Op-Store fusion:
  // -------------------------
  // Legend:
  //    *-lines = Chain operand dependencies.
  //    |-lines = Normal operand dependencies.
  //    Dependencies flow down and right. n-suffix references multiple nodes.
  //
  //        C                        Xn  C
  //        *                         *  *
  //        *                          * *
  //  Xn  A-LD    Yn                    TF         Yn
  //   *    * \   |                       *        |
  //    *   *  \  |                        *       |
  //     *  *   \ |             =>       A--LD_OP_ST
  //      * *    \|                                 \
  //       TF    OP                                  \
  //        *   | \                                   Zn
  //         *  |  \
  //          A-ST Zn
  //

  // This merge induced dependences from: #1: Xn -> LD, OP, Zn
  //                                      #2: Yn -> LD
  //                                      #3: ST -> Zn

  // Ensure the transform is safe by checking for the dual
  // dependencies to make sure we do not induce a loop.

  // As LD is a predecessor to both OP and ST we can do this by checking:
  //  a). if LD is a predecessor to a member of Xn or Yn.
  //  b). if a Zn is a predecessor to ST.

  // However, (b) can only occur through being a chain predecessor to
  // ST, which is the same as Zn being a member or predecessor of Xn,
  // which is a subset of LD being a predecessor of Xn. So it's
  // subsumed by check (a).

  SDValue Chain = StoreNode->getChain();

  // Gather X elements in ChainOps.
  if (Chain == Load.getValue(1)) {
    FoundLoad = true;
    ChainOps.push_back(Load.getOperand(0));
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
      SDValue Op = Chain.getOperand(i);
      if (Op == Load.getValue(1)) {
        FoundLoad = true;
        // Drop Load, but keep its chain. No cycle check necessary.
        ChainOps.push_back(Load.getOperand(0));
        continue;
      }
      LoopWorklist.push_back(Op.getNode());
      ChainOps.push_back(Op);
    }
  }

  if (!FoundLoad)
    return false;

  // Worklist is currently Xn. Add Yn to worklist.
  for (SDValue Op : StoredVal->ops())
    if (Op.getNode() != LoadNode)
      LoopWorklist.push_back(Op.getNode());

  // Check (a) if Load is a predecessor to Xn + Yn
  if (SDNode::hasPredecessorHelper(Load.getNode(), Visited, LoopWorklist, Max,
                                   true))
    return false;

  // Safe: merge all gathered chains into the new node's input chain.
  InputChain =
      CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ChainOps);
  return true;
}
2898
2899// Change a chain of {load; op; store} of the same value into a simple op
2900// through memory of that value, if the uses of the modified value and its
2901// address are suitable.
2902//
2903// The tablegen pattern memory operand pattern is currently not able to match
2904// the case where the EFLAGS on the original operation are used.
2905//
2906// To move this to tablegen, we'll need to improve tablegen to allow flags to
2907// be transferred from a node in the pattern to the result node, probably with
2908// a new keyword. For example, we have this
2909// def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2910// [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2911// (implicit EFLAGS)]>;
2912// but maybe need something like this
2913// def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2914// [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2915// (transferrable EFLAGS)]>;
2916//
2917// Until then, we manually fold these and instruction select the operation
2918// here.
bool X86DAGToDAGISel::foldLoadStoreIntoMemOperand(SDNode *Node) {
  StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
  SDValue StoredVal = StoreNode->getOperand(1);
  unsigned Opc = StoredVal->getOpcode();

  // Before we try to select anything, make sure this is memory operand size
  // and opcode we can handle. Note that this must match the code below that
  // actually lowers the opcodes.
  EVT MemVT = StoreNode->getMemoryVT();
  if (MemVT != MVT::i64 && MemVT != MVT::i32 && MemVT != MVT::i16 &&
      MemVT != MVT::i8)
    return false;

  bool IsCommutable = false;
  bool IsNegate = false;
  switch (Opc) {
  default:
    return false;
  case X86ISD::SUB:
    // (X86ISD::SUB 0, x) is a negate; the load is then operand 1.
    IsNegate = isNullConstant(StoredVal.getOperand(0));
    break;
  case X86ISD::SBB:
    break;
  case X86ISD::ADD:
  case X86ISD::ADC:
  case X86ISD::AND:
  case X86ISD::OR:
  case X86ISD::XOR:
    IsCommutable = true;
    break;
  }

  unsigned LoadOpNo = IsNegate ? 1 : 0;
  LoadSDNode *LoadNode = nullptr;
  SDValue InputChain;
  if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
                                   LoadNode, InputChain)) {
    if (!IsCommutable)
      return false;

    // This operation is commutable, try the other operand.
    LoadOpNo = 1;
    if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
                                     LoadNode, InputChain))
      return false;
  }

  // Match the load's address into the five x86 memory operands.
  SDValue Base, Scale, Index, Disp, Segment;
  if (!selectAddr(LoadNode, LoadNode->getBasePtr(), Base, Scale, Index, Disp,
                  Segment))
    return false;

  // Pick the opcode variant matching the memory width.
  auto SelectOpcode = [&](unsigned Opc64, unsigned Opc32, unsigned Opc16,
                          unsigned Opc8) {
    switch (MemVT.getSimpleVT().SimpleTy) {
    case MVT::i64:
      return Opc64;
    case MVT::i32:
      return Opc32;
    case MVT::i16:
      return Opc16;
    case MVT::i8:
      return Opc8;
    default:
      llvm_unreachable("Invalid size!");
    }
  };

  MachineSDNode *Result;
  switch (Opc) {
  case X86ISD::SUB:
    // Handle negate.
    if (IsNegate) {
      unsigned NewOpc = SelectOpcode(X86::NEG64m, X86::NEG32m, X86::NEG16m,
                                     X86::NEG8m);
      const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
      Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
                                      MVT::Other, Ops);
      break;
    }
   LLVM_FALLTHROUGH;
  case X86ISD::ADD:
    // Try to match inc/dec.
    if (!Subtarget->slowIncDec() || OptForSize) {
      bool IsOne = isOneConstant(StoredVal.getOperand(1));
      bool IsNegOne = isAllOnesConstant(StoredVal.getOperand(1));
      // ADD/SUB with 1/-1 and carry flag isn't used can use inc/dec.
      if ((IsOne || IsNegOne) && hasNoCarryFlagUses(StoredVal.getValue(1))) {
        unsigned NewOpc =
          ((Opc == X86ISD::ADD) == IsOne)
              ? SelectOpcode(X86::INC64m, X86::INC32m, X86::INC16m, X86::INC8m)
              : SelectOpcode(X86::DEC64m, X86::DEC32m, X86::DEC16m, X86::DEC8m);
        const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
        Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
                                        MVT::Other, Ops);
        break;
      }
    }
    LLVM_FALLTHROUGH;
  case X86ISD::ADC:
  case X86ISD::SBB:
  case X86ISD::AND:
  case X86ISD::OR:
  case X86ISD::XOR: {
    // Map the X86ISD opcode to the reg-to-mem instruction of the right width.
    auto SelectRegOpcode = [SelectOpcode](unsigned Opc) {
      switch (Opc) {
      case X86ISD::ADD:
        return SelectOpcode(X86::ADD64mr, X86::ADD32mr, X86::ADD16mr,
                            X86::ADD8mr);
      case X86ISD::ADC:
        return SelectOpcode(X86::ADC64mr, X86::ADC32mr, X86::ADC16mr,
                            X86::ADC8mr);
      case X86ISD::SUB:
        return SelectOpcode(X86::SUB64mr, X86::SUB32mr, X86::SUB16mr,
                            X86::SUB8mr);
      case X86ISD::SBB:
        return SelectOpcode(X86::SBB64mr, X86::SBB32mr, X86::SBB16mr,
                            X86::SBB8mr);
      case X86ISD::AND:
        return SelectOpcode(X86::AND64mr, X86::AND32mr, X86::AND16mr,
                            X86::AND8mr);
      case X86ISD::OR:
        return SelectOpcode(X86::OR64mr, X86::OR32mr, X86::OR16mr, X86::OR8mr);
      case X86ISD::XOR:
        return SelectOpcode(X86::XOR64mr, X86::XOR32mr, X86::XOR16mr,
                            X86::XOR8mr);
      default:
        llvm_unreachable("Invalid opcode!");
      }
    };
    // Sign-extended 8-bit immediate forms. No 8-bit memory variant exists
    // (an i8 immediate is already full width there), hence the 0.
    auto SelectImm8Opcode = [SelectOpcode](unsigned Opc) {
      switch (Opc) {
      case X86ISD::ADD:
        return SelectOpcode(X86::ADD64mi8, X86::ADD32mi8, X86::ADD16mi8, 0);
      case X86ISD::ADC:
        return SelectOpcode(X86::ADC64mi8, X86::ADC32mi8, X86::ADC16mi8, 0);
      case X86ISD::SUB:
        return SelectOpcode(X86::SUB64mi8, X86::SUB32mi8, X86::SUB16mi8, 0);
      case X86ISD::SBB:
        return SelectOpcode(X86::SBB64mi8, X86::SBB32mi8, X86::SBB16mi8, 0);
      case X86ISD::AND:
        return SelectOpcode(X86::AND64mi8, X86::AND32mi8, X86::AND16mi8, 0);
      case X86ISD::OR:
        return SelectOpcode(X86::OR64mi8, X86::OR32mi8, X86::OR16mi8, 0);
      case X86ISD::XOR:
        return SelectOpcode(X86::XOR64mi8, X86::XOR32mi8, X86::XOR16mi8, 0);
      default:
        llvm_unreachable("Invalid opcode!");
      }
    };
    // Full-width immediate forms (i64 uses a sign-extended 32-bit imm).
    auto SelectImmOpcode = [SelectOpcode](unsigned Opc) {
      switch (Opc) {
      case X86ISD::ADD:
        return SelectOpcode(X86::ADD64mi32, X86::ADD32mi, X86::ADD16mi,
                            X86::ADD8mi);
      case X86ISD::ADC:
        return SelectOpcode(X86::ADC64mi32, X86::ADC32mi, X86::ADC16mi,
                            X86::ADC8mi);
      case X86ISD::SUB:
        return SelectOpcode(X86::SUB64mi32, X86::SUB32mi, X86::SUB16mi,
                            X86::SUB8mi);
      case X86ISD::SBB:
        return SelectOpcode(X86::SBB64mi32, X86::SBB32mi, X86::SBB16mi,
                            X86::SBB8mi);
      case X86ISD::AND:
        return SelectOpcode(X86::AND64mi32, X86::AND32mi, X86::AND16mi,
                            X86::AND8mi);
      case X86ISD::OR:
        return SelectOpcode(X86::OR64mi32, X86::OR32mi, X86::OR16mi,
                            X86::OR8mi);
      case X86ISD::XOR:
        return SelectOpcode(X86::XOR64mi32, X86::XOR32mi, X86::XOR16mi,
                            X86::XOR8mi);
      default:
        llvm_unreachable("Invalid opcode!");
      }
    };

    unsigned NewOpc = SelectRegOpcode(Opc);
    SDValue Operand = StoredVal->getOperand(1-LoadOpNo);

    // See if the operand is a constant that we can fold into an immediate
    // operand.
    if (auto *OperandC = dyn_cast<ConstantSDNode>(Operand)) {
      int64_t OperandV = OperandC->getSExtValue();

      // Check if we can shrink the operand enough to fit in an immediate (or
      // fit into a smaller immediate) by negating it and switching the
      // operation.
      if ((Opc == X86ISD::ADD || Opc == X86ISD::SUB) &&
          ((MemVT != MVT::i8 && !isInt<8>(OperandV) && isInt<8>(-OperandV)) ||
           (MemVT == MVT::i64 && !isInt<32>(OperandV) &&
            isInt<32>(-OperandV))) &&
          hasNoCarryFlagUses(StoredVal.getValue(1))) {
        OperandV = -OperandV;
        Opc = Opc == X86ISD::ADD ? X86ISD::SUB : X86ISD::ADD;
      }

      // First try to fit this into an Imm8 operand. If it doesn't fit, then try
      // the larger immediate operand.
      if (MemVT != MVT::i8 && isInt<8>(OperandV)) {
        Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
        NewOpc = SelectImm8Opcode(Opc);
      } else if (MemVT != MVT::i64 || isInt<32>(OperandV)) {
        Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
        NewOpc = SelectImmOpcode(Opc);
      }
    }

    if (Opc == X86ISD::ADC || Opc == X86ISD::SBB) {
      // ADC/SBB read CF: glue in a copy of the incoming flags via EFLAGS.
      SDValue CopyTo =
          CurDAG->getCopyToReg(InputChain, SDLoc(Node), X86::EFLAGS,
                               StoredVal.getOperand(2), SDValue());

      const SDValue Ops[] = {Base, Scale, Index, Disp,
                             Segment, Operand, CopyTo, CopyTo.getValue(1)};
      Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
                                      Ops);
    } else {
      const SDValue Ops[] = {Base, Scale, Index, Disp,
                             Segment, Operand, InputChain};
      Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
                                      Ops);
    }
    break;
  }
  default:
    llvm_unreachable("Invalid opcode!");
  }

  // The fused node covers both the original load and store.
  MachineMemOperand *MemOps[] = {StoreNode->getMemOperand(),
                                 LoadNode->getMemOperand()};
  CurDAG->setNodeMemRefs(Result, MemOps);

  // Update Load Chain uses as well.
  ReplaceUses(SDValue(LoadNode, 1), SDValue(Result, 1));
  ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
  ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
  CurDAG->RemoveDeadNode(Node);
  return true;
}
3160
// See if this is an X & Mask that we can match to BEXTR/BZHI.
// Where Mask is one of the following patterns:
//   a) x &  (1 << nbits) - 1
//   b) x & ~(-1 << nbits)
//   c) x &  (-1 >> (32 - y))
//   d) x << (32 - y) >> (32 - y)
// On success the node is replaced with a BZHI (BMI2) or BEXTR (BMI) node and
// re-selected; returns true iff the replacement happened.
bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
  assert(
      (Node->getOpcode() == ISD::AND || Node->getOpcode() == ISD::SRL) &&
      "Should be either an and-mask, or right-shift after clearing high bits.");

  // BEXTR is BMI instruction, BZHI is BMI2 instruction. We need at least one.
  if (!Subtarget->hasBMI() && !Subtarget->hasBMI2())
    return false;

  MVT NVT = Node->getSimpleValueType(0);

  // Only supported for 32 and 64 bits.
  if (NVT != MVT::i32 && NVT != MVT::i64)
    return false;

  // The number of low bits to keep; filled in by whichever pattern matches.
  SDValue NBits;

  // If we have BMI2's BZHI, we are ok with multi-use patterns.
  // Else, if we only have BMI1's BEXTR, we require one-use.
  const bool CanHaveExtraUses = Subtarget->hasBMI2();
  auto checkUses = [CanHaveExtraUses](SDValue Op, unsigned NUses) {
    return CanHaveExtraUses ||
           Op.getNode()->hasNUsesOfValue(NUses, Op.getResNo());
  };
  auto checkOneUse = [checkUses](SDValue Op) { return checkUses(Op, 1); };
  auto checkTwoUse = [checkUses](SDValue Op) { return checkUses(Op, 2); };

  // Look through a one-use i64 -> i32 truncate; the mask computation may have
  // been narrowed even though the extract itself is wide.
  auto peekThroughOneUseTruncation = [checkOneUse](SDValue V) {
    if (V->getOpcode() == ISD::TRUNCATE && checkOneUse(V)) {
      assert(V.getSimpleValueType() == MVT::i32 &&
             V.getOperand(0).getSimpleValueType() == MVT::i64 &&
             "Expected i64 -> i32 truncation");
      V = V.getOperand(0);
    }
    return V;
  };

  // a) x & ((1 << nbits) + (-1))
  auto matchPatternA = [checkOneUse, peekThroughOneUseTruncation,
                        &NBits](SDValue Mask) -> bool {
    // Match `add`. Must only have one use!
    if (Mask->getOpcode() != ISD::ADD || !checkOneUse(Mask))
      return false;
    // We should be adding all-ones constant (i.e. subtracting one.)
    if (!isAllOnesConstant(Mask->getOperand(1)))
      return false;
    // Match `1 << nbits`. Might be truncated. Must only have one use!
    SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
    if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
      return false;
    if (!isOneConstant(M0->getOperand(0)))
      return false;
    NBits = M0->getOperand(1);
    return true;
  };

  // "All-ones" here only needs to hold for the low NVT-many bits, since the
  // final node only produces NVT bits.
  auto isAllOnes = [this, peekThroughOneUseTruncation, NVT](SDValue V) {
    V = peekThroughOneUseTruncation(V);
    return CurDAG->MaskedValueIsAllOnes(
        V, APInt::getLowBitsSet(V.getSimpleValueType().getSizeInBits(),
                                NVT.getSizeInBits()));
  };

  // b) x & ~(-1 << nbits)
  auto matchPatternB = [checkOneUse, isAllOnes, peekThroughOneUseTruncation,
                        &NBits](SDValue Mask) -> bool {
    // Match `~()`. Must only have one use!
    if (Mask.getOpcode() != ISD::XOR || !checkOneUse(Mask))
      return false;
    // The -1 only has to be all-ones for the final Node's NVT.
    if (!isAllOnes(Mask->getOperand(1)))
      return false;
    // Match `-1 << nbits`. Might be truncated. Must only have one use!
    SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
    if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
      return false;
    // The -1 only has to be all-ones for the final Node's NVT.
    if (!isAllOnes(M0->getOperand(0)))
      return false;
    NBits = M0->getOperand(1);
    return true;
  };

  // Match potentially-truncated (bitwidth - y)
  auto matchShiftAmt = [checkOneUse, &NBits](SDValue ShiftAmt,
                                             unsigned Bitwidth) {
    // Skip over a truncate of the shift amount.
    if (ShiftAmt.getOpcode() == ISD::TRUNCATE) {
      ShiftAmt = ShiftAmt.getOperand(0);
      // The trunc should have been the only user of the real shift amount.
      if (!checkOneUse(ShiftAmt))
        return false;
    }
    // Match the shift amount as: (bitwidth - y). It should go away, too.
    if (ShiftAmt.getOpcode() != ISD::SUB)
      return false;
    auto V0 = dyn_cast<ConstantSDNode>(ShiftAmt.getOperand(0));
    if (!V0 || V0->getZExtValue() != Bitwidth)
      return false;
    NBits = ShiftAmt.getOperand(1);
    return true;
  };

  // c) x &  (-1 >> (32 - y))
  auto matchPatternC = [checkOneUse, peekThroughOneUseTruncation,
                        matchShiftAmt](SDValue Mask) -> bool {
    // The mask itself may be truncated.
    Mask = peekThroughOneUseTruncation(Mask);
    unsigned Bitwidth = Mask.getSimpleValueType().getSizeInBits();
    // Match `l>>`. Must only have one use!
    if (Mask.getOpcode() != ISD::SRL || !checkOneUse(Mask))
      return false;
    // We should be shifting truly all-ones constant.
    if (!isAllOnesConstant(Mask.getOperand(0)))
      return false;
    SDValue M1 = Mask.getOperand(1);
    // The shift amount should not be used externally.
    if (!checkOneUse(M1))
      return false;
    return matchShiftAmt(M1, Bitwidth);
  };

  // The value being extracted from; filled in by the matchers below.
  SDValue X;

  // d) x << (32 - y) >> (32 - y)
  auto matchPatternD = [checkOneUse, checkTwoUse, matchShiftAmt,
                        &X](SDNode *Node) -> bool {
    if (Node->getOpcode() != ISD::SRL)
      return false;
    SDValue N0 = Node->getOperand(0);
    if (N0->getOpcode() != ISD::SHL || !checkOneUse(N0))
      return false;
    unsigned Bitwidth = N0.getSimpleValueType().getSizeInBits();
    SDValue N1 = Node->getOperand(1);
    SDValue N01 = N0->getOperand(1);
    // Both of the shifts must be by the exact same value.
    // There should not be any uses of the shift amount outside of the pattern.
    if (N1 != N01 || !checkTwoUse(N1))
      return false;
    if (!matchShiftAmt(N1, Bitwidth))
      return false;
    X = N0->getOperand(0);
    return true;
  };

  auto matchLowBitMask = [matchPatternA, matchPatternB,
                          matchPatternC](SDValue Mask) -> bool {
    return matchPatternA(Mask) || matchPatternB(Mask) || matchPatternC(Mask);
  };

  if (Node->getOpcode() == ISD::AND) {
    X = Node->getOperand(0);
    SDValue Mask = Node->getOperand(1);

    if (matchLowBitMask(Mask)) {
      // Great.
    } else {
      // AND is commutative: the mask may be on either side.
      std::swap(X, Mask);
      if (!matchLowBitMask(Mask))
        return false;
    }
  } else if (!matchPatternD(Node))
    return false;

  SDLoc DL(Node);

  // Truncate the shift amount.
  NBits = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NBits);
  insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);

  // Insert 8-bit NBits into lowest 8 bits of 32-bit register.
  // All the other bits are undefined, we do not care about them.
  SDValue ImplDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i32), 0);
  insertDAGNode(*CurDAG, SDValue(Node, 0), ImplDef);

  SDValue SRIdxVal = CurDAG->getTargetConstant(X86::sub_8bit, DL, MVT::i32);
  insertDAGNode(*CurDAG, SDValue(Node, 0), SRIdxVal);
  NBits = SDValue(
      CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::i32, ImplDef,
                             NBits, SRIdxVal), 0);
  insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);

  if (Subtarget->hasBMI2()) {
    // Great, just emit the BZHI.
    if (NVT != MVT::i32) {
      // But have to place the bit count into the wide-enough register first.
      NBits = CurDAG->getNode(ISD::ANY_EXTEND, DL, NVT, NBits);
      insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
    }

    SDValue Extract = CurDAG->getNode(X86ISD::BZHI, DL, NVT, X, NBits);
    ReplaceNode(Node, Extract.getNode());
    SelectCode(Extract.getNode());
    return true;
  }

  // Else, if we do *NOT* have BMI2, let's find out if the 'X' is
  // *logically* shifted (potentially with one-use trunc inbetween),
  // and the truncation was the only use of the shift,
  // and if so look past one-use truncation.
  {
    SDValue RealX = peekThroughOneUseTruncation(X);
    // FIXME: only if the shift is one-use?
    if (RealX != X && RealX.getOpcode() == ISD::SRL)
      X = RealX;
  }

  MVT XVT = X.getSimpleValueType();

  // Else, emitting BEXTR requires one more step.
  // The 'control' of BEXTR has the pattern of:
  // [15...8 bit][ 7...0 bit] location
  // [ bit count][     shift] name
  // I.e. 0b000000011'00000001 means  (x >> 0b1) & 0b11

  // Shift NBits left by 8 bits, thus producing 'control'.
  // This makes the low 8 bits to be zero.
  SDValue C8 = CurDAG->getConstant(8, DL, MVT::i8);
  SDValue Control = CurDAG->getNode(ISD::SHL, DL, MVT::i32, NBits, C8);
  insertDAGNode(*CurDAG, SDValue(Node, 0), Control);

  // If the 'X' is *logically* shifted, we can fold that shift into 'control'.
  // FIXME: only if the shift is one-use?
  if (X.getOpcode() == ISD::SRL) {
    SDValue ShiftAmt = X.getOperand(1);
    X = X.getOperand(0);

    assert(ShiftAmt.getValueType() == MVT::i8 &&
           "Expected shift amount to be i8");

    // Now, *zero*-extend the shift amount. The bits 8...15 *must* be zero!
    // We could zext to i16 in some form, but we intentionally don't do that.
    SDValue OrigShiftAmt = ShiftAmt;
    ShiftAmt = CurDAG->getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShiftAmt);
    insertDAGNode(*CurDAG, OrigShiftAmt, ShiftAmt);

    // And now 'or' these low 8 bits of shift amount into the 'control'.
    Control = CurDAG->getNode(ISD::OR, DL, MVT::i32, Control, ShiftAmt);
    insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
  }

  // But have to place the 'control' into the wide-enough register first.
  if (XVT != MVT::i32) {
    Control = CurDAG->getNode(ISD::ANY_EXTEND, DL, XVT, Control);
    insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
  }

  // And finally, form the BEXTR itself.
  SDValue Extract = CurDAG->getNode(X86ISD::BEXTR, DL, XVT, X, Control);

  // The 'X' was originally truncated. Do that now.
  if (XVT != NVT) {
    insertDAGNode(*CurDAG, SDValue(Node, 0), Extract);
    Extract = CurDAG->getNode(ISD::TRUNCATE, DL, NVT, Extract);
  }

  ReplaceNode(Node, Extract.getNode());
  SelectCode(Extract.getNode());

  return true;
}
3429
3430// See if this is an (X >> C1) & C2 that we can match to BEXTR/BEXTRI.
3431MachineSDNode *X86DAGToDAGISel::matchBEXTRFromAndImm(SDNode *Node) {
3432 MVT NVT = Node->getSimpleValueType(0);
3433 SDLoc dl(Node);
3434
3435 SDValue N0 = Node->getOperand(0);
3436 SDValue N1 = Node->getOperand(1);
3437
3438 // If we have TBM we can use an immediate for the control. If we have BMI
3439 // we should only do this if the BEXTR instruction is implemented well.
3440 // Otherwise moving the control into a register makes this more costly.
3441 // TODO: Maybe load folding, greater than 32-bit masks, or a guarantee of LICM
3442 // hoisting the move immediate would make it worthwhile with a less optimal
3443 // BEXTR?
3444 if (!Subtarget->hasTBM() &&
3445 !(Subtarget->hasBMI() && Subtarget->hasFastBEXTR()))
3446 return nullptr;
3447
3448 // Must have a shift right.
3449 if (N0->getOpcode() != ISD::SRL && N0->getOpcode() != ISD::SRA)
3450 return nullptr;
3451
3452 // Shift can't have additional users.
3453 if (!N0->hasOneUse())
3454 return nullptr;
3455
3456 // Only supported for 32 and 64 bits.
3457 if (NVT != MVT::i32 && NVT != MVT::i64)
3458 return nullptr;
3459
3460 // Shift amount and RHS of and must be constant.
3461 ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(N1);
3462 ConstantSDNode *ShiftCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
3463 if (!MaskCst || !ShiftCst)
3464 return nullptr;
3465
3466 // And RHS must be a mask.
3467 uint64_t Mask = MaskCst->getZExtValue();
3468 if (!isMask_64(Mask))
3469 return nullptr;
3470
3471 uint64_t Shift = ShiftCst->getZExtValue();
3472 uint64_t MaskSize = countPopulation(Mask);
3473
3474 // Don't interfere with something that can be handled by extracting AH.
3475 // TODO: If we are able to fold a load, BEXTR might still be better than AH.
3476 if (Shift == 8 && MaskSize == 8)
3477 return nullptr;
3478
3479 // Make sure we are only using bits that were in the original value, not
3480 // shifted in.
3481 if (Shift + MaskSize > NVT.getSizeInBits())
3482 return nullptr;
3483
3484 SDValue New = CurDAG->getTargetConstant(Shift | (MaskSize << 8), dl, NVT);
3485 unsigned ROpc = NVT == MVT::i64 ? X86::BEXTRI64ri : X86::BEXTRI32ri;
3486 unsigned MOpc = NVT == MVT::i64 ? X86::BEXTRI64mi : X86::BEXTRI32mi;
3487
3488 // BMI requires the immediate to placed in a register.
3489 if (!Subtarget->hasTBM()) {
3490 ROpc = NVT == MVT::i64 ? X86::BEXTR64rr : X86::BEXTR32rr;
3491 MOpc = NVT == MVT::i64 ? X86::BEXTR64rm : X86::BEXTR32rm;
3492 unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
3493 New = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, New), 0);
3494 }
3495
3496 MachineSDNode *NewNode;
3497 SDValue Input = N0->getOperand(0);
3498 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3499 if (tryFoldLoad(Node, N0.getNode(), Input, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3500 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, New, Input.getOperand(0) };
3501 SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
3502 NewNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3503 // Update the chain.
3504 ReplaceUses(Input.getValue(1), SDValue(NewNode, 2));
3505 // Record the mem-refs
3506 CurDAG->setNodeMemRefs(NewNode, {cast<LoadSDNode>(Input)->getMemOperand()});
3507 } else {
3508 NewNode = CurDAG->getMachineNode(ROpc, dl, NVT, MVT::i32, Input, New);
3509 }
3510
3511 return NewNode;
3512}
3513
3514// Emit a PCMISTR(I/M) instruction.
3515MachineSDNode *X86DAGToDAGISel::emitPCMPISTR(unsigned ROpc, unsigned MOpc,
3516 bool MayFoldLoad, const SDLoc &dl,
3517 MVT VT, SDNode *Node) {
3518 SDValue N0 = Node->getOperand(0);
3519 SDValue N1 = Node->getOperand(1);
3520 SDValue Imm = Node->getOperand(2);
3521 const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
3522 Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
3523
3524 // Try to fold a load. No need to check alignment.
3525 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3526 if (MayFoldLoad && tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3527 SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
3528 N1.getOperand(0) };
3529 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other);
3530 MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3531 // Update the chain.
3532 ReplaceUses(N1.getValue(1), SDValue(CNode, 2));
3533 // Record the mem-refs
3534 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
3535 return CNode;
3536 }
3537
3538 SDValue Ops[] = { N0, N1, Imm };
3539 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32);
3540 MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
3541 return CNode;
3542}
3543
3544// Emit a PCMESTR(I/M) instruction. Also return the Glue result in case we need
3545// to emit a second instruction after this one. This is needed since we have two
3546// copyToReg nodes glued before this and we need to continue that glue through.
3547MachineSDNode *X86DAGToDAGISel::emitPCMPESTR(unsigned ROpc, unsigned MOpc,
3548 bool MayFoldLoad, const SDLoc &dl,
3549 MVT VT, SDNode *Node,
3550 SDValue &InFlag) {
3551 SDValue N0 = Node->getOperand(0);
3552 SDValue N2 = Node->getOperand(2);
3553 SDValue Imm = Node->getOperand(4);
3554 const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
3555 Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
3556
3557 // Try to fold a load. No need to check alignment.
3558 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3559 if (MayFoldLoad && tryFoldLoad(Node, N2, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3560 SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
3561 N2.getOperand(0), InFlag };
3562 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other, MVT::Glue);
3563 MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3564 InFlag = SDValue(CNode, 3);
3565 // Update the chain.
3566 ReplaceUses(N2.getValue(1), SDValue(CNode, 2));
3567 // Record the mem-refs
3568 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N2)->getMemOperand()});
3569 return CNode;
3570 }
3571
3572 SDValue Ops[] = { N0, N2, Imm, InFlag };
3573 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Glue);
3574 MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
3575 InFlag = SDValue(CNode, 2);
3576 return CNode;
3577}
3578
// Try to simplify the shift amount of a scalar shift, exploiting the fact
// that x86 shifts implicitly mask the amount modulo the operand size:
//   shift X, (Y + k*Size)  -->  shift X, Y
//   shift X, (k*Size - Y)  -->  shift X, (0 - Y)   (NEG instead of SUB-imm)
// Returns true if the node was rewritten (and selection was completed here).
bool X86DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
  EVT VT = N->getValueType(0);

  // Only handle scalar shifts.
  if (VT.isVector())
    return false;

  // Narrower shifts only mask to 5 bits in hardware.
  unsigned Size = VT == MVT::i64 ? 64 : 32;

  SDValue OrigShiftAmt = N->getOperand(1);
  SDValue ShiftAmt = OrigShiftAmt;
  SDLoc DL(N);

  // Skip over a truncate of the shift amount.
  if (ShiftAmt->getOpcode() == ISD::TRUNCATE)
    ShiftAmt = ShiftAmt->getOperand(0);

  // This function is called after X86DAGToDAGISel::matchBitExtract(),
  // so we are not afraid that we might mess up BZHI/BEXTR pattern.

  SDValue NewShiftAmt;
  if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB) {
    SDValue Add0 = ShiftAmt->getOperand(0);
    SDValue Add1 = ShiftAmt->getOperand(1);
    // If we are shifting by X+/-N where N == 0 mod Size, then just shift by X
    // to avoid the ADD/SUB.
    if (isa<ConstantSDNode>(Add1) &&
        cast<ConstantSDNode>(Add1)->getZExtValue() % Size == 0) {
      NewShiftAmt = Add0;
      // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
      // to generate a NEG instead of a SUB of a constant.
    } else if (ShiftAmt->getOpcode() == ISD::SUB &&
               isa<ConstantSDNode>(Add0) &&
               cast<ConstantSDNode>(Add0)->getZExtValue() != 0 &&
               cast<ConstantSDNode>(Add0)->getZExtValue() % Size == 0) {
      // Insert a negate op.
      // TODO: This isn't guaranteed to replace the sub if there is a logic cone
      // that uses it that's not a shift.
      EVT SubVT = ShiftAmt.getValueType();
      SDValue Zero = CurDAG->getConstant(0, DL, SubVT);
      SDValue Neg = CurDAG->getNode(ISD::SUB, DL, SubVT, Zero, Add1);
      NewShiftAmt = Neg;

      // Insert these operands into a valid topological order so they can
      // get selected independently.
      insertDAGNode(*CurDAG, OrigShiftAmt, Zero);
      insertDAGNode(*CurDAG, OrigShiftAmt, Neg);
    } else
      return false;
  } else
    return false;

  if (NewShiftAmt.getValueType() != MVT::i8) {
    // Need to truncate the shift amount.
    NewShiftAmt = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NewShiftAmt);
    // Add to a correct topological ordering.
    insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
  }

  // Insert a new mask to keep the shift amount legal. This should be removed
  // by isel patterns.
  NewShiftAmt = CurDAG->getNode(ISD::AND, DL, MVT::i8, NewShiftAmt,
                                CurDAG->getConstant(Size - 1, DL, MVT::i8));
  // Place in a correct topological ordering.
  insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);

  SDNode *UpdatedNode = CurDAG->UpdateNodeOperands(N, N->getOperand(0),
                                                   NewShiftAmt);
  if (UpdatedNode != N) {
    // If we found an existing node, we should replace ourselves with that node
    // and wait for it to be selected after its other users.
    ReplaceNode(N, UpdatedNode);
    return true;
  }

  // If the original shift amount is now dead, delete it so that we don't run
  // it through isel.
  if (OrigShiftAmt.getNode()->use_empty())
    CurDAG->RemoveDeadNode(OrigShiftAmt.getNode());

  // Now that we've optimized the shift amount, defer to normal isel to get
  // load folding and legacy vs BMI2 selection without repeating it here.
  SelectCode(N);
  return true;
}
3665
// For operations of the form (x << C1) op C2, try to use a smaller immediate
// encoding for C2 by transforming it into (x op (C2 >> C1)) << C1.
// Returns true if the node was rewritten (and selection was completed here).
bool X86DAGToDAGISel::tryShrinkShlLogicImm(SDNode *N) {
  MVT NVT = N->getSimpleValueType(0);
  unsigned Opcode = N->getOpcode();
  SDLoc dl(N);

  // For operations of the form (x << C1) op C2, check if we can use a smaller
  // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
  SDValue Shift = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
  if (!Cst)
    return false;

  int64_t Val = Cst->getSExtValue();

  // If we have an any_extend feeding the AND, look through it to see if there
  // is a shift behind it. But only if the AND doesn't use the extended bits.
  // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
  bool FoundAnyExtend = false;
  if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
      Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
      isUInt<32>(Val)) {
    FoundAnyExtend = true;
    Shift = Shift.getOperand(0);
  }

  if (Shift.getOpcode() != ISD::SHL || !Shift.hasOneUse())
    return false;

  // i8 is unshrinkable, i16 should be promoted to i32.
  if (NVT != MVT::i32 && NVT != MVT::i64)
    return false;

  ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
  if (!ShlCst)
    return false;

  uint64_t ShAmt = ShlCst->getZExtValue();

  // Make sure that we don't change the operation by removing bits.
  // This only matters for OR and XOR, AND is unaffected.
  uint64_t RemovedBitsMask = (1ULL << ShAmt) - 1;
  if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
    return false;

  // Check the minimum bitwidth for the new constant.
  // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
  auto CanShrinkImmediate = [&](int64_t &ShiftedVal) {
    if (Opcode == ISD::AND) {
      // AND32ri is the same as AND64ri32 with zext imm.
      // Try this before sign extended immediates below.
      ShiftedVal = (uint64_t)Val >> ShAmt;
      if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
        return true;
      // Also swap order when the AND can become MOVZX.
      if (ShiftedVal == UINT8_MAX || ShiftedVal == UINT16_MAX)
        return true;
    }
    ShiftedVal = Val >> ShAmt;
    if ((!isInt<8>(Val) && isInt<8>(ShiftedVal)) ||
        (!isInt<32>(Val) && isInt<32>(ShiftedVal)))
      return true;
    if (Opcode != ISD::AND) {
      // MOV32ri+OR64r/XOR64r is cheaper than MOV64ri64+OR64rr/XOR64rr
      ShiftedVal = (uint64_t)Val >> ShAmt;
      if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
        return true;
    }
    return false;
  };

  int64_t ShiftedVal;
  if (!CanShrinkImmediate(ShiftedVal))
    return false;

  // Ok, we can reorder to get a smaller immediate.

  // But, it's possible the original immediate allowed an AND to become MOVZX.
  // Doing this late to avoid the MaskedValueIsZero call as long as possible.
  if (Opcode == ISD::AND) {
    // Find the smallest zext this could possibly be.
    unsigned ZExtWidth = Cst->getAPIntValue().getActiveBits();
    ZExtWidth = PowerOf2Ceil(std::max(ZExtWidth, 8U));

    // Figure out which bits need to be zero to achieve that mask.
    APInt NeededMask = APInt::getLowBitsSet(NVT.getSizeInBits(),
                                            ZExtWidth);
    NeededMask &= ~Cst->getAPIntValue();

    // If the operand already has those bits clear, the original AND can be a
    // MOVZX; keep it rather than reordering.
    if (CurDAG->MaskedValueIsZero(N->getOperand(0), NeededMask))
      return false;
  }

  SDValue X = Shift.getOperand(0);
  if (FoundAnyExtend) {
    // Re-materialize the any_extend we looked through, now below the shift.
    SDValue NewX = CurDAG->getNode(ISD::ANY_EXTEND, dl, NVT, X);
    insertDAGNode(*CurDAG, SDValue(N, 0), NewX);
    X = NewX;
  }

  // Build (x op (C2 >> C1)) << C1 and hand it back to normal selection.
  SDValue NewCst = CurDAG->getConstant(ShiftedVal, dl, NVT);
  insertDAGNode(*CurDAG, SDValue(N, 0), NewCst);
  SDValue NewBinOp = CurDAG->getNode(Opcode, dl, NVT, X, NewCst);
  insertDAGNode(*CurDAG, SDValue(N, 0), NewBinOp);
  SDValue NewSHL = CurDAG->getNode(ISD::SHL, dl, NVT, NewBinOp,
                                   Shift.getOperand(1));
  ReplaceNode(N, NewSHL.getNode());
  SelectCode(NewSHL.getNode());
  return true;
}
3778
/// Convert vector increment or decrement to sub/add with an all-ones constant:
/// add X, <1, 1...> --> sub X, <-1, -1...>
/// sub X, <1, 1...> --> add X, <-1, -1...>
/// The all-ones vector constant can be materialized using a pcmpeq instruction
/// that is commonly recognized as an idiom (has no register dependency), so
/// that's better/smaller than loading a splat 1 constant.
/// Returns true if the node was rewritten (and selection was completed here).
bool X86DAGToDAGISel::combineIncDecVector(SDNode *Node) {
  assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB) &&
         "Unexpected opcode for increment/decrement transform");

  EVT VT = Node->getValueType(0);
  assert(VT.isVector() && "Should only be called for vectors.");

  SDValue X = Node->getOperand(0);
  SDValue OneVec = Node->getOperand(1);

  // Only applies when the second operand is a splat of exactly 1.
  APInt SplatVal;
  if (!X86::isConstantSplat(OneVec, SplatVal) || !SplatVal.isOneValue())
    return false;

  SDLoc DL(Node);
  SDValue OneConstant, AllOnesVec;

  // Build the all-ones vector as a splat of 32-bit -1 elements and bitcast to
  // the original type; this is the form the pcmpeq idiom is recognized from.
  APInt Ones = APInt::getAllOnesValue(32);
  assert(VT.getSizeInBits() % 32 == 0 &&
         "Expected bit count to be a multiple of 32");
  OneConstant = CurDAG->getConstant(Ones, DL, MVT::i32);
  insertDAGNode(*CurDAG, X, OneConstant);

  unsigned NumElts = VT.getSizeInBits() / 32;
  assert(NumElts > 0 && "Expected to get non-empty vector.");
  AllOnesVec = CurDAG->getSplatBuildVector(MVT::getVectorVT(MVT::i32, NumElts),
                                           DL, OneConstant);
  insertDAGNode(*CurDAG, X, AllOnesVec);

  AllOnesVec = CurDAG->getBitcast(VT, AllOnesVec);
  insertDAGNode(*CurDAG, X, AllOnesVec);

  // Flip ADD <-> SUB: x + 1 == x - (-1), x - 1 == x + (-1).
  unsigned NewOpcode = Node->getOpcode() == ISD::ADD ? ISD::SUB : ISD::ADD;
  SDValue NewNode = CurDAG->getNode(NewOpcode, DL, VT, X, AllOnesVec);

  ReplaceNode(Node, NewNode.getNode());
  SelectCode(NewNode.getNode());
  return true;
}
3824
/// If the high bits of an 'and' operand are known zero, try setting the
/// high bits of an 'and' constant operand to produce a smaller encoding by
/// creating a small, sign-extended negative immediate rather than a large
/// positive one. This reverses a transform in SimplifyDemandedBits that
/// shrinks mask constants by clearing bits. There is also a possibility that
/// the 'and' mask can be made -1, so the 'and' itself is unnecessary. In that
/// case, just replace the 'and'. Return 'true' if the node is replaced.
bool X86DAGToDAGISel::shrinkAndImmediate(SDNode *And) {
  // i8 is unshrinkable, i16 should be promoted to i32, and vector ops don't
  // have immediate operands.
  MVT VT = And->getSimpleValueType(0);
  if (VT != MVT::i32 && VT != MVT::i64)
    return false;

  // Only an AND with a constant RHS can be rewritten this way.
  auto *And1C = dyn_cast<ConstantSDNode>(And->getOperand(1));
  if (!And1C)
    return false;

  // Bail out if the mask constant is already negative. It can't shrink more.
  // If the upper 32 bits of a 64 bit mask are all zeros, we have special isel
  // patterns to use a 32-bit and instead of a 64-bit and by relying on the
  // implicit zeroing of 32 bit ops. So we should check if the lower 32 bits
  // are negative too.
  APInt MaskVal = And1C->getAPIntValue();
  unsigned MaskLZ = MaskVal.countLeadingZeros();
  if (!MaskLZ || (VT == MVT::i64 && MaskLZ == 32))
    return false;

  // Don't extend into the upper 32 bits of a 64 bit mask. Work on the low
  // 32 bits only so the 32-bit isel patterns mentioned above still apply.
  if (VT == MVT::i64 && MaskLZ >= 32) {
    MaskLZ -= 32;
    MaskVal = MaskVal.trunc(32);
  }

  SDValue And0 = And->getOperand(0);
  // HighZeros marks the bits we intend to flip from 0 to 1 in the mask.
  APInt HighZeros = APInt::getHighBitsSet(MaskVal.getBitWidth(), MaskLZ);
  APInt NegMaskVal = MaskVal | HighZeros;

  // If a negative constant would not allow a smaller encoding, there's no need
  // to continue. Only change the constant when we know it's a win.
  unsigned MinWidth = NegMaskVal.getMinSignedBits();
  if (MinWidth > 32 || (MinWidth > 8 && MaskVal.getMinSignedBits() <= 32))
    return false;

  // Extend masks if we truncated above.
  if (VT == MVT::i64 && MaskVal.getBitWidth() < 64) {
    NegMaskVal = NegMaskVal.zext(64);
    HighZeros = HighZeros.zext(64);
  }

  // The variable operand must be all zeros in the top bits to allow using the
  // new, negative constant as the mask.
  if (!CurDAG->MaskedValueIsZero(And0, HighZeros))
    return false;

  // Check if the mask is -1. In that case, this is an unnecessary instruction
  // that escaped earlier analysis.
  if (NegMaskVal.isAllOnesValue()) {
    ReplaceNode(And, And0.getNode());
    return true;
  }

  // A negative mask allows a smaller encoding. Create a new 'and' node.
  SDValue NewMask = CurDAG->getConstant(NegMaskVal, SDLoc(And), VT);
  SDValue NewAnd = CurDAG->getNode(ISD::AND, SDLoc(And), VT, And0, NewMask);
  ReplaceNode(And, NewAnd.getNode());
  SelectCode(NewAnd.getNode());
  return true;
}
3894
/// Select the VPTESTM/VPTESTNM machine opcode for the given compare vector
/// type and addressing form. \p IsTestN picks VPTESTNM over VPTESTM,
/// \p FoldedLoad picks the full-width memory form ('rm'), \p FoldedBCast the
/// broadcast-memory form ('rmb'), and \p Masked the merge-masked variants
/// (the 'k' suffixed opcodes that take a mask register operand).
static unsigned getVPTESTMOpc(MVT TestVT, bool IsTestN, bool FoldedLoad,
                              bool FoldedBCast, bool Masked) {
  // Masked variants: same table shape as below, with a 'k' opcode suffix.
  if (Masked) {
    if (FoldedLoad) {
      switch (TestVT.SimpleTy) {
      default: llvm_unreachable("Unexpected VT!");
      case MVT::v16i8:
        return IsTestN ? X86::VPTESTNMBZ128rmk : X86::VPTESTMBZ128rmk;
      case MVT::v8i16:
        return IsTestN ? X86::VPTESTNMWZ128rmk : X86::VPTESTMWZ128rmk;
      case MVT::v4i32:
        return IsTestN ? X86::VPTESTNMDZ128rmk : X86::VPTESTMDZ128rmk;
      case MVT::v2i64:
        return IsTestN ? X86::VPTESTNMQZ128rmk : X86::VPTESTMQZ128rmk;
      case MVT::v32i8:
        return IsTestN ? X86::VPTESTNMBZ256rmk : X86::VPTESTMBZ256rmk;
      case MVT::v16i16:
        return IsTestN ? X86::VPTESTNMWZ256rmk : X86::VPTESTMWZ256rmk;
      case MVT::v8i32:
        return IsTestN ? X86::VPTESTNMDZ256rmk : X86::VPTESTMDZ256rmk;
      case MVT::v4i64:
        return IsTestN ? X86::VPTESTNMQZ256rmk : X86::VPTESTMQZ256rmk;
      case MVT::v64i8:
        return IsTestN ? X86::VPTESTNMBZrmk : X86::VPTESTMBZrmk;
      case MVT::v32i16:
        return IsTestN ? X86::VPTESTNMWZrmk : X86::VPTESTMWZrmk;
      case MVT::v16i32:
        return IsTestN ? X86::VPTESTNMDZrmk : X86::VPTESTMDZrmk;
      case MVT::v8i64:
        return IsTestN ? X86::VPTESTNMQZrmk : X86::VPTESTMQZrmk;
      }
    }

    // Broadcast forms only exist for 32/64-bit elements.
    if (FoldedBCast) {
      switch (TestVT.SimpleTy) {
      default: llvm_unreachable("Unexpected VT!");
      case MVT::v4i32:
        return IsTestN ? X86::VPTESTNMDZ128rmbk : X86::VPTESTMDZ128rmbk;
      case MVT::v2i64:
        return IsTestN ? X86::VPTESTNMQZ128rmbk : X86::VPTESTMQZ128rmbk;
      case MVT::v8i32:
        return IsTestN ? X86::VPTESTNMDZ256rmbk : X86::VPTESTMDZ256rmbk;
      case MVT::v4i64:
        return IsTestN ? X86::VPTESTNMQZ256rmbk : X86::VPTESTMQZ256rmbk;
      case MVT::v16i32:
        return IsTestN ? X86::VPTESTNMDZrmbk : X86::VPTESTMDZrmbk;
      case MVT::v8i64:
        return IsTestN ? X86::VPTESTNMQZrmbk : X86::VPTESTMQZrmbk;
      }
    }

    // Masked register-register form.
    switch (TestVT.SimpleTy) {
    default: llvm_unreachable("Unexpected VT!");
    case MVT::v16i8:
      return IsTestN ? X86::VPTESTNMBZ128rrk : X86::VPTESTMBZ128rrk;
    case MVT::v8i16:
      return IsTestN ? X86::VPTESTNMWZ128rrk : X86::VPTESTMWZ128rrk;
    case MVT::v4i32:
      return IsTestN ? X86::VPTESTNMDZ128rrk : X86::VPTESTMDZ128rrk;
    case MVT::v2i64:
      return IsTestN ? X86::VPTESTNMQZ128rrk : X86::VPTESTMQZ128rrk;
    case MVT::v32i8:
      return IsTestN ? X86::VPTESTNMBZ256rrk : X86::VPTESTMBZ256rrk;
    case MVT::v16i16:
      return IsTestN ? X86::VPTESTNMWZ256rrk : X86::VPTESTMWZ256rrk;
    case MVT::v8i32:
      return IsTestN ? X86::VPTESTNMDZ256rrk : X86::VPTESTMDZ256rrk;
    case MVT::v4i64:
      return IsTestN ? X86::VPTESTNMQZ256rrk : X86::VPTESTMQZ256rrk;
    case MVT::v64i8:
      return IsTestN ? X86::VPTESTNMBZrrk : X86::VPTESTMBZrrk;
    case MVT::v32i16:
      return IsTestN ? X86::VPTESTNMWZrrk : X86::VPTESTMWZrrk;
    case MVT::v16i32:
      return IsTestN ? X86::VPTESTNMDZrrk : X86::VPTESTMDZrrk;
    case MVT::v8i64:
      return IsTestN ? X86::VPTESTNMQZrrk : X86::VPTESTMQZrrk;
    }
  }

  // Unmasked variants below.
  if (FoldedLoad) {
    switch (TestVT.SimpleTy) {
    default: llvm_unreachable("Unexpected VT!");
    case MVT::v16i8:
      return IsTestN ? X86::VPTESTNMBZ128rm : X86::VPTESTMBZ128rm;
    case MVT::v8i16:
      return IsTestN ? X86::VPTESTNMWZ128rm : X86::VPTESTMWZ128rm;
    case MVT::v4i32:
      return IsTestN ? X86::VPTESTNMDZ128rm : X86::VPTESTMDZ128rm;
    case MVT::v2i64:
      return IsTestN ? X86::VPTESTNMQZ128rm : X86::VPTESTMQZ128rm;
    case MVT::v32i8:
      return IsTestN ? X86::VPTESTNMBZ256rm : X86::VPTESTMBZ256rm;
    case MVT::v16i16:
      return IsTestN ? X86::VPTESTNMWZ256rm : X86::VPTESTMWZ256rm;
    case MVT::v8i32:
      return IsTestN ? X86::VPTESTNMDZ256rm : X86::VPTESTMDZ256rm;
    case MVT::v4i64:
      return IsTestN ? X86::VPTESTNMQZ256rm : X86::VPTESTMQZ256rm;
    case MVT::v64i8:
      return IsTestN ? X86::VPTESTNMBZrm : X86::VPTESTMBZrm;
    case MVT::v32i16:
      return IsTestN ? X86::VPTESTNMWZrm : X86::VPTESTMWZrm;
    case MVT::v16i32:
      return IsTestN ? X86::VPTESTNMDZrm : X86::VPTESTMDZrm;
    case MVT::v8i64:
      return IsTestN ? X86::VPTESTNMQZrm : X86::VPTESTMQZrm;
    }
  }

  // Broadcast forms only exist for 32/64-bit elements.
  if (FoldedBCast) {
    switch (TestVT.SimpleTy) {
    default: llvm_unreachable("Unexpected VT!");
    case MVT::v4i32:
      return IsTestN ? X86::VPTESTNMDZ128rmb : X86::VPTESTMDZ128rmb;
    case MVT::v2i64:
      return IsTestN ? X86::VPTESTNMQZ128rmb : X86::VPTESTMQZ128rmb;
    case MVT::v8i32:
      return IsTestN ? X86::VPTESTNMDZ256rmb : X86::VPTESTMDZ256rmb;
    case MVT::v4i64:
      return IsTestN ? X86::VPTESTNMQZ256rmb : X86::VPTESTMQZ256rmb;
    case MVT::v16i32:
      return IsTestN ? X86::VPTESTNMDZrmb : X86::VPTESTMDZrmb;
    case MVT::v8i64:
      return IsTestN ? X86::VPTESTNMQZrmb : X86::VPTESTMQZrmb;
    }
  }

  // Unmasked register-register form.
  switch (TestVT.SimpleTy) {
  default: llvm_unreachable("Unexpected VT!");
  case MVT::v16i8:
    return IsTestN ? X86::VPTESTNMBZ128rr : X86::VPTESTMBZ128rr;
  case MVT::v8i16:
    return IsTestN ? X86::VPTESTNMWZ128rr : X86::VPTESTMWZ128rr;
  case MVT::v4i32:
    return IsTestN ? X86::VPTESTNMDZ128rr : X86::VPTESTMDZ128rr;
  case MVT::v2i64:
    return IsTestN ? X86::VPTESTNMQZ128rr : X86::VPTESTMQZ128rr;
  case MVT::v32i8:
    return IsTestN ? X86::VPTESTNMBZ256rr : X86::VPTESTMBZ256rr;
  case MVT::v16i16:
    return IsTestN ? X86::VPTESTNMWZ256rr : X86::VPTESTMWZ256rr;
  case MVT::v8i32:
    return IsTestN ? X86::VPTESTNMDZ256rr : X86::VPTESTMDZ256rr;
  case MVT::v4i64:
    return IsTestN ? X86::VPTESTNMQZ256rr : X86::VPTESTMQZ256rr;
  case MVT::v64i8:
    return IsTestN ? X86::VPTESTNMBZrr : X86::VPTESTMBZrr;
  case MVT::v32i16:
    return IsTestN ? X86::VPTESTNMWZrr : X86::VPTESTMWZrr;
  case MVT::v16i32:
    return IsTestN ? X86::VPTESTNMDZrr : X86::VPTESTMDZrr;
  case MVT::v8i64:
    return IsTestN ? X86::VPTESTNMQZrr : X86::VPTESTMQZrr;
  }
}
4051
4052// Try to create VPTESTM instruction. If InMask is not null, it will be used
4053// to form a masked operation.
4054bool X86DAGToDAGISel::tryVPTESTM(SDNode *Root, SDValue Setcc,
4055 SDValue InMask) {
4056 assert(Subtarget->hasAVX512() && "Expected AVX512!")((Subtarget->hasAVX512() && "Expected AVX512!") ? static_cast
<void> (0) : __assert_fail ("Subtarget->hasAVX512() && \"Expected AVX512!\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4056, __PRETTY_FUNCTION__))
;
1
'?' condition is true
4057 assert(Setcc.getSimpleValueType().getVectorElementType() == MVT::i1 &&((Setcc.getSimpleValueType().getVectorElementType() == MVT::i1
&& "Unexpected VT!") ? static_cast<void> (0) :
__assert_fail ("Setcc.getSimpleValueType().getVectorElementType() == MVT::i1 && \"Unexpected VT!\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4058, __PRETTY_FUNCTION__))
2
'?' condition is true
4058 "Unexpected VT!")((Setcc.getSimpleValueType().getVectorElementType() == MVT::i1
&& "Unexpected VT!") ? static_cast<void> (0) :
__assert_fail ("Setcc.getSimpleValueType().getVectorElementType() == MVT::i1 && \"Unexpected VT!\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4058, __PRETTY_FUNCTION__))
;
4059
4060 // Look for equal and not equal compares.
4061 ISD::CondCode CC = cast<CondCodeSDNode>(Setcc.getOperand(2))->get();
4062 if (CC != ISD::SETEQ && CC != ISD::SETNE)
3
Assuming 'CC' is equal to SETEQ
4063 return false;
4064
4065 // See if we're comparing against zero. This should have been canonicalized
4066 // to RHS during lowering.
4067 if (!ISD::isBuildVectorAllZeros(Setcc.getOperand(1).getNode()))
4
Assuming the condition is false
5
Taking false branch
4068 return false;
4069
4070 SDValue N0 = Setcc.getOperand(0);
4071
4072 MVT CmpVT = N0.getSimpleValueType();
4073 MVT CmpSVT = CmpVT.getVectorElementType();
4074
4075 // Start with both operands the same. We'll try to refine this.
4076 SDValue Src0 = N0;
4077 SDValue Src1 = N0;
4078
4079 {
4080 // Look through single use bitcasts.
4081 SDValue N0Temp = N0;
4082 if (N0Temp.getOpcode() == ISD::BITCAST && N0Temp.hasOneUse())
6
Assuming the condition is false
4083 N0Temp = N0.getOperand(0);
4084
4085 // Look for single use AND.
4086 if (N0Temp.getOpcode() == ISD::AND && N0Temp.hasOneUse()) {
7
Assuming the condition is true
8
Assuming the condition is true
9
Taking true branch
4087 Src0 = N0Temp.getOperand(0);
4088 Src1 = N0Temp.getOperand(1);
4089 }
4090 }
4091
4092 // Without VLX we need to widen the load.
4093 bool Widen = !Subtarget->hasVLX() && !CmpVT.is512BitVector();
10
Assuming the condition is false
4094
4095 // We can only fold loads if the sources are unique.
4096 bool CanFoldLoads = Src0 != Src1;
4097
4098 // Try to fold loads unless we need to widen.
4099 bool FoldedLoad = false;
4100 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Load;
4101 if (!Widen
10.1
'Widen' is false
10.1
'Widen' is false
10.1
'Widen' is false
&& CanFoldLoads
10.2
'CanFoldLoads' is true
10.2
'CanFoldLoads' is true
10.2
'CanFoldLoads' is true
) {
11
Taking true branch
4102 Load = Src1;
4103 FoldedLoad = tryFoldLoad(Root, N0.getNode(), Load, Tmp0, Tmp1, Tmp2, Tmp3,
4104 Tmp4);
4105 if (!FoldedLoad
11.1
'FoldedLoad' is false
11.1
'FoldedLoad' is false
11.1
'FoldedLoad' is false
) {
12
Taking true branch
4106 // And is computative.
4107 Load = Src0;
4108 FoldedLoad = tryFoldLoad(Root, N0.getNode(), Load, Tmp0, Tmp1, Tmp2,
4109 Tmp3, Tmp4);
4110 if (FoldedLoad
12.1
'FoldedLoad' is false
12.1
'FoldedLoad' is false
12.1
'FoldedLoad' is false
)
13
Taking false branch
4111 std::swap(Src0, Src1);
4112 }
4113 }
4114
4115 auto findBroadcastedOp = [](SDValue Src, MVT CmpSVT, SDNode *&Parent) {
4116 // Look through single use bitcasts.
4117 if (Src.getOpcode() == ISD::BITCAST && Src.hasOneUse())
17
Assuming the condition is false
4118 Src = Src.getOperand(0);
4119
4120 if (Src.getOpcode() == X86ISD::VBROADCAST && Src.hasOneUse()) {
18
Assuming the condition is true
19
Assuming the condition is true
20
Taking true branch
4121 Parent = Src.getNode();
4122 Src = Src.getOperand(0);
4123 if (Src.getSimpleValueType() == CmpSVT)
21
Taking true branch
4124 return Src;
22
Returning without writing to 'Parent'
4125 }
4126
4127 return SDValue();
4128 };
4129
4130 // If we didn't fold a load, try to match broadcast. No widening limitation
4131 // for this. But only 32 and 64 bit types are supported.
4132 bool FoldedBCast = false;
4133 if (!FoldedLoad
13.1
'FoldedLoad' is false
13.1
'FoldedLoad' is false
13.1
'FoldedLoad' is false
&& CanFoldLoads
13.2
'CanFoldLoads' is true
13.2
'CanFoldLoads' is true
13.2
'CanFoldLoads' is true
&&
14
Taking true branch
4134 (CmpSVT == MVT::i32 || CmpSVT == MVT::i64)) {
4135 SDNode *ParentNode = nullptr;
15
'ParentNode' initialized to a null pointer value
4136 if ((Load = findBroadcastedOp(Src1, CmpSVT, ParentNode))) {
16
Calling 'operator()'
23
Returning from 'operator()'
24
Calling 'SDValue::operator bool'
26
Returning from 'SDValue::operator bool'
27
Taking true branch
4137 FoldedBCast = tryFoldLoad(Root, ParentNode, Load, Tmp0,
28
Passing null pointer value via 2nd parameter 'P'
29
Calling 'X86DAGToDAGISel::tryFoldLoad'
4138 Tmp1, Tmp2, Tmp3, Tmp4);
4139 }
4140
4141 // Try the other operand.
4142 if (!FoldedBCast) {
4143 if ((Load = findBroadcastedOp(Src0, CmpSVT, ParentNode))) {
4144 FoldedBCast = tryFoldLoad(Root, ParentNode, Load, Tmp0,
4145 Tmp1, Tmp2, Tmp3, Tmp4);
4146 if (FoldedBCast)
4147 std::swap(Src0, Src1);
4148 }
4149 }
4150 }
4151
4152 auto getMaskRC = [](MVT MaskVT) {
4153 switch (MaskVT.SimpleTy) {
4154 default: llvm_unreachable("Unexpected VT!")::llvm::llvm_unreachable_internal("Unexpected VT!", "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4154)
;
4155 case MVT::v2i1: return X86::VK2RegClassID;
4156 case MVT::v4i1: return X86::VK4RegClassID;
4157 case MVT::v8i1: return X86::VK8RegClassID;
4158 case MVT::v16i1: return X86::VK16RegClassID;
4159 case MVT::v32i1: return X86::VK32RegClassID;
4160 case MVT::v64i1: return X86::VK64RegClassID;
4161 }
4162 };
4163
4164 bool IsMasked = InMask.getNode() != nullptr;
4165
4166 SDLoc dl(Root);
4167
4168 MVT ResVT = Setcc.getSimpleValueType();
4169 MVT MaskVT = ResVT;
4170 if (Widen) {
4171 // Widen the inputs using insert_subreg or copy_to_regclass.
4172 unsigned Scale = CmpVT.is128BitVector() ? 4 : 2;
4173 unsigned SubReg = CmpVT.is128BitVector() ? X86::sub_xmm : X86::sub_ymm;
4174 unsigned NumElts = CmpVT.getVectorNumElements() * Scale;
4175 CmpVT = MVT::getVectorVT(CmpSVT, NumElts);
4176 MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
4177 SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, dl,
4178 CmpVT), 0);
4179 Src0 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src0);
4180
4181 assert(!FoldedLoad && "Shouldn't have folded the load")((!FoldedLoad && "Shouldn't have folded the load") ? static_cast
<void> (0) : __assert_fail ("!FoldedLoad && \"Shouldn't have folded the load\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4181, __PRETTY_FUNCTION__))
;
4182 if (!FoldedBCast)
4183 Src1 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src1);
4184
4185 if (IsMasked) {
4186 // Widen the mask.
4187 unsigned RegClass = getMaskRC(MaskVT);
4188 SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
4189 InMask = SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
4190 dl, MaskVT, InMask, RC), 0);
4191 }
4192 }
4193
4194 bool IsTestN = CC == ISD::SETEQ;
4195 unsigned Opc = getVPTESTMOpc(CmpVT, IsTestN, FoldedLoad, FoldedBCast,
4196 IsMasked);
4197
4198 MachineSDNode *CNode;
4199 if (FoldedLoad || FoldedBCast) {
4200 SDVTList VTs = CurDAG->getVTList(MaskVT, MVT::Other);
4201
4202 if (IsMasked) {
4203 SDValue Ops[] = { InMask, Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
4204 Load.getOperand(0) };
4205 CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4206 } else {
4207 SDValue Ops[] = { Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
4208 Load.getOperand(0) };
4209 CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4210 }
4211
4212 // Update the chain.
4213 ReplaceUses(Load.getValue(1), SDValue(CNode, 1));
4214 // Record the mem-refs
4215 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(Load)->getMemOperand()});
4216 } else {
4217 if (IsMasked)
4218 CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, InMask, Src0, Src1);
4219 else
4220 CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, Src0, Src1);
4221 }
4222
4223 // If we widened, we need to shrink the mask VT.
4224 if (Widen) {
4225 unsigned RegClass = getMaskRC(ResVT);
4226 SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
4227 CNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
4228 dl, ResVT, SDValue(CNode, 0), RC);
4229 }
4230
4231 ReplaceUses(SDValue(Root, 0), SDValue(CNode, 0));
4232 CurDAG->RemoveDeadNode(Root);
4233 return true;
4234}
4235
4236void X86DAGToDAGISel::Select(SDNode *Node) {
4237 MVT NVT = Node->getSimpleValueType(0);
4238 unsigned Opcode = Node->getOpcode();
4239 SDLoc dl(Node);
4240
4241 if (Node->isMachineOpcode()) {
4242 LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("x86-isel")) { dbgs() << "== "; Node->dump(CurDAG);
dbgs() << '\n'; } } while (false)
;
4243 Node->setNodeId(-1);
4244 return; // Already selected.
4245 }
4246
4247 switch (Opcode) {
4248 default: break;
4249 case ISD::INTRINSIC_VOID: {
4250 unsigned IntNo = Node->getConstantOperandVal(1);
4251 switch (IntNo) {
4252 default: break;
4253 case Intrinsic::x86_sse3_monitor:
4254 case Intrinsic::x86_monitorx:
4255 case Intrinsic::x86_clzero: {
4256 bool Use64BitPtr = Node->getOperand(2).getValueType() == MVT::i64;
4257
4258 unsigned Opc = 0;
4259 switch (IntNo) {
4260 default: llvm_unreachable("Unexpected intrinsic!")::llvm::llvm_unreachable_internal("Unexpected intrinsic!", "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4260)
;
4261 case Intrinsic::x86_sse3_monitor:
4262 if (!Subtarget->hasSSE3())
4263 break;
4264 Opc = Use64BitPtr ? X86::MONITOR64rrr : X86::MONITOR32rrr;
4265 break;
4266 case Intrinsic::x86_monitorx:
4267 if (!Subtarget->hasMWAITX())
4268 break;
4269 Opc = Use64BitPtr ? X86::MONITORX64rrr : X86::MONITORX32rrr;
4270 break;
4271 case Intrinsic::x86_clzero:
4272 if (!Subtarget->hasCLZERO())
4273 break;
4274 Opc = Use64BitPtr ? X86::CLZERO64r : X86::CLZERO32r;
4275 break;
4276 }
4277
4278 if (Opc) {
4279 unsigned PtrReg = Use64BitPtr ? X86::RAX : X86::EAX;
4280 SDValue Chain = CurDAG->getCopyToReg(Node->getOperand(0), dl, PtrReg,
4281 Node->getOperand(2), SDValue());
4282 SDValue InFlag = Chain.getValue(1);
4283
4284 if (IntNo == Intrinsic::x86_sse3_monitor ||
4285 IntNo == Intrinsic::x86_monitorx) {
4286 // Copy the other two operands to ECX and EDX.
4287 Chain = CurDAG->getCopyToReg(Chain, dl, X86::ECX, Node->getOperand(3),
4288 InFlag);
4289 InFlag = Chain.getValue(1);
4290 Chain = CurDAG->getCopyToReg(Chain, dl, X86::EDX, Node->getOperand(4),
4291 InFlag);
4292 InFlag = Chain.getValue(1);
4293 }
4294
4295 MachineSDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
4296 { Chain, InFlag});
4297 ReplaceNode(Node, CNode);
4298 return;
4299 }
4300 }
4301 }
4302
4303 break;
4304 }
4305 case ISD::BRIND: {
4306 if (Subtarget->isTargetNaCl())
4307 // NaCl has its own pass where jmp %r32 are converted to jmp %r64. We
4308 // leave the instruction alone.
4309 break;
4310 if (Subtarget->isTarget64BitILP32()) {
4311 // Converts a 32-bit register to a 64-bit, zero-extended version of
4312 // it. This is needed because x86-64 can do many things, but jmp %r32
4313 // ain't one of them.
4314 const SDValue &Target = Node->getOperand(1);
4315 assert(Target.getSimpleValueType() == llvm::MVT::i32)((Target.getSimpleValueType() == llvm::MVT::i32) ? static_cast
<void> (0) : __assert_fail ("Target.getSimpleValueType() == llvm::MVT::i32"
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4315, __PRETTY_FUNCTION__))
;
4316 SDValue ZextTarget = CurDAG->getZExtOrTrunc(Target, dl, EVT(MVT::i64));
4317 SDValue Brind = CurDAG->getNode(ISD::BRIND, dl, MVT::Other,
4318 Node->getOperand(0), ZextTarget);
4319 ReplaceNode(Node, Brind.getNode());
4320 SelectCode(ZextTarget.getNode());
4321 SelectCode(Brind.getNode());
4322 return;
4323 }
4324 break;
4325 }
4326 case X86ISD::GlobalBaseReg:
4327 ReplaceNode(Node, getGlobalBaseReg());
4328 return;
4329
4330 case ISD::BITCAST:
4331 // Just drop all 128/256/512-bit bitcasts.
4332 if (NVT.is512BitVector() || NVT.is256BitVector() || NVT.is128BitVector() ||
4333 NVT == MVT::f128) {
4334 ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
4335 CurDAG->RemoveDeadNode(Node);
4336 return;
4337 }
4338 break;
4339
4340 case ISD::VSELECT: {
4341 // Replace VSELECT with non-mask conditions with with BLENDV.
4342 if (Node->getOperand(0).getValueType().getVectorElementType() == MVT::i1)
4343 break;
4344
4345 assert(Subtarget->hasSSE41() && "Expected SSE4.1 support!")((Subtarget->hasSSE41() && "Expected SSE4.1 support!"
) ? static_cast<void> (0) : __assert_fail ("Subtarget->hasSSE41() && \"Expected SSE4.1 support!\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4345, __PRETTY_FUNCTION__))
;
4346 SDValue Blendv = CurDAG->getNode(
4347 X86ISD::BLENDV, SDLoc(Node), Node->getValueType(0), Node->getOperand(0),
4348 Node->getOperand(1), Node->getOperand(2));
4349 ReplaceNode(Node, Blendv.getNode());
4350 SelectCode(Blendv.getNode());
4351 // We already called ReplaceUses.
4352 return;
4353 }
4354
4355 case ISD::SRL:
4356 if (matchBitExtract(Node))
4357 return;
4358 LLVM_FALLTHROUGH[[gnu::fallthrough]];
4359 case ISD::SRA:
4360 case ISD::SHL:
4361 if (tryShiftAmountMod(Node))
4362 return;
4363 break;
4364
4365 case ISD::AND:
4366 if (NVT.isVector() && NVT.getVectorElementType() == MVT::i1) {
4367 // Try to form a masked VPTESTM. Operands can be in either order.
4368 SDValue N0 = Node->getOperand(0);
4369 SDValue N1 = Node->getOperand(1);
4370 if (N0.getOpcode() == ISD::SETCC && N0.hasOneUse() &&
4371 tryVPTESTM(Node, N0, N1))
4372 return;
4373 if (N1.getOpcode() == ISD::SETCC && N1.hasOneUse() &&
4374 tryVPTESTM(Node, N1, N0))
4375 return;
4376 }
4377
4378 if (MachineSDNode *NewNode = matchBEXTRFromAndImm(Node)) {
4379 ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
4380 CurDAG->RemoveDeadNode(Node);
4381 return;
4382 }
4383 if (matchBitExtract(Node))
4384 return;
4385 if (AndImmShrink && shrinkAndImmediate(Node))
4386 return;
4387
4388 LLVM_FALLTHROUGH[[gnu::fallthrough]];
4389 case ISD::OR:
4390 case ISD::XOR:
4391 if (tryShrinkShlLogicImm(Node))
4392 return;
4393
4394 LLVM_FALLTHROUGH[[gnu::fallthrough]];
4395 case ISD::ADD:
4396 case ISD::SUB: {
4397 if ((Opcode == ISD::ADD || Opcode == ISD::SUB) && NVT.isVector() &&
4398 combineIncDecVector(Node))
4399 return;
4400
4401 // Try to avoid folding immediates with multiple uses for optsize.
4402 // This code tries to select to register form directly to avoid going
4403 // through the isel table which might fold the immediate. We can't change
4404 // the patterns on the add/sub/and/or/xor with immediate paterns in the
4405 // tablegen files to check immediate use count without making the patterns
4406 // unavailable to the fast-isel table.
4407 if (!OptForSize)
4408 break;
4409
4410 // Only handle i8/i16/i32/i64.
4411 if (NVT != MVT::i8 && NVT != MVT::i16 && NVT != MVT::i32 && NVT != MVT::i64)
4412 break;
4413
4414 SDValue N0 = Node->getOperand(0);
4415 SDValue N1 = Node->getOperand(1);
4416
4417 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
4418 if (!Cst)
4419 break;
4420
4421 int64_t Val = Cst->getSExtValue();
4422
4423 // Make sure its an immediate that is considered foldable.
4424 // FIXME: Handle unsigned 32 bit immediates for 64-bit AND.
4425 if (!isInt<8>(Val) && !isInt<32>(Val))
4426 break;
4427
4428 // If this can match to INC/DEC, let it go.
4429 if (Opcode == ISD::ADD && (Val == 1 || Val == -1))
4430 break;
4431
4432 // Check if we should avoid folding this immediate.
4433 if (!shouldAvoidImmediateInstFormsForSize(N1.getNode()))
4434 break;
4435
4436 // We should not fold the immediate. So we need a register form instead.
4437 unsigned ROpc, MOpc;
4438 switch (NVT.SimpleTy) {
4439 default: llvm_unreachable("Unexpected VT!")::llvm::llvm_unreachable_internal("Unexpected VT!", "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4439)
;
4440 case MVT::i8:
4441 switch (Opcode) {
4442 default: llvm_unreachable("Unexpected opcode!")::llvm::llvm_unreachable_internal("Unexpected opcode!", "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4442)
;
4443 case ISD::ADD: ROpc = X86::ADD8rr; MOpc = X86::ADD8rm; break;
4444 case ISD::SUB: ROpc = X86::SUB8rr; MOpc = X86::SUB8rm; break;
4445 case ISD::AND: ROpc = X86::AND8rr; MOpc = X86::AND8rm; break;
4446 case ISD::OR: ROpc = X86::OR8rr; MOpc = X86::OR8rm; break;
4447 case ISD::XOR: ROpc = X86::XOR8rr; MOpc = X86::XOR8rm; break;
4448 }
4449 break;
4450 case MVT::i16:
4451 switch (Opcode) {
4452 default: llvm_unreachable("Unexpected opcode!")::llvm::llvm_unreachable_internal("Unexpected opcode!", "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4452)
;
4453 case ISD::ADD: ROpc = X86::ADD16rr; MOpc = X86::ADD16rm; break;
4454 case ISD::SUB: ROpc = X86::SUB16rr; MOpc = X86::SUB16rm; break;
4455 case ISD::AND: ROpc = X86::AND16rr; MOpc = X86::AND16rm; break;
4456 case ISD::OR: ROpc = X86::OR16rr; MOpc = X86::OR16rm; break;
4457 case ISD::XOR: ROpc = X86::XOR16rr; MOpc = X86::XOR16rm; break;
4458 }
4459 break;
4460 case MVT::i32:
4461 switch (Opcode) {
4462 default: llvm_unreachable("Unexpected opcode!")::llvm::llvm_unreachable_internal("Unexpected opcode!", "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4462)
;
4463 case ISD::ADD: ROpc = X86::ADD32rr; MOpc = X86::ADD32rm; break;
4464 case ISD::SUB: ROpc = X86::SUB32rr; MOpc = X86::SUB32rm; break;
4465 case ISD::AND: ROpc = X86::AND32rr; MOpc = X86::AND32rm; break;
4466 case ISD::OR: ROpc = X86::OR32rr; MOpc = X86::OR32rm; break;
4467 case ISD::XOR: ROpc = X86::XOR32rr; MOpc = X86::XOR32rm; break;
4468 }
4469 break;
4470 case MVT::i64:
4471 switch (Opcode) {
4472 default: llvm_unreachable("Unexpected opcode!")::llvm::llvm_unreachable_internal("Unexpected opcode!", "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4472)
;
4473 case ISD::ADD: ROpc = X86::ADD64rr; MOpc = X86::ADD64rm; break;
4474 case ISD::SUB: ROpc = X86::SUB64rr; MOpc = X86::SUB64rm; break;
4475 case ISD::AND: ROpc = X86::AND64rr; MOpc = X86::AND64rm; break;
4476 case ISD::OR: ROpc = X86::OR64rr; MOpc = X86::OR64rm; break;
4477 case ISD::XOR: ROpc = X86::XOR64rr; MOpc = X86::XOR64rm; break;
4478 }
4479 break;
4480 }
4481
4482 // Ok this is a AND/OR/XOR/ADD/SUB with constant.
4483
4484 // If this is a not a subtract, we can still try to fold a load.
4485 if (Opcode != ISD::SUB) {
4486 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4487 if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4488 SDValue Ops[] = { N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
4489 SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
4490 MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4491 // Update the chain.
4492 ReplaceUses(N0.getValue(1), SDValue(CNode, 2));
4493 // Record the mem-refs
4494 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N0)->getMemOperand()});
4495 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
4496 CurDAG->RemoveDeadNode(Node);
4497 return;
4498 }
4499 }
4500
4501 CurDAG->SelectNodeTo(Node, ROpc, NVT, MVT::i32, N0, N1);
4502 return;
4503 }
4504
4505 case X86ISD::SMUL:
4506 // i16/i32/i64 are handled with isel patterns.
4507 if (NVT != MVT::i8)
4508 break;
4509 LLVM_FALLTHROUGH[[gnu::fallthrough]];
4510 case X86ISD::UMUL: {
4511 SDValue N0 = Node->getOperand(0);
4512 SDValue N1 = Node->getOperand(1);
4513
4514 unsigned LoReg, ROpc, MOpc;
4515 switch (NVT.SimpleTy) {
4516 default: llvm_unreachable("Unsupported VT!")::llvm::llvm_unreachable_internal("Unsupported VT!", "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4516)
;
4517 case MVT::i8:
4518 LoReg = X86::AL;
4519 ROpc = Opcode == X86ISD::SMUL ? X86::IMUL8r : X86::MUL8r;
4520 MOpc = Opcode == X86ISD::SMUL ? X86::IMUL8m : X86::MUL8m;
4521 break;
4522 case MVT::i16:
4523 LoReg = X86::AX;
4524 ROpc = X86::MUL16r;
4525 MOpc = X86::MUL16m;
4526 break;
4527 case MVT::i32:
4528 LoReg = X86::EAX;
4529 ROpc = X86::MUL32r;
4530 MOpc = X86::MUL32m;
4531 break;
4532 case MVT::i64:
4533 LoReg = X86::RAX;
4534 ROpc = X86::MUL64r;
4535 MOpc = X86::MUL64m;
4536 break;
4537 }
4538
4539 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4540 bool FoldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4541 // Multiply is commmutative.
4542 if (!FoldedLoad) {
4543 FoldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4544 if (FoldedLoad)
4545 std::swap(N0, N1);
4546 }
4547
4548 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
4549 N0, SDValue()).getValue(1);
4550
4551 MachineSDNode *CNode;
4552 if (FoldedLoad) {
4553 // i16/i32/i64 use an instruction that produces a low and high result even
4554 // though only the low result is used.
4555 SDVTList VTs;
4556 if (NVT == MVT::i8)
4557 VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
4558 else
4559 VTs = CurDAG->getVTList(NVT, NVT, MVT::i32, MVT::Other);
4560
4561 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
4562 InFlag };
4563 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4564
4565 // Update the chain.
4566 ReplaceUses(N1.getValue(1), SDValue(CNode, NVT == MVT::i8 ? 2 : 3));
4567 // Record the mem-refs
4568 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
4569 } else {
4570 // i16/i32/i64 use an instruction that produces a low and high result even
4571 // though only the low result is used.
4572 SDVTList VTs;
4573 if (NVT == MVT::i8)
4574 VTs = CurDAG->getVTList(NVT, MVT::i32);
4575 else
4576 VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
4577
4578 CNode = CurDAG->getMachineNode(ROpc, dl, VTs, {N1, InFlag});
4579 }
4580
4581 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
4582 ReplaceUses(SDValue(Node, 1), SDValue(CNode, NVT == MVT::i8 ? 1 : 2));
4583 CurDAG->RemoveDeadNode(Node);
4584 return;
4585 }
4586
4587 case ISD::SMUL_LOHI:
4588 case ISD::UMUL_LOHI: {
4589 SDValue N0 = Node->getOperand(0);
4590 SDValue N1 = Node->getOperand(1);
4591
4592 unsigned Opc, MOpc;
4593 bool isSigned = Opcode == ISD::SMUL_LOHI;
4594 if (!isSigned) {
4595 switch (NVT.SimpleTy) {
4596 default: llvm_unreachable("Unsupported VT!")::llvm::llvm_unreachable_internal("Unsupported VT!", "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4596)
;
4597 case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
4598 case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
4599 }
4600 } else {
4601 switch (NVT.SimpleTy) {
4602 default: llvm_unreachable("Unsupported VT!")::llvm::llvm_unreachable_internal("Unsupported VT!", "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4602)
;
4603 case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
4604 case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
4605 }
4606 }
4607
4608 unsigned SrcReg, LoReg, HiReg;
4609 switch (Opc) {
4610 default: llvm_unreachable("Unknown MUL opcode!")::llvm::llvm_unreachable_internal("Unknown MUL opcode!", "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4610)
;
4611 case X86::IMUL32r:
4612 case X86::MUL32r:
4613 SrcReg = LoReg = X86::EAX; HiReg = X86::EDX;
4614 break;
4615 case X86::IMUL64r:
4616 case X86::MUL64r:
4617 SrcReg = LoReg = X86::RAX; HiReg = X86::RDX;
4618 break;
4619 }
4620
4621 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4622 bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4623 // Multiply is commmutative.
4624 if (!foldedLoad) {
4625 foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4626 if (foldedLoad)
4627 std::swap(N0, N1);
4628 }
4629
4630 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
4631 N0, SDValue()).getValue(1);
4632 if (foldedLoad) {
4633 SDValue Chain;
4634 MachineSDNode *CNode = nullptr;
4635 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
4636 InFlag };
4637 SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
4638 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4639 Chain = SDValue(CNode, 0);
4640 InFlag = SDValue(CNode, 1);
4641
4642 // Update the chain.
4643 ReplaceUses(N1.getValue(1), Chain);
4644 // Record the mem-refs
4645 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
4646 } else {
4647 SDValue Ops[] = { N1, InFlag };
4648 SDVTList VTs = CurDAG->getVTList(MVT::Glue);
4649 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4650 InFlag = SDValue(CNode, 0);
4651 }
4652
4653 // Copy the low half of the result, if it is needed.
4654 if (!SDValue(Node, 0).use_empty()) {
4655 assert(LoReg && "Register for low half is not defined!")((LoReg && "Register for low half is not defined!") ?
static_cast<void> (0) : __assert_fail ("LoReg && \"Register for low half is not defined!\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4655, __PRETTY_FUNCTION__))
;
4656 SDValue ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg,
4657 NVT, InFlag);
4658 InFlag = ResLo.getValue(2);
4659 ReplaceUses(SDValue(Node, 0), ResLo);
4660 LLVM_DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG);do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("x86-isel")) { dbgs() << "=> "; ResLo.getNode()->
dump(CurDAG); dbgs() << '\n'; } } while (false)
4661 dbgs() << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("x86-isel")) { dbgs() << "=> "; ResLo.getNode()->
dump(CurDAG); dbgs() << '\n'; } } while (false)
;
4662 }
4663 // Copy the high half of the result, if it is needed.
4664 if (!SDValue(Node, 1).use_empty()) {
4665 assert(HiReg && "Register for high half is not defined!")((HiReg && "Register for high half is not defined!") ?
static_cast<void> (0) : __assert_fail ("HiReg && \"Register for high half is not defined!\""
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4665, __PRETTY_FUNCTION__))
;
4666 SDValue ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg,
4667 NVT, InFlag);
4668 InFlag = ResHi.getValue(2);
4669 ReplaceUses(SDValue(Node, 1), ResHi);
4670 LLVM_DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG);do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("x86-isel")) { dbgs() << "=> "; ResHi.getNode()->
dump(CurDAG); dbgs() << '\n'; } } while (false)
4671 dbgs() << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("x86-isel")) { dbgs() << "=> "; ResHi.getNode()->
dump(CurDAG); dbgs() << '\n'; } } while (false)
;
4672 }
4673
4674 CurDAG->RemoveDeadNode(Node);
4675 return;
4676 }
4677
4678 case ISD::SDIVREM:
4679 case ISD::UDIVREM: {
4680 SDValue N0 = Node->getOperand(0);
4681 SDValue N1 = Node->getOperand(1);
4682
4683 unsigned Opc, MOpc;
4684 bool isSigned = Opcode == ISD::SDIVREM;
4685 if (!isSigned) {
4686 switch (NVT.SimpleTy) {
4687 default: llvm_unreachable("Unsupported VT!")::llvm::llvm_unreachable_internal("Unsupported VT!", "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4687)
;
4688 case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
4689 case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
4690 case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
4691 case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
4692 }
4693 } else {
4694 switch (NVT.SimpleTy) {
4695 default: llvm_unreachable("Unsupported VT!")::llvm::llvm_unreachable_internal("Unsupported VT!", "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4695)
;
4696 case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
4697 case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
4698 case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
4699 case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
4700 }
4701 }
4702
4703 unsigned LoReg, HiReg, ClrReg;
4704 unsigned SExtOpcode;
4705 switch (NVT.SimpleTy) {
4706 default: llvm_unreachable("Unsupported VT!")::llvm::llvm_unreachable_internal("Unsupported VT!", "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4706)
;
4707 case MVT::i8:
4708 LoReg = X86::AL; ClrReg = HiReg = X86::AH;
4709 SExtOpcode = 0; // Not used.
4710 break;
4711 case MVT::i16:
4712 LoReg = X86::AX; HiReg = X86::DX;
4713 ClrReg = X86::DX;
4714 SExtOpcode = X86::CWD;
4715 break;
4716 case MVT::i32:
4717 LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
4718 SExtOpcode = X86::CDQ;
4719 break;
4720 case MVT::i64:
4721 LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
4722 SExtOpcode = X86::CQO;
4723 break;
4724 }
4725
4726 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4727 bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4728 bool signBitIsZero = CurDAG->SignBitIsZero(N0);
4729
4730 SDValue InFlag;
4731 if (NVT == MVT::i8) {
4732 // Special case for div8, just use a move with zero extension to AX to
4733 // clear the upper 8 bits (AH).
4734 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain;
4735 MachineSDNode *Move;
4736 if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4737 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
4738 unsigned Opc = (isSigned && !signBitIsZero) ? X86::MOVSX16rm8
4739 : X86::MOVZX16rm8;
4740 Move = CurDAG->getMachineNode(Opc, dl, MVT::i16, MVT::Other, Ops);
4741 Chain = SDValue(Move, 1);
4742 ReplaceUses(N0.getValue(1), Chain);
4743 // Record the mem-refs
4744 CurDAG->setNodeMemRefs(Move, {cast<LoadSDNode>(N0)->getMemOperand()});
4745 } else {
4746 unsigned Opc = (isSigned && !signBitIsZero) ? X86::MOVSX16rr8
4747 : X86::MOVZX16rr8;
4748 Move = CurDAG->getMachineNode(Opc, dl, MVT::i16, N0);
4749 Chain = CurDAG->getEntryNode();
4750 }
4751 Chain = CurDAG->getCopyToReg(Chain, dl, X86::AX, SDValue(Move, 0),
4752 SDValue());
4753 InFlag = Chain.getValue(1);
4754 } else {
4755 InFlag =
4756 CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
4757 LoReg, N0, SDValue()).getValue(1);
4758 if (isSigned && !signBitIsZero) {
4759 // Sign extend the low part into the high part.
4760 InFlag =
4761 SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
4762 } else {
4763 // Zero out the high part, effectively zero extending the input.
4764 SDValue ClrNode = SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, NVT), 0);
4765 switch (NVT.SimpleTy) {
4766 case MVT::i16:
4767 ClrNode =
4768 SDValue(CurDAG->getMachineNode(
4769 TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
4770 CurDAG->getTargetConstant(X86::sub_16bit, dl,
4771 MVT::i32)),
4772 0);
4773 break;
4774 case MVT::i32:
4775 break;
4776 case MVT::i64:
4777 ClrNode =
4778 SDValue(CurDAG->getMachineNode(
4779 TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
4780 CurDAG->getTargetConstant(0, dl, MVT::i64), ClrNode,
4781 CurDAG->getTargetConstant(X86::sub_32bit, dl,
4782 MVT::i32)),
4783 0);
4784 break;
4785 default:
4786 llvm_unreachable("Unexpected division source")::llvm::llvm_unreachable_internal("Unexpected division source"
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 4786)
;
4787 }
4788
4789 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
4790 ClrNode, InFlag).getValue(1);
4791 }
4792 }
4793
4794 if (foldedLoad) {
4795 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
4796 InFlag };
4797 MachineSDNode *CNode =
4798 CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops);
4799 InFlag = SDValue(CNode, 1);
4800 // Update the chain.
4801 ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
4802 // Record the mem-refs
4803 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
4804 } else {
4805 InFlag =
4806 SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
4807 }
4808
4809 // Prevent use of AH in a REX instruction by explicitly copying it to
4810 // an ABCD_L register.
4811 //
4812 // The current assumption of the register allocator is that isel
4813 // won't generate explicit references to the GR8_ABCD_H registers. If
4814 // the allocator and/or the backend get enhanced to be more robust in
4815 // that regard, this can be, and should be, removed.
4816 if (HiReg == X86::AH && !SDValue(Node, 1).use_empty()) {
4817 SDValue AHCopy = CurDAG->getRegister(X86::AH, MVT::i8);
4818 unsigned AHExtOpcode =
4819 isSigned ? X86::MOVSX32rr8_NOREX : X86::MOVZX32rr8_NOREX;
4820
4821 SDNode *RNode = CurDAG->getMachineNode(AHExtOpcode, dl, MVT::i32,
4822 MVT::Glue, AHCopy, InFlag);
4823 SDValue Result(RNode, 0);
4824 InFlag = SDValue(RNode, 1);
4825
4826 Result =
4827 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result);
4828
4829 ReplaceUses(SDValue(Node, 1), Result);
4830 LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("x86-isel")) { dbgs() << "=> "; Result.getNode()->
dump(CurDAG); dbgs() << '\n'; } } while (false)
4831 dbgs() << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("x86-isel")) { dbgs() << "=> "; Result.getNode()->
dump(CurDAG); dbgs() << '\n'; } } while (false)
;
4832 }
4833 // Copy the division (low) result, if it is needed.
4834 if (!SDValue(Node, 0).use_empty()) {
4835 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
4836 LoReg, NVT, InFlag);
4837 InFlag = Result.getValue(2);
4838 ReplaceUses(SDValue(Node, 0), Result);
4839 LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("x86-isel")) { dbgs() << "=> "; Result.getNode()->
dump(CurDAG); dbgs() << '\n'; } } while (false)
4840 dbgs() << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("x86-isel")) { dbgs() << "=> "; Result.getNode()->
dump(CurDAG); dbgs() << '\n'; } } while (false)
;
4841 }
4842 // Copy the remainder (high) result, if it is needed.
4843 if (!SDValue(Node, 1).use_empty()) {
4844 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
4845 HiReg, NVT, InFlag);
4846 InFlag = Result.getValue(2);
4847 ReplaceUses(SDValue(Node, 1), Result);
4848 LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("x86-isel")) { dbgs() << "=> "; Result.getNode()->
dump(CurDAG); dbgs() << '\n'; } } while (false)
4849 dbgs() << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("x86-isel")) { dbgs() << "=> "; Result.getNode()->
dump(CurDAG); dbgs() << '\n'; } } while (false)
;
4850 }
4851 CurDAG->RemoveDeadNode(Node);
4852 return;
4853 }
4854
4855 case X86ISD::CMP: {
4856 SDValue N0 = Node->getOperand(0);
4857 SDValue N1 = Node->getOperand(1);
4858
4859 // Optimizations for TEST compares.
4860 if (!isNullConstant(N1))
4861 break;
4862
4863 // Save the original VT of the compare.
4864 MVT CmpVT = N0.getSimpleValueType();
4865
4866 // If we are comparing (and (shr X, C, Mask) with 0, emit a BEXTR followed
4867 // by a test instruction. The test should be removed later by
4868 // analyzeCompare if we are using only the zero flag.
4869 // TODO: Should we check the users and use the BEXTR flags directly?
4870 if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
4871 if (MachineSDNode *NewNode = matchBEXTRFromAndImm(N0.getNode())) {
4872 unsigned TestOpc = CmpVT == MVT::i64 ? X86::TEST64rr
4873 : X86::TEST32rr;
4874 SDValue BEXTR = SDValue(NewNode, 0);
4875 NewNode = CurDAG->getMachineNode(TestOpc, dl, MVT::i32, BEXTR, BEXTR);
4876 ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
4877 CurDAG->RemoveDeadNode(Node);
4878 return;
4879 }
4880 }
4881
4882 // We can peek through truncates, but we need to be careful below.
4883 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse())
4884 N0 = N0.getOperand(0);
4885
4886 // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
4887 // use a smaller encoding.
4888 // Look past the truncate if CMP is the only use of it.
4889 if (N0.getOpcode() == ISD::AND &&
4890 N0.getNode()->hasOneUse() &&
4891 N0.getValueType() != MVT::i8) {
4892 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
4893 if (!C) break;
4894 uint64_t Mask = C->getZExtValue();
4895
4896 // Check if we can replace AND+IMM64 with a shift. This is possible for
4897 // masks/ like 0xFF000000 or 0x00FFFFFF and if we care only about the zero
4898 // flag.
4899 if (CmpVT == MVT::i64 && !isInt<32>(Mask) &&
4900 onlyUsesZeroFlag(SDValue(Node, 0))) {
4901 if (isMask_64(~Mask)) {
4902 unsigned TrailingZeros = countTrailingZeros(Mask);
4903 SDValue Imm = CurDAG->getTargetConstant(TrailingZeros, dl, MVT::i64);
4904 SDValue Shift =
4905 SDValue(CurDAG->getMachineNode(X86::SHR64ri, dl, MVT::i64, MVT::i32,
4906 N0.getOperand(0), Imm), 0);
4907 MachineSDNode *Test = CurDAG->getMachineNode(X86::TEST64rr, dl,
4908 MVT::i32, Shift, Shift);
4909 ReplaceNode(Node, Test);
4910 return;
4911 }
4912 if (isMask_64(Mask)) {
4913 unsigned LeadingZeros = countLeadingZeros(Mask);
4914 SDValue Imm = CurDAG->getTargetConstant(LeadingZeros, dl, MVT::i64);
4915 SDValue Shift =
4916 SDValue(CurDAG->getMachineNode(X86::SHL64ri, dl, MVT::i64, MVT::i32,
4917 N0.getOperand(0), Imm), 0);
4918 MachineSDNode *Test = CurDAG->getMachineNode(X86::TEST64rr, dl,
4919 MVT::i32, Shift, Shift);
4920 ReplaceNode(Node, Test);
4921 return;
4922 }
4923 }
4924
4925 MVT VT;
4926 int SubRegOp;
4927 unsigned ROpc, MOpc;
4928
4929 // For each of these checks we need to be careful if the sign flag is
4930 // being used. It is only safe to use the sign flag in two conditions,
4931 // either the sign bit in the shrunken mask is zero or the final test
4932 // size is equal to the original compare size.
4933
4934 if (isUInt<8>(Mask) &&
4935 (!(Mask & 0x80) || CmpVT == MVT::i8 ||
4936 hasNoSignFlagUses(SDValue(Node, 0)))) {
4937 // For example, convert "testl %eax, $8" to "testb %al, $8"
4938 VT = MVT::i8;
4939 SubRegOp = X86::sub_8bit;
4940 ROpc = X86::TEST8ri;
4941 MOpc = X86::TEST8mi;
4942 } else if (OptForMinSize && isUInt<16>(Mask) &&
4943 (!(Mask & 0x8000) || CmpVT == MVT::i16 ||
4944 hasNoSignFlagUses(SDValue(Node, 0)))) {
4945 // For example, "testl %eax, $32776" to "testw %ax, $32776".
4946 // NOTE: We only want to form TESTW instructions if optimizing for
4947 // min size. Otherwise we only save one byte and possibly get a length
4948 // changing prefix penalty in the decoders.
4949 VT = MVT::i16;
4950 SubRegOp = X86::sub_16bit;
4951 ROpc = X86::TEST16ri;
4952 MOpc = X86::TEST16mi;
4953 } else if (isUInt<32>(Mask) && N0.getValueType() != MVT::i16 &&
4954 ((!(Mask & 0x80000000) &&
4955 // Without minsize 16-bit Cmps can get here so we need to
4956 // be sure we calculate the correct sign flag if needed.
4957 (CmpVT != MVT::i16 || !(Mask & 0x8000))) ||
4958 CmpVT == MVT::i32 ||
4959 hasNoSignFlagUses(SDValue(Node, 0)))) {
4960 // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
4961 // NOTE: We only want to run that transform if N0 is 32 or 64 bits.
4962 // Otherwize, we find ourselves in a position where we have to do
4963 // promotion. If previous passes did not promote the and, we assume
4964 // they had a good reason not to and do not promote here.
4965 VT = MVT::i32;
4966 SubRegOp = X86::sub_32bit;
4967 ROpc = X86::TEST32ri;
4968 MOpc = X86::TEST32mi;
4969 } else {
4970 // No eligible transformation was found.
4971 break;
4972 }
4973
4974 SDValue Imm = CurDAG->getTargetConstant(Mask, dl, VT);
4975 SDValue Reg = N0.getOperand(0);
4976
4977 // Emit a testl or testw.
4978 MachineSDNode *NewNode;
4979 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4980 if (tryFoldLoad(Node, N0.getNode(), Reg, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4981 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
4982 Reg.getOperand(0) };
4983 NewNode = CurDAG->getMachineNode(MOpc, dl, MVT::i32, MVT::Other, Ops);
4984 // Update the chain.
4985 ReplaceUses(Reg.getValue(1), SDValue(NewNode, 1));
4986 // Record the mem-refs
4987 CurDAG->setNodeMemRefs(NewNode,
4988 {cast<LoadSDNode>(Reg)->getMemOperand()});
4989 } else {
4990 // Extract the subregister if necessary.
4991 if (N0.getValueType() != VT)
4992 Reg = CurDAG->getTargetExtractSubreg(SubRegOp, dl, VT, Reg);
4993
4994 NewNode = CurDAG->getMachineNode(ROpc, dl, MVT::i32, Reg, Imm);
4995 }
4996 // Replace CMP with TEST.
4997 ReplaceNode(Node, NewNode);
4998 return;
4999 }
5000 break;
5001 }
5002 case X86ISD::PCMPISTR: {
5003 if (!Subtarget->hasSSE42())
5004 break;
5005
5006 bool NeedIndex = !SDValue(Node, 0).use_empty();
5007 bool NeedMask = !SDValue(Node, 1).use_empty();
5008 // We can't fold a load if we are going to make two instructions.
5009 bool MayFoldLoad = !NeedIndex || !NeedMask;
5010
5011 MachineSDNode *CNode;
5012 if (NeedMask) {
5013 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrr : X86::PCMPISTRMrr;
5014 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrm : X86::PCMPISTRMrm;
5015 CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node);
5016 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0));
5017 }
5018 if (NeedIndex || !NeedMask) {
5019 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrr : X86::PCMPISTRIrr;
5020 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrm : X86::PCMPISTRIrm;
5021 CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node);
5022 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
5023 }
5024
5025 // Connect the flag usage to the last instruction created.
5026 ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1));
5027 CurDAG->RemoveDeadNode(Node);
5028 return;
5029 }
5030 case X86ISD::PCMPESTR: {
5031 if (!Subtarget->hasSSE42())
5032 break;
5033
5034 // Copy the two implicit register inputs.
5035 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EAX,
5036 Node->getOperand(1),
5037 SDValue()).getValue(1);
5038 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EDX,
5039 Node->getOperand(3), InFlag).getValue(1);
5040
5041 bool NeedIndex = !SDValue(Node, 0).use_empty();
5042 bool NeedMask = !SDValue(Node, 1).use_empty();
5043 // We can't fold a load if we are going to make two instructions.
5044 bool MayFoldLoad = !NeedIndex || !NeedMask;
5045
5046 MachineSDNode *CNode;
5047 if (NeedMask) {
5048 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrr : X86::PCMPESTRMrr;
5049 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrm : X86::PCMPESTRMrm;
5050 CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node,
5051 InFlag);
5052 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0));
5053 }
5054 if (NeedIndex || !NeedMask) {
5055 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrr : X86::PCMPESTRIrr;
5056 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrm : X86::PCMPESTRIrm;
5057 CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node, InFlag);
5058 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
5059 }
5060 // Connect the flag usage to the last instruction created.
5061 ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1));
5062 CurDAG->RemoveDeadNode(Node);
5063 return;
5064 }
5065
5066 case ISD::SETCC: {
5067 if (NVT.isVector() && tryVPTESTM(Node, SDValue(Node, 0), SDValue()))
5068 return;
5069
5070 break;
5071 }
5072
5073 case ISD::STORE:
5074 if (foldLoadStoreIntoMemOperand(Node))
5075 return;
5076 break;
5077 case ISD::FCEIL:
5078 case ISD::FFLOOR:
5079 case ISD::FTRUNC:
5080 case ISD::FNEARBYINT:
5081 case ISD::FRINT: {
5082 // Replace fp rounding with their X86 specific equivalent so we don't
5083 // need 2 sets of patterns.
5084 // FIXME: This can only happen when the nodes started as STRICT_* and have
5085 // been mutated into their non-STRICT equivalents. Eventually this
5086 // mutation will be removed and we should switch the STRICT_ nodes to a
5087 // strict version of RNDSCALE in PreProcessISelDAG.
5088 unsigned Imm;
5089 switch (Node->getOpcode()) {
5090 default: llvm_unreachable("Unexpected opcode!")::llvm::llvm_unreachable_internal("Unexpected opcode!", "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 5090)
;
5091 case ISD::FCEIL: Imm = 0xA; break;
5092 case ISD::FFLOOR: Imm = 0x9; break;
5093 case ISD::FTRUNC: Imm = 0xB; break;
5094 case ISD::FNEARBYINT: Imm = 0xC; break;
5095 case ISD::FRINT: Imm = 0x4; break;
5096 }
5097 SDLoc dl(Node);
5098 SDValue Res = CurDAG->getNode(X86ISD::VRNDSCALE, dl,
5099 Node->getValueType(0),
5100 Node->getOperand(0),
5101 CurDAG->getConstant(Imm, dl, MVT::i8));
5102 ReplaceNode(Node, Res.getNode());
5103 SelectCode(Res.getNode());
5104 return;
5105 }
5106 }
5107
5108 SelectCode(Node);
5109}
5110
5111bool X86DAGToDAGISel::
5112SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
5113 std::vector<SDValue> &OutOps) {
5114 SDValue Op0, Op1, Op2, Op3, Op4;
5115 switch (ConstraintID) {
5116 default:
5117 llvm_unreachable("Unexpected asm memory constraint")::llvm::llvm_unreachable_internal("Unexpected asm memory constraint"
, "/build/llvm-toolchain-snapshot-10~svn372087/lib/Target/X86/X86ISelDAGToDAG.cpp"
, 5117)
;
5118 case InlineAsm::Constraint_i:
5119 // FIXME: It seems strange that 'i' is needed here since it's supposed to
5120 // be an immediate and not a memory constraint.
5121 LLVM_FALLTHROUGH[[gnu::fallthrough]];
5122 case InlineAsm::Constraint_o: // offsetable ??
5123 case InlineAsm::Constraint_v: // not offsetable ??
5124 case InlineAsm::Constraint_m: // memory
5125 case InlineAsm::Constraint_X:
5126 if (!selectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4))
5127 return true;
5128 break;
5129 }
5130
5131 OutOps.push_back(Op0);
5132 OutOps.push_back(Op1);
5133 OutOps.push_back(Op2);
5134 OutOps.push_back(Op3);
5135 OutOps.push_back(Op4);
5136 return false;
5137}
5138
5139/// This pass converts a legalized DAG into a X86-specific DAG,
5140/// ready for instruction scheduling.
5141FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
5142 CodeGenOpt::Level OptLevel) {
5143 return new X86DAGToDAGISel(TM, OptLevel);
5144}

/build/llvm-toolchain-snapshot-10~svn372087/include/llvm/CodeGen/SelectionDAGNodes.h

1//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SDNode class and derived classes, which are used to
10// represent the nodes and operations present in a SelectionDAG. These nodes
11// and operations are machine code level operations, with some similarities to
12// the GCC RTL representation.
13//
14// Clients should include the SelectionDAG.h file instead of this file directly.
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19#define LLVM_CODEGEN_SELECTIONDAGNODES_H
20
21#include "llvm/ADT/APFloat.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/FoldingSet.h"
25#include "llvm/ADT/GraphTraits.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/ilist_node.h"
29#include "llvm/ADT/iterator.h"
30#include "llvm/ADT/iterator_range.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/ValueTypes.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/DebugLoc.h"
36#include "llvm/IR/Instruction.h"
37#include "llvm/IR/Instructions.h"
38#include "llvm/IR/Metadata.h"
39#include "llvm/IR/Operator.h"
40#include "llvm/Support/AlignOf.h"
41#include "llvm/Support/AtomicOrdering.h"
42#include "llvm/Support/Casting.h"
43#include "llvm/Support/ErrorHandling.h"
44#include "llvm/Support/MachineValueType.h"
45#include <algorithm>
46#include <cassert>
47#include <climits>
48#include <cstddef>
49#include <cstdint>
50#include <cstring>
51#include <iterator>
52#include <string>
53#include <tuple>
54
55namespace llvm {
56
57class APInt;
58class Constant;
59template <typename T> struct DenseMapInfo;
60class GlobalValue;
61class MachineBasicBlock;
62class MachineConstantPoolValue;
63class MCSymbol;
64class raw_ostream;
65class SDNode;
66class SelectionDAG;
67class Type;
68class Value;
69
70void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
71 bool force = false);
72
73/// This represents a list of ValueType's that has been intern'd by
74/// a SelectionDAG. Instances of this simple value class are returned by
75/// SelectionDAG::getVTList(...).
76///
77struct SDVTList {
78 const EVT *VTs;
79 unsigned int NumVTs;
80};
81
82namespace ISD {
83
84 /// Node predicates
85
86 /// If N is a BUILD_VECTOR node whose elements are all the same constant or
87 /// undefined, return true and return the constant value in \p SplatValue.
88 bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
89
90 /// Return true if the specified node is a BUILD_VECTOR where all of the
91 /// elements are ~0 or undef.
92 bool isBuildVectorAllOnes(const SDNode *N);
93
94 /// Return true if the specified node is a BUILD_VECTOR where all of the
95 /// elements are 0 or undef.
96 bool isBuildVectorAllZeros(const SDNode *N);
97
98 /// Return true if the specified node is a BUILD_VECTOR node of all
99 /// ConstantSDNode or undef.
100 bool isBuildVectorOfConstantSDNodes(const SDNode *N);
101
102 /// Return true if the specified node is a BUILD_VECTOR node of all
103 /// ConstantFPSDNode or undef.
104 bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
105
106 /// Return true if the node has at least one operand and all operands of the
107 /// specified node are ISD::UNDEF.
108 bool allOperandsUndef(const SDNode *N);
109
110} // end namespace ISD
111
112//===----------------------------------------------------------------------===//
113/// Unlike LLVM values, Selection DAG nodes may return multiple
114/// values as the result of a computation. Many nodes return multiple values,
115/// from loads (which define a token and a return value) to ADDC (which returns
116/// a result and a carry value), to calls (which may return an arbitrary number
117/// of values).
118///
119/// As such, each use of a SelectionDAG computation must indicate the node that
120/// computes it as well as which return value to use from that node. This pair
121/// of information is represented with the SDValue value type.
122///
123class SDValue {
124 friend struct DenseMapInfo<SDValue>;
125
126 SDNode *Node = nullptr; // The node defining the value we are using.
127 unsigned ResNo = 0; // Which return value of the node we are using.
128
129public:
130 SDValue() = default;
131 SDValue(SDNode *node, unsigned resno);
132
133 /// get the index which selects a specific result in the SDNode
134 unsigned getResNo() const { return ResNo; }
135
136 /// get the SDNode which holds the desired result
137 SDNode *getNode() const { return Node; }
138
139 /// set the SDNode
140 void setNode(SDNode *N) { Node = N; }
141
142 inline SDNode *operator->() const { return Node; }
143
144 bool operator==(const SDValue &O) const {
145 return Node == O.Node && ResNo == O.ResNo;
146 }
147 bool operator!=(const SDValue &O) const {
148 return !operator==(O);
149 }
150 bool operator<(const SDValue &O) const {
151 return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
152 }
153 explicit operator bool() const {
154 return Node != nullptr;
25
Returning the value 1, which participates in a condition later
155 }
156
157 SDValue getValue(unsigned R) const {
158 return SDValue(Node, R);
159 }
160
161 /// Return true if this node is an operand of N.
162 bool isOperandOf(const SDNode *N) const;
163
164 /// Return the ValueType of the referenced return value.
165 inline EVT getValueType() const;
166
167 /// Return the simple ValueType of the referenced return value.
168 MVT getSimpleValueType() const {
169 return getValueType().getSimpleVT();
170 }
171
172 /// Returns the size of the value in bits.
173 unsigned getValueSizeInBits() const {
174 return getValueType().getSizeInBits();
175 }
176
177 unsigned getScalarValueSizeInBits() const {
178 return getValueType().getScalarType().getSizeInBits();
179 }
180
181 // Forwarding methods - These forward to the corresponding methods in SDNode.
182 inline unsigned getOpcode() const;
183 inline unsigned getNumOperands() const;
184 inline const SDValue &getOperand(unsigned i) const;
185 inline uint64_t getConstantOperandVal(unsigned i) const;
186 inline const APInt &getConstantOperandAPInt(unsigned i) const;
187 inline bool isTargetMemoryOpcode() const;
188 inline bool isTargetOpcode() const;
189 inline bool isMachineOpcode() const;
190 inline bool isUndef() const;
191 inline unsigned getMachineOpcode() const;
192 inline const DebugLoc &getDebugLoc() const;
193 inline void dump() const;
194 inline void dump(const SelectionDAG *G) const;
195 inline void dumpr() const;
196 inline void dumpr(const SelectionDAG *G) const;
197
198 /// Return true if this operand (which must be a chain) reaches the
199 /// specified operand without crossing any side-effecting instructions.
200 /// In practice, this looks through token factors and non-volatile loads.
201 /// In order to remain efficient, this only
202 /// looks a couple of nodes in, it does not do an exhaustive search.
203 bool reachesChainWithoutSideEffects(SDValue Dest,
204 unsigned Depth = 2) const;
205
206 /// Return true if there are no nodes using value ResNo of Node.
207 inline bool use_empty() const;
208
209 /// Return true if there is exactly one node using value ResNo of Node.
210 inline bool hasOneUse() const;
211};
212
213template<> struct DenseMapInfo<SDValue> {
214 static inline SDValue getEmptyKey() {
215 SDValue V;
216 V.ResNo = -1U;
217 return V;
218 }
219
220 static inline SDValue getTombstoneKey() {
221 SDValue V;
222 V.ResNo = -2U;
223 return V;
224 }
225
226 static unsigned getHashValue(const SDValue &Val) {
227 return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
228 (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
229 }
230
231 static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
232 return LHS == RHS;
233 }
234};
235
236/// Allow casting operators to work directly on
237/// SDValues as if they were SDNode*'s.
238template<> struct simplify_type<SDValue> {
239 using SimpleType = SDNode *;
240
241 static SimpleType getSimplifiedValue(SDValue &Val) {
242 return Val.getNode();
243 }
244};
245template<> struct simplify_type<const SDValue> {
246 using SimpleType = /*const*/ SDNode *;
247
248 static SimpleType getSimplifiedValue(const SDValue &Val) {
249 return Val.getNode();
250 }
251};
252
/// Represents a use of a SDNode. This class holds an SDValue,
/// which records the SDNode being used and the result number, a
/// pointer to the SDNode using the value, and Next and Prev pointers,
/// which link together all the uses of an SDNode.
///
/// Uses form an intrusive doubly-linked list per used node: Next points at
/// the following use, and Prev points at the *slot* (list head or previous
/// use's Next field) that holds the pointer to this use, enabling O(1)
/// removal without knowing the list head.
class SDUse {
  /// Val - The value being used.
  SDValue Val;
  /// User - The user of this value.
  SDNode *User = nullptr;
  /// Prev, Next - Pointers to the uses list of the SDNode referred by
  /// this operand. Prev is a pointer-to-pointer so unlinking never needs
  /// the head of the list.
  SDUse **Prev = nullptr;
  SDUse *Next = nullptr;

public:
  SDUse() = default;
  // Uses are tied to their position in a node's operand array; copying
  // would corrupt the intrusive use lists, so it is disallowed.
  SDUse(const SDUse &U) = delete;
  SDUse &operator=(const SDUse &) = delete;

  /// Normally SDUse will just implicitly convert to an SDValue that it holds.
  operator const SDValue&() const { return Val; }

  /// If implicit conversion to SDValue doesn't work, the get() method returns
  /// the SDValue.
  const SDValue &get() const { return Val; }

  /// This returns the SDNode that contains this Use.
  SDNode *getUser() { return User; }

  /// Get the next SDUse in the use list.
  SDUse *getNext() const { return Next; }

  /// Convenience function for get().getNode().
  SDNode *getNode() const { return Val.getNode(); }
  /// Convenience function for get().getResNo().
  unsigned getResNo() const { return Val.getResNo(); }
  /// Convenience function for get().getValueType().
  EVT getValueType() const { return Val.getValueType(); }

  /// Convenience function for get().operator==
  bool operator==(const SDValue &V) const {
    return Val == V;
  }

  /// Convenience function for get().operator!=
  bool operator!=(const SDValue &V) const {
    return Val != V;
  }

  /// Convenience function for get().operator<
  bool operator<(const SDValue &V) const {
    return Val < V;
  }

private:
  friend class SelectionDAG;
  friend class SDNode;
  // TODO: unfriend HandleSDNode once we fix its operand handling.
  friend class HandleSDNode;

  void setUser(SDNode *p) { User = p; }

  /// Remove this use from its existing use list, assign it the
  /// given value, and add it to the new value's node's use list.
  inline void set(const SDValue &V);
  /// Like set, but only supports initializing a newly-allocated
  /// SDUse with a non-null value.
  inline void setInitial(const SDValue &V);
  /// Like set, but only sets the Node portion of the value,
  /// leaving the ResNo portion unmodified.
  inline void setNode(SDNode *N);

  /// Splice this use onto the front of the list headed at *List.
  void addToList(SDUse **List) {
    Next = *List;
    if (Next) Next->Prev = &Next; // Old head's back-pointer now targets our Next.
    Prev = List;
    *List = this;
  }

  /// Unlink this use from its list in O(1). *Prev is whatever slot currently
  /// points at this use (the head pointer or the previous use's Next).
  void removeFromList() {
    *Prev = Next;
    if (Next) Next->Prev = Prev;
  }
};
338
339/// simplify_type specializations - Allow casting operators to work directly on
340/// SDValues as if they were SDNode*'s.
341template<> struct simplify_type<SDUse> {
342 using SimpleType = SDNode *;
343
344 static SimpleType getSimplifiedValue(SDUse &Val) {
345 return Val.getNode();
346 }
347};
348
349/// These are IR-level optimization flags that may be propagated to SDNodes.
350/// TODO: This data structure should be shared by the IR optimizer and the
351/// the backend.
352struct SDNodeFlags {
353private:
354 // This bit is used to determine if the flags are in a defined state.
355 // Flag bits can only be masked out during intersection if the masking flags
356 // are defined.
357 bool AnyDefined : 1;
358
359 bool NoUnsignedWrap : 1;
360 bool NoSignedWrap : 1;
361 bool Exact : 1;
362 bool NoNaNs : 1;
363 bool NoInfs : 1;
364 bool NoSignedZeros : 1;
365 bool AllowReciprocal : 1;
366 bool VectorReduction : 1;
367 bool AllowContract : 1;
368 bool ApproximateFuncs : 1;
369 bool AllowReassociation : 1;
370
371 // We assume instructions do not raise floating-point exceptions by default,
372 // and only those marked explicitly may do so. We could choose to represent
373 // this via a positive "FPExcept" flags like on the MI level, but having a
374 // negative "NoFPExcept" flag here (that defaults to true) makes the flag
375 // intersection logic more straightforward.
376 bool NoFPExcept : 1;
377
378public:
379 /// Default constructor turns off all optimization flags.
380 SDNodeFlags()
381 : AnyDefined(false), NoUnsignedWrap(false), NoSignedWrap(false),
382 Exact(false), NoNaNs(false), NoInfs(false),
383 NoSignedZeros(false), AllowReciprocal(false), VectorReduction(false),
384 AllowContract(false), ApproximateFuncs(false),
385 AllowReassociation(false), NoFPExcept(true) {}
386
387 /// Propagate the fast-math-flags from an IR FPMathOperator.
388 void copyFMF(const FPMathOperator &FPMO) {
389 setNoNaNs(FPMO.hasNoNaNs());
390 setNoInfs(FPMO.hasNoInfs());
391 setNoSignedZeros(FPMO.hasNoSignedZeros());
392 setAllowReciprocal(FPMO.hasAllowReciprocal());
393 setAllowContract(FPMO.hasAllowContract());
394 setApproximateFuncs(FPMO.hasApproxFunc());
395 setAllowReassociation(FPMO.hasAllowReassoc());
396 }
397
398 /// Sets the state of the flags to the defined state.
399 void setDefined() { AnyDefined = true; }
400 /// Returns true if the flags are in a defined state.
401 bool isDefined() const { return AnyDefined; }
402
403 // These are mutators for each flag.
404 void setNoUnsignedWrap(bool b) {
405 setDefined();
406 NoUnsignedWrap = b;
407 }
408 void setNoSignedWrap(bool b) {
409 setDefined();
410 NoSignedWrap = b;
411 }
412 void setExact(bool b) {
413 setDefined();
414 Exact = b;
415 }
416 void setNoNaNs(bool b) {
417 setDefined();
418 NoNaNs = b;
419 }
420 void setNoInfs(bool b) {
421 setDefined();
422 NoInfs = b;
423 }
424 void setNoSignedZeros(bool b) {
425 setDefined();
426 NoSignedZeros = b;
427 }
428 void setAllowReciprocal(bool b) {
429 setDefined();
430 AllowReciprocal = b;
431 }
432 void setVectorReduction(bool b) {
433 setDefined();
434 VectorReduction = b;
435 }
436 void setAllowContract(bool b) {
437 setDefined();
438 AllowContract = b;
439 }
440 void setApproximateFuncs(bool b) {
441 setDefined();
442 ApproximateFuncs = b;
443 }
444 void setAllowReassociation(bool b) {
445 setDefined();
446 AllowReassociation = b;
447 }
448 void setFPExcept(bool b) {
449 setDefined();
450 NoFPExcept = !b;
451 }
452
453 // These are accessors for each flag.
454 bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
455 bool hasNoSignedWrap() const { return NoSignedWrap; }
456 bool hasExact() const { return Exact; }
457 bool hasNoNaNs() const { return NoNaNs; }
458 bool hasNoInfs() const { return NoInfs; }
459 bool hasNoSignedZeros() const { return NoSignedZeros; }
460 bool hasAllowReciprocal() const { return AllowReciprocal; }
461 bool hasVectorReduction() const { return VectorReduction; }
462 bool hasAllowContract() const { return AllowContract; }
463 bool hasApproximateFuncs() const { return ApproximateFuncs; }
464 bool hasAllowReassociation() const { return AllowReassociation; }
465 bool hasFPExcept() const { return !NoFPExcept; }
466
467 bool isFast() const {
468 return NoSignedZeros && AllowReciprocal && NoNaNs && NoInfs && NoFPExcept &&
469 AllowContract && ApproximateFuncs && AllowReassociation;
470 }
471
472 /// Clear any flags in this flag set that aren't also set in Flags.
473 /// If the given Flags are undefined then don't do anything.
474 void intersectWith(const SDNodeFlags Flags) {
475 if (!Flags.isDefined())
476 return;
477 NoUnsignedWrap &= Flags.NoUnsignedWrap;
478 NoSignedWrap &= Flags.NoSignedWrap;
479 Exact &= Flags.Exact;
480 NoNaNs &= Flags.NoNaNs;
481 NoInfs &= Flags.NoInfs;
482 NoSignedZeros &= Flags.NoSignedZeros;
483 AllowReciprocal &= Flags.AllowReciprocal;
484 VectorReduction &= Flags.VectorReduction;
485 AllowContract &= Flags.AllowContract;
486 ApproximateFuncs &= Flags.ApproximateFuncs;
487 AllowReassociation &= Flags.AllowReassociation;
488 NoFPExcept &= Flags.NoFPExcept;
489 }
490};
491
492/// Represents one node in the SelectionDAG.
493///
494class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
495private:
496 /// The operation that this node performs.
497 int16_t NodeType;
498
499protected:
500 // We define a set of mini-helper classes to help us interpret the bits in our
501 // SubclassData. These are designed to fit within a uint16_t so they pack
502 // with NodeType.
503
504#if defined(_AIX) && (!defined(__GNUC__4) || defined(__ibmxl__))
505// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
506// and give the `pack` pragma push semantics.
507#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")pack(2)
508#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")pack(pop)
509#else
510#define BEGIN_TWO_BYTE_PACK()
511#define END_TWO_BYTE_PACK()
512#endif
513
514BEGIN_TWO_BYTE_PACK()
  // Each *Bitfields class below is a view of the single 16-bit word stored in
  // the union that follows. An unnamed bitfield member skips over the bits
  // already claimed by the more-general view so the layouts never overlap.
  class SDNodeBitfields {
    friend class SDNode;
    friend class MemIntrinsicSDNode;
    friend class MemSDNode;
    friend class SelectionDAG;

    uint16_t HasDebugValue : 1;
    uint16_t IsMemIntrinsic : 1;
    uint16_t IsDivergent : 1;
  };
  enum { NumSDNodeBits = 3 };

  class ConstantSDNodeBitfields {
    friend class ConstantSDNode;

    uint16_t : NumSDNodeBits; // Skip bits owned by SDNodeBitfields.

    uint16_t IsOpaque : 1;
  };

  class MemSDNodeBitfields {
    friend class MemSDNode;
    friend class MemIntrinsicSDNode;
    friend class AtomicSDNode;

    uint16_t : NumSDNodeBits; // Skip bits owned by SDNodeBitfields.

    uint16_t IsVolatile : 1;
    uint16_t IsNonTemporal : 1;
    uint16_t IsDereferenceable : 1;
    uint16_t IsInvariant : 1;
  };
  enum { NumMemSDNodeBits = NumSDNodeBits + 4 };

  class LSBaseSDNodeBitfields {
    friend class LSBaseSDNode;
    friend class MaskedGatherScatterSDNode;

    uint16_t : NumMemSDNodeBits; // Skip bits owned by MemSDNodeBitfields.

    // This storage is shared between disparate class hierarchies to hold an
    // enumeration specific to the class hierarchy in use.
    // LSBaseSDNode => enum ISD::MemIndexedMode
    // MaskedGatherScatterSDNode => enum ISD::MemIndexType
    uint16_t AddressingMode : 3;
  };
  enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };

  class LoadSDNodeBitfields {
    friend class LoadSDNode;
    friend class MaskedLoadSDNode;

    uint16_t : NumLSBaseSDNodeBits; // Skip bits owned by LSBaseSDNodeBitfields.

    uint16_t ExtTy : 2; // enum ISD::LoadExtType
    uint16_t IsExpanding : 1;
  };

  class StoreSDNodeBitfields {
    friend class StoreSDNode;
    friend class MaskedStoreSDNode;

    uint16_t : NumLSBaseSDNodeBits; // Skip bits owned by LSBaseSDNodeBitfields.

    uint16_t IsTruncating : 1;
    uint16_t IsCompressing : 1;
  };

  // All the views share storage with RawSDNodeBits: two bytes, one layout.
  union {
    char RawSDNodeBits[sizeof(uint16_t)];
    SDNodeBitfields SDNodeBits;
    ConstantSDNodeBitfields ConstantSDNodeBits;
    MemSDNodeBitfields MemSDNodeBits;
    LSBaseSDNodeBitfields LSBaseSDNodeBits;
    LoadSDNodeBitfields LoadSDNodeBits;
    StoreSDNodeBitfields StoreSDNodeBits;
  };
END_TWO_BYTE_PACK()
#undef BEGIN_TWO_BYTE_PACK
#undef END_TWO_BYTE_PACK

  // RawSDNodeBits must cover the entirety of the union. This means that all of
  // the union's members must have size <= RawSDNodeBits. We write the RHS as
  // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
  static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
605
private:
  friend class SelectionDAG;
  // TODO: unfriend HandleSDNode once we fix its operand handling.
  friend class HandleSDNode;

  /// Unique id per SDNode in the DAG. -1 means "not assigned".
  int NodeId = -1;

  /// The values that are used by this operation (the operand array).
  SDUse *OperandList = nullptr;

  /// The types of the values this node defines. SDNode's may
  /// define multiple values simultaneously.
  const EVT *ValueList;

  /// List of uses for this SDNode (head of the intrusive SDUse list).
  SDUse *UseList = nullptr;

  /// The number of entries in the Operand/Value list.
  // NOTE(review): NumValues has no initializer here; presumably set by a
  // constructor outside this excerpt — confirm before relying on it.
  unsigned short NumOperands = 0;
  unsigned short NumValues;

  // The ordering of the SDNodes. It roughly corresponds to the ordering of the
  // original LLVM instructions.
  // This is used for turning off scheduling, because we'll forgo
  // the normal scheduling algorithms and output the instructions according to
  // this ordering.
  unsigned IROrder;

  /// Source line information.
  DebugLoc debugLoc;

  /// Return a pointer to the specified value type.
  static const EVT *getValueTypeList(EVT VT);

  /// IR-level optimization flags attached to this node (see SDNodeFlags).
  SDNodeFlags Flags;

public:
  /// Unique and persistent id per SDNode in the DAG.
  /// Used for debug printing.
  uint16_t PersistentId;
647
648 //===--------------------------------------------------------------------===//
649 // Accessors
650 //
651
  /// Return the SelectionDAG opcode value for this node. For
  /// pre-isel nodes (those for which isMachineOpcode returns false), these
  /// are the opcode values in the ISD and <target>ISD namespaces. For
  /// post-isel opcodes, see getMachineOpcode.
  // NodeType is stored as int16_t; the unsigned short cast yields the raw
  // 16-bit value before widening to unsigned.
  unsigned getOpcode() const { return (unsigned short)NodeType; }

  /// Test if this node has a target-specific opcode (in the
  /// \<target\>ISD namespace).
  bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }

  /// Test if this node has a target-specific
  /// memory-referencing opcode (in the \<target\>ISD namespace and
  /// greater than FIRST_TARGET_MEMORY_OPCODE).
  bool isTargetMemoryOpcode() const {
    return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
  }

  /// Return true if this node is an ISD::UNDEF value.
  bool isUndef() const { return NodeType == ISD::UNDEF; }
671
672 /// Test if this node is a memory intrinsic (with valid pointer information).
673 /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
674 /// non-memory intrinsics (with chains) that are not really instances of
675 /// MemSDNode. For such nodes, we need some extra state to determine the
676 /// proper classof relationship.
677 bool isMemIntrinsic() const {
678 return (NodeType == ISD::INTRINSIC_W_CHAIN ||
679 NodeType == ISD::INTRINSIC_VOID) &&
680 SDNodeBits.IsMemIntrinsic;
681 }
682
683 /// Test if this node is a strict floating point pseudo-op.
684 bool isStrictFPOpcode() {
685 switch (NodeType) {
686 default:
687 return false;
688 case ISD::STRICT_FADD:
689 case ISD::STRICT_FSUB:
690 case ISD::STRICT_FMUL:
691 case ISD::STRICT_FDIV:
692 case ISD::STRICT_FREM:
693 case ISD::STRICT_FMA:
694 case ISD::STRICT_FSQRT:
695 case ISD::STRICT_FPOW:
696 case ISD::STRICT_FPOWI:
697 case ISD::STRICT_FSIN:
698 case ISD::STRICT_FCOS:
699 case ISD::STRICT_FEXP:
700 case ISD::STRICT_FEXP2:
701 case ISD::STRICT_FLOG:
702 case ISD::STRICT_FLOG10:
703 case ISD::STRICT_FLOG2:
704 case ISD::STRICT_FRINT:
705 case ISD::STRICT_FNEARBYINT:
706 case ISD::STRICT_FMAXNUM:
707 case ISD::STRICT_FMINNUM:
708 case ISD::STRICT_FCEIL:
709 case ISD::STRICT_FFLOOR:
710 case ISD::STRICT_FROUND:
711 case ISD::STRICT_FTRUNC:
712 case ISD::STRICT_FP_TO_SINT:
713 case ISD::STRICT_FP_TO_UINT:
714 case ISD::STRICT_FP_ROUND:
715 case ISD::STRICT_FP_EXTEND:
716 return true;
717 }
718 }
719
720 /// Test if this node has a post-isel opcode, directly
721 /// corresponding to a MachineInstr opcode.
722 bool isMachineOpcode() const { return NodeType < 0; }
723
724 /// This may only be called if isMachineOpcode returns
725 /// true. It returns the MachineInstr opcode value that the node's opcode
726 /// corresponds to.
727 unsigned getMachineOpcode() const {
728 assert(isMachineOpcode() && "Not a MachineInstr opcode!")((isMachineOpcode() && "Not a MachineInstr opcode!") ?
static_cast<void> (0) : __assert_fail ("isMachineOpcode() && \"Not a MachineInstr opcode!\""
, "/build/llvm-toolchain-snapshot-10~svn372087/include/llvm/CodeGen/SelectionDAGNodes.h"
, 728, __PRETTY_FUNCTION__))
;
729 return ~NodeType;
730 }
731
  /// Whether a DBG_VALUE is attached to this node (stored in SDNodeBits).
  bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
  void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }

  /// Whether this node is divergent (per the divergence bit in SDNodeBits).
  bool isDivergent() const { return SDNodeBits.IsDivergent; }

  /// Return true if there are no uses of this node.
  bool use_empty() const { return UseList == nullptr; }

  /// Return true if there is exactly one use of this node.
  // Checks "non-empty and the second use is end()" — O(1), unlike use_size().
  bool hasOneUse() const {
    return !use_empty() && std::next(use_begin()) == use_end();
  }

  /// Return the number of uses of this node. This method takes
  /// time proportional to the number of uses.
  size_t use_size() const { return std::distance(use_begin(), use_end()); }

  /// Return the unique node id.
  int getNodeId() const { return NodeId; }

  /// Set unique node id.
  void setNodeId(int Id) { NodeId = Id; }
754
755 /// Return the node ordering.
756 unsigned getIROrder() const {