Bug Summary

File: llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1146, column 10
Called C++ object pointer is null
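
This warning means the analyzer found a path on which a member function is
invoked through a null object pointer: per step 14 of the path below, the
flagged call is SDValue::getOpcode() in SelectionDAGNodes.h, which forwards to
the wrapped SDNode pointer without a null check. As a minimal, self-contained
sketch of that shape (hypothetical *Sketch names, not the verbatim header
source):

    // Sketch only: an SDValue-like wrapper whose accessor forwards to the
    // node pointer unchecked, so a value whose node was never assigned
    // dereferences null exactly as the warning describes.
    struct SDNodeSketch {
      unsigned Opcode = 0;
      unsigned getOpcode() const { return Opcode; }
    };

    struct SDValueSketch {
      SDNodeSketch *Node = nullptr;   // default construction leaves this null
      unsigned getOpcode() const {
        return Node->getOpcode();     // the flagged dereference pattern
      }
    };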

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name HexagonISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fdenormal-fp-math=ieee,ieee -fdenormal-fp-math-f32=ieee,ieee -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-11/lib/clang/11.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/build-llvm/lib/Target/Hexagon -I /build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/llvm/lib/Target/Hexagon -I /build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/build-llvm/include -I /build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-11/lib/clang/11.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/build-llvm/lib/Target/Hexagon -fdebug-prefix-map=/build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b=. -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-02-26-193302-13812-1 -x c++ /build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp

/build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp

1//===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the interfaces that Hexagon uses to lower LLVM code
10// into a selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "HexagonISelLowering.h"
15#include "Hexagon.h"
16#include "HexagonMachineFunctionInfo.h"
17#include "HexagonRegisterInfo.h"
18#include "HexagonSubtarget.h"
19#include "HexagonTargetMachine.h"
20#include "HexagonTargetObjectFile.h"
21#include "llvm/ADT/APInt.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringSwitch.h"
25#include "llvm/CodeGen/CallingConvLower.h"
26#include "llvm/CodeGen/MachineFrameInfo.h"
27#include "llvm/CodeGen/MachineFunction.h"
28#include "llvm/CodeGen/MachineMemOperand.h"
29#include "llvm/CodeGen/MachineRegisterInfo.h"
30#include "llvm/CodeGen/RuntimeLibcalls.h"
31#include "llvm/CodeGen/SelectionDAG.h"
32#include "llvm/CodeGen/TargetCallingConv.h"
33#include "llvm/CodeGen/ValueTypes.h"
34#include "llvm/IR/BasicBlock.h"
35#include "llvm/IR/CallingConv.h"
36#include "llvm/IR/DataLayout.h"
37#include "llvm/IR/DerivedTypes.h"
38#include "llvm/IR/Function.h"
39#include "llvm/IR/GlobalValue.h"
40#include "llvm/IR/InlineAsm.h"
41#include "llvm/IR/Instructions.h"
42#include "llvm/IR/IntrinsicInst.h"
43#include "llvm/IR/Intrinsics.h"
44#include "llvm/IR/IntrinsicsHexagon.h"
45#include "llvm/IR/Module.h"
46#include "llvm/IR/Type.h"
47#include "llvm/IR/Value.h"
48#include "llvm/MC/MCRegisterInfo.h"
49#include "llvm/Support/Casting.h"
50#include "llvm/Support/CodeGen.h"
51#include "llvm/Support/CommandLine.h"
52#include "llvm/Support/Debug.h"
53#include "llvm/Support/ErrorHandling.h"
54#include "llvm/Support/MathExtras.h"
55#include "llvm/Support/raw_ostream.h"
56#include "llvm/Target/TargetMachine.h"
57#include <algorithm>
58#include <cassert>
59#include <cstddef>
60#include <cstdint>
61#include <limits>
62#include <utility>
63
64using namespace llvm;
65
66#define DEBUG_TYPE "hexagon-lowering"
67
68static cl::opt<bool> EmitJumpTables("hexagon-emit-jump-tables",
69 cl::init(true), cl::Hidden,
70 cl::desc("Control jump table emission on Hexagon target"));
71
72static cl::opt<bool> EnableHexSDNodeSched("enable-hexagon-sdnode-sched",
73 cl::Hidden, cl::ZeroOrMore, cl::init(false),
74 cl::desc("Enable Hexagon SDNode scheduling"));
75
76static cl::opt<bool> EnableFastMath("ffast-math",
77 cl::Hidden, cl::ZeroOrMore, cl::init(false),
78 cl::desc("Enable Fast Math processing"));
79
80static cl::opt<int> MinimumJumpTables("minimum-jump-tables",
81 cl::Hidden, cl::ZeroOrMore, cl::init(5),
82 cl::desc("Set minimum jump tables"));
83
84static cl::opt<int> MaxStoresPerMemcpyCL("max-store-memcpy",
85 cl::Hidden, cl::ZeroOrMore, cl::init(6),
86 cl::desc("Max #stores to inline memcpy"));
87
88static cl::opt<int> MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os",
89 cl::Hidden, cl::ZeroOrMore, cl::init(4),
90 cl::desc("Max #stores to inline memcpy"));
91
92static cl::opt<int> MaxStoresPerMemmoveCL("max-store-memmove",
93 cl::Hidden, cl::ZeroOrMore, cl::init(6),
94 cl::desc("Max #stores to inline memmove"));
95
96static cl::opt<int> MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os",
97 cl::Hidden, cl::ZeroOrMore, cl::init(4),
98 cl::desc("Max #stores to inline memmove"));
99
100static cl::opt<int> MaxStoresPerMemsetCL("max-store-memset",
101 cl::Hidden, cl::ZeroOrMore, cl::init(8),
102 cl::desc("Max #stores to inline memset"));
103
104static cl::opt<int> MaxStoresPerMemsetOptSizeCL("max-store-memset-Os",
105 cl::Hidden, cl::ZeroOrMore, cl::init(4),
106 cl::desc("Max #stores to inline memset"));
107
108static cl::opt<bool> AlignLoads("hexagon-align-loads",
109 cl::Hidden, cl::init(false),
110 cl::desc("Rewrite unaligned loads as a pair of aligned loads"));
111
112
113namespace {
114
115 class HexagonCCState : public CCState {
116 unsigned NumNamedVarArgParams = 0;
117
118 public:
119 HexagonCCState(CallingConv::ID CC, bool IsVarArg, MachineFunction &MF,
120 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
121 unsigned NumNamedArgs)
122 : CCState(CC, IsVarArg, MF, locs, C),
123 NumNamedVarArgParams(NumNamedArgs) {}
124 unsigned getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
125 };
126
127} // end anonymous namespace
128
129
130// Implement calling convention for Hexagon.
131
132static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
133 CCValAssign::LocInfo &LocInfo,
134 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
135 static const MCPhysReg ArgRegs[] = {
136 Hexagon::R0, Hexagon::R1, Hexagon::R2,
137 Hexagon::R3, Hexagon::R4, Hexagon::R5
138 };
139 const unsigned NumArgRegs = array_lengthof(ArgRegs);
140 unsigned RegNum = State.getFirstUnallocated(ArgRegs);
141
142 // RegNum is an index into ArgRegs: skip a register if RegNum is odd.
143 if (RegNum != NumArgRegs && RegNum % 2 == 1)
144 State.AllocateReg(ArgRegs[RegNum]);
145
146 // Always return false here, as this function only makes sure that the first
147 // unallocated register has an even register number and does not actually
148 // allocate a register for the current argument.
149 return false;
150}
151
152#include "HexagonGenCallingConv.inc"
153
154
155SDValue
156HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
157 const {
158 return SDValue();
159}
160
161/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
162/// by "Src" to address "Dst" of size "Size". Alignment information is
163/// specified by the specific parameter attribute. The copy will be passed as
164/// a byval function parameter. Sometimes what we are copying is the end of a
165/// larger object, the part that does not fit in registers.
166static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
167 SDValue Chain, ISD::ArgFlagsTy Flags,
168 SelectionDAG &DAG, const SDLoc &dl) {
169 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
170 return DAG.getMemcpy(
171 Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
172 /*isVolatile=*/false, /*AlwaysInline=*/false,
173 /*isTailCall=*/false, MachinePointerInfo(), MachinePointerInfo());
174}
175
176bool
177HexagonTargetLowering::CanLowerReturn(
178 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
179 const SmallVectorImpl<ISD::OutputArg> &Outs,
180 LLVMContext &Context) const {
181 SmallVector<CCValAssign, 16> RVLocs;
182 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
183
184 if (MF.getSubtarget<HexagonSubtarget>().useHVXOps())
185 return CCInfo.CheckReturn(Outs, RetCC_Hexagon_HVX);
186 return CCInfo.CheckReturn(Outs, RetCC_Hexagon);
187}
188
189// LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
190// passed by value, the function prototype is modified to return void and
191// the value is stored in memory pointed by a pointer passed by caller.
192SDValue
193HexagonTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
194 bool IsVarArg,
195 const SmallVectorImpl<ISD::OutputArg> &Outs,
196 const SmallVectorImpl<SDValue> &OutVals,
197 const SDLoc &dl, SelectionDAG &DAG) const {
198 // CCValAssign - represent the assignment of the return value to locations.
199 SmallVector<CCValAssign, 16> RVLocs;
200
201 // CCState - Info about the registers and stack slot.
202 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
203 *DAG.getContext());
204
205 // Analyze return values of ISD::RET
206 if (Subtarget.useHVXOps())
207 CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon_HVX);
208 else
209 CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);
210
211 SDValue Flag;
212 SmallVector<SDValue, 4> RetOps(1, Chain);
213
214 // Copy the result values into the output registers.
215 for (unsigned i = 0; i != RVLocs.size(); ++i) {
216 CCValAssign &VA = RVLocs[i];
217
218 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
219
220 // Guarantee that all emitted copies are stuck together with flags.
221 Flag = Chain.getValue(1);
222 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
223 }
224
225 RetOps[0] = Chain; // Update chain.
226
227 // Add the flag if we have it.
228 if (Flag.getNode())
229 RetOps.push_back(Flag);
230
231 return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, RetOps);
232}
233
234bool HexagonTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
235 // If either no tail call or told not to tail call at all, don't.
236 return CI->isTailCall();
237}
238
239Register HexagonTargetLowering::getRegisterByName(
240 const char* RegName, LLT VT, const MachineFunction &) const {
241 // Just support r19, the linux kernel uses it.
242 Register Reg = StringSwitch<Register>(RegName)
243 .Case("r0", Hexagon::R0)
244 .Case("r1", Hexagon::R1)
245 .Case("r2", Hexagon::R2)
246 .Case("r3", Hexagon::R3)
247 .Case("r4", Hexagon::R4)
248 .Case("r5", Hexagon::R5)
249 .Case("r6", Hexagon::R6)
250 .Case("r7", Hexagon::R7)
251 .Case("r8", Hexagon::R8)
252 .Case("r9", Hexagon::R9)
253 .Case("r10", Hexagon::R10)
254 .Case("r11", Hexagon::R11)
255 .Case("r12", Hexagon::R12)
256 .Case("r13", Hexagon::R13)
257 .Case("r14", Hexagon::R14)
258 .Case("r15", Hexagon::R15)
259 .Case("r16", Hexagon::R16)
260 .Case("r17", Hexagon::R17)
261 .Case("r18", Hexagon::R18)
262 .Case("r19", Hexagon::R19)
263 .Case("r20", Hexagon::R20)
264 .Case("r21", Hexagon::R21)
265 .Case("r22", Hexagon::R22)
266 .Case("r23", Hexagon::R23)
267 .Case("r24", Hexagon::R24)
268 .Case("r25", Hexagon::R25)
269 .Case("r26", Hexagon::R26)
270 .Case("r27", Hexagon::R27)
271 .Case("r28", Hexagon::R28)
272 .Case("r29", Hexagon::R29)
273 .Case("r30", Hexagon::R30)
274 .Case("r31", Hexagon::R31)
275 .Case("r1:0", Hexagon::D0)
276 .Case("r3:2", Hexagon::D1)
277 .Case("r5:4", Hexagon::D2)
278 .Case("r7:6", Hexagon::D3)
279 .Case("r9:8", Hexagon::D4)
280 .Case("r11:10", Hexagon::D5)
281 .Case("r13:12", Hexagon::D6)
282 .Case("r15:14", Hexagon::D7)
283 .Case("r17:16", Hexagon::D8)
284 .Case("r19:18", Hexagon::D9)
285 .Case("r21:20", Hexagon::D10)
286 .Case("r23:22", Hexagon::D11)
287 .Case("r25:24", Hexagon::D12)
288 .Case("r27:26", Hexagon::D13)
289 .Case("r29:28", Hexagon::D14)
290 .Case("r31:30", Hexagon::D15)
291 .Case("sp", Hexagon::R29)
292 .Case("fp", Hexagon::R30)
293 .Case("lr", Hexagon::R31)
294 .Case("p0", Hexagon::P0)
295 .Case("p1", Hexagon::P1)
296 .Case("p2", Hexagon::P2)
297 .Case("p3", Hexagon::P3)
298 .Case("sa0", Hexagon::SA0)
299 .Case("lc0", Hexagon::LC0)
300 .Case("sa1", Hexagon::SA1)
301 .Case("lc1", Hexagon::LC1)
302 .Case("m0", Hexagon::M0)
303 .Case("m1", Hexagon::M1)
304 .Case("usr", Hexagon::USR)
305 .Case("ugp", Hexagon::UGP)
306 .Default(Register());
307 if (Reg)
308 return Reg;
309
310 report_fatal_error("Invalid register name global variable");
311}
312
313/// LowerCallResult - Lower the result values of an ISD::CALL into the
314/// appropriate copies out of appropriate physical registers. This assumes that
315/// Chain/Glue are the input chain/glue to use, and that TheCall is the call
316/// being lowered. Returns a SDNode with the same number of values as the
317/// ISD::CALL.
318SDValue HexagonTargetLowering::LowerCallResult(
319 SDValue Chain, SDValue Glue, CallingConv::ID CallConv, bool IsVarArg,
320 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
321 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
322 const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const {
323 // Assign locations to each value returned by this call.
324 SmallVector<CCValAssign, 16> RVLocs;
325
326 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
327 *DAG.getContext());
328
329 if (Subtarget.useHVXOps())
330 CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon_HVX);
331 else
332 CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);
333
334 // Copy all of the result registers out of their specified physreg.
335 for (unsigned i = 0; i != RVLocs.size(); ++i) {
336 SDValue RetVal;
337 if (RVLocs[i].getValVT() == MVT::i1) {
338 // Return values of type MVT::i1 require special handling. The reason
339 // is that MVT::i1 is associated with the PredRegs register class, but
340 // values of that type are still returned in R0. Generate an explicit
341 // copy into a predicate register from R0, and treat the value of the
342 // predicate register as the call result.
343 auto &MRI = DAG.getMachineFunction().getRegInfo();
344 SDValue FR0 = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
345 MVT::i32, Glue);
346 // FR0 = (Value, Chain, Glue)
347 Register PredR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
348 SDValue TPR = DAG.getCopyToReg(FR0.getValue(1), dl, PredR,
349 FR0.getValue(0), FR0.getValue(2));
350 // TPR = (Chain, Glue)
351 // Don't glue this CopyFromReg, because it copies from a virtual
352 // register. If it is glued to the call, InstrEmitter will add it
353 // as an implicit def to the call (EmitMachineNode).
354 RetVal = DAG.getCopyFromReg(TPR.getValue(0), dl, PredR, MVT::i1);
355 Glue = TPR.getValue(1);
356 Chain = TPR.getValue(0);
357 } else {
358 RetVal = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
359 RVLocs[i].getValVT(), Glue);
360 Glue = RetVal.getValue(2);
361 Chain = RetVal.getValue(1);
362 }
363 InVals.push_back(RetVal.getValue(0));
364 }
365
366 return Chain;
367}
368
369/// LowerCall - Functions arguments are copied from virtual regs to
370/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
371SDValue
372HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
373 SmallVectorImpl<SDValue> &InVals) const {
374 SelectionDAG &DAG = CLI.DAG;
375 SDLoc &dl = CLI.DL;
376 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
377 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
378 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
379 SDValue Chain = CLI.Chain;
380 SDValue Callee = CLI.Callee;
381 CallingConv::ID CallConv = CLI.CallConv;
382 bool IsVarArg = CLI.IsVarArg;
383 bool DoesNotReturn = CLI.DoesNotReturn;
384
385 bool IsStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
386 MachineFunction &MF = DAG.getMachineFunction();
387 MachineFrameInfo &MFI = MF.getFrameInfo();
388 auto PtrVT = getPointerTy(MF.getDataLayout());
389
390 unsigned NumParams = CLI.CS.getInstruction()
391 ? CLI.CS.getFunctionType()->getNumParams()
392 : 0;
393 if (GlobalAddressSDNode *GAN = dyn_cast<GlobalAddressSDNode>(Callee))
394 Callee = DAG.getTargetGlobalAddress(GAN->getGlobal(), dl, MVT::i32);
395
396 // Linux ABI treats var-arg calls the same way as regular ones.
397 bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;
398
399 // Analyze operands of the call, assigning locations to each operand.
400 SmallVector<CCValAssign, 16> ArgLocs;
401 HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs, *DAG.getContext(),
402 NumParams);
403
404 if (Subtarget.useHVXOps())
405 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_HVX);
406 else
407 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
408
409 if (CLI.IsTailCall) {
410 bool StructAttrFlag = MF.getFunction().hasStructRetAttr();
411 CLI.IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
412 IsVarArg, IsStructRet, StructAttrFlag, Outs,
413 OutVals, Ins, DAG);
414 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
415 CCValAssign &VA = ArgLocs[i];
416 if (VA.isMemLoc()) {
417 CLI.IsTailCall = false;
418 break;
419 }
420 }
421 LLVM_DEBUG(dbgs() << (CLI.IsTailCall ? "Eligible for Tail Call\n"
422 : "Argument must be passed on stack. "
423 "Not eligible for Tail Call\n"));
424 }
425 // Get a count of how many bytes are to be pushed on the stack.
426 unsigned NumBytes = CCInfo.getNextStackOffset();
427 SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
428 SmallVector<SDValue, 8> MemOpChains;
429
430 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
431 SDValue StackPtr =
432 DAG.getCopyFromReg(Chain, dl, HRI.getStackRegister(), PtrVT);
433
434 bool NeedsArgAlign = false;
435 unsigned LargestAlignSeen = 0;
436 // Walk the register/memloc assignments, inserting copies/loads.
437 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
438 CCValAssign &VA = ArgLocs[i];
439 SDValue Arg = OutVals[i];
440 ISD::ArgFlagsTy Flags = Outs[i].Flags;
441 // Record if we need > 8 byte alignment on an argument.
442 bool ArgAlign = Subtarget.isHVXVectorType(VA.getValVT());
443 NeedsArgAlign |= ArgAlign;
444
445 // Promote the value if needed.
446 switch (VA.getLocInfo()) {
447 default:
448 // Loc info must be one of Full, BCvt, SExt, ZExt, or AExt.
449 llvm_unreachable("Unknown loc info!");
450 case CCValAssign::Full:
451 break;
452 case CCValAssign::BCvt:
453 Arg = DAG.getBitcast(VA.getLocVT(), Arg);
454 break;
455 case CCValAssign::SExt:
456 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
457 break;
458 case CCValAssign::ZExt:
459 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
460 break;
461 case CCValAssign::AExt:
462 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
463 break;
464 }
465
466 if (VA.isMemLoc()) {
467 unsigned LocMemOffset = VA.getLocMemOffset();
468 SDValue MemAddr = DAG.getConstant(LocMemOffset, dl,
469 StackPtr.getValueType());
470 MemAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, MemAddr);
471 if (ArgAlign)
472 LargestAlignSeen = std::max(LargestAlignSeen,
473 (unsigned)VA.getLocVT().getStoreSizeInBits() >> 3);
474 if (Flags.isByVal()) {
475 // The argument is a struct passed by value. According to LLVM, "Arg"
476 // is a pointer.
477 MemOpChains.push_back(CreateCopyOfByValArgument(Arg, MemAddr, Chain,
478 Flags, DAG, dl));
479 } else {
480 MachinePointerInfo LocPI = MachinePointerInfo::getStack(
481 DAG.getMachineFunction(), LocMemOffset);
482 SDValue S = DAG.getStore(Chain, dl, Arg, MemAddr, LocPI);
483 MemOpChains.push_back(S);
484 }
485 continue;
486 }
487
488 // Arguments that can be passed on register must be kept at RegsToPass
489 // vector.
490 if (VA.isRegLoc())
491 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
492 }
493
494 if (NeedsArgAlign && Subtarget.hasV60Ops()) {
495 LLVM_DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
496 unsigned VecAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
497 LargestAlignSeen = std::max(LargestAlignSeen, VecAlign);
498 MFI.ensureMaxAlignment(LargestAlignSeen);
499 }
500 // Transform all store nodes into one single node because all store
501 // nodes are independent of each other.
502 if (!MemOpChains.empty())
503 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
504
505 SDValue Glue;
506 if (!CLI.IsTailCall) {
507 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
508 Glue = Chain.getValue(1);
509 }
510
511 // Build a sequence of copy-to-reg nodes chained together with token
512 // chain and flag operands which copy the outgoing args into registers.
513 // The Glue is necessary since all emitted instructions must be
514 // stuck together.
515 if (!CLI.IsTailCall) {
516 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
517 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
518 RegsToPass[i].second, Glue);
519 Glue = Chain.getValue(1);
520 }
521 } else {
522 // For tail calls lower the arguments to the 'real' stack slot.
523 //
524 // Force all the incoming stack arguments to be loaded from the stack
525 // before any new outgoing arguments are stored to the stack, because the
526 // outgoing stack slots may alias the incoming argument stack slots, and
527 // the alias isn't otherwise explicit. This is slightly more conservative
528 // than necessary, because it means that each store effectively depends
529 // on every argument instead of just those arguments it would clobber.
530 //
531 // Do not flag preceding copytoreg stuff together with the following stuff.
532 Glue = SDValue();
533 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
534 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
535 RegsToPass[i].second, Glue);
536 Glue = Chain.getValue(1);
537 }
538 Glue = SDValue();
539 }
540
541 bool LongCalls = MF.getSubtarget<HexagonSubtarget>().useLongCalls();
542 unsigned Flags = LongCalls ? HexagonII::HMOTF_ConstExtended : 0;
543
544 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
545 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
546 // node so that legalize doesn't hack it.
547 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
548 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, PtrVT, 0, Flags);
549 } else if (ExternalSymbolSDNode *S =
550 dyn_cast<ExternalSymbolSDNode>(Callee)) {
551 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, Flags);
552 }
553
554 // Returns a chain & a flag for retval copy to use.
555 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
556 SmallVector<SDValue, 8> Ops;
557 Ops.push_back(Chain);
558 Ops.push_back(Callee);
559
560 // Add argument registers to the end of the list so that they are
561 // known live into the call.
562 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
563 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
564 RegsToPass[i].second.getValueType()));
565 }
566
567 const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallConv);
568 assert(Mask && "Missing call preserved mask for calling convention");
569 Ops.push_back(DAG.getRegisterMask(Mask));
570
571 if (Glue.getNode())
572 Ops.push_back(Glue);
573
574 if (CLI.IsTailCall) {
575 MFI.setHasTailCall();
576 return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
577 }
578
579 // Set this here because we need to know this for "hasFP" in frame lowering.
580 // The target-independent code calls getFrameRegister before setting it, and
581 // getFrameRegister uses hasFP to determine whether the function has FP.
582 MFI.setHasCalls(true);
583
584 unsigned OpCode = DoesNotReturn ? HexagonISD::CALLnr : HexagonISD::CALL;
585 Chain = DAG.getNode(OpCode, dl, NodeTys, Ops);
586 Glue = Chain.getValue(1);
587
588 // Create the CALLSEQ_END node.
589 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
590 DAG.getIntPtrConstant(0, dl, true), Glue, dl);
591 Glue = Chain.getValue(1);
592
593 // Handle result values, copying them out of physregs into vregs that we
594 // return.
595 return LowerCallResult(Chain, Glue, CallConv, IsVarArg, Ins, dl, DAG,
596 InVals, OutVals, Callee);
597}
598
599/// Returns true by value, base pointer and offset pointer and addressing
600/// mode by reference if this node can be combined with a load / store to
601/// form a post-indexed load / store.
602bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
603 SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM,
604 SelectionDAG &DAG) const {
605 LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(N);
606 if (!LSN)
607 return false;
608 EVT VT = LSN->getMemoryVT();
609 if (!VT.isSimple())
610 return false;
611 bool IsLegalType = VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
612 VT == MVT::i64 || VT == MVT::f32 || VT == MVT::f64 ||
613 VT == MVT::v2i16 || VT == MVT::v2i32 || VT == MVT::v4i8 ||
614 VT == MVT::v4i16 || VT == MVT::v8i8 ||
615 Subtarget.isHVXVectorType(VT.getSimpleVT());
616 if (!IsLegalType)
617 return false;
618
619 if (Op->getOpcode() != ISD::ADD)
620 return false;
621 Base = Op->getOperand(0);
622 Offset = Op->getOperand(1);
623 if (!isa<ConstantSDNode>(Offset.getNode()))
624 return false;
625 AM = ISD::POST_INC;
626
627 int32_t V = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
628 return Subtarget.getInstrInfo()->isValidAutoIncImm(VT, V);
629}
630
631SDValue
632HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
633 MachineFunction &MF = DAG.getMachineFunction();
634 auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
635 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
636 unsigned LR = HRI.getRARegister();
637
638 if ((Op.getOpcode() != ISD::INLINEASM &&
639 Op.getOpcode() != ISD::INLINEASM_BR) || HMFI.hasClobberLR())
640 return Op;
641
642 unsigned NumOps = Op.getNumOperands();
643 if (Op.getOperand(NumOps-1).getValueType() == MVT::Glue)
644 --NumOps; // Ignore the flag operand.
645
646 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
647 unsigned Flags = cast<ConstantSDNode>(Op.getOperand(i))->getZExtValue();
648 unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
649 ++i; // Skip the ID value.
650
651 switch (InlineAsm::getKind(Flags)) {
652 default:
653 llvm_unreachable("Bad flags!");
654 case InlineAsm::Kind_RegUse:
655 case InlineAsm::Kind_Imm:
656 case InlineAsm::Kind_Mem:
657 i += NumVals;
658 break;
659 case InlineAsm::Kind_Clobber:
660 case InlineAsm::Kind_RegDef:
661 case InlineAsm::Kind_RegDefEarlyClobber: {
662 for (; NumVals; --NumVals, ++i) {
663 unsigned Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
664 if (Reg != LR)
665 continue;
666 HMFI.setHasClobberLR(true);
667 return Op;
668 }
669 break;
670 }
671 }
672 }
673
674 return Op;
675}
676
677// Need to transform ISD::PREFETCH into something that doesn't inherit
678// all of the properties of ISD::PREFETCH, specifically SDNPMayLoad and
679// SDNPMayStore.
680SDValue HexagonTargetLowering::LowerPREFETCH(SDValue Op,
681 SelectionDAG &DAG) const {
682 SDValue Chain = Op.getOperand(0);
683 SDValue Addr = Op.getOperand(1);
684 // Lower it to DCFETCH($reg, #0). A "pat" will try to merge the offset in,
685 // if the "reg" is fed by an "add".
686 SDLoc DL(Op);
687 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
688 return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
689}
690
691// Custom-handle ISD::READCYCLECOUNTER because the target-independent SDNode
692// is marked as having side-effects, while the register read on Hexagon does
693// not have any. TableGen refuses to accept the direct pattern from that node
694// to the A4_tfrcpp.
695SDValue HexagonTargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
696 SelectionDAG &DAG) const {
697 SDValue Chain = Op.getOperand(0);
698 SDLoc dl(Op);
699 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
700 return DAG.getNode(HexagonISD::READCYCLE, dl, VTs, Chain);
701}
702
703SDValue HexagonTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
704 SelectionDAG &DAG) const {
705 SDValue Chain = Op.getOperand(0);
706 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
707 // Lower the hexagon_prefetch builtin to DCFETCH, as above.
708 if (IntNo == Intrinsic::hexagon_prefetch) {
709 SDValue Addr = Op.getOperand(2);
710 SDLoc DL(Op);
711 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
712 return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
713 }
714 return SDValue();
715}
716
717SDValue
718HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
719 SelectionDAG &DAG) const {
720 SDValue Chain = Op.getOperand(0);
721 SDValue Size = Op.getOperand(1);
722 SDValue Align = Op.getOperand(2);
723 SDLoc dl(Op);
724
725 ConstantSDNode *AlignConst = dyn_cast<ConstantSDNode>(Align);
726 assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC");
727
728 unsigned A = AlignConst->getSExtValue();
729 auto &HFI = *Subtarget.getFrameLowering();
730 // "Zero" means natural stack alignment.
731 if (A == 0)
732 A = HFI.getStackAlignment();
733
734 LLVM_DEBUG({
735 dbgs () << __func__ << " Align: " << A << " Size: ";
736 Size.getNode()->dump(&DAG);
737 dbgs() << "\n";
738 });
739
740 SDValue AC = DAG.getConstant(A, dl, MVT::i32);
741 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
742 SDValue AA = DAG.getNode(HexagonISD::ALLOCA, dl, VTs, Chain, Size, AC);
743
744 DAG.ReplaceAllUsesOfValueWith(Op, AA);
745 return AA;
746}
747
748SDValue HexagonTargetLowering::LowerFormalArguments(
749 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
750 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
751 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
752 MachineFunction &MF = DAG.getMachineFunction();
753 MachineFrameInfo &MFI = MF.getFrameInfo();
754 MachineRegisterInfo &MRI = MF.getRegInfo();
755
756 // Linux ABI treats var-arg calls the same way as regular ones.
757 bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;
758
759 // Assign locations to all of the incoming arguments.
760 SmallVector<CCValAssign, 16> ArgLocs;
761 HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs,
762 *DAG.getContext(),
763 MF.getFunction().getFunctionType()->getNumParams());
764
765 if (Subtarget.useHVXOps())
766 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_HVX);
767 else
768 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);
769
770 // For LLVM, in the case when returning a struct by value (>8 bytes),
771 // the first argument is a pointer that points to the location on the
772 // caller's stack where the return value will be stored. For Hexagon, the
773 // location on the caller's stack is passed only when the struct size is
774 // smaller than or equal to 8 bytes. If not, no address is passed into the
775 // callee and the callee returns the result directly through R0/R1.
776 auto NextSingleReg = [] (const TargetRegisterClass &RC, unsigned Reg) {
777 switch (RC.getID()) {
778 case Hexagon::IntRegsRegClassID:
779 return Reg - Hexagon::R0 + 1;
780 case Hexagon::DoubleRegsRegClassID:
781 return (Reg - Hexagon::D0 + 1) * 2;
782 case Hexagon::HvxVRRegClassID:
783 return Reg - Hexagon::V0 + 1;
784 case Hexagon::HvxWRRegClassID:
785 return (Reg - Hexagon::W0 + 1) * 2;
786 }
787 llvm_unreachable("Unexpected register class");
789
790 auto &HFL = const_cast<HexagonFrameLowering&>(*Subtarget.getFrameLowering());
791 auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
792 HFL.FirstVarArgSavedReg = 0;
793 HMFI.setFirstNamedArgFrameIndex(-int(MFI.getNumFixedObjects()));
794
795 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
796 CCValAssign &VA = ArgLocs[i];
797 ISD::ArgFlagsTy Flags = Ins[i].Flags;
798 bool ByVal = Flags.isByVal();
799
800 // Arguments passed in registers:
801 // 1. 32- and 64-bit values and HVX vectors are passed directly,
802 // 2. Large structs are passed via an address, and the address is
803 // passed in a register.
804 if (VA.isRegLoc() && ByVal && Flags.getByValSize() <= 8)
805 llvm_unreachable("ByValSize must be bigger than 8 bytes");
806
807 bool InReg = VA.isRegLoc() &&
808 (!ByVal || (ByVal && Flags.getByValSize() > 8));
809
810 if (InReg) {
811 MVT RegVT = VA.getLocVT();
812 if (VA.getLocInfo() == CCValAssign::BCvt)
813 RegVT = VA.getValVT();
814
815 const TargetRegisterClass *RC = getRegClassFor(RegVT);
816 Register VReg = MRI.createVirtualRegister(RC);
817 SDValue Copy = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
818
819 // Treat values of type MVT::i1 specially: they are passed in
820 // registers of type i32, but they need to remain as values of
821 // type i1 for consistency of the argument lowering.
822 if (VA.getValVT() == MVT::i1) {
823 assert(RegVT.getSizeInBits() <= 32);
824 SDValue T = DAG.getNode(ISD::AND, dl, RegVT,
825 Copy, DAG.getConstant(1, dl, RegVT));
826 Copy = DAG.getSetCC(dl, MVT::i1, T, DAG.getConstant(0, dl, RegVT),
827 ISD::SETNE);
828 } else {
829#ifndef NDEBUG
830 unsigned RegSize = RegVT.getSizeInBits();
831 assert(RegSize == 32 || RegSize == 64 ||
832 Subtarget.isHVXVectorType(RegVT));
834 }
835 InVals.push_back(Copy);
836 MRI.addLiveIn(VA.getLocReg(), VReg);
837 HFL.FirstVarArgSavedReg = NextSingleReg(*RC, VA.getLocReg());
838 } else {
839 assert(VA.isMemLoc() && "Argument should be passed in memory");
840
841 // If it's a byval parameter, then we need to compute the
842 // "real" size, not the size of the pointer.
843 unsigned ObjSize = Flags.isByVal()
844 ? Flags.getByValSize()
845 : VA.getLocVT().getStoreSizeInBits() / 8;
846
847 // Create the frame index object for this incoming parameter.
848 int Offset = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
849 int FI = MFI.CreateFixedObject(ObjSize, Offset, true);
850 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
851
852 if (Flags.isByVal()) {
853 // If it's a pass-by-value aggregate, then do not dereference the stack
854 // location. Instead, we should generate a reference to the stack
855 // location.
856 InVals.push_back(FIN);
857 } else {
858 SDValue L = DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
859 MachinePointerInfo::getFixedStack(MF, FI, 0));
860 InVals.push_back(L);
861 }
862 }
863 }
864
865 if (IsVarArg && Subtarget.isEnvironmentMusl()) {
866 for (int i = HFL.FirstVarArgSavedReg; i < 6; i++)
867 MRI.addLiveIn(Hexagon::R0+i);
868 }
869
870 if (IsVarArg && Subtarget.isEnvironmentMusl()) {
871 HMFI.setFirstNamedArgFrameIndex(HMFI.getFirstNamedArgFrameIndex() - 1);
872 HMFI.setLastNamedArgFrameIndex(-int(MFI.getNumFixedObjects()));
873
874 // Create Frame index for the start of register saved area.
875 int NumVarArgRegs = 6 - HFL.FirstVarArgSavedReg;
876 bool RequiresPadding = (NumVarArgRegs & 1);
877 int RegSaveAreaSizePlusPadding = RequiresPadding
878 ? (NumVarArgRegs + 1) * 4
879 : NumVarArgRegs * 4;
880
881 if (RegSaveAreaSizePlusPadding > 0) {
882 // The offset to saved register area should be 8 byte aligned.
883 int RegAreaStart = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset();
884 if (!(RegAreaStart % 8))
885 RegAreaStart = (RegAreaStart + 7) & -8;
886
887 int RegSaveAreaFrameIndex =
888 MFI.CreateFixedObject(RegSaveAreaSizePlusPadding, RegAreaStart, true);
889 HMFI.setRegSavedAreaStartFrameIndex(RegSaveAreaFrameIndex);
890
891 // This will point to the next argument passed via stack.
892 int Offset = RegAreaStart + RegSaveAreaSizePlusPadding;
893 int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
894 HMFI.setVarArgsFrameIndex(FI);
895 } else {
896 // This will point to the next argument passed via stack, when
897 // there is no saved register area.
898 int Offset = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset();
899 int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
900 HMFI.setRegSavedAreaStartFrameIndex(FI);
901 HMFI.setVarArgsFrameIndex(FI);
902 }
903 }
904
905
906 if (IsVarArg && !Subtarget.isEnvironmentMusl()) {
907 // This will point to the next argument passed via stack.
908 int Offset = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset();
909 int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
910 HMFI.setVarArgsFrameIndex(FI);
911 }
912
913 return Chain;
914}
915
916SDValue
917HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
918 // VASTART stores the address of the VarArgsFrameIndex slot into the
919 // memory location argument.
920 MachineFunction &MF = DAG.getMachineFunction();
921 HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
922 SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
923 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
924
925 if (!Subtarget.isEnvironmentMusl()) {
926 return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr, Op.getOperand(1),
927 MachinePointerInfo(SV));
928 }
929 auto &FuncInfo = *MF.getInfo<HexagonMachineFunctionInfo>();
930 auto &HFL = *Subtarget.getFrameLowering();
931 SDLoc DL(Op);
932 SmallVector<SDValue, 8> MemOps;
933
934 // Get frame index of va_list.
935 SDValue FIN = Op.getOperand(1);
936
937 // If first Vararg register is odd, add 4 bytes to start of
938 // saved register area to point to the first register location.
939 // This is because the saved register area has to be 8 byte aligned.
940 // In case of an odd start register, there will be 4 bytes of padding in
941 // the beginning of the saved register area. If all registers are used up,
942 // the following condition will handle it correctly.
943 SDValue SavedRegAreaStartFrameIndex =
944 DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(), MVT::i32);
945
946 auto PtrVT = getPointerTy(DAG.getDataLayout());
947
948 if (HFL.FirstVarArgSavedReg & 1)
949 SavedRegAreaStartFrameIndex =
950 DAG.getNode(ISD::ADD, DL, PtrVT,
951 DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(),
952 MVT::i32),
953 DAG.getIntPtrConstant(4, DL));
954
955 // Store the saved register area start pointer.
956 SDValue Store =
957 DAG.getStore(Op.getOperand(0), DL,
958 SavedRegAreaStartFrameIndex,
959 FIN, MachinePointerInfo(SV));
960 MemOps.push_back(Store);
961
962 // Store saved register area end pointer.
963 FIN = DAG.getNode(ISD::ADD, DL, PtrVT,
964 FIN, DAG.getIntPtrConstant(4, DL));
965 Store = DAG.getStore(Op.getOperand(0), DL,
966 DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),
967 PtrVT),
968 FIN, MachinePointerInfo(SV, 4));
969 MemOps.push_back(Store);
970
971 // Store overflow area pointer.
972 FIN = DAG.getNode(ISD::ADD, DL, PtrVT,
973 FIN, DAG.getIntPtrConstant(4, DL));
974 Store = DAG.getStore(Op.getOperand(0), DL,
975 DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),
976 PtrVT),
977 FIN, MachinePointerInfo(SV, 8));
978 MemOps.push_back(Store);
979
980 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
981}
982
983SDValue
984HexagonTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
985 // Assert that the linux ABI is enabled for the current compilation.
986 assert(Subtarget.isEnvironmentMusl() && "Linux ABI should be enabled");
987 SDValue Chain = Op.getOperand(0);
988 SDValue DestPtr = Op.getOperand(1);
989 SDValue SrcPtr = Op.getOperand(2);
990 const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
991 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
992 SDLoc DL(Op);
993 // Size of the va_list is 12 bytes as it has 3 pointers. Therefore,
994 // we need to memcopy 12 bytes from va_list to another similar list.
995 return DAG.getMemcpy(Chain, DL, DestPtr, SrcPtr,
996 DAG.getIntPtrConstant(12, DL), Align(4),
997 /*isVolatile*/ false, false, false,
998 MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
999}
1000
1001SDValue HexagonTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
1002 const SDLoc &dl(Op);
1003 SDValue LHS = Op.getOperand(0);
1004 SDValue RHS = Op.getOperand(1);
1005 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
1006 MVT ResTy = ty(Op);
1007 MVT OpTy = ty(LHS);
1008
1009 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
[8] Taking false branch
1010 MVT ElemTy = OpTy.getVectorElementType();
1011 assert(ElemTy.isScalarInteger());
1012 MVT WideTy = MVT::getVectorVT(MVT::getIntegerVT(2*ElemTy.getSizeInBits()),
1013 OpTy.getVectorNumElements());
1014 return DAG.getSetCC(dl, ResTy,
1015 DAG.getSExtOrTrunc(LHS, SDLoc(LHS), WideTy),
1016 DAG.getSExtOrTrunc(RHS, SDLoc(RHS), WideTy), CC);
1017 }
1018
1019 // Treat all other vector types as legal.
1020 if (ResTy.isVector())
[9] Taking false branch
1021 return Op;
1022
1023 // Comparisons of short integers should use sign-extend, not zero-extend,
1024 // since we can represent small negative values in the compare instructions.
1025 // The LLVM default is to use zero-extend arbitrarily in these cases.
1026 auto isSExtFree = [this](SDValue N) {
1027 switch (N.getOpcode()) {
[14] Calling 'SDValue::getOpcode'
1028 case ISD::TRUNCATE: {
1029 // A sign-extend of a truncate of a sign-extend is free.
1030 SDValue Op = N.getOperand(0);
1031 if (Op.getOpcode() != ISD::AssertSext)
1032 return false;
1033 EVT OrigTy = cast<VTSDNode>(Op.getOperand(1))->getVT();
1034 unsigned ThisBW = ty(N).getSizeInBits();
1035 unsigned OrigBW = OrigTy.getSizeInBits();
1036 // The type that was sign-extended to get the AssertSext must be
1037 // narrower than the type of N (so that N has still the same value
1038 // as the original).
1039 return ThisBW >= OrigBW;
1040 }
1041 case ISD::LOAD:
1042 // We have sign-extended loads.
1043 return true;
1044 }
1045 return false;
1046 };
1047
1048 if (OpTy == MVT::i8 || OpTy == MVT::i16) {
[10] Taking true branch
1049 ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS);
1050 bool IsNegative = C && C->getAPIntValue().isNegative();
[11] Assuming 'C' is null
1051 if (IsNegative || isSExtFree(LHS) || isSExtFree(RHS))
[11.1] 'IsNegative' is false
[12] Value assigned to 'N.Node'
[13] Calling 'operator()'
1052 return DAG.getSetCC(dl, ResTy,
1053 DAG.getSExtOrTrunc(LHS, SDLoc(LHS), MVT::i32),
1054 DAG.getSExtOrTrunc(RHS, SDLoc(RHS), MVT::i32), CC);
1055 }
1056
1057 return SDValue();
1058}
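
Summary of the reported path: the analyzer walks LowerSETCC with an i8/i16
operand type (step 10), assumes dyn_cast<ConstantSDNode>(RHS) returns null
(step 11), passes RHS into the isSExtFree lambda (steps 12-13), and flags the
SDValue::getOpcode() call there (step 14) as a null dereference. In practice
Op.getOperand(0)/(1) produce SDValues wrapping non-null nodes, an invariant
the analyzer does not model, so this is plausibly a false positive. A
distilled model of the path, using hypothetical *Model names rather than the
real LLVM classes:

    struct NodeModel { unsigned Opcode = 0; };

    struct ValueModel {
      NodeModel *Node = nullptr;                            // step 12: N.Node
      unsigned getOpcode() const { return Node->Opcode; }   // step 14: deref
    };

    static bool isSExtFreeModel(ValueModel N) {
      switch (N.getOpcode()) {   // step 13: the lambda's operator() analogue
      default:
        return false;
      }
    }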
1059
1060SDValue
1061HexagonTargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
1062 SDValue PredOp = Op.getOperand(0);
1063 SDValue Op1 = Op.getOperand(1), Op2 = Op.getOperand(2);
1064 MVT OpTy = ty(Op1);
1065 const SDLoc &dl(Op);
1066
1067 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
1068 MVT ElemTy = OpTy.getVectorElementType();
1069 assert(ElemTy.isScalarInteger());
1070 MVT WideTy = MVT::getVectorVT(MVT::getIntegerVT(2*ElemTy.getSizeInBits()),
1071 OpTy.getVectorNumElements());
1072 // Generate (trunc (select (_, sext, sext))).
1073 return DAG.getSExtOrTrunc(
1074 DAG.getSelect(dl, WideTy, PredOp,
1075 DAG.getSExtOrTrunc(Op1, dl, WideTy),
1076 DAG.getSExtOrTrunc(Op2, dl, WideTy)),
1077 dl, OpTy);
1078 }
1079
1080 return SDValue();
1081}
1082
1083SDValue
1084HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
1085 EVT ValTy = Op.getValueType();
1086 ConstantPoolSDNode *CPN = cast<ConstantPoolSDNode>(Op);
1087 Constant *CVal = nullptr;
1088 bool isVTi1Type = false;
1089 if (auto *CV = dyn_cast<ConstantVector>(CPN->getConstVal())) {
1090 if (CV->getType()->getVectorElementType()->isIntegerTy(1)) {
1091 IRBuilder<> IRB(CV->getContext());
1092 SmallVector<Constant*, 128> NewConst;
1093 unsigned VecLen = CV->getNumOperands();
1094 assert(isPowerOf2_32(VecLen) &&
1095 "conversion only supported for pow2 VectorSize");
1096 for (unsigned i = 0; i < VecLen; ++i)
1097 NewConst.push_back(IRB.getInt8(CV->getOperand(i)->isZeroValue()));
1098
1099 CVal = ConstantVector::get(NewConst);
1100 isVTi1Type = true;
1101 }
1102 }
1103 unsigned Align = CPN->getAlignment();
1104 bool IsPositionIndependent = isPositionIndependent();
1105 unsigned char TF = IsPositionIndependent ? HexagonII::MO_PCREL : 0;
1106
1107 unsigned Offset = 0;
1108 SDValue T;
1109 if (CPN->isMachineConstantPoolEntry())
1110 T = DAG.getTargetConstantPool(CPN->getMachineCPVal(), ValTy, Align, Offset,
1111 TF);
1112 else if (isVTi1Type)
1113 T = DAG.getTargetConstantPool(CVal, ValTy, Align, Offset, TF);
1114 else
1115 T = DAG.getTargetConstantPool(CPN->getConstVal(), ValTy, Align, Offset, TF);
1116
1117 assert(cast<ConstantPoolSDNode>(T)->getTargetFlags() == TF &&
1118 "Inconsistent target flag encountered");
1119
1120 if (IsPositionIndependent)
1121 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), ValTy, T);
1122 return DAG.getNode(HexagonISD::CP, SDLoc(Op), ValTy, T);
1123}
1124
1125SDValue
1126HexagonTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
1127 EVT VT = Op.getValueType();
1128 int Idx = cast<JumpTableSDNode>(Op)->getIndex();
1129 if (isPositionIndependent()) {
1130 SDValue T = DAG.getTargetJumpTable(Idx, VT, HexagonII::MO_PCREL);
1131 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), VT, T);
1132 }
1133
1134 SDValue T = DAG.getTargetJumpTable(Idx, VT);
1135 return DAG.getNode(HexagonISD::JT, SDLoc(Op), VT, T);
1136}
1137
1138SDValue
1139HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
1140 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1141 MachineFunction &MF = DAG.getMachineFunction();
1142 MachineFrameInfo &MFI = MF.getFrameInfo();
1143 MFI.setReturnAddressIsTaken(true);
1144
1145 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1146 return SDValue();
1147
1148 EVT VT = Op.getValueType();
1149 SDLoc dl(Op);
1150 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1151 if (Depth) {
1152 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
1153 SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
1154 return DAG.getLoad(VT, dl, DAG.getEntryNode(),
1155 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
1156 MachinePointerInfo());
1157 }
1158
1159 // Return LR, which contains the return address. Mark it an implicit live-in.
1160 unsigned Reg = MF.addLiveIn(HRI.getRARegister(), getRegClassFor(MVT::i32));
1161 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
1162}
1163
1164SDValue
1165HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
1166 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1167 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
1168 MFI.setFrameAddressIsTaken(true);
1169
1170 EVT VT = Op.getValueType();
1171 SDLoc dl(Op);
1172 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1173 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
1174 HRI.getFrameRegister(), VT);
1175 while (Depth--)
1176 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
1177 MachinePointerInfo());
1178 return FrameAddr;
1179}
1180
1181SDValue
1182HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const {
1183 SDLoc dl(Op);
1184 return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
1185}
1186
1187SDValue
1188HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const {
1189 SDLoc dl(Op);
1190 auto *GAN = cast<GlobalAddressSDNode>(Op);
1191 auto PtrVT = getPointerTy(DAG.getDataLayout());
1192 auto *GV = GAN->getGlobal();
1193 int64_t Offset = GAN->getOffset();
1194
1195 auto &HLOF = *HTM.getObjFileLowering();
1196 Reloc::Model RM = HTM.getRelocationModel();
1197
1198 if (RM == Reloc::Static) {
1199 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset);
1200 const GlobalObject *GO = GV->getBaseObject();
1201 if (GO && Subtarget.useSmallData() && HLOF.isGlobalInSmallSection(GO, HTM))
1202 return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, GA);
1203 return DAG.getNode(HexagonISD::CONST32, dl, PtrVT, GA);
1204 }
1205
1206 bool UsePCRel = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
1207 if (UsePCRel) {
1208 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset,
1209 HexagonII::MO_PCREL);
1210 return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, GA);
1211 }
1212
1213 // Use GOT index.
1214 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
1215 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, HexagonII::MO_GOT);
1216 SDValue Off = DAG.getConstant(Offset, dl, MVT::i32);
1217 return DAG.getNode(HexagonISD::AT_GOT, dl, PtrVT, GOT, GA, Off);
1218}
1219
1220// Specifies that for loads and stores VT can be promoted to PromotedLdStVT.
1221SDValue
1222HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
1223 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
1224 SDLoc dl(Op);
1225 EVT PtrVT = getPointerTy(DAG.getDataLayout());
1226
1227 Reloc::Model RM = HTM.getRelocationModel();
1228 if (RM == Reloc::Static) {
1229 SDValue A = DAG.getTargetBlockAddress(BA, PtrVT);
1230 return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, A);
1231 }
1232
1233 SDValue A = DAG.getTargetBlockAddress(BA, PtrVT, 0, HexagonII::MO_PCREL);
1234 return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, A);
1235}
1236
1237SDValue
1238HexagonTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG)
1239 const {
1240 EVT PtrVT = getPointerTy(DAG.getDataLayout());
1241 SDValue GOTSym = DAG.getTargetExternalSymbol(HEXAGON_GOT_SYM_NAME, PtrVT,
1242 HexagonII::MO_PCREL);
1243 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), PtrVT, GOTSym);
1244}
1245
1246SDValue
1247HexagonTargetLowering::GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
1248 GlobalAddressSDNode *GA, SDValue Glue, EVT PtrVT, unsigned ReturnReg,
1249 unsigned char OperandFlags) const {
1250 MachineFunction &MF = DAG.getMachineFunction();
1251 MachineFrameInfo &MFI = MF.getFrameInfo();
1252 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1253 SDLoc dl(GA);
1254 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
1255 GA->getValueType(0),
1256 GA->getOffset(),
1257 OperandFlags);
1258 // Create the operands for the call. The operands should be:
1259 // 1. Chain SDValue
1260 // 2. Callee, which in this case is the global address value.
1261 // 3. Registers live into the call. In this case it is R0, as we
1262 //    have just one argument to be passed.
1263 // 4. Glue.
1264 // Note: The order is important.
1265
1266 const auto &HRI = *Subtarget.getRegisterInfo();
1267 const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallingConv::C);
1268 assert(Mask && "Missing call preserved mask for calling convention");
1269 SDValue Ops[] = { Chain, TGA, DAG.getRegister(Hexagon::R0, PtrVT),
1270 DAG.getRegisterMask(Mask), Glue };
1271 Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, Ops);
1272
1273 // Inform MFI that function has calls.
1274 MFI.setAdjustsStack(true);
1275
1276 Glue = Chain.getValue(1);
1277 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Glue);
1278}
1279
1280//
1281// Lower using the initial-exec model for TLS addresses
1282//
1283SDValue
1284HexagonTargetLowering::LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
1285 SelectionDAG &DAG) const {
1286 SDLoc dl(GA);
1287 int64_t Offset = GA->getOffset();
1288 auto PtrVT = getPointerTy(DAG.getDataLayout());
1289
1290 // Get the thread pointer.
1291 SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT);
1292
1293 bool IsPositionIndependent = isPositionIndependent();
1294 unsigned char TF =
1295 IsPositionIndependent ? HexagonII::MO_IEGOT : HexagonII::MO_IE;
1296
1297 // First generate the TLS symbol address
1298 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT,
1299 Offset, TF);
1300
1301 SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);
1302
1303 if (IsPositionIndependent) {
1304 // Generate the GOT pointer in case of position independent code
1305 SDValue GOT = LowerGLOBAL_OFFSET_TABLE(Sym, DAG);
1306
1307 // Add the TLS symbol address to the GOT pointer. This gives a
1308 // GOT-relative relocation for the symbol.
1309 Sym = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym);
1310 }
1311
1312 // Load the offset value for the TLS symbol. This offset is relative
1313 // to the thread pointer.
1314 SDValue LoadOffset =
1315 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Sym, MachinePointerInfo());
1316
1317 // The address of the thread-local variable is the thread pointer
1318 // plus the offset of the variable.
1319 return DAG.getNode(ISD::ADD, dl, PtrVT, TP, LoadOffset);
1320}
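// The address computed above, in pseudo-code (illustrative):
//   non-PIC: addr = UGP + load(sym@IE)
//   PIC:     addr = UGP + load(GOT + sym@IEGOT)
// where UGP holds the thread pointer.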
1321
1322//
1323// Lower using the local-exec model for TLS addresses
1324//
1325SDValue
1326HexagonTargetLowering::LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
1327 SelectionDAG &DAG) const {
1328 SDLoc dl(GA);
1329 int64_t Offset = GA->getOffset();
1330 auto PtrVT = getPointerTy(DAG.getDataLayout());
1331
1332 // Get the thread pointer.
1333 SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT);
1334 // Generate the TLS symbol address
1335 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset,
1336 HexagonII::MO_TPREL);
1337 SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);
1338
1339 // The address of the thread-local variable is the thread pointer
1340 // plus the offset of the variable.
1341 return DAG.getNode(ISD::ADD, dl, PtrVT, TP, Sym);
1342}
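// Equivalent computation (illustrative): addr = UGP + sym@TPREL, i.e. the
// thread pointer plus a link-time-constant offset; no load is required.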
1343
1344//
1345// Lower using the general-dynamic model for TLS addresses
1346//
1347SDValue
1348HexagonTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
1349 SelectionDAG &DAG) const {
1350 SDLoc dl(GA);
1351 int64_t Offset = GA->getOffset();
1352 auto PtrVT = getPointerTy(DAG.getDataLayout());
1353
1354 // First generate the TLS symbol address
1355 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset,
1356 HexagonII::MO_GDGOT);
1357
1358 // Then, generate the GOT pointer
1359 SDValue GOT = LowerGLOBAL_OFFSET_TABLE(TGA, DAG);
1360
1361 // Add the TLS symbol and the GOT pointer
1362 SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);
1363 SDValue Chain = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym);
1364
1365 // Copy over the argument to R0
1366 SDValue InFlag;
1367 Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, Hexagon::R0, Chain, InFlag);
1368 InFlag = Chain.getValue(1);
1369
1370 unsigned Flags =
1371 static_cast<const HexagonSubtarget &>(DAG.getSubtarget()).useLongCalls()
1372 ? HexagonII::MO_GDPLT | HexagonII::HMOTF_ConstExtended
1373 : HexagonII::MO_GDPLT;
1374
1375 return GetDynamicTLSAddr(DAG, Chain, GA, InFlag, PtrVT,
1376 Hexagon::R0, Flags);
1377}
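// Call sequence produced above (illustrative):
//   R0 = GOT + sym@GDGOT          ; argument: the symbol's GOT slot
//   call via @GDPLT               ; resolves through __tls_get_addr
//   the variable's address comes back in R0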
1378
1379//
1380// Lower TLS addresses.
1381//
1382// For now for dynamic models, we only support the general dynamic model.
1383//
1384SDValue
1385HexagonTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1386 SelectionDAG &DAG) const {
1387 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
1388
1389 switch (HTM.getTLSModel(GA->getGlobal())) {
1390 case TLSModel::GeneralDynamic:
1391 case TLSModel::LocalDynamic:
1392 return LowerToTLSGeneralDynamicModel(GA, DAG);
1393 case TLSModel::InitialExec:
1394 return LowerToTLSInitialExecModel(GA, DAG);
1395 case TLSModel::LocalExec:
1396 return LowerToTLSLocalExecModel(GA, DAG);
1397 }
1398 llvm_unreachable("Bogus TLS model");
1399}
1400
1401//===----------------------------------------------------------------------===//
1402// TargetLowering Implementation
1403//===----------------------------------------------------------------------===//
1404
1405HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
1406 const HexagonSubtarget &ST)
1407 : TargetLowering(TM), HTM(static_cast<const HexagonTargetMachine&>(TM)),
1408 Subtarget(ST) {
1409 auto &HRI = *Subtarget.getRegisterInfo();
1410
1411 setPrefLoopAlignment(Align(16));
1412 setMinFunctionAlignment(Align(4));
1413 setPrefFunctionAlignment(Align(16));
1414 setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
1415 setBooleanContents(TargetLoweringBase::UndefinedBooleanContent);
1416 setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent);
1417
1418 setMaxAtomicSizeInBitsSupported(64);
1419 setMinCmpXchgSizeInBits(32);
1420
1421 if (EnableHexSDNodeSched)
1422 setSchedulingPreference(Sched::VLIW);
1423 else
1424 setSchedulingPreference(Sched::Source);
1425
1426 // Limits for inline expansion of memcpy/memmove
1427 MaxStoresPerMemcpy = MaxStoresPerMemcpyCL;
1428 MaxStoresPerMemcpyOptSize = MaxStoresPerMemcpyOptSizeCL;
1429 MaxStoresPerMemmove = MaxStoresPerMemmoveCL;
1430 MaxStoresPerMemmoveOptSize = MaxStoresPerMemmoveOptSizeCL;
1431 MaxStoresPerMemset = MaxStoresPerMemsetCL;
1432 MaxStoresPerMemsetOptSize = MaxStoresPerMemsetOptSizeCL;
1433
1434 //
1435 // Set up register classes.
1436 //
1437
1438 addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);
1439 addRegisterClass(MVT::v2i1, &Hexagon::PredRegsRegClass); // bbbbaaaa
1440 addRegisterClass(MVT::v4i1, &Hexagon::PredRegsRegClass); // ddccbbaa
1441 addRegisterClass(MVT::v8i1, &Hexagon::PredRegsRegClass); // hgfedcba
1442 addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
1443 addRegisterClass(MVT::v2i16, &Hexagon::IntRegsRegClass);
1444 addRegisterClass(MVT::v4i8, &Hexagon::IntRegsRegClass);
1445 addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);
1446 addRegisterClass(MVT::v8i8, &Hexagon::DoubleRegsRegClass);
1447 addRegisterClass(MVT::v4i16, &Hexagon::DoubleRegsRegClass);
1448 addRegisterClass(MVT::v2i32, &Hexagon::DoubleRegsRegClass);
1449
1450 addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
1451 addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);
1452
1453 //
1454 // Handling of scalar operations.
1455 //
1456 // All operations default to "legal", except:
1457 // - indexed loads and stores (pre-/post-incremented),
1458 // - ANY_EXTEND_VECTOR_INREG, ATOMIC_CMP_SWAP_WITH_SUCCESS, CONCAT_VECTORS,
1459 // ConstantFP, DEBUGTRAP, FCEIL, FCOPYSIGN, FEXP, FEXP2, FFLOOR, FGETSIGN,
1460 // FLOG, FLOG2, FLOG10, FMAXNUM, FMINNUM, FNEARBYINT, FRINT, FROUND, TRAP,
1461 // FTRUNC, PREFETCH, SIGN_EXTEND_VECTOR_INREG, ZERO_EXTEND_VECTOR_INREG,
1462 // which default to "expand" for at least one type.
1463
1464 // Misc operations.
1465 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
1466 setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
1467 setOperationAction(ISD::TRAP, MVT::Other, Legal);
1468 setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
1469 setOperationAction(ISD::JumpTable, MVT::i32, Custom);
1470 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
1471 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
1472 setOperationAction(ISD::INLINEASM, MVT::Other, Custom);
1473 setOperationAction(ISD::INLINEASM_BR, MVT::Other, Custom);
1474 setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
1475 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
1476 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1477 setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
1478 setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
1479 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
1480 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
1481
1482 // Custom legalize GlobalAddress nodes into CONST32.
1483 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
1484 setOperationAction(ISD::GlobalAddress, MVT::i8, Custom);
1485 setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
1486
1487 // Hexagon needs to optimize cases with negative constants.
1488 setOperationAction(ISD::SETCC, MVT::i8, Custom);
1489 setOperationAction(ISD::SETCC, MVT::i16, Custom);
1490 setOperationAction(ISD::SETCC, MVT::v4i8, Custom);
1491 setOperationAction(ISD::SETCC, MVT::v2i16, Custom);
1492
1493 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1494 setOperationAction(ISD::VASTART, MVT::Other, Custom);
1495 setOperationAction(ISD::VAEND, MVT::Other, Expand);
1496 setOperationAction(ISD::VAARG, MVT::Other, Expand);
1497 if (Subtarget.isEnvironmentMusl())
1498 setOperationAction(ISD::VACOPY, MVT::Other, Custom);
1499 else
1500 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
1501
1502 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
1503 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
1504 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
1505
1506 if (EmitJumpTables)
1507 setMinimumJumpTableEntries(MinimumJumpTables);
1508 else
1509 setMinimumJumpTableEntries(std::numeric_limits<unsigned>::max());
1510 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1511
1512 setOperationAction(ISD::ABS, MVT::i32, Legal);
1513 setOperationAction(ISD::ABS, MVT::i64, Legal);
1514
1515 // Hexagon has A4_addp_c and A4_subp_c that take and generate a carry bit,
1516 // but they only operate on i64.
1517 for (MVT VT : MVT::integer_valuetypes()) {
1518 setOperationAction(ISD::UADDO, VT, Custom);
1519 setOperationAction(ISD::USUBO, VT, Custom);
1520 setOperationAction(ISD::SADDO, VT, Expand);
1521 setOperationAction(ISD::SSUBO, VT, Expand);
1522 setOperationAction(ISD::ADDCARRY, VT, Expand);
1523 setOperationAction(ISD::SUBCARRY, VT, Expand);
1524 }
1525 setOperationAction(ISD::ADDCARRY, MVT::i64, Custom);
1526 setOperationAction(ISD::SUBCARRY, MVT::i64, Custom);
1527
1528 setOperationAction(ISD::CTLZ, MVT::i8, Promote);
1529 setOperationAction(ISD::CTLZ, MVT::i16, Promote);
1530 setOperationAction(ISD::CTTZ, MVT::i8, Promote);
1531 setOperationAction(ISD::CTTZ, MVT::i16, Promote);
1532
1533 // Popcount can count # of 1s in i64 but returns i32.
1534 setOperationAction(ISD::CTPOP, MVT::i8, Promote);
1535 setOperationAction(ISD::CTPOP, MVT::i16, Promote);
1536 setOperationAction(ISD::CTPOP, MVT::i32, Promote);
1537 setOperationAction(ISD::CTPOP, MVT::i64, Legal);
1538
1539 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
1540 setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
1541 setOperationAction(ISD::BSWAP, MVT::i32, Legal);
1542 setOperationAction(ISD::BSWAP, MVT::i64, Legal);
1543
1544 setOperationAction(ISD::FSHL, MVT::i32, Legal);
1545 setOperationAction(ISD::FSHL, MVT::i64, Legal);
1546 setOperationAction(ISD::FSHR, MVT::i32, Legal);
1547 setOperationAction(ISD::FSHR, MVT::i64, Legal);
1548
1549 for (unsigned IntExpOp :
1550 {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM,
1551 ISD::SDIVREM, ISD::UDIVREM, ISD::ROTL, ISD::ROTR,
1552 ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS,
1553 ISD::SMUL_LOHI, ISD::UMUL_LOHI}) {
1554 for (MVT VT : MVT::integer_valuetypes())
1555 setOperationAction(IntExpOp, VT, Expand);
1556 }
1557
1558 for (unsigned FPExpOp :
1559 {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FSINCOS,
1560 ISD::FPOW, ISD::FCOPYSIGN}) {
1561 for (MVT VT : MVT::fp_valuetypes())
1562 setOperationAction(FPExpOp, VT, Expand);
1563 }
1564
1565 // No extending loads from i32.
1566 for (MVT VT : MVT::integer_valuetypes()) {
1567 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);
1568 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
1569 setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
1570 }
1571 // Turn FP truncstore into trunc + store.
1572 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1573 // Turn FP extload into load/fpextend.
1574 for (MVT VT : MVT::fp_valuetypes())
1575 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1576
1577 // Expand BR_CC and SELECT_CC for all integer and fp types.
1578 for (MVT VT : MVT::integer_valuetypes()) {
1579 setOperationAction(ISD::BR_CC, VT, Expand);
1580 setOperationAction(ISD::SELECT_CC, VT, Expand);
1581 }
1582 for (MVT VT : MVT::fp_valuetypes()) {
1583 setOperationAction(ISD::BR_CC, VT, Expand);
1584 setOperationAction(ISD::SELECT_CC, VT, Expand);
1585 }
1586 setOperationAction(ISD::BR_CC, MVT::Other, Expand);
1587
1588 //
1589 // Handling of vector operations.
1590 //
1591
1592 // Set the action for vector operations to "expand", then override it with
1593 // either "custom" or "legal" for specific cases.
1594 static const unsigned VectExpOps[] = {
1595 // Integer arithmetic:
1596 ISD::ADD, ISD::SUB, ISD::MUL, ISD::SDIV, ISD::UDIV,
1597 ISD::SREM, ISD::UREM, ISD::SDIVREM, ISD::UDIVREM, ISD::SADDO,
1598 ISD::UADDO, ISD::SSUBO, ISD::USUBO, ISD::SMUL_LOHI, ISD::UMUL_LOHI,
1599 // Logical/bit:
1600 ISD::AND, ISD::OR, ISD::XOR, ISD::ROTL, ISD::ROTR,
1601 ISD::CTPOP, ISD::CTLZ, ISD::CTTZ,
1602 // Floating point arithmetic/math functions:
1603 ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FMA, ISD::FDIV,
1604 ISD::FREM, ISD::FNEG, ISD::FABS, ISD::FSQRT, ISD::FSIN,
1605 ISD::FCOS, ISD::FPOW, ISD::FLOG, ISD::FLOG2,
1606 ISD::FLOG10, ISD::FEXP, ISD::FEXP2, ISD::FCEIL, ISD::FTRUNC,
1607 ISD::FRINT, ISD::FNEARBYINT, ISD::FROUND, ISD::FFLOOR,
1608 ISD::FMINNUM, ISD::FMAXNUM, ISD::FSINCOS,
1609 // Misc:
1610 ISD::BR_CC, ISD::SELECT_CC, ISD::ConstantPool,
1611 // Vector:
1612 ISD::BUILD_VECTOR, ISD::SCALAR_TO_VECTOR,
1613 ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT,
1614 ISD::EXTRACT_SUBVECTOR, ISD::INSERT_SUBVECTOR,
1615 ISD::CONCAT_VECTORS, ISD::VECTOR_SHUFFLE
1616 };
1617
1618 for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
1619 for (unsigned VectExpOp : VectExpOps)
1620 setOperationAction(VectExpOp, VT, Expand);
1621
1622 // Expand all extending loads and truncating stores:
1623 for (MVT TargetVT : MVT::fixedlen_vector_valuetypes()) {
1624 if (TargetVT == VT)
1625 continue;
1626 setLoadExtAction(ISD::EXTLOAD, TargetVT, VT, Expand);
1627 setLoadExtAction(ISD::ZEXTLOAD, TargetVT, VT, Expand);
1628 setLoadExtAction(ISD::SEXTLOAD, TargetVT, VT, Expand);
1629 setTruncStoreAction(VT, TargetVT, Expand);
1630 }
1631
1632 // Normalize all inputs to SELECT to be vectors of i32.
1633 if (VT.getVectorElementType() != MVT::i32) {
1634 MVT VT32 = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
1635 setOperationAction(ISD::SELECT, VT, Promote);
1636 AddPromotedToType(ISD::SELECT, VT, VT32);
1637 }
1638 setOperationAction(ISD::SRA, VT, Custom);
1639 setOperationAction(ISD::SHL, VT, Custom);
1640 setOperationAction(ISD::SRL, VT, Custom);
1641 }
1642
1643 // Extending loads from (native) vectors of i8 into (native) vectors of i16
1644 // are legal.
1645 setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, MVT::v2i8, Legal);
1646 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, MVT::v2i8, Legal);
1647 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, MVT::v2i8, Legal);
1648 setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, MVT::v4i8, Legal);
1649 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, MVT::v4i8, Legal);
1650 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, MVT::v4i8, Legal);
1651
1652 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal);
1653 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
1654 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
1655
1656 // Types natively supported:
1657 for (MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8,
1658 MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
1659 setOperationAction(ISD::BUILD_VECTOR, NativeVT, Custom);
1660 setOperationAction(ISD::EXTRACT_VECTOR_ELT, NativeVT, Custom);
1661 setOperationAction(ISD::INSERT_VECTOR_ELT, NativeVT, Custom);
1662 setOperationAction(ISD::EXTRACT_SUBVECTOR, NativeVT, Custom);
1663 setOperationAction(ISD::INSERT_SUBVECTOR, NativeVT, Custom);
1664 setOperationAction(ISD::CONCAT_VECTORS, NativeVT, Custom);
1665
1666 setOperationAction(ISD::ADD, NativeVT, Legal);
1667 setOperationAction(ISD::SUB, NativeVT, Legal);
1668 setOperationAction(ISD::MUL, NativeVT, Legal);
1669 setOperationAction(ISD::AND, NativeVT, Legal);
1670 setOperationAction(ISD::OR, NativeVT, Legal);
1671 setOperationAction(ISD::XOR, NativeVT, Legal);
1672 }
1673
1674 // Custom lower unaligned loads.
1675 // Also, for both loads and stores, verify the alignment of the address
1676 // in case it is a compile-time constant. This is a usability feature to
1677 // provide a meaningful error message to users.
1678 for (MVT VT : {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8,
1679 MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
1680 setOperationAction(ISD::LOAD, VT, Custom);
1681 setOperationAction(ISD::STORE, VT, Custom);
1682 }
1683
1684 setOperationAction(ISD::STORE, MVT::v128i1, Custom);
1685
1686 for (MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32,
1687 MVT::v4i16}) {
1688 setCondCodeAction(ISD::SETNE, VT, Expand);
1689 setCondCodeAction(ISD::SETLE, VT, Expand);
1690 setCondCodeAction(ISD::SETGE, VT, Expand);
1691 setCondCodeAction(ISD::SETLT, VT, Expand);
1692 setCondCodeAction(ISD::SETULE, VT, Expand);
1693 setCondCodeAction(ISD::SETUGE, VT, Expand);
1694 setCondCodeAction(ISD::SETULT, VT, Expand);
1695 }
1696
1697 // Custom-lower bitcasts from i8 to v8i1.
1698 setOperationAction(ISD::BITCAST, MVT::i8, Custom);
1699 setOperationAction(ISD::BITCAST, MVT::i32, Custom);
1700 setOperationAction(ISD::BITCAST, MVT::i64, Custom);
1701 setOperationAction(ISD::SETCC, MVT::v2i16, Custom);
1702 setOperationAction(ISD::VSELECT, MVT::v4i8, Custom);
1703 setOperationAction(ISD::VSELECT, MVT::v2i16, Custom);
1704 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i8, Custom);
1705 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
1706 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
1707
1708 // V5+.
1709 setOperationAction(ISD::FMA, MVT::f64, Expand);
1710 setOperationAction(ISD::FADD, MVT::f64, Expand);
1711 setOperationAction(ISD::FSUB, MVT::f64, Expand);
1712 setOperationAction(ISD::FMUL, MVT::f64, Expand);
1713
1714 setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
1715 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
1716
1717 setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
1718 setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
1719 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
1720 setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
1721 setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
1722 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
1723 setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
1724 setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
1725 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
1726 setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
1727 setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
1728 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
1729
1730 // Handling of indexed loads/stores: default is "expand".
1731 //
1732 for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64,
1733 MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}) {
1734 setIndexedLoadAction(ISD::POST_INC, VT, Legal);
1735 setIndexedStoreAction(ISD::POST_INC, VT, Legal);
1736 }
1737
1738 // Subtarget-specific operation actions.
1739 //
1740 if (Subtarget.hasV60Ops()) {
1741 setOperationAction(ISD::ROTL, MVT::i32, Legal);
1742 setOperationAction(ISD::ROTL, MVT::i64, Legal);
1743 setOperationAction(ISD::ROTR, MVT::i32, Legal);
1744 setOperationAction(ISD::ROTR, MVT::i64, Legal);
1745 }
1746 if (Subtarget.hasV66Ops()) {
1747 setOperationAction(ISD::FADD, MVT::f64, Legal);
1748 setOperationAction(ISD::FSUB, MVT::f64, Legal);
1749 }
1750 if (Subtarget.hasV67Ops()) {
1751 setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
1752 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
1753 setOperationAction(ISD::FMUL, MVT::f64, Legal);
1754 }
1755
1756 setTargetDAGCombine(ISD::VSELECT);
1757
1758 if (Subtarget.useHVXOps())
1759 initializeHVXLowering();
1760
1761 computeRegisterProperties(&HRI);
1762
1763 //
1764 // Library calls for unsupported operations
1765 //
1766 bool FastMath = EnableFastMath;
1767
1768 setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
1769 setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
1770 setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
1771 setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
1772 setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
1773 setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
1774 setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
1775 setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
1776
1777 setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
1778 setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
1779 setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
1780 setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
1781 setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
1782 setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
1783
1784 // This is the only fast library function for sqrtd.
1785 if (FastMath)
1786 setLibcallName(RTLIB::SQRT_F64, "__hexagon_fast2_sqrtdf2");
1787
1788 // Prefix is: nothing for "slow-math",
1789 // "fast2_" for V5+ fast-math double-precision
1790 // (actually, keep fast-math and fast-math2 separate for now)
1791 if (FastMath) {
1792 setLibcallName(RTLIB::ADD_F64, "__hexagon_fast_adddf3");
1793 setLibcallName(RTLIB::SUB_F64, "__hexagon_fast_subdf3");
1794 setLibcallName(RTLIB::MUL_F64, "__hexagon_fast_muldf3");
1795 setLibcallName(RTLIB::DIV_F64, "__hexagon_fast_divdf3");
1796 setLibcallName(RTLIB::DIV_F32, "__hexagon_fast_divsf3");
1797 } else {
1798 setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
1799 setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
1800 setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
1801 setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
1802 setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
1803 }
1804
1805 if (FastMath)
1806 setLibcallName(RTLIB::SQRT_F32, "__hexagon_fast2_sqrtf");
1807 else
1808 setLibcallName(RTLIB::SQRT_F32, "__hexagon_sqrtf");
1809
1810 // These cause problems when the shift amount is non-constant.
1811 setLibcallName(RTLIB::SHL_I128, nullptr);
1812 setLibcallName(RTLIB::SRL_I128, nullptr);
1813 setLibcallName(RTLIB::SRA_I128, nullptr);
1814}
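// Example of these action settings (illustrative): since ISD::CTPOP is
// Promote for i32 but Legal for i64 above, (ctpop i32 %x) is widened to a
// 64-bit popcount whose result is truncated back to i32.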
1815
1816const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
1817 switch ((HexagonISD::NodeType)Opcode) {
1818 case HexagonISD::ADDC: return "HexagonISD::ADDC";
1819 case HexagonISD::SUBC: return "HexagonISD::SUBC";
1820 case HexagonISD::ALLOCA: return "HexagonISD::ALLOCA";
1821 case HexagonISD::AT_GOT: return "HexagonISD::AT_GOT";
1822 case HexagonISD::AT_PCREL: return "HexagonISD::AT_PCREL";
1823 case HexagonISD::BARRIER: return "HexagonISD::BARRIER";
1824 case HexagonISD::CALL: return "HexagonISD::CALL";
1825 case HexagonISD::CALLnr: return "HexagonISD::CALLnr";
1826 case HexagonISD::CALLR: return "HexagonISD::CALLR";
1827 case HexagonISD::COMBINE: return "HexagonISD::COMBINE";
1828 case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP";
1829 case HexagonISD::CONST32: return "HexagonISD::CONST32";
1830 case HexagonISD::CP: return "HexagonISD::CP";
1831 case HexagonISD::DCFETCH: return "HexagonISD::DCFETCH";
1832 case HexagonISD::EH_RETURN: return "HexagonISD::EH_RETURN";
1833 case HexagonISD::TSTBIT: return "HexagonISD::TSTBIT";
1834 case HexagonISD::EXTRACTU: return "HexagonISD::EXTRACTU";
1835 case HexagonISD::INSERT: return "HexagonISD::INSERT";
1836 case HexagonISD::JT: return "HexagonISD::JT";
1837 case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
1838 case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
1839 case HexagonISD::VASL: return "HexagonISD::VASL";
1840 case HexagonISD::VASR: return "HexagonISD::VASR";
1841 case HexagonISD::VLSR: return "HexagonISD::VLSR";
1842 case HexagonISD::VSPLAT: return "HexagonISD::VSPLAT";
1843 case HexagonISD::VEXTRACTW: return "HexagonISD::VEXTRACTW";
1844 case HexagonISD::VINSERTW0: return "HexagonISD::VINSERTW0";
1845 case HexagonISD::VROR: return "HexagonISD::VROR";
1846 case HexagonISD::READCYCLE: return "HexagonISD::READCYCLE";
1847 case HexagonISD::PTRUE: return "HexagonISD::PTRUE";
1848 case HexagonISD::PFALSE: return "HexagonISD::PFALSE";
1849 case HexagonISD::VZERO: return "HexagonISD::VZERO";
1850 case HexagonISD::VSPLATW: return "HexagonISD::VSPLATW";
1851 case HexagonISD::D2P: return "HexagonISD::D2P";
1852 case HexagonISD::P2D: return "HexagonISD::P2D";
1853 case HexagonISD::V2Q: return "HexagonISD::V2Q";
1854 case HexagonISD::Q2V: return "HexagonISD::Q2V";
1855 case HexagonISD::QCAT: return "HexagonISD::QCAT";
1856 case HexagonISD::QTRUE: return "HexagonISD::QTRUE";
1857 case HexagonISD::QFALSE: return "HexagonISD::QFALSE";
1858 case HexagonISD::TYPECAST: return "HexagonISD::TYPECAST";
1859 case HexagonISD::VALIGN: return "HexagonISD::VALIGN";
1860 case HexagonISD::VALIGNADDR: return "HexagonISD::VALIGNADDR";
1861 case HexagonISD::OP_END: break;
1862 }
1863 return nullptr;
1864}
1865
1866void
1867HexagonTargetLowering::validateConstPtrAlignment(SDValue Ptr, const SDLoc &dl,
1868 unsigned NeedAlign) const {
1869 auto *CA = dyn_cast<ConstantSDNode>(Ptr);
1870 if (!CA)
1871 return;
1872 unsigned Addr = CA->getZExtValue();
1873 unsigned HaveAlign = Addr != 0 ? 1u << countTrailingZeros(Addr) : NeedAlign;
1874 if (HaveAlign < NeedAlign) {
1875 std::string ErrMsg;
1876 raw_string_ostream O(ErrMsg);
1877 O << "Misaligned constant address: " << format_hex(Addr, 10)
1878 << " has alignment " << HaveAlign
1879 << ", but the memory access requires " << NeedAlign;
1880 if (DebugLoc DL = dl.getDebugLoc())
1881 DL.print(O << ", at ");
1882 report_fatal_error(O.str());
1883 }
1884}
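// Worked example (illustrative): a constant address 0x1002 has one
// trailing zero, so HaveAlign == 2; a 4-byte access then reports
// "Misaligned constant address: 0x00001002 has alignment 2, but the
// memory access requires 4" and aborts compilation.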
1885
1886// Bit-reverse load intrinsic: check if the instruction is a bit-reverse
1887// load intrinsic.
1888static bool isBrevLdIntrinsic(const Value *Inst) {
1889 unsigned ID = cast<IntrinsicInst>(Inst)->getIntrinsicID();
1890 return (ID == Intrinsic::hexagon_L2_loadrd_pbr ||
1891 ID == Intrinsic::hexagon_L2_loadri_pbr ||
1892 ID == Intrinsic::hexagon_L2_loadrh_pbr ||
1893 ID == Intrinsic::hexagon_L2_loadruh_pbr ||
1894 ID == Intrinsic::hexagon_L2_loadrb_pbr ||
1895 ID == Intrinsic::hexagon_L2_loadrub_pbr);
1896}
1897
1898// Bit-reverse load intrinsic: crawl up and figure out the object from the
1899// previous instruction. So far we only handle bitcast, extractvalue and
1900// bit-reverse load intrinsic instructions. Should we handle CGEP?
1901static Value *getBrevLdObject(Value *V) {
1902 if (Operator::getOpcode(V) == Instruction::ExtractValue ||
1903 Operator::getOpcode(V) == Instruction::BitCast)
1904 V = cast<Operator>(V)->getOperand(0);
1905 else if (isa<IntrinsicInst>(V) && isBrevLdIntrinsic(V))
1906 V = cast<Instruction>(V)->getOperand(0);
1907 return V;
1908}
1909
1910// Bit-reverse Load Intrinsic: For a PHI Node return either an incoming edge or
1911// a back edge. If the back edge comes from the intrinsic itself, the incoming
1912// edge is returned.
1913static Value *returnEdge(const PHINode *PN, Value *IntrBaseVal) {
1914 const BasicBlock *Parent = PN->getParent();
1915 int Idx = -1;
1916 for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i) {
1917 BasicBlock *Blk = PN->getIncomingBlock(i);
1918 // Determine if the back edge originates from the intrinsic.
1919 if (Blk == Parent) {
1920 Value *BackEdgeVal = PN->getIncomingValue(i);
1921 Value *BaseVal;
1922 // Loop until we return the same Value or hit IntrBaseVal.
1923 do {
1924 BaseVal = BackEdgeVal;
1925 BackEdgeVal = getBrevLdObject(BackEdgeVal);
1926 } while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal));
1927 // If getBrevLdObject returns IntrBaseVal, we should return the
1928 // incoming edge.
1929 if (IntrBaseVal == BackEdgeVal)
1930 continue;
1931 Idx = i;
1932 break;
1933 } else // Set the node to incoming edge.
1934 Idx = i;
1935 }
1936 assert(Idx >= 0 && "Unexpected index to incoming argument in PHI");
1937 return PN->getIncomingValue(Idx);
1938}
1939
1940// Bit-reverse load intrinsic: figure out the underlying object that the
1941// base pointer points to. Recording it in the memory operand may help
1942// alias analysis figure out the dependencies.
1943static Value *getUnderLyingObjectForBrevLdIntr(Value *V) {
1944 Value *IntrBaseVal = V;
1945 Value *BaseVal;
1946 // Loop until getBrevLdObject returns the same Value; at that point we
1947 // have either found the object or hit a PHI.
1948 do {
1949 BaseVal = V;
1950 V = getBrevLdObject(V);
1951 } while (BaseVal != V);
1952
1953 // Identify the object from PHINode.
1954 if (const PHINode *PN = dyn_cast<PHINode>(V))
1955 return returnEdge(PN, IntrBaseVal);
1956 // For non-PHI nodes, the object is the last value returned by getBrevLdObject.
1957 else
1958 return V;
1959}
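// Example crawl (illustrative; names are hypothetical):
//   %r = call { i32, i8* } @llvm.hexagon.L2.loadri.pbr(i8* %base, i32 %mod)
//   %p = extractvalue { i32, i8* } %r, 1
// getBrevLdObject steps %p -> %r -> %base, so %base becomes the underlying
// object; a PHI encountered on the way is resolved by returnEdge above.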
1960
1961/// Given an intrinsic, checks if on the target the intrinsic will need to map
1962/// to a MemIntrinsicNode (touches memory). If this is the case, it returns
1963/// true and stores the intrinsic information into the IntrinsicInfo that was
1964/// passed to the function.
1965bool HexagonTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
1966 const CallInst &I,
1967 MachineFunction &MF,
1968 unsigned Intrinsic) const {
1969 switch (Intrinsic) {
1970 case Intrinsic::hexagon_L2_loadrd_pbr:
1971 case Intrinsic::hexagon_L2_loadri_pbr:
1972 case Intrinsic::hexagon_L2_loadrh_pbr:
1973 case Intrinsic::hexagon_L2_loadruh_pbr:
1974 case Intrinsic::hexagon_L2_loadrb_pbr:
1975 case Intrinsic::hexagon_L2_loadrub_pbr: {
1976 Info.opc = ISD::INTRINSIC_W_CHAIN;
1977 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
1978 auto &Cont = I.getCalledFunction()->getParent()->getContext();
1979 // The intrinsic function call is of the form { ElTy, i8* }
1980 // @llvm.hexagon.L2.loadXX.pbr(i8*, i32). The pointer and memory access type
1981 // should be derived from ElTy.
1982 Type *ElTy = I.getCalledFunction()->getReturnType()->getStructElementType(0);
1983 Info.memVT = MVT::getVT(ElTy);
1984 llvm::Value *BasePtrVal = I.getOperand(0);
1985 Info.ptrVal = getUnderLyingObjectForBrevLdIntr(BasePtrVal);
1986 // The offset value comes through Modifier register. For now, assume the
1987 // offset is 0.
1988 Info.offset = 0;
1989 Info.align =
1990 MaybeAlign(DL.getABITypeAlignment(Info.memVT.getTypeForEVT(Cont)));
1991 Info.flags = MachineMemOperand::MOLoad;
1992 return true;
1993 }
1994 case Intrinsic::hexagon_V6_vgathermw:
1995 case Intrinsic::hexagon_V6_vgathermw_128B:
1996 case Intrinsic::hexagon_V6_vgathermh:
1997 case Intrinsic::hexagon_V6_vgathermh_128B:
1998 case Intrinsic::hexagon_V6_vgathermhw:
1999 case Intrinsic::hexagon_V6_vgathermhw_128B:
2000 case Intrinsic::hexagon_V6_vgathermwq:
2001 case Intrinsic::hexagon_V6_vgathermwq_128B:
2002 case Intrinsic::hexagon_V6_vgathermhq:
2003 case Intrinsic::hexagon_V6_vgathermhq_128B:
2004 case Intrinsic::hexagon_V6_vgathermhwq:
2005 case Intrinsic::hexagon_V6_vgathermhwq_128B: {
2006 const Module &M = *I.getParent()->getParent()->getParent();
2007 Info.opc = ISD::INTRINSIC_W_CHAIN;
2008 Type *VecTy = I.getArgOperand(1)->getType();
2009 Info.memVT = MVT::getVT(VecTy);
2010 Info.ptrVal = I.getArgOperand(0);
2011 Info.offset = 0;
2012 Info.align =
2013 MaybeAlign(M.getDataLayout().getTypeAllocSizeInBits(VecTy) / 8);
2014 Info.flags = MachineMemOperand::MOLoad |
2015 MachineMemOperand::MOStore |
2016 MachineMemOperand::MOVolatile;
2017 return true;
2018 }
2019 default:
2020 break;
2021 }
2022 return false;
2023}
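// Net effect (illustrative): for the brev-load intrinsics, Info describes
// a plain MOLoad of ElTy at the crawled base pointer; for the HVX gathers
// it is a volatile MOLoad|MOStore over the whole vector, since vgather
// writes its result through memory.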
2024
2025bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
2026 return X.getValueType().isScalarInteger(); // 'tstbit'
2027}
2028
2029bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
2030 return isTruncateFree(EVT::getEVT(Ty1), EVT::getEVT(Ty2));
2031}
2032
2033bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
2034 if (!VT1.isSimple() || !VT2.isSimple())
2035 return false;
2036 return VT1.getSimpleVT() == MVT::i64 && VT2.getSimpleVT() == MVT::i32;
2037}
2038
2039bool HexagonTargetLowering::isFMAFasterThanFMulAndFAdd(
2040 const MachineFunction &MF, EVT VT) const {
2041 return isOperationLegalOrCustom(ISD::FMA, VT);
2042}
2043
2044// Should we expand the build vector with shuffles?
2045bool HexagonTargetLowering::shouldExpandBuildVectorWithShuffles(EVT VT,
2046 unsigned DefinedValues) const {
2047 return false;
2048}
2049
2050bool HexagonTargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask,
2051 EVT VT) const {
2052 return true;
2053}
2054
2055TargetLoweringBase::LegalizeTypeAction
2056HexagonTargetLowering::getPreferredVectorAction(MVT VT) const {
2057 unsigned VecLen = VT.getVectorNumElements();
2058 MVT ElemTy = VT.getVectorElementType();
2059
2060 if (VecLen == 1 || VT.isScalableVector())
2061 return TargetLoweringBase::TypeScalarizeVector;
2062
2063 if (Subtarget.useHVXOps()) {
2064 unsigned HwLen = Subtarget.getVectorLength();
2065 // If the size of VT is at least half of the vector length,
2066 // widen the vector. Note: the threshold was not selected in
2067 // any scientific way.
2068 ArrayRef<MVT> Tys = Subtarget.getHVXElementTypes();
2069 if (llvm::find(Tys, ElemTy) != Tys.end()) {
2070 unsigned HwWidth = 8*HwLen;
2071 unsigned VecWidth = VT.getSizeInBits();
2072 if (VecWidth >= HwWidth/2 && VecWidth < HwWidth)
2073 return TargetLoweringBase::TypeWidenVector;
2074 }
2075 // Split vectors of i1 that correspond to (byte) vector pairs.
2076 if (ElemTy == MVT::i1 && VecLen == 2*HwLen)
2077 return TargetLoweringBase::TypeSplitVector;
2078 }
2079
2080 // Always widen (remaining) vectors of i1.
2081 if (ElemTy == MVT::i1)
2082 return TargetLoweringBase::TypeWidenVector;
2083
2084 return TargetLoweringBase::TypeSplitVector;
2085}
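// Worked example (illustrative): with 128-byte HVX (HwWidth = 1024 bits),
// v16i32 is 512 bits -- at least HwWidth/2 but below HwWidth -- so it is
// widened to v32i32; v64i32 (2048 bits) falls through and is split.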
2086
2087std::pair<SDValue, int>
2088HexagonTargetLowering::getBaseAndOffset(SDValue Addr) const {
2089 if (Addr.getOpcode() == ISD::ADD) {
2090 SDValue Op1 = Addr.getOperand(1);
2091 if (auto *CN = dyn_cast<const ConstantSDNode>(Op1.getNode()))
2092 return { Addr.getOperand(0), CN->getSExtValue() };
2093 }
2094 return { Addr, 0 };
2095}
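// Example (illustrative): (add %x, Constant:i32<8>) -> { %x, 8 };
// any other address, including (add %x, %y), yields { Addr, 0 }.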
2096
2097// Lower a vector shuffle (V1, V2, V3). V1 and V2 are the two vectors
2098// to select data from, V3 is the permutation.
2099SDValue
2100HexagonTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
2101 const {
2102 const auto *SVN = cast<ShuffleVectorSDNode>(Op);
2103 ArrayRef<int> AM = SVN->getMask();
2104 assert(AM.size() <= 8 && "Unexpected shuffle mask");
2105 unsigned VecLen = AM.size();
2106
2107 MVT VecTy = ty(Op);
2108 assert(!Subtarget.isHVXVectorType(VecTy, true) &&
2109        "HVX shuffles should be legal");
2110 assert(VecTy.getSizeInBits() <= 64 && "Unexpected vector length");
2111
2112 SDValue Op0 = Op.getOperand(0);
2113 SDValue Op1 = Op.getOperand(1);
2114 const SDLoc &dl(Op);
2115
2116 // If the inputs are not the same as the output, bail. This is not an
2117 // error situation, but complicates the handling and the default expansion
2118 // (into BUILD_VECTOR) should be adequate.
2119 if (ty(Op0) != VecTy || ty(Op1) != VecTy)
2120 return SDValue();
2121
2122 // Normalize the mask so that the first non-negative index comes from
2123 // the first operand.
2124 SmallVector<int,8> Mask(AM.begin(), AM.end());
2125 unsigned F = llvm::find_if(AM, [](int M) { return M >= 0; }) - AM.data();
2126 if (F == AM.size())
2127 return DAG.getUNDEF(VecTy);
2128 if (AM[F] >= int(VecLen)) {
2129 ShuffleVectorSDNode::commuteMask(Mask);
2130 std::swap(Op0, Op1);
2131 }
2132
2133 // Express the shuffle mask in terms of bytes.
2134 SmallVector<int,8> ByteMask;
2135 unsigned ElemBytes = VecTy.getVectorElementType().getSizeInBits() / 8;
2136 for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
2137 int M = Mask[i];
2138 if (M < 0) {
2139 for (unsigned j = 0; j != ElemBytes; ++j)
2140 ByteMask.push_back(-1);
2141 } else {
2142 for (unsigned j = 0; j != ElemBytes; ++j)
2143 ByteMask.push_back(M*ElemBytes + j);
2144 }
2145 }
2146 assert(ByteMask.size() <= 8);
2147
2148 // All non-undef (non-negative) indexes are well within [0..127], so they
2149 // fit in a single byte. Build two 64-bit words:
2150 // - MaskIdx where each byte is the corresponding index (for non-negative
2151 // indexes), and 0xFF for negative indexes, and
2152 // - MaskUnd that has 0xFF for each negative index.
2153 uint64_t MaskIdx = 0;
2154 uint64_t MaskUnd = 0;
2155 for (unsigned i = 0, e = ByteMask.size(); i != e; ++i) {
2156 unsigned S = 8*i;
2157 uint64_t M = ByteMask[i] & 0xFF;
2158 if (M == 0xFF)
2159 MaskUnd |= M << S;
2160 MaskIdx |= M << S;
2161 }
2162
2163 if (ByteMask.size() == 4) {
2164 // Identity.
2165 if (MaskIdx == (0x03020100 | MaskUnd))
2166 return Op0;
2167 // Byte swap.
2168 if (MaskIdx == (0x00010203 | MaskUnd)) {
2169 SDValue T0 = DAG.getBitcast(MVT::i32, Op0);
2170 SDValue T1 = DAG.getNode(ISD::BSWAP, dl, MVT::i32, T0);
2171 return DAG.getBitcast(VecTy, T1);
2172 }
2173
2174 // Byte packs.
2175 SDValue Concat10 = DAG.getNode(HexagonISD::COMBINE, dl,
2176 typeJoin({ty(Op1), ty(Op0)}), {Op1, Op0});
2177 if (MaskIdx == (0x06040200 | MaskUnd))
2178 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat10}, DAG);
2179 if (MaskIdx == (0x07050301 | MaskUnd))
2180 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat10}, DAG);
2181
2182 SDValue Concat01 = DAG.getNode(HexagonISD::COMBINE, dl,
2183 typeJoin({ty(Op0), ty(Op1)}), {Op0, Op1});
2184 if (MaskIdx == (0x02000604 | MaskUnd))
2185 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat01}, DAG);
2186 if (MaskIdx == (0x03010705 | MaskUnd))
2187 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat01}, DAG);
2188 }
2189
2190 if (ByteMask.size() == 8) {
2191 // Identity.
2192 if (MaskIdx == (0x0706050403020100ull | MaskUnd))
2193 return Op0;
2194 // Byte swap.
2195 if (MaskIdx == (0x0001020304050607ull | MaskUnd)) {
2196 SDValue T0 = DAG.getBitcast(MVT::i64, Op0);
2197 SDValue T1 = DAG.getNode(ISD::BSWAP, dl, MVT::i64, T0);
2198 return DAG.getBitcast(VecTy, T1);
2199 }
2200
2201 // Halfword picks.
2202 if (MaskIdx == (0x0d0c050409080100ull | MaskUnd))
2203 return getInstr(Hexagon::S2_shuffeh, dl, VecTy, {Op1, Op0}, DAG);
2204 if (MaskIdx == (0x0f0e07060b0a0302ull | MaskUnd))
2205 return getInstr(Hexagon::S2_shuffoh, dl, VecTy, {Op1, Op0}, DAG);
2206 if (MaskIdx == (0x0d0c090805040100ull | MaskUnd))
2207 return getInstr(Hexagon::S2_vtrunewh, dl, VecTy, {Op1, Op0}, DAG);
2208 if (MaskIdx == (0x0f0e0b0a07060302ull | MaskUnd))
2209 return getInstr(Hexagon::S2_vtrunowh, dl, VecTy, {Op1, Op0}, DAG);
2210 if (MaskIdx == (0x0706030205040100ull | MaskUnd)) {
2211 VectorPair P = opSplit(Op0, dl, DAG);
2212 return getInstr(Hexagon::S2_packhl, dl, VecTy, {P.second, P.first}, DAG);
2213 }
2214
2215 // Byte packs.
2216 if (MaskIdx == (0x0e060c040a020800ull | MaskUnd))
2217 return getInstr(Hexagon::S2_shuffeb, dl, VecTy, {Op1, Op0}, DAG);
2218 if (MaskIdx == (0x0f070d050b030901ull | MaskUnd))
2219 return getInstr(Hexagon::S2_shuffob, dl, VecTy, {Op1, Op0}, DAG);
2220 }
2221
2222 return SDValue();
2223}
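// Worked example (illustrative): a v4i8 shuffle with mask {3,2,1,0}
// produces ByteMask {3,2,1,0}, i.e. MaskIdx = 0x00010203 and MaskUnd = 0,
// which matches the byte-swap pattern above and is emitted as a 32-bit
// BSWAP bracketed by bitcasts.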
2224
2225// Create a Hexagon-specific node for shifting a vector by an integer.
2226SDValue
2227HexagonTargetLowering::getVectorShiftByInt(SDValue Op, SelectionDAG &DAG)
2228 const {
2229 if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode())) {
2230 if (SDValue S = BVN->getSplatValue()) {
2231 unsigned NewOpc;
2232 switch (Op.getOpcode()) {
2233 case ISD::SHL:
2234 NewOpc = HexagonISD::VASL;
2235 break;
2236 case ISD::SRA:
2237 NewOpc = HexagonISD::VASR;
2238 break;
2239 case ISD::SRL:
2240 NewOpc = HexagonISD::VLSR;
2241 break;
2242 default:
2243 llvm_unreachable("Unexpected shift opcode");
2244 }
2245 return DAG.getNode(NewOpc, SDLoc(Op), ty(Op), Op.getOperand(0), S);
2246 }
2247 }
2248
2249 return SDValue();
2250}
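// Example (illustrative): (shl v4i8:%x, (build_vector 3,3,3,3)) becomes
// (VASL %x, 3); a non-splat shift amount falls through and the caller
// gets SDValue(), leaving the default expansion to handle it.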
2251
2252SDValue
2253HexagonTargetLowering::LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const {
2254 return getVectorShiftByInt(Op, DAG);
2255}
2256
2257SDValue
2258HexagonTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
2259 if (isa<ConstantSDNode>(Op.getOperand(1).getNode()))
2260 return Op;
2261 return SDValue();
2262}
2263
2264SDValue
2265HexagonTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
2266 MVT ResTy = ty(Op);
2267 SDValue InpV = Op.getOperand(0);
2268 MVT InpTy = ty(InpV);
2269 assert(ResTy.getSizeInBits() == InpTy.getSizeInBits());
2270 const SDLoc &dl(Op);
2271
2272 // Handle conversion from i8 to v8i1.
2273 if (InpTy == MVT::i8) {
2274 if (ResTy == MVT::v8i1) {
2275 SDValue Sc = DAG.getBitcast(tyScalar(InpTy), InpV);
2276 SDValue Ext = DAG.getZExtOrTrunc(Sc, dl, MVT::i32);
2277 return getInstr(Hexagon::C2_tfrrp, dl, ResTy, Ext, DAG);
2278 }
2279 return SDValue();
2280 }
2281
2282 return Op;
2283}
2284
2285bool
2286HexagonTargetLowering::getBuildVectorConstInts(ArrayRef<SDValue> Values,
2287 MVT VecTy, SelectionDAG &DAG,
2288 MutableArrayRef<ConstantInt*> Consts) const {
2289 MVT ElemTy = VecTy.getVectorElementType();
2290 unsigned ElemWidth = ElemTy.getSizeInBits();
2291 IntegerType *IntTy = IntegerType::get(*DAG.getContext(), ElemWidth);
2292 bool AllConst = true;
2293
2294 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
2295 SDValue V = Values[i];
2296 if (V.isUndef()) {
2297 Consts[i] = ConstantInt::get(IntTy, 0);
2298 continue;
2299 }
2300 // Make sure to always cast to IntTy.
2301 if (auto *CN = dyn_cast<ConstantSDNode>(V.getNode())) {
2302 const ConstantInt *CI = CN->getConstantIntValue();
2303 Consts[i] = ConstantInt::get(IntTy, CI->getValue().getSExtValue());
2304 } else if (auto *CN = dyn_cast<ConstantFPSDNode>(V.getNode())) {
2305 const ConstantFP *CF = CN->getConstantFPValue();
2306 APInt A = CF->getValueAPF().bitcastToAPInt();
2307 Consts[i] = ConstantInt::get(IntTy, A.getZExtValue());
2308 } else {
2309 AllConst = false;
2310 }
2311 }
2312 return AllConst;
2313}
2314
2315SDValue
2316HexagonTargetLowering::buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl,
2317 MVT VecTy, SelectionDAG &DAG) const {
2318 MVT ElemTy = VecTy.getVectorElementType();
2319 assert(VecTy.getVectorNumElements() == Elem.size());
2320
2321 SmallVector<ConstantInt*,4> Consts(Elem.size());
2322 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
2323
2324 unsigned First, Num = Elem.size();
2325 for (First = 0; First != Num; ++First)
2326 if (!isUndef(Elem[First]))
2327 break;
2328 if (First == Num)
2329 return DAG.getUNDEF(VecTy);
2330
2331 if (AllConst &&
2332 llvm::all_of(Consts, [](ConstantInt *CI) { return CI->isZero(); }))
2333 return getZero(dl, VecTy, DAG);
2334
2335 if (ElemTy == MVT::i16) {
2336 assert(Elem.size() == 2);
2337 if (AllConst) {
2338 uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) |
2339 Consts[1]->getZExtValue() << 16;
2340 return DAG.getBitcast(MVT::v2i16, DAG.getConstant(V, dl, MVT::i32));
2341 }
2342 SDValue N = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32,
2343 {Elem[1], Elem[0]}, DAG);
2344 return DAG.getBitcast(MVT::v2i16, N);
2345 }
2346
2347 if (ElemTy == MVT::i8) {
2348 // First try generating a constant.
2349 if (AllConst) {
2350 int32_t V = (Consts[0]->getZExtValue() & 0xFF) |
2351             (Consts[1]->getZExtValue() & 0xFF) << 8 |
2352             (Consts[2]->getZExtValue() & 0xFF) << 16 |
2353             Consts[3]->getZExtValue() << 24;
2354 return DAG.getBitcast(MVT::v4i8, DAG.getConstant(V, dl, MVT::i32));
2355 }
2356
2357 // Then try splat.
2358 bool IsSplat = true;
2359 for (unsigned i = 0; i != Num; ++i) {
2360 if (i == First)
2361 continue;
2362 if (Elem[i] == Elem[First] || isUndef(Elem[i]))
2363 continue;
2364 IsSplat = false;
2365 break;
2366 }
2367 if (IsSplat) {
2368 // Legalize the operand to VSPLAT.
2369 SDValue Ext = DAG.getZExtOrTrunc(Elem[First], dl, MVT::i32);
2370 return DAG.getNode(HexagonISD::VSPLAT, dl, VecTy, Ext);
2371 }
2372
2373 // Generate
2374 // (zxtb(Elem[0]) | (zxtb(Elem[1]) << 8)) |
2375 // (zxtb(Elem[2]) | (zxtb(Elem[3]) << 8)) << 16
2376 assert(Elem.size() == 4);
2377 SDValue Vs[4];
2378 for (unsigned i = 0; i != 4; ++i) {
2379 Vs[i] = DAG.getZExtOrTrunc(Elem[i], dl, MVT::i32);
2380 Vs[i] = DAG.getZeroExtendInReg(Vs[i], dl, MVT::i8);
2381 }
2382 SDValue S8 = DAG.getConstant(8, dl, MVT::i32);
2383 SDValue T0 = DAG.getNode(ISD::SHL, dl, MVT::i32, {Vs[1], S8});
2384 SDValue T1 = DAG.getNode(ISD::SHL, dl, MVT::i32, {Vs[3], S8});
2385 SDValue B0 = DAG.getNode(ISD::OR, dl, MVT::i32, {Vs[0], T0});
2386 SDValue B1 = DAG.getNode(ISD::OR, dl, MVT::i32, {Vs[2], T1});
2387
2388 SDValue R = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {B1, B0}, DAG);
2389 return DAG.getBitcast(MVT::v4i8, R);
2390 }
2391
2392#ifndef NDEBUG
2393 dbgs() << "VecTy: " << EVT(VecTy).getEVTString() << '\n';
2394#endif
2395 llvm_unreachable("Unexpected vector element type");
2396}
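// Worked example (illustrative): BUILD_VECTOR v4i8 <1, 2, 3, 4> is
// all-constant, so the packing above yields the 32-bit immediate
// 0x04030201, which is then bitcast to v4i8.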
2397
2398SDValue
2399HexagonTargetLowering::buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl,
2400 MVT VecTy, SelectionDAG &DAG) const {
2401 MVT ElemTy = VecTy.getVectorElementType();
2402 assert(VecTy.getVectorNumElements() == Elem.size());
2403
2404 SmallVector<ConstantInt*,8> Consts(Elem.size());
2405 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
2406
2407 unsigned First, Num = Elem.size();
2408 for (First = 0; First != Num; ++First)
2409 if (!isUndef(Elem[First]))
2410 break;
2411 if (First == Num)
2412 return DAG.getUNDEF(VecTy);
2413
2414 if (AllConst &&
2415 llvm::all_of(Consts, [](ConstantInt *CI) { return CI->isZero(); }))
2416 return getZero(dl, VecTy, DAG);
2417
2418 // First try splat if possible.
2419 if (ElemTy == MVT::i16) {
2420 bool IsSplat = true;
2421 for (unsigned i = 0; i != Num; ++i) {
2422 if (i == First)
2423 continue;
2424 if (Elem[i] == Elem[First] || isUndef(Elem[i]))
2425 continue;
2426 IsSplat = false;
2427 break;
2428 }
2429 if (IsSplat) {
2430 // Legalize the operand to VSPLAT.
2431 SDValue Ext = DAG.getZExtOrTrunc(Elem[First], dl, MVT::i32);
2432 return DAG.getNode(HexagonISD::VSPLAT, dl, VecTy, Ext);
2433 }
2434 }
2435
2436 // Then try constant.
2437 if (AllConst) {
2438 uint64_t Val = 0;
2439 unsigned W = ElemTy.getSizeInBits();
2440 uint64_t Mask = (ElemTy == MVT::i8) ? 0xFFull
2441 : (ElemTy == MVT::i16) ? 0xFFFFull : 0xFFFFFFFFull;
2442 for (unsigned i = 0; i != Num; ++i)
2443 Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() & Mask);
2444 SDValue V0 = DAG.getConstant(Val, dl, MVT::i64);
2445 return DAG.getBitcast(VecTy, V0);
2446 }
2447
2448 // Build two 32-bit vectors and concatenate.
2449 MVT HalfTy = MVT::getVectorVT(ElemTy, Num/2);
2450 SDValue L = (ElemTy == MVT::i32)
2451 ? Elem[0]
2452 : buildVector32(Elem.take_front(Num/2), dl, HalfTy, DAG);
2453 SDValue H = (ElemTy == MVT::i32)
2454 ? Elem[1]
2455 : buildVector32(Elem.drop_front(Num/2), dl, HalfTy, DAG);
2456 return DAG.getNode(HexagonISD::COMBINE, dl, VecTy, {H, L});
2457}
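// Worked example (illustrative): v4i16 <1, 2, 3, 4> is all-constant; the
// loop packs Val = 0x0004000300020001 and bitcasts the i64 immediate.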
2458
2459SDValue
2460HexagonTargetLowering::extractVector(SDValue VecV, SDValue IdxV,
2461 const SDLoc &dl, MVT ValTy, MVT ResTy,
2462 SelectionDAG &DAG) const {
2463 MVT VecTy = ty(VecV);
2464 assert(!ValTy.isVector() ||
2465        VecTy.getVectorElementType() == ValTy.getVectorElementType());
2466 unsigned VecWidth = VecTy.getSizeInBits();
2467 unsigned ValWidth = ValTy.getSizeInBits();
2468 unsigned ElemWidth = VecTy.getVectorElementType().getSizeInBits();
2469 assert((VecWidth % ElemWidth) == 0);
2470 auto *IdxN = dyn_cast<ConstantSDNode>(IdxV);
2471
2472 // Special case for v{8,4,2}i1 (the only boolean vectors legal in Hexagon
2473 // without any coprocessors).
2474 if (ElemWidth == 1) {
2475 assert(VecWidth == VecTy.getVectorNumElements() && "Sanity failure");
2476 assert(VecWidth == 8 || VecWidth == 4 || VecWidth == 2);
2477 // Check if this is an extract of the lowest bit.
2478 if (IdxN) {
2479 // Extracting the lowest bit is a no-op, but it changes the type,
2480 // so it must be kept as an operation to avoid errors related to
2481 // type mismatches.
2482 if (IdxN->isNullValue() && ValTy.getSizeInBits() == 1)
2483 return DAG.getNode(HexagonISD::TYPECAST, dl, MVT::i1, VecV);
2484 }
2485
2486 // If the value extracted is a single bit, use tstbit.
2487 if (ValWidth == 1) {
2488 SDValue A0 = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);
2489 SDValue M0 = DAG.getConstant(8 / VecWidth, dl, MVT::i32);
2490 SDValue I0 = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, M0);
2491 return DAG.getNode(HexagonISD::TSTBIT, dl, MVT::i1, A0, I0);
2492 }
2493
2494 // Each bool vector (v2i1, v4i1, v8i1) always occupies 8 bits in
2495 // a predicate register. The elements of the vector are repeated
2496 // in the register (if necessary) so that the total number is 8.
2497 // The extracted subvector will need to be expanded in such a way.
2498 unsigned Scale = VecWidth / ValWidth;
2499
2500 // Generate (p2d VecV) >> 8*Idx to move the interesting bytes to
2501 // position 0.
2502 assert(ty(IdxV) == MVT::i32);
2503 unsigned VecRep = 8 / VecWidth;
2504 SDValue S0 = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
2505 DAG.getConstant(8*VecRep, dl, MVT::i32));
2506 SDValue T0 = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, VecV);
2507 SDValue T1 = DAG.getNode(ISD::SRL, dl, MVT::i64, T0, S0);
2508 while (Scale > 1) {
2509 // The longest possible subvector is at most 32 bits, so it is always
2510 // contained in the low subregister.
2511 T1 = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, T1);
2512 T1 = expandPredicate(T1, dl, DAG);
2513 Scale /= 2;
2514 }
2515
2516 return DAG.getNode(HexagonISD::D2P, dl, ResTy, T1);
2517 }
2518
2519 assert(VecWidth == 32 || VecWidth == 64);
2520
2521 // Cast everything to scalar integer types.
2522 MVT ScalarTy = tyScalar(VecTy);
2523 VecV = DAG.getBitcast(ScalarTy, VecV);
2524
2525 SDValue WidthV = DAG.getConstant(ValWidth, dl, MVT::i32);
2526 SDValue ExtV;
2527
2528 if (IdxN) {
2529 unsigned Off = IdxN->getZExtValue() * ElemWidth;
2530 if (VecWidth == 64 && ValWidth == 32) {
2531 assert(Off == 0 || Off == 32);
2532 unsigned SubIdx = Off == 0 ? Hexagon::isub_lo : Hexagon::isub_hi;
2533 ExtV = DAG.getTargetExtractSubreg(SubIdx, dl, MVT::i32, VecV);
2534 } else if (Off == 0 && (ValWidth % 8) == 0) {
2535 ExtV = DAG.getZeroExtendInReg(VecV, dl, tyScalar(ValTy));
2536 } else {
2537 SDValue OffV = DAG.getConstant(Off, dl, MVT::i32);
2538 // The return type of EXTRACTU must be the same as the type of the
2539 // input vector.
2540 ExtV = DAG.getNode(HexagonISD::EXTRACTU, dl, ScalarTy,
2541 {VecV, WidthV, OffV});
2542 }
2543 } else {
2544 if (ty(IdxV) != MVT::i32)
2545 IdxV = DAG.getZExtOrTrunc(IdxV, dl, MVT::i32);
2546 SDValue OffV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
2547 DAG.getConstant(ElemWidth, dl, MVT::i32));
2548 ExtV = DAG.getNode(HexagonISD::EXTRACTU, dl, ScalarTy,
2549 {VecV, WidthV, OffV});
2550 }
2551
2552 // Cast ExtV to the requested result type.
2553 ExtV = DAG.getZExtOrTrunc(ExtV, dl, tyScalar(ResTy));
2554 ExtV = DAG.getBitcast(ResTy, ExtV);
2555 return ExtV;
2556}
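
Outside the i1 special case, the code above amounts to a plain bit-field
extract on the vector reinterpreted as a 32- or 64-bit scalar. A minimal
scalar sketch of what the HexagonISD::EXTRACTU node computes (hypothetical
helper, assuming Off + Width <= 64):

    #include <cstdint>

    // Take Width bits of V starting at bit offset Off (Off = Idx * ElemWidth)
    // and zero-extend them, as EXTRACTU does on the bitcast vector.
    uint64_t extractu(uint64_t V, unsigned Width, unsigned Off) {
      uint64_t Mask = Width == 64 ? ~0ull : (1ull << Width) - 1;
      return (V >> Off) & Mask;
    }

The trailing getZExtOrTrunc/getBitcast pair then narrows that scalar result
to the requested ResTy.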
2557
2558SDValue
2559HexagonTargetLowering::insertVector(SDValue VecV, SDValue ValV, SDValue IdxV,
2560 const SDLoc &dl, MVT ValTy,
2561 SelectionDAG &DAG) const {
2562 MVT VecTy = ty(VecV);
2563 if (VecTy.getVectorElementType() == MVT::i1) {
2564 MVT ValTy = ty(ValV);
2565 assert(ValTy.getVectorElementType() == MVT::i1);
2566 SDValue ValR = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, ValV);
2567 unsigned VecLen = VecTy.getVectorNumElements();
2568 unsigned Scale = VecLen / ValTy.getVectorNumElements();
2569 assert(Scale > 1);
2570
2571 for (unsigned R = Scale; R > 1; R /= 2) {
2572 ValR = contractPredicate(ValR, dl, DAG);
2573 ValR = DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64,
2574 DAG.getUNDEF(MVT::i32), ValR);
2575 }
2576 // The longest possible subvector is at most 32 bits, so it is always
2577 // contained in the low subregister.
2578 ValR = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, ValR);
2579
2580 unsigned ValBytes = 64 / Scale;
2581 SDValue Width = DAG.getConstant(ValBytes*8, dl, MVT::i32);
2582 SDValue Idx = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
2583 DAG.getConstant(8, dl, MVT::i32));
2584 SDValue VecR = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, VecV);
2585 SDValue Ins = DAG.getNode(HexagonISD::INSERT, dl, MVT::i32,
2586 {VecR, ValR, Width, Idx});
2587 return DAG.getNode(HexagonISD::D2P, dl, VecTy, Ins);
2588 }
2589
2590 unsigned VecWidth = VecTy.getSizeInBits();
2591 unsigned ValWidth = ValTy.getSizeInBits();
2592 assert(VecWidth == 32 || VecWidth == 64);
2593 assert((VecWidth % ValWidth) == 0);
2594
2595 // Cast everything to scalar integer types.
2596 MVT ScalarTy = MVT::getIntegerVT(VecWidth);
2597 // The actual type of ValV may be different than ValTy (which is related
2598 // to the vector type).
2599 unsigned VW = ty(ValV).getSizeInBits();
2600 ValV = DAG.getBitcast(MVT::getIntegerVT(VW), ValV);
2601 VecV = DAG.getBitcast(ScalarTy, VecV);
2602 if (VW != VecWidth)
2603 ValV = DAG.getAnyExtOrTrunc(ValV, dl, ScalarTy);
2604
2605 SDValue WidthV = DAG.getConstant(ValWidth, dl, MVT::i32);
2606 SDValue InsV;
2607
2608 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(IdxV)) {
2609 unsigned W = C->getZExtValue() * ValWidth;
2610 SDValue OffV = DAG.getConstant(W, dl, MVT::i32);
2611 InsV = DAG.getNode(HexagonISD::INSERT, dl, ScalarTy,
2612 {VecV, ValV, WidthV, OffV});
2613 } else {
2614 if (ty(IdxV) != MVT::i32)
2615 IdxV = DAG.getZExtOrTrunc(IdxV, dl, MVT::i32);
2616 SDValue OffV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, WidthV);
2617 InsV = DAG.getNode(HexagonISD::INSERT, dl, ScalarTy,
2618 {VecV, ValV, WidthV, OffV});
2619 }
2620
2621 return DAG.getNode(ISD::BITCAST, dl, VecTy, InsV);
2622}
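
The non-predicate path is the mirror image of extractVector: a bit-field
insert into the vector viewed as a scalar. A rough model of what the
HexagonISD::INSERT node computes (hypothetical helper, using the same
Off = Idx * ValWidth convention):

    #include <cstdint>

    // Replace the Width-bit field of Vec at bit offset Off with the low
    // Width bits of Val; all other bits of Vec are preserved.
    uint64_t insertField(uint64_t Vec, uint64_t Val,
                         unsigned Width, unsigned Off) {
      uint64_t Mask = (Width == 64 ? ~0ull : (1ull << Width) - 1) << Off;
      return (Vec & ~Mask) | ((Val << Off) & Mask);
    }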
2623
2624SDValue
2625HexagonTargetLowering::expandPredicate(SDValue Vec32, const SDLoc &dl,
2626 SelectionDAG &DAG) const {
2627 assert(ty(Vec32).getSizeInBits() == 32);
2628 if (isUndef(Vec32))
2629 return DAG.getUNDEF(MVT::i64);
2630 return getInstr(Hexagon::S2_vsxtbh, dl, MVT::i64, {Vec32}, DAG);
2631}
2632
2633SDValue
2634HexagonTargetLowering::contractPredicate(SDValue Vec64, const SDLoc &dl,
2635 SelectionDAG &DAG) const {
2636 assert(ty(Vec64).getSizeInBits() == 64);
2637 if (isUndef(Vec64))
2638 return DAG.getUNDEF(MVT::i32);
2639 return getInstr(Hexagon::S2_vtrunehb, dl, MVT::i32, {Vec64}, DAG);
2640}
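
expandPredicate and contractPredicate are near-inverses built from
S2_vsxtbh and S2_vtrunehb. A scalar sketch of the assumed semantics
(sign-extend each byte to a halfword, and keep the low byte of each
halfword, respectively):

    #include <cstdint>

    // Rough model of S2_vsxtbh: each byte of V32 becomes a sign-extended
    // halfword of the 64-bit result.
    uint64_t vsxtbh(uint32_t V32) {
      uint64_t R = 0;
      for (int i = 0; i != 4; ++i) {
        int8_t B = (int8_t)((V32 >> (8 * i)) & 0xff);
        R |= (uint64_t)(uint16_t)(int16_t)B << (16 * i);
      }
      return R;
    }

    // Rough model of S2_vtrunehb: keep the low (even) byte of each
    // halfword of V64.
    uint32_t vtrunehb(uint64_t V64) {
      uint32_t R = 0;
      for (int i = 0; i != 4; ++i)
        R |= (uint32_t)((V64 >> (16 * i)) & 0xff) << (8 * i);
      return R;
    }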
2641
2642SDValue
2643HexagonTargetLowering::getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG)
2644 const {
2645 if (Ty.isVector()) {
2646 assert(Ty.isInteger() && "Only integer vectors are supported here");
2647 unsigned W = Ty.getSizeInBits();
2648 if (W <= 64)
2649 return DAG.getBitcast(Ty, DAG.getConstant(0, dl, MVT::getIntegerVT(W)));
2650 return DAG.getNode(HexagonISD::VZERO, dl, Ty);
2651 }
2652
2653 if (Ty.isInteger())
2654 return DAG.getConstant(0, dl, Ty);
2655 if (Ty.isFloatingPoint())
2656 return DAG.getConstantFP(0.0, dl, Ty);
2657 llvm_unreachable("Invalid type for zero")::llvm::llvm_unreachable_internal("Invalid type for zero", "/build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp"
, 2657)
;
2658}
2659
2660SDValue
2661HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
2662 MVT VecTy = ty(Op);
2663 unsigned BW = VecTy.getSizeInBits();
2664 const SDLoc &dl(Op);
2665 SmallVector<SDValue,8> Ops;
2666 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i)
2667 Ops.push_back(Op.getOperand(i));
2668
2669 if (BW == 32)
2670 return buildVector32(Ops, dl, VecTy, DAG);
2671 if (BW == 64)
2672 return buildVector64(Ops, dl, VecTy, DAG);
2673
2674 if (VecTy == MVT::v8i1 || VecTy == MVT::v4i1 || VecTy == MVT::v2i1) {
2675 // Check if this is a special case of all-0 or all-1.
2676 bool All0 = true, All1 = true;
2677 for (SDValue P : Ops) {
2678 auto *CN = dyn_cast<ConstantSDNode>(P.getNode());
2679 if (CN == nullptr) {
2680 All0 = All1 = false;
2681 break;
2682 }
2683 uint32_t C = CN->getZExtValue();
2684 All0 &= (C == 0);
2685 All1 &= (C == 1);
2686 }
2687 if (All0)
2688 return DAG.getNode(HexagonISD::PFALSE, dl, VecTy);
2689 if (All1)
2690 return DAG.getNode(HexagonISD::PTRUE, dl, VecTy);
2691
2692 // For each i1 element in the resulting predicate register, put 1
2693 // shifted by the index of the element into a general-purpose register,
2694 // then or them together and transfer it back into a predicate register.
2695 SDValue Rs[8];
2696 SDValue Z = getZero(dl, MVT::i32, DAG);
2697 // Always produce 8 bits, repeat inputs if necessary.
2698 unsigned Rep = 8 / VecTy.getVectorNumElements();
2699 for (unsigned i = 0; i != 8; ++i) {
2700 SDValue S = DAG.getConstant(1ull << i, dl, MVT::i32);
2701 Rs[i] = DAG.getSelect(dl, MVT::i32, Ops[i/Rep], S, Z);
2702 }
2703 for (ArrayRef<SDValue> A(Rs); A.size() != 1; A = A.drop_back(A.size()/2)) {
2704 for (unsigned i = 0, e = A.size()/2; i != e; ++i)
2705 Rs[i] = DAG.getNode(ISD::OR, dl, MVT::i32, Rs[2*i], Rs[2*i+1]);
2706 }
2707 // Move the value directly to a predicate register.
2708 return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Rs[0]}, DAG);
2709 }
2710
2711 return SDValue();
2712}
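
In the v{2,4,8}i1 path above, the selects produce eight words, each either
0 or 1 << i, and the loop folds them with a balanced OR tree in
log2(8) = 3 rounds. The same reduction in isolation (illustrative sketch):

    #include <cstdint>

    // Fold eight per-bit words pairwise; after the rounds for N = 8, 4, 2
    // the combined 8-bit mask is left in W[0], mirroring the loop over
    // ArrayRef<SDValue> above.
    uint32_t orReduce8(uint32_t W[8]) {
      for (unsigned N = 8; N != 1; N /= 2)
        for (unsigned i = 0; i != N / 2; ++i)
          W[i] = W[2 * i] | W[2 * i + 1];
      return W[0];
    }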
2713
2714SDValue
2715HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
2716 SelectionDAG &DAG) const {
2717 MVT VecTy = ty(Op);
2718 const SDLoc &dl(Op);
2719 if (VecTy.getSizeInBits() == 64) {
2720 assert(Op.getNumOperands() == 2);
2721 return DAG.getNode(HexagonISD::COMBINE, dl, VecTy, Op.getOperand(1),
2722 Op.getOperand(0));
2723 }
2724
2725 MVT ElemTy = VecTy.getVectorElementType();
2726 if (ElemTy == MVT::i1) {
2727 assert(VecTy == MVT::v2i1 || VecTy == MVT::v4i1 || VecTy == MVT::v8i1);
2728 MVT OpTy = ty(Op.getOperand(0));
2729 // Scale is how many times the operands need to be contracted to match
2730 // the representation in the target register.
2731 unsigned Scale = VecTy.getVectorNumElements() / OpTy.getVectorNumElements();
2732 assert(Scale == Op.getNumOperands() && Scale > 1);
2733
2734 // First, convert all bool vectors to integers, then generate pairwise
2735 // inserts to form values of doubled length. Up until there are only
2736 // two values left to concatenate, all of these values will fit in a
2737 // 32-bit integer, so keep them as i32 to use 32-bit inserts.
2738 SmallVector<SDValue,4> Words[2];
2739 unsigned IdxW = 0;
2740
2741 for (SDValue P : Op.getNode()->op_values()) {
2742 SDValue W = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, P);
2743 for (unsigned R = Scale; R > 1; R /= 2) {
2744 W = contractPredicate(W, dl, DAG);
2745 W = DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64,
2746 DAG.getUNDEF(MVT::i32), W);
2747 }
2748 W = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, W);
2749 Words[IdxW].push_back(W);
2750 }
2751
2752 while (Scale > 2) {
2753 SDValue WidthV = DAG.getConstant(64 / Scale, dl, MVT::i32);
2754 Words[IdxW ^ 1].clear();
2755
2756 for (unsigned i = 0, e = Words[IdxW].size(); i != e; i += 2) {
2757 SDValue W0 = Words[IdxW][i], W1 = Words[IdxW][i+1];
2758 // Insert W1 into W0 right next to the significant bits of W0.
2759 SDValue T = DAG.getNode(HexagonISD::INSERT, dl, MVT::i32,
2760 {W0, W1, WidthV, WidthV});
2761 Words[IdxW ^ 1].push_back(T);
2762 }
2763 IdxW ^= 1;
2764 Scale /= 2;
2765 }
2766
2767 // Another sanity check. At this point there should only be two words
2768 // left, and Scale should be 2.
2769 assert(Scale == 2 && Words[IdxW].size() == 2);
2770
2771 SDValue WW = DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64,
2772 Words[IdxW][1], Words[IdxW][0]);
2773 return DAG.getNode(HexagonISD::D2P, dl, VecTy, WW);
2774 }
2775
2776 return SDValue();
2777}
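
Each round of the while loop halves Scale by packing adjacent words: the
significant 64/Scale bits of W1 are inserted directly above those of W0.
One such round, sketched with plain shifts and masks (hypothetical helper,
assuming Width < 32):

    #include <cstdint>
    #include <vector>

    // Pack adjacent words: each output word holds W[i+1]'s significant
    // Width bits immediately above W[i]'s, like the INSERT nodes above.
    std::vector<uint32_t> packRound(const std::vector<uint32_t> &W,
                                    unsigned Width) {
      std::vector<uint32_t> Out;
      uint32_t Mask = (1u << Width) - 1;
      for (size_t i = 0; i + 1 < W.size(); i += 2)
        Out.push_back((W[i] & Mask) | ((W[i + 1] & Mask) << Width));
      return Out;
    }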
2778
2779SDValue
2780HexagonTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
2781 SelectionDAG &DAG) const {
2782 SDValue Vec = Op.getOperand(0);
2783 MVT ElemTy = ty(Vec).getVectorElementType();
2784 return extractVector(Vec, Op.getOperand(1), SDLoc(Op), ElemTy, ty(Op), DAG);
2785}
2786
2787SDValue
2788HexagonTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
2789 SelectionDAG &DAG) const {
2790 return extractVector(Op.getOperand(0), Op.getOperand(1), SDLoc(Op),
2791 ty(Op), ty(Op), DAG);
2792}
2793
2794SDValue
2795HexagonTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
2796 SelectionDAG &DAG) const {
2797 return insertVector(Op.getOperand(0), Op.getOperand(1), Op.getOperand(2),
2798 SDLoc(Op), ty(Op).getVectorElementType(), DAG);
2799}
2800
2801SDValue
2802HexagonTargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
2803 SelectionDAG &DAG) const {
2804 SDValue ValV = Op.getOperand(1);
2805 return insertVector(Op.getOperand(0), ValV, Op.getOperand(2),
2806 SDLoc(Op), ty(ValV), DAG);
2807}
2808
2809bool
2810HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
2811 // Assuming the caller does not have either a signext or zeroext modifier, and
2812 // only one value is accepted, any reasonable truncation is allowed.
2813 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
2814 return false;
2815
2816 // FIXME: in principle up to 64-bit could be made safe, but it would be very
2817 // fragile at the moment: any support for multiple value returns would be
2818 // liable to disallow tail calls involving i64 -> iN truncation in many cases.
2819 return Ty1->getPrimitiveSizeInBits() <= 32;
2820}
2821
2822SDValue
2823HexagonTargetLowering::LowerLoad(SDValue Op, SelectionDAG &DAG) const {
2824 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
2825 unsigned ClaimAlign = LN->getAlignment();
2826 validateConstPtrAlignment(LN->getBasePtr(), SDLoc(Op), ClaimAlign);
2827 // Call LowerUnalignedLoad for all loads, it recognizes loads that
2828 // don't need extra aligning.
2829 return LowerUnalignedLoad(Op, DAG);
2830}
2831
2832SDValue
2833HexagonTargetLowering::LowerStore(SDValue Op, SelectionDAG &DAG) const {
2834 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
2835 unsigned ClaimAlign = SN->getAlignment();
2836 SDValue Ptr = SN->getBasePtr();
2837 const SDLoc &dl(Op);
2838 validateConstPtrAlignment(Ptr, dl, ClaimAlign);
2839
2840 MVT StoreTy = SN->getMemoryVT().getSimpleVT();
2841 unsigned NeedAlign = Subtarget.getTypeAlignment(StoreTy);
2842 if (ClaimAlign < NeedAlign)
2843 return expandUnalignedStore(SN, DAG);
2844 return Op;
2845}
2846
2847SDValue
2848HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG)
2849 const {
2850 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
2851 MVT LoadTy = ty(Op);
2852 unsigned NeedAlign = Subtarget.getTypeAlignment(LoadTy);
2853 unsigned HaveAlign = LN->getAlignment();
2854 if (HaveAlign >= NeedAlign)
2855 return Op;
2856
2857 const SDLoc &dl(Op);
2858 const DataLayout &DL = DAG.getDataLayout();
2859 LLVMContext &Ctx = *DAG.getContext();
2860
2861 // If the load aligning is disabled or the load can be broken up into two
2862 // smaller legal loads, do the default (target-independent) expansion.
2863 bool DoDefault = false;
2864 // Handle it in the default way if this is an indexed load.
2865 if (!LN->isUnindexed())
2866 DoDefault = true;
2867
2868 if (!AlignLoads) {
2869 if (allowsMemoryAccessForAlignment(Ctx, DL, LN->getMemoryVT(),
2870 *LN->getMemOperand()))
2871 return Op;
2872 DoDefault = true;
2873 }
2874 if (!DoDefault && (2 * HaveAlign) == NeedAlign) {
2875 // The PartTy is the equivalent of "getLoadableTypeOfSize(HaveAlign)".
2876 MVT PartTy = HaveAlign <= 8 ? MVT::getIntegerVT(8 * HaveAlign)
2877 : MVT::getVectorVT(MVT::i8, HaveAlign);
2878 DoDefault =
2879 allowsMemoryAccessForAlignment(Ctx, DL, PartTy, *LN->getMemOperand());
2880 }
2881 if (DoDefault) {
2882 std::pair<SDValue, SDValue> P = expandUnalignedLoad(LN, DAG);
2883 return DAG.getMergeValues({P.first, P.second}, dl);
2884 }
2885
2886 // The code below generates two loads, both aligned as NeedAlign, and
2887 // with the distance of NeedAlign between them. For that to cover the
2888 // bits that need to be loaded (and without overlapping), the size of
2889 // the loads should be equal to NeedAlign. This is true for all loadable
2890 // types, but add an assertion in case something changes in the future.
2891 assert(LoadTy.getSizeInBits() == 8*NeedAlign);
2892
2893 unsigned LoadLen = NeedAlign;
2894 SDValue Base = LN->getBasePtr();
2895 SDValue Chain = LN->getChain();
2896 auto BO = getBaseAndOffset(Base);
2897 unsigned BaseOpc = BO.first.getOpcode();
2898 if (BaseOpc == HexagonISD::VALIGNADDR && BO.second % LoadLen == 0)
2899 return Op;
2900
2901 if (BO.second % LoadLen != 0) {
2902 BO.first = DAG.getNode(ISD::ADD, dl, MVT::i32, BO.first,
2903 DAG.getConstant(BO.second % LoadLen, dl, MVT::i32));
2904 BO.second -= BO.second % LoadLen;
2905 }
2906 SDValue BaseNoOff = (BaseOpc != HexagonISD::VALIGNADDR)
2907 ? DAG.getNode(HexagonISD::VALIGNADDR, dl, MVT::i32, BO.first,
2908 DAG.getConstant(NeedAlign, dl, MVT::i32))
2909 : BO.first;
2910 SDValue Base0 = DAG.getMemBasePlusOffset(BaseNoOff, BO.second, dl);
2911 SDValue Base1 = DAG.getMemBasePlusOffset(BaseNoOff, BO.second+LoadLen, dl);
2912
2913 MachineMemOperand *WideMMO = nullptr;
2914 if (MachineMemOperand *MMO = LN->getMemOperand()) {
2915 MachineFunction &MF = DAG.getMachineFunction();
2916 WideMMO = MF.getMachineMemOperand(MMO->getPointerInfo(), MMO->getFlags(),
2917 2*LoadLen, LoadLen, MMO->getAAInfo(), MMO->getRanges(),
2918 MMO->getSyncScopeID(), MMO->getOrdering(),
2919 MMO->getFailureOrdering());
2920 }
2921
2922 SDValue Load0 = DAG.getLoad(LoadTy, dl, Chain, Base0, WideMMO);
2923 SDValue Load1 = DAG.getLoad(LoadTy, dl, Chain, Base1, WideMMO);
2924
2925 SDValue Aligned = DAG.getNode(HexagonISD::VALIGN, dl, LoadTy,
2926 {Load1, Load0, BaseNoOff.getOperand(0)});
2927 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2928 Load0.getValue(1), Load1.getValue(1));
2929 SDValue M = DAG.getMergeValues({Aligned, NewChain}, dl);
2930 return M;
2931}
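
The generated pattern is the classic aligning-load idiom: two loads aligned
to NeedAlign that cover the unaligned range, combined by VALIGN. A
little-endian scalar sketch of the same idea for an 8-byte load
(illustrative only; like the hardware sequence, it reads aligned words that
may extend past the requested range):

    #include <cstdint>
    #include <cstring>

    uint64_t loadUnaligned8(const uint8_t *P) {
      uintptr_t A = (uintptr_t)P & ~(uintptr_t)7;     // aligned base
      unsigned Sh = 8 * (unsigned)((uintptr_t)P - A); // 8 * misalignment
      uint64_t Lo;
      std::memcpy(&Lo, (const void *)A, 8);
      if (Sh == 0)
        return Lo;                                    // already aligned
      uint64_t Hi;
      std::memcpy(&Hi, (const void *)(A + 8), 8);
      return (Lo >> Sh) | (Hi << (64 - Sh));          // funnel shift, as VALIGN
    }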
2932
2933SDValue
2934HexagonTargetLowering::LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const {
2935 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
2936 auto *CY = dyn_cast<ConstantSDNode>(Y);
2937 if (!CY)
2938 return SDValue();
2939
2940 const SDLoc &dl(Op);
2941 SDVTList VTs = Op.getNode()->getVTList();
2942 assert(VTs.NumVTs == 2);
2943 assert(VTs.VTs[1] == MVT::i1);
2944 unsigned Opc = Op.getOpcode();
2945
2946 if (CY) {
2947 uint32_t VY = CY->getZExtValue();
2948 assert(VY != 0 && "This should have been folded");
2949 // X +/- 1
2950 if (VY != 1)
2951 return SDValue();
2952
2953 if (Opc == ISD::UADDO) {
2954 SDValue Op = DAG.getNode(ISD::ADD, dl, VTs.VTs[0], {X, Y});
2955 SDValue Ov = DAG.getSetCC(dl, MVT::i1, Op, getZero(dl, ty(Op), DAG),
2956 ISD::SETEQ);
2957 return DAG.getMergeValues({Op, Ov}, dl);
2958 }
2959 if (Opc == ISD::USUBO) {
2960 SDValue Op = DAG.getNode(ISD::SUB, dl, VTs.VTs[0], {X, Y});
2961 SDValue Ov = DAG.getSetCC(dl, MVT::i1, Op,
2962 DAG.getConstant(-1, dl, ty(Op)), ISD::SETEQ);
2963 return DAG.getMergeValues({Op, Ov}, dl);
2964 }
2965 }
2966
2967 return SDValue();
2968}
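
The overflow checks rely on Y being the constant 1: an unsigned X + 1 wraps
exactly when the sum is 0, and X - 1 borrows exactly when the difference is
all-ones. In scalar form (illustrative sketch):

    #include <cstdint>

    // UADDO with Y == 1: overflow iff the result wrapped to zero.
    bool uaddo1(uint32_t X, uint32_t &R) { R = X + 1; return R == 0; }

    // USUBO with Y == 1: borrow iff the result wrapped to all-ones.
    bool usubo1(uint32_t X, uint32_t &R) { R = X - 1; return R == UINT32_MAX; }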
2969
2970SDValue
2971HexagonTargetLowering::LowerAddSubCarry(SDValue Op, SelectionDAG &DAG) const {
2972 const SDLoc &dl(Op);
2973 unsigned Opc = Op.getOpcode();
2974 SDValue X = Op.getOperand(0), Y = Op.getOperand(1), C = Op.getOperand(2);
2975
2976 if (Opc == ISD::ADDCARRY)
2977 return DAG.getNode(HexagonISD::ADDC, dl, Op.getNode()->getVTList(),
2978 { X, Y, C });
2979
2980 EVT CarryTy = C.getValueType();
2981 SDValue SubC = DAG.getNode(HexagonISD::SUBC, dl, Op.getNode()->getVTList(),
2982 { X, Y, DAG.getLogicalNOT(dl, C, CarryTy) });
2983 SDValue Out[] = { SubC.getValue(0),
2984 DAG.getLogicalNOT(dl, SubC.getValue(1), CarryTy) };
2985 return DAG.getMergeValues(Out, dl);
2986}
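
SUBCARRY is mapped onto SUBC using the two's-complement identity
X - Y - B == X + ~Y + (1 - B): the borrow is logically negated on the way
into the node and the carry-out is negated on the way back. A scalar check
of that identity (illustrative sketch):

    #include <cstdint>

    // Subtract with borrow, computed as an add-with-carry of ~Y.
    uint32_t subBorrow(uint32_t X, uint32_t Y, bool BIn, bool &BOut) {
      uint64_t T = (uint64_t)X + (uint64_t)(uint32_t)~Y + (BIn ? 0 : 1);
      BOut = ((T >> 32) & 1) == 0;  // no carry out means a borrow occurred
      return (uint32_t)T;
    }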
2987
2988SDValue
2989HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
2990 SDValue Chain = Op.getOperand(0);
2991 SDValue Offset = Op.getOperand(1);
2992 SDValue Handler = Op.getOperand(2);
2993 SDLoc dl(Op);
2994 auto PtrVT = getPointerTy(DAG.getDataLayout());
2995
2996 // Mark function as containing a call to EH_RETURN.
2997 HexagonMachineFunctionInfo *FuncInfo =
2998 DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>();
2999 FuncInfo->setHasEHReturn();
3000
3001 unsigned OffsetReg = Hexagon::R28;
3002
3003 SDValue StoreAddr =
3004 DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getRegister(Hexagon::R30, PtrVT),
3005 DAG.getIntPtrConstant(4, dl));
3006 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
3007 Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset);
3008
3009 // Not needed: we already use it as an explicit input to EH_RETURN.
3010 // MF.getRegInfo().addLiveOut(OffsetReg);
3011
3012 return DAG.getNode(HexagonISD::EH_RETURN, dl, MVT::Other, Chain);
3013}
3014
3015SDValue
3016HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3017 unsigned Opc = Op.getOpcode();
3018
3019 // Handle INLINEASM first.
3020 if (Opc == ISD::INLINEASM || Opc == ISD::INLINEASM_BR)
1) Assuming 'Opc' is not equal to INLINEASM
2) Assuming 'Opc' is not equal to INLINEASM_BR
3) Taking false branch
3021 return LowerINLINEASM(Op, DAG);
3022
3023 if (isHvxOperation(Op)) {
4) Assuming the condition is false
5) Taking false branch
3024 // If HVX lowering returns nothing, try the default lowering.
3025 if (SDValue V = LowerHvxOperation(Op, DAG))
3026 return V;
3027 }
3028
3029 switch (Opc) {
6) Control jumps to 'case SETCC:' at line 3068
3030 default:
3031#ifndef NDEBUG
3032 Op.getNode()->dumpr(&DAG);
3033 if (Opc > HexagonISD::OP_BEGIN && Opc < HexagonISD::OP_END)
3034 errs() << "Error: check for a non-legal type in this operation\n";
3035#endif
3036 llvm_unreachable("Should not custom lower this!")::llvm::llvm_unreachable_internal("Should not custom lower this!"
, "/build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp"
, 3036)
;
3037 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
3038 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG);
3039 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
3040 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
3041 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
3042 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
3043 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
3044 case ISD::BITCAST: return LowerBITCAST(Op, DAG);
3045 case ISD::LOAD: return LowerLoad(Op, DAG);
3046 case ISD::STORE: return LowerStore(Op, DAG);
3047 case ISD::UADDO:
3048 case ISD::USUBO: return LowerUAddSubO(Op, DAG);
3049 case ISD::ADDCARRY:
3050 case ISD::SUBCARRY: return LowerAddSubCarry(Op, DAG);
3051 case ISD::SRA:
3052 case ISD::SHL:
3053 case ISD::SRL: return LowerVECTOR_SHIFT(Op, DAG);
3054 case ISD::ROTL: return LowerROTL(Op, DAG);
3055 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3056 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
3057 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
3058 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
3059 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
3060 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3061 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
3062 case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG);
3063 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3064 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
3065 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
3066 case ISD::VASTART: return LowerVASTART(Op, DAG);
3067 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
3068 case ISD::SETCC: return LowerSETCC(Op, DAG);
7) Calling 'HexagonTargetLowering::LowerSETCC'
3069 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
3070 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3071 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
3072 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG);
3073 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG);
3074 break;
3075 }
3076
3077 return SDValue();
3078}
3079
3080void
3081HexagonTargetLowering::LowerOperationWrapper(SDNode *N,
3082 SmallVectorImpl<SDValue> &Results,
3083 SelectionDAG &DAG) const {
3084 // We are only custom-lowering stores to verify the alignment of the
3085 // address if it is a compile-time constant. Since a store can be modified
3086 // during type-legalization (the value being stored may need legalization),
3087 // return empty Results here to indicate that we don't really make any
3088 // changes in the custom lowering.
3089 if (N->getOpcode() != ISD::STORE)
3090 return TargetLowering::LowerOperationWrapper(N, Results, DAG);
3091}
3092
3093void
3094HexagonTargetLowering::ReplaceNodeResults(SDNode *N,
3095 SmallVectorImpl<SDValue> &Results,
3096 SelectionDAG &DAG) const {
3097 const SDLoc &dl(N);
3098 switch (N->getOpcode()) {
3099 case ISD::SRL:
3100 case ISD::SRA:
3101 case ISD::SHL:
3102 return;
3103 case ISD::BITCAST:
3104 // Handle a bitcast from v8i1 to i8.
3105 if (N->getValueType(0) == MVT::i8) {
3106 SDValue P = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32,
3107 N->getOperand(0), DAG);
3108 SDValue T = DAG.getAnyExtOrTrunc(P, dl, MVT::i8);
3109 Results.push_back(T);
3110 }
3111 break;
3112 }
3113}
3114
3115SDValue
3116HexagonTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
3117 const {
3118 SDValue Op(N, 0);
3119 if (isHvxOperation(Op)) {
3120 if (SDValue V = PerformHvxDAGCombine(N, DCI))
3121 return V;
3122 return SDValue();
3123 }
3124
3125 const SDLoc &dl(Op);
3126 unsigned Opc = Op.getOpcode();
3127
3128 if (Opc == HexagonISD::P2D) {
3129 SDValue P = Op.getOperand(0);
3130 switch (P.getOpcode()) {
3131 case HexagonISD::PTRUE:
3132 return DCI.DAG.getConstant(-1, dl, ty(Op));
3133 case HexagonISD::PFALSE:
3134 return getZero(dl, ty(Op), DCI.DAG);
3135 default:
3136 break;
3137 }
3138 } else if (Opc == ISD::VSELECT) {
3139 // This is pretty much duplicated in HexagonISelLoweringHVX...
3140 //
3141 // (vselect (xor x, ptrue), v0, v1) -> (vselect x, v1, v0)
3142 SDValue Cond = Op.getOperand(0);
3143 if (Cond->getOpcode() == ISD::XOR) {
3144 SDValue C0 = Cond.getOperand(0), C1 = Cond.getOperand(1);
3145 if (C1->getOpcode() == HexagonISD::PTRUE) {
3146 SDValue VSel = DCI.DAG.getNode(ISD::VSELECT, dl, ty(Op), C0,
3147 Op.getOperand(2), Op.getOperand(1));
3148 return VSel;
3149 }
3150 }
3151 }
3152
3153 return SDValue();
3154}
3155
3156/// Returns relocation base for the given PIC jumptable.
3157SDValue
3158HexagonTargetLowering::getPICJumpTableRelocBase(SDValue Table,
3159 SelectionDAG &DAG) const {
3160 int Idx = cast<JumpTableSDNode>(Table)->getIndex();
3161 EVT VT = Table.getValueType();
3162 SDValue T = DAG.getTargetJumpTable(Idx, VT, HexagonII::MO_PCREL);
3163 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Table), VT, T);
3164}
3165
3166//===----------------------------------------------------------------------===//
3167// Inline Assembly Support
3168//===----------------------------------------------------------------------===//
3169
3170TargetLowering::ConstraintType
3171HexagonTargetLowering::getConstraintType(StringRef Constraint) const {
3172 if (Constraint.size() == 1) {
3173 switch (Constraint[0]) {
3174 case 'q':
3175 case 'v':
3176 if (Subtarget.useHVXOps())
3177 return C_RegisterClass;
3178 break;
3179 case 'a':
3180 return C_RegisterClass;
3181 default:
3182 break;
3183 }
3184 }
3185 return TargetLowering::getConstraintType(Constraint);
3186}
3187
3188std::pair<unsigned, const TargetRegisterClass*>
3189HexagonTargetLowering::getRegForInlineAsmConstraint(
3190 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
3191
3192 if (Constraint.size() == 1) {
3193 switch (Constraint[0]) {
3194 case 'r': // R0-R31
3195 switch (VT.SimpleTy) {
3196 default:
3197 return {0u, nullptr};
3198 case MVT::i1:
3199 case MVT::i8:
3200 case MVT::i16:
3201 case MVT::i32:
3202 case MVT::f32:
3203 return {0u, &Hexagon::IntRegsRegClass};
3204 case MVT::i64:
3205 case MVT::f64:
3206 return {0u, &Hexagon::DoubleRegsRegClass};
3207 }
3208 break;
3209 case 'a': // M0-M1
3210 if (VT != MVT::i32)
3211 return {0u, nullptr};
3212 return {0u, &Hexagon::ModRegsRegClass};
3213 case 'q': // q0-q3
3214 switch (VT.getSizeInBits()) {
3215 default:
3216 return {0u, nullptr};
3217 case 64:
3218 case 128:
3219 return {0u, &Hexagon::HvxQRRegClass};
3220 }
3221 break;
3222 case 'v': // V0-V31
3223 switch (VT.getSizeInBits()) {
3224 default:
3225 return {0u, nullptr};
3226 case 512:
3227 return {0u, &Hexagon::HvxVRRegClass};
3228 case 1024:
3229 if (Subtarget.hasV60Ops() && Subtarget.useHVX128BOps())
3230 return {0u, &Hexagon::HvxVRRegClass};
3231 return {0u, &Hexagon::HvxWRRegClass};
3232 case 2048:
3233 return {0u, &Hexagon::HvxWRRegClass};
3234 }
3235 break;
3236 default:
3237 return {0u, nullptr};
3238 }
3239 }
3240
3241 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3242}
3243
3244/// isFPImmLegal - Returns true if the target can instruction select the
3245/// specified FP immediate natively. If false, the legalizer will
3246/// materialize the FP immediate as a load from a constant pool.
3247bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
3248 bool ForCodeSize) const {
3249 return true;
3250}
3251
3252/// isLegalAddressingMode - Return true if the addressing mode represented by
3253/// AM is legal for this target, for a load/store of the specified type.
3254bool HexagonTargetLowering::isLegalAddressingMode(const DataLayout &DL,
3255 const AddrMode &AM, Type *Ty,
3256 unsigned AS, Instruction *I) const {
3257 if (Ty->isSized()) {
3258 // When LSR detects uses of the same base address to access different
3259 // types (e.g. unions), it will assume a conservative type for these
3260 // uses:
3261 // LSR Use: Kind=Address of void in addrspace(4294967295), ...
3262 // The type Ty passed here would then be "void". Skip the alignment
3263 // checks, but do not return false right away, since that confuses
3264 // LSR into crashing.
3265 unsigned A = DL.getABITypeAlignment(Ty);
3266 // The base offset must be a multiple of the alignment.
3267 if ((AM.BaseOffs % A) != 0)
3268 return false;
3269 // The shifted offset must fit in 11 bits.
3270 if (!isInt<11>(AM.BaseOffs >> Log2_32(A)))
3271 return false;
3272 }
3273
3274 // No global is ever allowed as a base.
3275 if (AM.BaseGV)
3276 return false;
3277
3278 int Scale = AM.Scale;
3279 if (Scale < 0)
3280 Scale = -Scale;
3281 switch (Scale) {
3282 case 0: // No scale reg, "r+i", "r", or just "i".
3283 break;
3284 default: // No scaled addressing mode.
3285 return false;
3286 }
3287 return true;
3288}
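
Taken together, the two offset tests say: BaseOffs must be a multiple of
the type alignment, and the scaled offset must fit a signed 11-bit field.
The same test in isolation (hypothetical helper; Align is assumed to be a
power of two, as ABI type alignments are):

    #include <cstdint>

    // Offs must be Align-aligned, and Offs >> log2(Align) must fit in a
    // signed 11-bit immediate, i.e. [-1024, 1023].
    bool offsetIsLegal(int64_t Offs, int64_t Align) {
      if (Offs % Align != 0)
        return false;
      int64_t Scaled = Offs / Align;  // equals Offs >> log2(Align) here
      return Scaled >= -1024 && Scaled <= 1023;
    }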
3289
3290/// Return true if folding a constant offset with the given GlobalAddress is
3291/// legal. It is frequently not legal in PIC relocation models.
3292bool HexagonTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA)
3293 const {
3294 return HTM.getRelocationModel() == Reloc::Static;
3295}
3296
3297/// isLegalICmpImmediate - Return true if the specified immediate is legal
3298/// icmp immediate, that is the target has icmp instructions which can compare
3299/// a register against the immediate without having to materialize the
3300/// immediate into a register.
3301bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
3302 return Imm >= -512 && Imm <= 511;
3303}
3304
3305/// IsEligibleForTailCallOptimization - Check whether the call is eligible
3306/// for tail call optimization. Targets which want to do tail call
3307/// optimization should implement this function.
3308bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
3309 SDValue Callee,
3310 CallingConv::ID CalleeCC,
3311 bool IsVarArg,
3312 bool IsCalleeStructRet,
3313 bool IsCallerStructRet,
3314 const SmallVectorImpl<ISD::OutputArg> &Outs,
3315 const SmallVectorImpl<SDValue> &OutVals,
3316 const SmallVectorImpl<ISD::InputArg> &Ins,
3317 SelectionDAG& DAG) const {
3318 const Function &CallerF = DAG.getMachineFunction().getFunction();
3319 CallingConv::ID CallerCC = CallerF.getCallingConv();
3320 bool CCMatch = CallerCC == CalleeCC;
3321
3322 // ***************************************************************************
3323 // Look for obvious safe cases to perform tail call optimization that do not
3324 // require ABI changes.
3325 // ***************************************************************************
3326
3327 // If this is a tail call via a function pointer, then don't do it!
3328 if (!isa<GlobalAddressSDNode>(Callee) &&
3329 !isa<ExternalSymbolSDNode>(Callee)) {
3330 return false;
3331 }
3332
3333 // Do not optimize if the calling conventions do not match and the conventions
3334 // used are not C or Fast.
3335 if (!CCMatch) {
3336 bool R = (CallerCC == CallingConv::C || CallerCC == CallingConv::Fast);
3337 bool E = (CalleeCC == CallingConv::C || CalleeCC == CallingConv::Fast);
3338 // If R & E, then ok.
3339 if (!R || !E)
3340 return false;
3341 }
3342
3343 // Do not tail call optimize vararg calls.
3344 if (IsVarArg)
3345 return false;
3346
3347 // Also avoid tail call optimization if either caller or callee uses struct
3348 // return semantics.
3349 if (IsCalleeStructRet || IsCallerStructRet)
3350 return false;
3351
3352 // In addition to the cases above, we also disable tail call optimization
3353 // if the calling convention requires that at least one outgoing argument
3354 // go on the stack. We cannot check that here because at this point that
3355 // information is not available.
3356 return true;
3357}
3358
3359/// Returns the target specific optimal type for load and store operations as
3360/// a result of memset, memcpy, and memmove lowering.
3361///
3362 /// If DstAlign is zero, that means the destination alignment can satisfy
3363 /// any constraint. Similarly, if SrcAlign is zero, it means there is no
3364 /// need to check it against the alignment requirement, probably because the
3365/// source does not need to be loaded. If 'IsMemset' is true, that means it's
3366/// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
3367/// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
3368/// does not need to be loaded. It returns EVT::Other if the type should be
3369/// determined using generic target-independent logic.
3370EVT HexagonTargetLowering::getOptimalMemOpType(
3371 const MemOp &Op, const AttributeList &FuncAttributes) const {
3372 if (Op.size() >= 8 && Op.isAligned(Align(8)))
3373 return MVT::i64;
3374 if (Op.size() >= 4 && Op.isAligned(Align(4)))
3375 return MVT::i32;
3376 if (Op.size() >= 2 && Op.isAligned(Align(2)))
3377 return MVT::i16;
3378 return MVT::Other;
3379}
3380
3381bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(
3382 EVT VT, unsigned AS, unsigned Align, MachineMemOperand::Flags Flags,
3383 bool *Fast) const {
3384 if (Fast)
3385 *Fast = false;
3386 return Subtarget.isHVXVectorType(VT.getSimpleVT());
3387}
3388
3389std::pair<const TargetRegisterClass*, uint8_t>
3390HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
3391 MVT VT) const {
3392 if (Subtarget.isHVXVectorType(VT, true)) {
3393 unsigned BitWidth = VT.getSizeInBits();
3394 unsigned VecWidth = Subtarget.getVectorLength() * 8;
3395
3396 if (VT.getVectorElementType() == MVT::i1)
3397 return std::make_pair(&Hexagon::HvxQRRegClass, 1);
3398 if (BitWidth == VecWidth)
3399 return std::make_pair(&Hexagon::HvxVRRegClass, 1);
3400 assert(BitWidth == 2 * VecWidth);
3401 return std::make_pair(&Hexagon::HvxWRRegClass, 1);
3402 }
3403
3404 return TargetLowering::findRepresentativeClass(TRI, VT);
3405}
3406
3407bool HexagonTargetLowering::shouldReduceLoadWidth(SDNode *Load,
3408 ISD::LoadExtType ExtTy, EVT NewVT) const {
3409 // TODO: This may be worth removing. Check regression tests for diffs.
3410 if (!TargetLoweringBase::shouldReduceLoadWidth(Load, ExtTy, NewVT))
3411 return false;
3412
3413 auto *L = cast<LoadSDNode>(Load);
3414 std::pair<SDValue,int> BO = getBaseAndOffset(L->getBasePtr());
3415 // Small-data object, do not shrink.
3416 if (BO.first.getOpcode() == HexagonISD::CONST32_GP)
3417 return false;
3418 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(BO.first)) {
3419 auto &HTM = static_cast<const HexagonTargetMachine&>(getTargetMachine());
3420 const auto *GO = dyn_cast_or_null<const GlobalObject>(GA->getGlobal());
3421 return !GO || !HTM.getObjFileLowering()->isGlobalInSmallSection(GO, HTM);
3422 }
3423 return true;
3424}
3425
3426Value *HexagonTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
3427 AtomicOrdering Ord) const {
3428 BasicBlock *BB = Builder.GetInsertBlock();
3429 Module *M = BB->getParent()->getParent();
3430 auto PT = cast<PointerType>(Addr->getType());
3431 Type *Ty = PT->getElementType();
3432 unsigned SZ = Ty->getPrimitiveSizeInBits();
3433 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
3434 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
3435 : Intrinsic::hexagon_L4_loadd_locked;
3436 Function *Fn = Intrinsic::getDeclaration(M, IntID);
3437
3438 PointerType *NewPtrTy
3439 = Builder.getIntNTy(SZ)->getPointerTo(PT->getAddressSpace());
3440 Addr = Builder.CreateBitCast(Addr, NewPtrTy);
3441
3442 Value *Call = Builder.CreateCall(Fn, Addr, "larx");
3443
3444 return Builder.CreateBitCast(Call, Ty);
3445}
3446
3447/// Perform a store-conditional operation to Addr. Return the status of the
3448/// store. This should be 0 if the store succeeded, non-zero otherwise.
3449Value *HexagonTargetLowering::emitStoreConditional(IRBuilder<> &Builder,
3450 Value *Val, Value *Addr, AtomicOrdering Ord) const {
3451 BasicBlock *BB = Builder.GetInsertBlock();
3452 Module *M = BB->getParent()->getParent();
3453 Type *Ty = Val->getType();
3454 unsigned SZ = Ty->getPrimitiveSizeInBits();
3455
3456 Type *CastTy = Builder.getIntNTy(SZ);
3457 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
3458 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
3459 : Intrinsic::hexagon_S4_stored_locked;
3460 Function *Fn = Intrinsic::getDeclaration(M, IntID);
3461
3462 unsigned AS = Addr->getType()->getPointerAddressSpace();
3463 Addr = Builder.CreateBitCast(Addr, CastTy->getPointerTo(AS));
3464 Val = Builder.CreateBitCast(Val, CastTy);
3465
3466 Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx");
3467 Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");
3468 Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext()));
3469 return Ext;
3470}
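
When shouldExpandAtomicCmpXchgInIR below returns LLSC, the AtomicExpand
pass stitches these two hooks into a retry loop. The rough shape of that
expansion for a 32-bit cmpxchg, with hypothetical stand-ins for the
locked-load/locked-store intrinsics (a sketch of the generated control
flow, not the actual pass output):

    #include <cstdint>

    uint32_t loadLocked(volatile uint32_t *Addr);         // L2_loadw_locked
    bool storeCond(volatile uint32_t *Addr, uint32_t V);  // S2_storew_locked

    bool cmpxchg32(volatile uint32_t *Addr, uint32_t &Expected,
                   uint32_t Desired) {
      for (;;) {
        uint32_t Old = loadLocked(Addr);
        if (Old != Expected) {
          Expected = Old;    // comparison failed: report the observed value
          return false;
        }
        if (storeCond(Addr, Desired))
          return true;       // store-conditional succeeded
        // Reservation lost: retry the load-locked/store-conditional pair.
      }
    }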
3471
3472TargetLowering::AtomicExpansionKind
3473HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
3474 // Do not expand loads and stores that don't exceed 64 bits.
3475 return LI->getType()->getPrimitiveSizeInBits() > 64
3476 ? AtomicExpansionKind::LLOnly
3477 : AtomicExpansionKind::None;
3478}
3479
3480bool HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
3481 // Do not expand loads and stores that don't exceed 64 bits.
3482 return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64;
3483}
3484
3485TargetLowering::AtomicExpansionKind
3486HexagonTargetLowering::shouldExpandAtomicCmpXchgInIR(
3487 AtomicCmpXchgInst *AI) const {
3488 const DataLayout &DL = AI->getModule()->getDataLayout();
3489 unsigned Size = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
3490 if (Size >= 4 && Size <= 8)
3491 return AtomicExpansionKind::LLSC;
3492 return AtomicExpansionKind::None;
3493}

/build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h

1//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SDNode class and derived classes, which are used to
10// represent the nodes and operations present in a SelectionDAG. These nodes
11// and operations are machine code level operations, with some similarities to
12// the GCC RTL representation.
13//
14// Clients should include the SelectionDAG.h file instead of this file directly.
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19#define LLVM_CODEGEN_SELECTIONDAGNODES_H
20
21#include "llvm/ADT/APFloat.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/FoldingSet.h"
25#include "llvm/ADT/GraphTraits.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/ilist_node.h"
29#include "llvm/ADT/iterator.h"
30#include "llvm/ADT/iterator_range.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/ValueTypes.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/DebugLoc.h"
36#include "llvm/IR/Instruction.h"
37#include "llvm/IR/Instructions.h"
38#include "llvm/IR/Metadata.h"
39#include "llvm/IR/Operator.h"
40#include "llvm/Support/AlignOf.h"
41#include "llvm/Support/AtomicOrdering.h"
42#include "llvm/Support/Casting.h"
43#include "llvm/Support/ErrorHandling.h"
44#include "llvm/Support/MachineValueType.h"
45#include "llvm/Support/TypeSize.h"
46#include <algorithm>
47#include <cassert>
48#include <climits>
49#include <cstddef>
50#include <cstdint>
51#include <cstring>
52#include <iterator>
53#include <string>
54#include <tuple>
55
56namespace llvm {
57
58class APInt;
59class Constant;
60template <typename T> struct DenseMapInfo;
61class GlobalValue;
62class MachineBasicBlock;
63class MachineConstantPoolValue;
64class MCSymbol;
65class raw_ostream;
66class SDNode;
67class SelectionDAG;
68class Type;
69class Value;
70
71void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
72 bool force = false);
73
74/// This represents a list of ValueType's that has been intern'd by
75/// a SelectionDAG. Instances of this simple value class are returned by
76/// SelectionDAG::getVTList(...).
77///
78struct SDVTList {
79 const EVT *VTs;
80 unsigned int NumVTs;
81};
82
83namespace ISD {
84
85 /// Node predicates
86
87 /// If N is a BUILD_VECTOR node whose elements are all the same constant or
88 /// undefined, return true and return the constant value in \p SplatValue.
89 bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
90
91 /// Return true if the specified node is a BUILD_VECTOR where all of the
92 /// elements are ~0 or undef.
93 bool isBuildVectorAllOnes(const SDNode *N);
94
95 /// Return true if the specified node is a BUILD_VECTOR where all of the
96 /// elements are 0 or undef.
97 bool isBuildVectorAllZeros(const SDNode *N);
98
99 /// Return true if the specified node is a BUILD_VECTOR node of all
100 /// ConstantSDNode or undef.
101 bool isBuildVectorOfConstantSDNodes(const SDNode *N);
102
103 /// Return true if the specified node is a BUILD_VECTOR node of all
104 /// ConstantFPSDNode or undef.
105 bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
106
107 /// Return true if the node has at least one operand and all operands of the
108 /// specified node are ISD::UNDEF.
109 bool allOperandsUndef(const SDNode *N);
110
111} // end namespace ISD
112
113//===----------------------------------------------------------------------===//
114/// Unlike LLVM values, Selection DAG nodes may return multiple
115/// values as the result of a computation. Many nodes return multiple values,
116/// from loads (which define a token and a return value) to ADDC (which returns
117/// a result and a carry value), to calls (which may return an arbitrary number
118/// of values).
119///
120/// As such, each use of a SelectionDAG computation must indicate the node that
121/// computes it as well as which return value to use from that node. This pair
122/// of information is represented with the SDValue value type.
123///
124class SDValue {
125 friend struct DenseMapInfo<SDValue>;
126
127 SDNode *Node = nullptr; // The node defining the value we are using.
128 unsigned ResNo = 0; // Which return value of the node we are using.
129
130public:
131 SDValue() = default;
132 SDValue(SDNode *node, unsigned resno);
133
134 /// get the index which selects a specific result in the SDNode
135 unsigned getResNo() const { return ResNo; }
136
137 /// get the SDNode which holds the desired result
138 SDNode *getNode() const { return Node; }
139
140 /// set the SDNode
141 void setNode(SDNode *N) { Node = N; }
142
143 inline SDNode *operator->() const { return Node; }
144
145 bool operator==(const SDValue &O) const {
146 return Node == O.Node && ResNo == O.ResNo;
147 }
148 bool operator!=(const SDValue &O) const {
149 return !operator==(O);
150 }
151 bool operator<(const SDValue &O) const {
152 return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
153 }
154 explicit operator bool() const {
155 return Node != nullptr;
156 }
157
158 SDValue getValue(unsigned R) const {
159 return SDValue(Node, R);
160 }
161
162 /// Return true if this node is an operand of N.
163 bool isOperandOf(const SDNode *N) const;
164
165 /// Return the ValueType of the referenced return value.
166 inline EVT getValueType() const;
167
168 /// Return the simple ValueType of the referenced return value.
169 MVT getSimpleValueType() const {
170 return getValueType().getSimpleVT();
171 }
172
173 /// Returns the size of the value in bits.
174 ///
175 /// If the value type is a scalable vector type, the scalable property will
176 /// be set and the runtime size will be a positive integer multiple of the
177 /// base size.
178 TypeSize getValueSizeInBits() const {
179 return getValueType().getSizeInBits();
180 }
181
182 TypeSize getScalarValueSizeInBits() const {
183 return getValueType().getScalarType().getSizeInBits();
184 }
185
186 // Forwarding methods - These forward to the corresponding methods in SDNode.
187 inline unsigned getOpcode() const;
188 inline unsigned getNumOperands() const;
189 inline const SDValue &getOperand(unsigned i) const;
190 inline uint64_t getConstantOperandVal(unsigned i) const;
191 inline const APInt &getConstantOperandAPInt(unsigned i) const;
192 inline bool isTargetMemoryOpcode() const;
193 inline bool isTargetOpcode() const;
194 inline bool isMachineOpcode() const;
195 inline bool isUndef() const;
196 inline unsigned getMachineOpcode() const;
197 inline const DebugLoc &getDebugLoc() const;
198 inline void dump() const;
199 inline void dump(const SelectionDAG *G) const;
200 inline void dumpr() const;
201 inline void dumpr(const SelectionDAG *G) const;
202
203 /// Return true if this operand (which must be a chain) reaches the
204 /// specified operand without crossing any side-effecting instructions.
205 /// In practice, this looks through token factors and non-volatile loads.
206 /// In order to remain efficient, this only
207 /// looks a couple of nodes in; it does not do an exhaustive search.
208 bool reachesChainWithoutSideEffects(SDValue Dest,
209 unsigned Depth = 2) const;
210
211 /// Return true if there are no nodes using value ResNo of Node.
212 inline bool use_empty() const;
213
214 /// Return true if there is exactly one node using value ResNo of Node.
215 inline bool hasOneUse() const;
216};
217
218template<> struct DenseMapInfo<SDValue> {
219 static inline SDValue getEmptyKey() {
220 SDValue V;
221 V.ResNo = -1U;
222 return V;
223 }
224
225 static inline SDValue getTombstoneKey() {
226 SDValue V;
227 V.ResNo = -2U;
228 return V;
229 }
230
231 static unsigned getHashValue(const SDValue &Val) {
232 return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
233 (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
234 }
235
236 static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
237 return LHS == RHS;
238 }
239};
240
241/// Allow casting operators to work directly on
242/// SDValues as if they were SDNode*'s.
243template<> struct simplify_type<SDValue> {
244 using SimpleType = SDNode *;
245
246 static SimpleType getSimplifiedValue(SDValue &Val) {
247 return Val.getNode();
248 }
249};
250template<> struct simplify_type<const SDValue> {
251 using SimpleType = /*const*/ SDNode *;
252
253 static SimpleType getSimplifiedValue(const SDValue &Val) {
254 return Val.getNode();
255 }
256};
257
258/// Represents a use of a SDNode. This class holds an SDValue,
259/// which records the SDNode being used and the result number, a
260/// pointer to the SDNode using the value, and Next and Prev pointers,
261/// which link together all the uses of an SDNode.
262///
263class SDUse {
264 /// Val - The value being used.
265 SDValue Val;
266 /// User - The user of this value.
267 SDNode *User = nullptr;
268 /// Prev, Next - Pointers to the uses list of the SDNode referred by
269 /// this operand.
270 SDUse **Prev = nullptr;
271 SDUse *Next = nullptr;
272
273public:
274 SDUse() = default;
275 SDUse(const SDUse &U) = delete;
276 SDUse &operator=(const SDUse &) = delete;
277
278 /// Normally SDUse will just implicitly convert to an SDValue that it holds.
279 operator const SDValue&() const { return Val; }
280
281 /// If implicit conversion to SDValue doesn't work, the get() method returns
282 /// the SDValue.
283 const SDValue &get() const { return Val; }
284
285 /// This returns the SDNode that contains this Use.
286 SDNode *getUser() { return User; }
287
288 /// Get the next SDUse in the use list.
289 SDUse *getNext() const { return Next; }
290
291 /// Convenience function for get().getNode().
292 SDNode *getNode() const { return Val.getNode(); }
293 /// Convenience function for get().getResNo().
294 unsigned getResNo() const { return Val.getResNo(); }
295 /// Convenience function for get().getValueType().
296 EVT getValueType() const { return Val.getValueType(); }
297
298 /// Convenience function for get().operator==
299 bool operator==(const SDValue &V) const {
300 return Val == V;
301 }
302
303 /// Convenience function for get().operator!=
304 bool operator!=(const SDValue &V) const {
305 return Val != V;
306 }
307
308 /// Convenience function for get().operator<
309 bool operator<(const SDValue &V) const {
310 return Val < V;
311 }
312
313private:
314 friend class SelectionDAG;
315 friend class SDNode;
316 // TODO: unfriend HandleSDNode once we fix its operand handling.
317 friend class HandleSDNode;
318
319 void setUser(SDNode *p) { User = p; }
320
321 /// Remove this use from its existing use list, assign it the
322 /// given value, and add it to the new value's node's use list.
323 inline void set(const SDValue &V);
324 /// Like set, but only supports initializing a newly-allocated
325 /// SDUse with a non-null value.
326 inline void setInitial(const SDValue &V);
327 /// Like set, but only sets the Node portion of the value,
328 /// leaving the ResNo portion unmodified.
329 inline void setNode(SDNode *N);
330
331 void addToList(SDUse **List) {
332 Next = *List;
333 if (Next) Next->Prev = &Next;
334 Prev = List;
335 *List = this;
336 }
337
338 void removeFromList() {
339 *Prev = Next;
340 if (Next) Next->Prev = Prev;
341 }
342};
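
Note how addToList and removeFromList implement an intrusive doubly linked
use list in which Prev points at whichever slot points at this node (the
list head or the previous node's Next). Unlinking therefore needs neither
the head pointer nor a null check for a predecessor. The same trick in
standalone form (illustrative sketch):

    // Prev points at the slot holding the pointer to this node, so remove()
    // works the same for the head element and for interior elements.
    struct Link {
      Link **Prev = nullptr;
      Link *Next = nullptr;

      void addTo(Link **List) {
        Next = *List;
        if (Next) Next->Prev = &Next;
        Prev = List;
        *List = this;
      }

      void remove() {
        *Prev = Next;
        if (Next) Next->Prev = Prev;
      }
    };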
343
344/// simplify_type specializations - Allow casting operators to work directly on
345/// SDValues as if they were SDNode*'s.
346template<> struct simplify_type<SDUse> {
347 using SimpleType = SDNode *;
348
349 static SimpleType getSimplifiedValue(SDUse &Val) {
350 return Val.getNode();
351 }
352};
353
354/// These are IR-level optimization flags that may be propagated to SDNodes.
355/// TODO: This data structure should be shared by the IR optimizer and the
356/// backend.
357struct SDNodeFlags {
358private:
359 // This bit is used to determine if the flags are in a defined state.
360 // Flag bits can only be masked out during intersection if the masking flags
361 // are defined.
362 bool AnyDefined : 1;
363
364 bool NoUnsignedWrap : 1;
365 bool NoSignedWrap : 1;
366 bool Exact : 1;
367 bool NoNaNs : 1;
368 bool NoInfs : 1;
369 bool NoSignedZeros : 1;
370 bool AllowReciprocal : 1;
371 bool VectorReduction : 1;
372 bool AllowContract : 1;
373 bool ApproximateFuncs : 1;
374 bool AllowReassociation : 1;
375
376 // We assume instructions do not raise floating-point exceptions by default,
377 // and only those marked explicitly may do so. We could choose to represent
378 // this via a positive "FPExcept" flags like on the MI level, but having a
379 // negative "NoFPExcept" flag here (that defaults to true) makes the flag
380 // intersection logic more straightforward.
381 bool NoFPExcept : 1;
382
383public:
384 /// Default constructor turns off all optimization flags.
385 SDNodeFlags()
386 : AnyDefined(false), NoUnsignedWrap(false), NoSignedWrap(false),
387 Exact(false), NoNaNs(false), NoInfs(false),
388 NoSignedZeros(false), AllowReciprocal(false), VectorReduction(false),
389 AllowContract(false), ApproximateFuncs(false),
390 AllowReassociation(false), NoFPExcept(false) {}
391
392 /// Propagate the fast-math-flags from an IR FPMathOperator.
393 void copyFMF(const FPMathOperator &FPMO) {
394 setNoNaNs(FPMO.hasNoNaNs());
395 setNoInfs(FPMO.hasNoInfs());
396 setNoSignedZeros(FPMO.hasNoSignedZeros());
397 setAllowReciprocal(FPMO.hasAllowReciprocal());
398 setAllowContract(FPMO.hasAllowContract());
399 setApproximateFuncs(FPMO.hasApproxFunc());
400 setAllowReassociation(FPMO.hasAllowReassoc());
401 }
402
403 /// Sets the state of the flags to the defined state.
404 void setDefined() { AnyDefined = true; }
405 /// Returns true if the flags are in a defined state.
406 bool isDefined() const { return AnyDefined; }
407
408 // These are mutators for each flag.
409 void setNoUnsignedWrap(bool b) {
410 setDefined();
411 NoUnsignedWrap = b;
412 }
413 void setNoSignedWrap(bool b) {
414 setDefined();
415 NoSignedWrap = b;
416 }
417 void setExact(bool b) {
418 setDefined();
419 Exact = b;
420 }
421 void setNoNaNs(bool b) {
422 setDefined();
423 NoNaNs = b;
424 }
425 void setNoInfs(bool b) {
426 setDefined();
427 NoInfs = b;
428 }
429 void setNoSignedZeros(bool b) {
430 setDefined();
431 NoSignedZeros = b;
432 }
433 void setAllowReciprocal(bool b) {
434 setDefined();
435 AllowReciprocal = b;
436 }
437 void setVectorReduction(bool b) {
438 setDefined();
439 VectorReduction = b;
440 }
441 void setAllowContract(bool b) {
442 setDefined();
443 AllowContract = b;
444 }
445 void setApproximateFuncs(bool b) {
446 setDefined();
447 ApproximateFuncs = b;
448 }
449 void setAllowReassociation(bool b) {
450 setDefined();
451 AllowReassociation = b;
452 }
453 void setNoFPExcept(bool b) {
454 setDefined();
455 NoFPExcept = b;
456 }
457
458 // These are accessors for each flag.
459 bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
460 bool hasNoSignedWrap() const { return NoSignedWrap; }
461 bool hasExact() const { return Exact; }
462 bool hasNoNaNs() const { return NoNaNs; }
463 bool hasNoInfs() const { return NoInfs; }
464 bool hasNoSignedZeros() const { return NoSignedZeros; }
465 bool hasAllowReciprocal() const { return AllowReciprocal; }
466 bool hasVectorReduction() const { return VectorReduction; }
467 bool hasAllowContract() const { return AllowContract; }
468 bool hasApproximateFuncs() const { return ApproximateFuncs; }
469 bool hasAllowReassociation() const { return AllowReassociation; }
470 bool hasNoFPExcept() const { return NoFPExcept; }
471
472 /// Clear any flags in this flag set that aren't also set in Flags.
473 /// If the given Flags are undefined then don't do anything.
474 void intersectWith(const SDNodeFlags Flags) {
475 if (!Flags.isDefined())
476 return;
477 NoUnsignedWrap &= Flags.NoUnsignedWrap;
478 NoSignedWrap &= Flags.NoSignedWrap;
479 Exact &= Flags.Exact;
480 NoNaNs &= Flags.NoNaNs;
481 NoInfs &= Flags.NoInfs;
482 NoSignedZeros &= Flags.NoSignedZeros;
483 AllowReciprocal &= Flags.AllowReciprocal;
484 VectorReduction &= Flags.VectorReduction;
485 AllowContract &= Flags.AllowContract;
486 ApproximateFuncs &= Flags.ApproximateFuncs;
487 AllowReassociation &= Flags.AllowReassociation;
488 NoFPExcept &= Flags.NoFPExcept;
489 }
490};
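
A hedged sketch of the intended flow (helper names are illustrative): flags are
seeded from IR fast-math flags via copyFMF, and CSE keeps only the flags common
to both nodes via intersectWith:

  #include "llvm/CodeGen/SelectionDAGNodes.h"
  #include "llvm/IR/Instruction.h"
  #include "llvm/IR/Operator.h"
  using namespace llvm;

  static SDNodeFlags flagsFromIR(const Instruction &I) {
    SDNodeFlags Flags;
    if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
      Flags.copyFMF(*FPOp);        // nnan/ninf/nsz/arcp/contract/afn/reassoc
    return Flags;
  }

  static void mergeOnCSE(SDNodeFlags &Existing, const SDNodeFlags &New) {
    Existing.intersectWith(New);   // no-op while New is still undefined
  }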
491
492/// Represents one node in the SelectionDAG.
493///
494class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
495private:
496 /// The operation that this node performs.
497 int16_t NodeType;
498
499protected:
500 // We define a set of mini-helper classes to help us interpret the bits in our
501 // SubclassData. These are designed to fit within a uint16_t so they pack
502 // with NodeType.
503
504#if defined(_AIX) && (!defined(__GNUC__) || defined(__ibmxl__))
505// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
506// and give the `pack` pragma push semantics.
507#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")
508#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")
509#else
510#define BEGIN_TWO_BYTE_PACK()
511#define END_TWO_BYTE_PACK()
512#endif
513
514BEGIN_TWO_BYTE_PACK()
515 class SDNodeBitfields {
516 friend class SDNode;
517 friend class MemIntrinsicSDNode;
518 friend class MemSDNode;
519 friend class SelectionDAG;
520
521 uint16_t HasDebugValue : 1;
522 uint16_t IsMemIntrinsic : 1;
523 uint16_t IsDivergent : 1;
524 };
525 enum { NumSDNodeBits = 3 };
526
527 class ConstantSDNodeBitfields {
528 friend class ConstantSDNode;
529
530 uint16_t : NumSDNodeBits;
531
532 uint16_t IsOpaque : 1;
533 };
534
535 class MemSDNodeBitfields {
536 friend class MemSDNode;
537 friend class MemIntrinsicSDNode;
538 friend class AtomicSDNode;
539
540 uint16_t : NumSDNodeBits;
541
542 uint16_t IsVolatile : 1;
543 uint16_t IsNonTemporal : 1;
544 uint16_t IsDereferenceable : 1;
545 uint16_t IsInvariant : 1;
546 };
547 enum { NumMemSDNodeBits = NumSDNodeBits + 4 };
548
549 class LSBaseSDNodeBitfields {
550 friend class LSBaseSDNode;
551 friend class MaskedLoadStoreSDNode;
552 friend class MaskedGatherScatterSDNode;
553
554 uint16_t : NumMemSDNodeBits;
555
556 // This storage is shared between disparate class hierarchies to hold an
557 // enumeration specific to the class hierarchy in use.
558 // LSBaseSDNode => enum ISD::MemIndexedMode
559 // MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode
560 // MaskedGatherScatterSDNode => enum ISD::MemIndexType
561 uint16_t AddressingMode : 3;
562 };
563 enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };
564
565 class LoadSDNodeBitfields {
566 friend class LoadSDNode;
567 friend class MaskedLoadSDNode;
568
569 uint16_t : NumLSBaseSDNodeBits;
570
571 uint16_t ExtTy : 2; // enum ISD::LoadExtType
572 uint16_t IsExpanding : 1;
573 };
574
575 class StoreSDNodeBitfields {
576 friend class StoreSDNode;
577 friend class MaskedStoreSDNode;
578
579 uint16_t : NumLSBaseSDNodeBits;
580
581 uint16_t IsTruncating : 1;
582 uint16_t IsCompressing : 1;
583 };
584
585 union {
586 char RawSDNodeBits[sizeof(uint16_t)];
587 SDNodeBitfields SDNodeBits;
588 ConstantSDNodeBitfields ConstantSDNodeBits;
589 MemSDNodeBitfields MemSDNodeBits;
590 LSBaseSDNodeBitfields LSBaseSDNodeBits;
591 LoadSDNodeBitfields LoadSDNodeBits;
592 StoreSDNodeBitfields StoreSDNodeBits;
593 };
594END_TWO_BYTE_PACK()
595#undef BEGIN_TWO_BYTE_PACK
596#undef END_TWO_BYTE_PACK
597
598 // RawSDNodeBits must cover the entirety of the union. This means that all of
599 // the union's members must have size <= RawSDNodeBits. We write the RHS as
600 // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
601 static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
602 static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
603 static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
604 static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
605 static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
606 static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
607
608private:
609 friend class SelectionDAG;
610 // TODO: unfriend HandleSDNode once we fix its operand handling.
611 friend class HandleSDNode;
612
613 /// Unique id per SDNode in the DAG.
614 int NodeId = -1;
615
616 /// The values that are used by this operation.
617 SDUse *OperandList = nullptr;
618
619 /// The types of the values this node defines. SDNodes may
620 /// define multiple values simultaneously.
621 const EVT *ValueList;
622
623 /// List of uses for this SDNode.
624 SDUse *UseList = nullptr;
625
626 /// The number of entries in the Operand/Value list.
627 unsigned short NumOperands = 0;
628 unsigned short NumValues;
629
630 // The ordering of the SDNodes. It roughly corresponds to the ordering of the
631 // original LLVM instructions.
632 // This is used for turning off scheduling, because we'll forgo
633 // the normal scheduling algorithms and output the instructions according to
634 // this ordering.
635 unsigned IROrder;
636
637 /// Source line information.
638 DebugLoc debugLoc;
639
640 /// Return a pointer to the specified value type.
641 static const EVT *getValueTypeList(EVT VT);
642
643 SDNodeFlags Flags;
644
645public:
646 /// Unique and persistent id per SDNode in the DAG.
647 /// Used for debug printing.
648 uint16_t PersistentId;
649
650 //===--------------------------------------------------------------------===//
651 // Accessors
652 //
653
654 /// Return the SelectionDAG opcode value for this node. For
655 /// pre-isel nodes (those for which isMachineOpcode returns false), these
656 /// are the opcode values in the ISD and <target>ISD namespaces. For
657 /// post-isel opcodes, see getMachineOpcode.
658 unsigned getOpcode() const { return (unsigned short)NodeType; }
659
660 /// Test if this node has a target-specific opcode (in the
661 /// \<target\>ISD namespace).
662 bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
663
664 /// Test if this node has a target-specific opcode that may raise
665 /// FP exceptions (in the \<target\>ISD namespace and greater than
666 /// FIRST_TARGET_STRICTFP_OPCODE). Note that all target memory
667 /// opcodes are currently automatically considered to possibly raise
668 /// FP exceptions as well.
669 bool isTargetStrictFPOpcode() const {
670 return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE;
671 }
672
673 /// Test if this node has a target-specific
674 /// memory-referencing opcode (in the \<target\>ISD namespace and
675 /// greater than FIRST_TARGET_MEMORY_OPCODE).
676 bool isTargetMemoryOpcode() const {
677 return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
678 }
679
680 /// Return true if this node's opcode is ISD::UNDEF.
681 bool isUndef() const { return NodeType == ISD::UNDEF; }
682
683 /// Test if this node is a memory intrinsic (with valid pointer information).
684 /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
685 /// non-memory intrinsics (with chains) that are not really instances of
686 /// MemSDNode. For such nodes, we need some extra state to determine the
687 /// proper classof relationship.
688 bool isMemIntrinsic() const {
689 return (NodeType == ISD::INTRINSIC_W_CHAIN ||
690 NodeType == ISD::INTRINSIC_VOID) &&
691 SDNodeBits.IsMemIntrinsic;
692 }
693
694 /// Test if this node is a strict floating point pseudo-op.
695 bool isStrictFPOpcode() {
696 switch (NodeType) {
697 default:
698 return false;
699 case ISD::STRICT_FP16_TO_FP:
700 case ISD::STRICT_FP_TO_FP16:
701#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
702 case ISD::STRICT_##DAGN:
703#include "llvm/IR/ConstrainedOps.def"
704 return true;
705 }
706 }
707
708 /// Test if this node has a post-isel opcode, directly
709 /// corresponding to a MachineInstr opcode.
710 bool isMachineOpcode() const { return NodeType < 0; }
711
712 /// This may only be called if isMachineOpcode returns
713 /// true. It returns the MachineInstr opcode value that the node's opcode
714 /// corresponds to.
715 unsigned getMachineOpcode() const {
716 assert(isMachineOpcode() && "Not a MachineInstr opcode!");
717 return ~NodeType;
718 }
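
The encoding behind these two members can be spelled out in a standalone worked
example (the opcode value is made up):

  #include <cassert>
  #include <cstdint>

  int main() {
    unsigned MIOpc = 42;                               // hypothetical MachineInstr opcode
    int16_t NodeType = static_cast<int16_t>(~MIOpc);   // stored as the bitwise complement
    assert(NodeType < 0);                              // i.e. isMachineOpcode()
    assert(static_cast<unsigned>(~NodeType) == MIOpc); // i.e. getMachineOpcode()
  }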
719
720 bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
721 void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }
722
723 bool isDivergent() const { return SDNodeBits.IsDivergent; }
724
725 /// Return true if there are no uses of this node.
726 bool use_empty() const { return UseList == nullptr; }
727
728 /// Return true if there is exactly one use of this node.
729 bool hasOneUse() const {
730 return !use_empty() && std::next(use_begin()) == use_end();
731 }
732
733 /// Return the number of uses of this node. This method takes
734 /// time proportional to the number of uses.
735 size_t use_size() const { return std::distance(use_begin(), use_end()); }
736
737 /// Return the unique node id.
738 int getNodeId() const { return NodeId; }
739
740 /// Set unique node id.
741 void setNodeId(int Id) { NodeId = Id; }
742
743 /// Return the node ordering.
744 unsigned getIROrder() const { return IROrder; }
745
746 /// Set the node ordering.
747 void setIROrder(unsigned Order) { IROrder = Order; }
748
749 /// Return the source location info.
750 const DebugLoc &getDebugLoc() const { return debugLoc; }
751
752 /// Set source location info. Try to avoid this; setting it in the
753 /// constructor is preferable.
754 void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
755
756 /// This class provides iterator support for SDUse
757 /// operands that use a specific SDNode.
758 class use_iterator
759 : public std::iterator<std::forward_iterator_tag, SDUse, ptrdiff_t> {
760 friend class SDNode;
761
762 SDUse *Op = nullptr;
763
764 explicit use_iterator(SDUse *op) : Op(op) {}
765
766 public:
767 using reference = std::iterator<std::forward_iterator_tag,
768 SDUse, ptrdiff_t>::reference;
769 using pointer = std::iterator<std::forward_iterator_tag,
770 SDUse, ptrdiff_t>::pointer;
771
772 use_iterator() = default;
773 use_iterator(const use_iterator &I) : Op(I.Op) {}
774
775 bool operator==(const use_iterator &x) const {
776 return Op == x.Op;
777 }
778 bool operator!=(const use_iterator &x) const {
779 return !operator==(x);
780 }
781
782 /// Return true if this iterator is at the end of uses list.
783 bool atEnd() const { return Op == nullptr; }
784
785 // Iterator traversal: forward iteration only.
786 use_iterator &operator++() { // Preincrement
787 assert(Op && "Cannot increment end iterator!");
788 Op = Op->getNext();
789 return *this;
790 }
791
792 use_iterator operator++(int) { // Postincrement
793 use_iterator tmp = *this; ++*this; return tmp;
794 }
795
796 /// Retrieve a pointer to the current user node.
797 SDNode *operator*() const {
798 assert(Op && "Cannot dereference end iterator!");
799 return Op->getUser();
800 }
801
802 SDNode *operator->() const { return operator*(); }
803
804 SDUse &getUse() const { return *Op; }
805
806 /// Retrieve the operand # of this use in its user.
807 unsigned getOperandNo() const {
808 assert(Op && "Cannot dereference end iterator!");
809 return (unsigned)(Op - Op->getUser()->OperandList);
810 }
811 };
812
813 /// Provide iteration support to walk over all uses of an SDNode.
814 use_iterator use_begin() const {
815 return use_iterator(UseList);
816 }
817
818 static use_iterator use_end() { return use_iterator(nullptr); }
819
820 inline iterator_range<use_iterator> uses() {
821 return make_range(use_begin(), use_end());
822 }
823 inline iterator_range<use_iterator> uses() const {
824 return make_range(use_begin(), use_end());
825 }
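
A small sketch of walking the use list with this iterator (assuming an
SDNode *N; the function name is illustrative):

  #include "llvm/CodeGen/SelectionDAGNodes.h"
  using namespace llvm;

  static unsigned countChainUsers(const SDNode *N) {
    unsigned Count = 0;
    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
         UI != UE; ++UI)
      if (UI.getUse().get().getValueType() == MVT::Other) // chain edges
        ++Count;                     // *UI would yield the user SDNode*
    return Count;
  }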
826
827 /// Return true if there are exactly NUSES uses of the indicated value.
828 /// This method ignores uses of other values defined by this operation.
829 bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;
830
831 /// Return true if there are any use of the indicated value.
832 /// This method ignores uses of other values defined by this operation.
833 bool hasAnyUseOfValue(unsigned Value) const;
834
835 /// Return true if this node is the only use of N.
836 bool isOnlyUserOf(const SDNode *N) const;
837
838 /// Return true if this node is an operand of N.
839 bool isOperandOf(const SDNode *N) const;
840
841 /// Return true if this node is a predecessor of N.
842 /// NOTE: Implemented on top of hasPredecessor and every bit as
843 /// expensive. Use carefully.
844 bool isPredecessorOf(const SDNode *N) const {
845 return N->hasPredecessor(this);
846 }
847
848 /// Return true if N is a predecessor of this node.
849 /// N is either an operand of this node, or can be reached by recursively
850 /// traversing up the operands.
851 /// NOTE: This is an expensive method. Use it carefully.
852 bool hasPredecessor(const SDNode *N) const;
853
854 /// Returns true if N is a predecessor of any node in Worklist. This
855 /// helper keeps the Visited and Worklist sets external so that unioned
856 /// searches can be performed in parallel, results can be cached across
857 /// queries, and the Worklist can grow incrementally. Stops early if N is
858 /// found but will resume. Remember to clear Visited and Worklist
859 /// if the DAG changes. MaxSteps gives a maximum number of nodes to visit
860 /// before giving up. The TopologicalPrune flag signals that positive NodeIds
861 /// are topologically ordered (operands have strictly smaller node ids) and
862 /// the search can be pruned by leveraging this.
863 static bool hasPredecessorHelper(const SDNode *N,
864 SmallPtrSetImpl<const SDNode *> &Visited,
865 SmallVectorImpl<const SDNode *> &Worklist,
866 unsigned int MaxSteps = 0,
867 bool TopologicalPrune = false) {
868 SmallVector<const SDNode *, 8> DeferredNodes;
869 if (Visited.count(N))
870 return true;
871
872 // Node Ids are assigned in three places: as a topological
873 // ordering (> 0), during legalization (results in values set to
874 // 0), and for new nodes (set to -1). If N has a topological id then we
875 // know that all nodes with ids smaller than it cannot be
876 // successors and we need not check them. Filter out all nodes
877 // that can't be matches. We add them to the worklist before exit
878 // in case of multiple calls. Note that during selection the topological id
879 // may be violated if a node's predecessor is selected before it. We mark
880 // this at selection by negating the id of unselected successors and
881 // restricting topological pruning to positive ids.
882
883 int NId = N->getNodeId();
884 // If the Id was invalidated, reconstruct the original NId.
885 if (NId < -1)
886 NId = -(NId + 1);
887
888 bool Found = false;
889 while (!Worklist.empty()) {
890 const SDNode *M = Worklist.pop_back_val();
891 int MId = M->getNodeId();
892 if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
893 (MId > 0) && (MId < NId)) {
894 DeferredNodes.push_back(M);
895 continue;
896 }
897 for (const SDValue &OpV : M->op_values()) {
898 SDNode *Op = OpV.getNode();
899 if (Visited.insert(Op).second)
900 Worklist.push_back(Op);
901 if (Op == N)
902 Found = true;
903 }
904 if (Found)
905 break;
906 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
907 break;
908 }
909 // Push deferred nodes back on worklist.
910 Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
911 // If we bailed early, conservatively return true (treat N as found).
912 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
913 return true;
914 return Found;
915 }
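
A hedged usage sketch (names illustrative): the caller owns Visited and
Worklist so repeated queries over the same region can share work, and a
MaxSteps bound trades precision for compile time:

  #include "llvm/ADT/SmallPtrSet.h"
  #include "llvm/ADT/SmallVector.h"
  #include "llvm/CodeGen/SelectionDAGNodes.h"
  using namespace llvm;

  static bool mayReach(const SDNode *Root, const SDNode *Candidate) {
    SmallPtrSet<const SDNode *, 32> Visited;
    SmallVector<const SDNode *, 16> Worklist;
    Worklist.push_back(Root);
    // Visit at most 8192 nodes; per the contract above, hitting the bound
    // conservatively reports true.
    return SDNode::hasPredecessorHelper(Candidate, Visited, Worklist, 8192);
  }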
916
917 /// Return true if all the users of N are contained in Nodes.
918 /// NOTE: Requires at least one match, but doesn't require them all.
919 static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);
920
921 /// Return the number of values used by this operation.
922 unsigned getNumOperands() const { return NumOperands; }
923
924 /// Return the maximum number of operands that a SDNode can hold.
925 static constexpr size_t getMaxNumOperands() {
926 return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
927 }
928
929 /// Helper method returns the integer value of a ConstantSDNode operand.
930 inline uint64_t getConstantOperandVal(unsigned Num) const;
931
932 /// Helper method returns the APInt of a ConstantSDNode operand.
933 inline const APInt &getConstantOperandAPInt(unsigned Num) const;
934
935 const SDValue &getOperand(unsigned Num) const {
936 assert(Num < NumOperands && "Invalid child # of SDNode!");
937 return OperandList[Num];
938 }
939
940 using op_iterator = SDUse *;
941
942 op_iterator op_begin() const { return OperandList; }
943 op_iterator op_end() const { return OperandList+NumOperands; }
944 ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
945
946 /// Iterator for directly iterating over the operand SDValue's.
947 struct value_op_iterator
948 : iterator_adaptor_base<value_op_iterator, op_iterator,
949 std::random_access_iterator_tag, SDValue,
950 ptrdiff_t, value_op_iterator *,
951 value_op_iterator *> {
952 explicit value_op_iterator(SDUse *U = nullptr)
953 : iterator_adaptor_base(U) {}
954
955 const SDValue &operator*() const { return I->get(); }
956 };
957
958 iterator_range<value_op_iterator> op_values() const {
959 return make_range(value_op_iterator(op_begin()),
960 value_op_iterator(op_end()));
961 }
962
963 SDVTList getVTList() const {
964 SDVTList X = { ValueList, NumValues };
965 return X;
966 }
967
968 /// If this node has a glue operand, return the node
969 /// to which the glue operand points. Otherwise return NULL.
970 SDNode *getGluedNode() const {
971 if (getNumOperands() != 0 &&
972 getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
973 return getOperand(getNumOperands()-1).getNode();
974 return nullptr;
975 }
976
977 /// If this node has a glue value with a user, return
978 /// the user (there is at most one). Otherwise return NULL.
979 SDNode *getGluedUser() const {
980 for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
981 if (UI.getUse().get().getValueType() == MVT::Glue)
982 return *UI;
983 return nullptr;
984 }
985
986 const SDNodeFlags getFlags() const { return Flags; }
987 void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }
988
989 /// Clear any flags in this node that aren't also set in Flags.
990 /// If Flags is not in a defined state then this has no effect.
991 void intersectFlagsWith(const SDNodeFlags Flags);
992
993 /// Return the number of values defined/returned by this operator.
994 unsigned getNumValues() const { return NumValues; }
995
996 /// Return the type of a specified result.
997 EVT getValueType(unsigned ResNo) const {
998 assert(ResNo < NumValues && "Illegal result number!");
999 return ValueList[ResNo];
1000 }
1001
1002 /// Return the type of a specified result as a simple type.
1003 MVT getSimpleValueType(unsigned ResNo) const {
1004 return getValueType(ResNo).getSimpleVT();
1005 }
1006
1007 /// Returns MVT::getSizeInBits(getValueType(ResNo)).
1008 ///
1009 /// If the value type is a scalable vector type, the scalable property will
1010 /// be set and the runtime size will be a positive integer multiple of the
1011 /// base size.
1012 TypeSize getValueSizeInBits(unsigned ResNo) const {
1013 return getValueType(ResNo).getSizeInBits();
1014 }
1015
1016 using value_iterator = const EVT *;
1017
1018 value_iterator value_begin() const { return ValueList; }
1019 value_iterator value_end() const { return ValueList+NumValues; }
1020
1021 /// Return the opcode of this operation for printing.
1022 std::string getOperationName(const SelectionDAG *G = nullptr) const;
1023 static const char* getIndexedModeName(ISD::MemIndexedMode AM);
1024 void print_types(raw_ostream &OS, const SelectionDAG *G) const;
1025 void print_details(raw_ostream &OS, const SelectionDAG *G) const;
1026 void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
1027 void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
1028
1029 /// Print a SelectionDAG node and all children down to
1030 /// the leaves. The given SelectionDAG allows target-specific nodes
1031 /// to be printed in human-readable form. Unlike printr, this will
1032 /// print the whole DAG, including children that appear multiple
1033 /// times.
1034 ///
1035 void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
1036
1037 /// Print a SelectionDAG node and children up to
1038 /// depth "depth." The given SelectionDAG allows target-specific
1039 /// nodes to be printed in human-readable form. Unlike printr, this
1040 /// will print children that appear multiple times wherever they are
1041 /// used.
1042 ///
1043 void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
1044 unsigned depth = 100) const;
1045
1046 /// Dump this node, for debugging.
1047 void dump() const;
1048
1049 /// Dump (recursively) this node and its use-def subgraph.
1050 void dumpr() const;
1051
1052 /// Dump this node, for debugging.
1053 /// The given SelectionDAG allows target-specific nodes to be printed
1054 /// in human-readable form.
1055 void dump(const SelectionDAG *G) const;
1056
1057 /// Dump (recursively) this node and its use-def subgraph.
1058 /// The given SelectionDAG allows target-specific nodes to be printed
1059 /// in human-readable form.
1060 void dumpr(const SelectionDAG *G) const;
1061
1062 /// printrFull to dbgs(). The given SelectionDAG allows
1063 /// target-specific nodes to be printed in human-readable form.
1064 /// Unlike dumpr, this will print the whole DAG, including children
1065 /// that appear multiple times.
1066 void dumprFull(const SelectionDAG *G = nullptr) const;
1067
1068 /// printrWithDepth to dbgs(). The given
1069 /// SelectionDAG allows target-specific nodes to be printed in
1070 /// human-readable form. Unlike dumpr, this will print children
1071 /// that appear multiple times wherever they are used.
1072 ///
1073 void dumprWithDepth(const SelectionDAG *G = nullptr,
1074 unsigned depth = 100) const;
1075
1076 /// Gather unique data for the node.
1077 void Profile(FoldingSetNodeID &ID) const;
1078
1079 /// This method should only be used by the SDUse class.
1080 void addUse(SDUse &U) { U.addToList(&UseList); }
1081
1082protected:
1083 static SDVTList getSDVTList(EVT VT) {
1084 SDVTList Ret = { getValueTypeList(VT), 1 };
1085 return Ret;
1086 }
1087
1088 /// Create an SDNode.
1089 ///
1090 /// SDNodes are created without any operands, and never own the operand
1091 /// storage. To add operands, see SelectionDAG::createOperands.
1092 SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
1093 : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
1094 IROrder(Order), debugLoc(std::move(dl)) {
1095 memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
1096 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
1097 assert(NumValues == VTs.NumVTs &&
1098 "NumValues wasn't wide enough for its operands!");
1099 }
1100
1101 /// Release the operands and set this node to have zero operands.
1102 void DropOperands();
1103};
1104
1105/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
1106/// into SDNode creation functions.
1107/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
1108/// from the original Instruction, and IROrder is the ordinal position of
1109/// the instruction.
1110/// When an SDNode is created after the DAG is being built, both DebugLoc and
1111/// the IROrder are propagated from the original SDNode.
1112/// So SDLoc class provides two constructors besides the default one, one to
1113/// be used by the DAGBuilder, the other to be used by others.
1114class SDLoc {
1115private:
1116 DebugLoc DL;
1117 int IROrder = 0;
1118
1119public:
1120 SDLoc() = default;
1121 SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
1122 SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
1123 SDLoc(const Instruction *I, int Order) : IROrder(Order) {
1124 assert(Order >= 0 && "bad IROrder");
1125 if (I)
1126 DL = I->getDebugLoc();
1127 }
1128
1129 unsigned getIROrder() const { return IROrder; }
1130 const DebugLoc &getDebugLoc() const { return DL; }
1131};
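
A short sketch of the location-propagating constructors (assuming a
SelectionDAG &DAG and an existing SDValue; the helper name is illustrative):

  #include "llvm/CodeGen/SelectionDAG.h"
  using namespace llvm;

  static SDValue doubleIt(SelectionDAG &DAG, SDValue V) {
    SDLoc DL(V);  // inherits both DebugLoc and IROrder from V's node
    return DAG.getNode(ISD::ADD, DL, V.getValueType(), V, V);
  }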
1132
1133// Define inline functions from the SDValue class.
1134
1135inline SDValue::SDValue(SDNode *node, unsigned resno)
1136 : Node(node), ResNo(resno) {
1137 // Explicitly check for !ResNo to avoid use-after-free, because there are
1138 // callers that use SDValue(N, 0) with a deleted N to indicate successful
1139 // combines.
1140 assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&
1141 "Invalid result number for the given node!");
1142 assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.");
1143}
1144
1145inline unsigned SDValue::getOpcode() const {
1146 return Node->getOpcode();
15: Called C++ object pointer is null
1147}
1148
1149inline EVT SDValue::getValueType() const {
1150 return Node->getValueType(ResNo);
1151}
1152
1153inline unsigned SDValue::getNumOperands() const {
1154 return Node->getNumOperands();
1155}
1156
1157inline const SDValue &SDValue::getOperand(unsigned i) const {
1158 return Node->getOperand(i);
1159}
1160
1161inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
1162 return Node->getConstantOperandVal(i);
1163}
1164
1165inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
1166 return Node->getConstantOperandAPInt(i);
1167}
1168
1169inline bool SDValue::isTargetOpcode() const {
1170 return Node->isTargetOpcode();
1171}
1172
1173inline bool SDValue::isTargetMemoryOpcode() const {
1174 return Node->isTargetMemoryOpcode();
1175}
1176
1177inline bool SDValue::isMachineOpcode() const {
1178 return Node->isMachineOpcode();
1179}
1180
1181inline unsigned SDValue::getMachineOpcode() const {
1182 return Node->getMachineOpcode();
1183}
1184
1185inline bool SDValue::isUndef() const {
1186 return Node->isUndef();
1187}
1188
1189inline bool SDValue::use_empty() const {
1190 return !Node->hasAnyUseOfValue(ResNo);
1191}
1192
1193inline bool SDValue::hasOneUse() const {
1194 return Node->hasNUsesOfValue(1, ResNo);
1195}
1196
1197inline const DebugLoc &SDValue::getDebugLoc() const {
1198 return Node->getDebugLoc();
1199}
1200
1201inline void SDValue::dump() const {
1202 return Node->dump();
1203}
1204
1205inline void SDValue::dump(const SelectionDAG *G) const {
1206 return Node->dump(G);
1207}
1208
1209inline void SDValue::dumpr() const {
1210 return Node->dumpr();
1211}
1212
1213inline void SDValue::dumpr(const SelectionDAG *G) const {
1214 return Node->dumpr(G);
1215}
1216
1217// Define inline functions from the SDUse class.
1218
1219inline void SDUse::set(const SDValue &V) {
1220 if (Val.getNode()) removeFromList();
1221 Val = V;
1222 if (V.getNode()) V.getNode()->addUse(*this);
1223}
1224
1225inline void SDUse::setInitial(const SDValue &V) {
1226 Val = V;
1227 V.getNode()->addUse(*this);
1228}
1229
1230inline void SDUse::setNode(SDNode *N) {
1231 if (Val.getNode()) removeFromList();
1232 Val.setNode(N);
1233 if (N) N->addUse(*this);
1234}
1235
1236/// This class is used to form a handle around another node that
1237/// is persistent and is updated across invocations of replaceAllUsesWith on its
1238/// operand. This node should be directly created by end-users and not added to
1239/// the AllNodes list.
1240class HandleSDNode : public SDNode {
1241 SDUse Op;
1242
1243public:
1244 explicit HandleSDNode(SDValue X)
1245 : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
1246 // HandleSDNodes are never inserted into the DAG, so they won't be
1247 // auto-numbered. Use ID 65535 as a sentinel.
1248 PersistentId = 0xffff;
1249
1250 // Manually set up the operand list. This node type is special in that it's
1251 // always stack allocated and SelectionDAG does not manage its operands.
1252 // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
1253 // be so special.
1254 Op.setUser(this);
1255 Op.setInitial(X);
1256 NumOperands = 1;
1257 OperandList = &Op;
1258 }
1259 ~HandleSDNode();
1260
1261 const SDValue &getValue() const { return Op; }
1262};
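
The usual pattern is pinning a value across a transformation that may call
replaceAllUsesWith; a minimal sketch with the transforming step elided:

  #include "llvm/CodeGen/SelectionDAG.h"
  using namespace llvm;

  static SDValue trackAcrossRAUW(SelectionDAG &DAG, SDValue V) {
    HandleSDNode Handle(V);    // stack-allocated; never enters AllNodes
    // ... run a step on DAG that may RAUW V's node ...
    return Handle.getValue();  // follows the replacement, if any
  }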
1263
1264class AddrSpaceCastSDNode : public SDNode {
1265private:
1266 unsigned SrcAddrSpace;
1267 unsigned DestAddrSpace;
1268
1269public:
1270 AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
1271 unsigned SrcAS, unsigned DestAS);
1272
1273 unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
1274 unsigned getDestAddressSpace() const { return DestAddrSpace; }
1275
1276 static bool classof(const SDNode *N) {
1277 return N->getOpcode() == ISD::ADDRSPACECAST;
1278 }
1279};
1280
1281/// This is an abstract virtual class for memory operations.
1282class MemSDNode : public SDNode {
1283private:
1284 // VT of in-memory value.
1285 EVT MemoryVT;
1286
1287protected:
1288 /// Memory reference information.
1289 MachineMemOperand *MMO;
1290
1291public:
1292 MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
1293 EVT memvt, MachineMemOperand *MMO);
1294
1295 bool readMem() const { return MMO->isLoad(); }
1296 bool writeMem() const { return MMO->isStore(); }
1297
1298 /// Returns alignment and volatility of the memory access
1299 unsigned getOriginalAlignment() const {
1300 return MMO->getBaseAlignment();
1301 }
1302 unsigned getAlignment() const {
1303 return MMO->getAlignment();
1304 }
1305
1306 /// Return the SubclassData value, without HasDebugValue. This contains an
1307 /// encoding of the volatile flag, as well as bits used by subclasses. This
1308 /// function should only be used to compute a FoldingSetNodeID value.
1309 /// The HasDebugValue bit is masked out because the CSE map needs to match
1310 /// nodes with debug info against nodes without debug info. The same applies
1311 /// to the IsDivergent bit.
1312 unsigned getRawSubclassData() const {
1313 uint16_t Data;
1314 union {
1315 char RawSDNodeBits[sizeof(uint16_t)];
1316 SDNodeBitfields SDNodeBits;
1317 };
1318 memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
1319 SDNodeBits.HasDebugValue = 0;
1320 SDNodeBits.IsDivergent = false;
1321 memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
1322 return Data;
1323 }
1324
1325 bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
1326 bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
1327 bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
1328 bool isInvariant() const { return MemSDNodeBits.IsInvariant; }
1329
1330 // Returns the offset from the location of the access.
1331 int64_t getSrcValueOffset() const { return MMO->getOffset(); }
1332
1333 /// Returns the AA info that describes the dereference.
1334 AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }
1335
1336 /// Returns the Ranges that describes the dereference.
1337 const MDNode *getRanges() const { return MMO->getRanges(); }
1338
1339 /// Returns the synchronization scope ID for this memory operation.
1340 SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }
1341
1342 /// Return the atomic ordering requirements for this memory operation. For
1343 /// cmpxchg atomic operations, return the atomic ordering requirements when
1344 /// store occurs.
1345 AtomicOrdering getOrdering() const { return MMO->getOrdering(); }
1346
1347 /// Return true if the memory operation ordering is Unordered or higher.
1348 bool isAtomic() const { return MMO->isAtomic(); }
1349
1350 /// Returns true if the memory operation doesn't imply any ordering
1351 /// constraints on surrounding memory operations beyond the normal memory
1352 /// aliasing rules.
1353 bool isUnordered() const { return MMO->isUnordered(); }
1354
1355 /// Returns true if the memory operation is neither atomic nor volatile.
1356 bool isSimple() const { return !isAtomic() && !isVolatile(); }
1357
1358 /// Return the type of the in-memory value.
1359 EVT getMemoryVT() const { return MemoryVT; }
1360
1361 /// Return a MachineMemOperand object describing the memory
1362 /// reference performed by operation.
1363 MachineMemOperand *getMemOperand() const { return MMO; }
1364
1365 const MachinePointerInfo &getPointerInfo() const {
1366 return MMO->getPointerInfo();
1367 }
1368
1369 /// Return the address space for the associated pointer
1370 unsigned getAddressSpace() const {
1371 return getPointerInfo().getAddrSpace();
1372 }
1373
1374 /// Update this MemSDNode's MachineMemOperand information
1375 /// to reflect the alignment of NewMMO, if it has a greater alignment.
1376 /// This must only be used when the new alignment applies to all users of
1377 /// this MachineMemOperand.
1378 void refineAlignment(const MachineMemOperand *NewMMO) {
1379 MMO->refineAlignment(NewMMO);
1380 }
1381
1382 const SDValue &getChain() const { return getOperand(0); }
1383 const SDValue &getBasePtr() const {
1384 return getOperand(getOpcode() == ISD::STORE ? 2 : 1);
1385 }
1386
1387 // Methods to support isa and dyn_cast
1388 static bool classof(const SDNode *N) {
1389 // For some targets, we lower some target intrinsics to a MemIntrinsicNode
1390 // with either an intrinsic or a target opcode.
1391 return N->getOpcode() == ISD::LOAD ||
1392 N->getOpcode() == ISD::STORE ||
1393 N->getOpcode() == ISD::PREFETCH ||
1394 N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1395 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1396 N->getOpcode() == ISD::ATOMIC_SWAP ||
1397 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1398 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1399 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1400 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1401 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1402 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1403 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1404 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1405 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1406 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1407 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1408 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1409 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1410 N->getOpcode() == ISD::ATOMIC_LOAD ||
1411 N->getOpcode() == ISD::ATOMIC_STORE ||
1412 N->getOpcode() == ISD::MLOAD ||
1413 N->getOpcode() == ISD::MSTORE ||
1414 N->getOpcode() == ISD::MGATHER ||
1415 N->getOpcode() == ISD::MSCATTER ||
1416 N->isMemIntrinsic() ||
1417 N->isTargetMemoryOpcode();
1418 }
1419};
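
A minimal sketch of querying these properties (assuming an SDNode *N from a
DAG walk; the predicate name is illustrative):

  #include "llvm/CodeGen/SelectionDAGNodes.h"
  using namespace llvm;

  static bool isSimpleNaturallyAligned(const SDNode *N) {
    const auto *M = dyn_cast<MemSDNode>(N);
    if (!M || !M->isSimple())    // reject atomic and volatile accesses
      return false;
    // Alignment in bytes vs. the in-memory type's store size in bytes.
    return M->getAlignment() >= M->getMemoryVT().getStoreSize();
  }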
1420
1421/// This is an SDNode representing atomic operations.
1422class AtomicSDNode : public MemSDNode {
1423public:
1424 AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
1425 EVT MemVT, MachineMemOperand *MMO)
1426 : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
1427 assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) ||
1428 MMO->isAtomic()) && "then why are we using an AtomicSDNode?");
1429 }
1430
1431 const SDValue &getBasePtr() const { return getOperand(1); }
1432 const SDValue &getVal() const { return getOperand(2); }
1433
1434 /// Returns true if this SDNode represents cmpxchg atomic operation, false
1435 /// otherwise.
1436 bool isCompareAndSwap() const {
1437 unsigned Op = getOpcode();
1438 return Op == ISD::ATOMIC_CMP_SWAP ||
1439 Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
1440 }
1441
1442 /// For cmpxchg atomic operations, return the atomic ordering requirements
1443 /// when store does not occur.
1444 AtomicOrdering getFailureOrdering() const {
1445 assert(isCompareAndSwap() && "Must be cmpxchg operation");
1446 return MMO->getFailureOrdering();
1447 }
1448
1449 // Methods to support isa and dyn_cast
1450 static bool classof(const SDNode *N) {
1451 return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1452 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1453 N->getOpcode() == ISD::ATOMIC_SWAP ||
1454 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1455 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1456 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1457 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1458 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1459 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1460 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1461 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1462 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1463 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1464 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1465 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1466 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1467 N->getOpcode() == ISD::ATOMIC_LOAD ||
1468 N->getOpcode() == ISD::ATOMIC_STORE;
1469 }
1470};
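
A short sketch (assuming an SDNode *N; the name is illustrative):

  #include "llvm/CodeGen/SelectionDAGNodes.h"
  using namespace llvm;

  static bool isCmpXchg(const SDNode *N) {
    if (const auto *A = dyn_cast<AtomicSDNode>(N))
      return A->isCompareAndSwap();  // ATOMIC_CMP_SWAP[_WITH_SUCCESS]
    return false;
  }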
1471
1472/// This SDNode is used for target intrinsics that touch
1473/// memory and need an associated MachineMemOperand. Its opcode may be
1474/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
1475/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
1476class MemIntrinsicSDNode : public MemSDNode {
1477public:
1478 MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
1479 SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
1480 : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
1481 SDNodeBits.IsMemIntrinsic = true;
1482 }
1483
1484 // Methods to support isa and dyn_cast
1485 static bool classof(const SDNode *N) {
1486 // We lower some target intrinsics to their target opcode early, so a
1487 // node with a target opcode can also be of this class.
1488 return N->isMemIntrinsic() ||
1489 N->getOpcode() == ISD::PREFETCH ||
1490 N->isTargetMemoryOpcode();
1491 }
1492};
1493
1494/// This SDNode is used to implement the code generator
1495/// support for the llvm IR shufflevector instruction. It combines elements
1496/// from two input vectors into a new input vector, with the selection and
1497/// ordering of elements determined by an array of integers, referred to as
1498/// the shuffle mask. For input vectors of width N, mask indices of 0..N-1
1499/// refer to elements from the LHS input, and indices N to 2N-1 to the RHS.
1500/// An index of -1 is treated as undef, such that the code generator may put
1501/// any value in the corresponding element of the result.
1502class ShuffleVectorSDNode : public SDNode {
1503 // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
1504 // is freed when the SelectionDAG object is destroyed.
1505 const int *Mask;
1506
1507protected:
1508 friend class SelectionDAG;
1509
1510 ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
1511 : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}
1512
1513public:
1514 ArrayRef<int> getMask() const {
1515 EVT VT = getValueType(0);
1516 return makeArrayRef(Mask, VT.getVectorNumElements());
1517 }
1518
1519 int getMaskElt(unsigned Idx) const {
1520 assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!");
1521 return Mask[Idx];
1522 }
1523
1524 bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
1525
1526 int getSplatIndex() const {
1527 assert(isSplat() && "Cannot get splat index for non-splat!");
1528 EVT VT = getValueType(0);
1529 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1530 if (Mask[i] >= 0)
1531 return Mask[i];
1532
1533 // We can choose any index value here and be correct because all elements
1534 // are undefined. Return 0 for better potential for callers to simplify.
1535 return 0;
1536 }
1537
1538 static bool isSplatMask(const int *Mask, EVT VT);
1539
1540 /// Change values in a shuffle permute mask assuming
1541 /// the two vector operands have swapped position.
1542 static void commuteMask(MutableArrayRef<int> Mask) {
1543 unsigned NumElems = Mask.size();
1544 for (unsigned i = 0; i != NumElems; ++i) {
1545 int idx = Mask[i];
1546 if (idx < 0)
1547 continue;
1548 else if (idx < (int)NumElems)
1549 Mask[i] = idx + NumElems;
1550 else
1551 Mask[i] = idx - NumElems;
1552 }
1553 }
1554
1555 static bool classof(const SDNode *N) {
1556 return N->getOpcode() == ISD::VECTOR_SHUFFLE;
1557 }
1558};
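
A worked example of commuteMask on a 4-element mask (standalone; the values are
made up): in-range indices move to the other operand, -1 (undef) stays put:

  #include "llvm/ADT/SmallVector.h"
  #include "llvm/CodeGen/SelectionDAGNodes.h"
  using namespace llvm;

  static void commuteExample() {
    SmallVector<int, 4> Mask = {0, 5, -1, 2};
    ShuffleVectorSDNode::commuteMask(Mask);  // Mask is now {4, 1, -1, 6}
  }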
1559
1560class ConstantSDNode : public SDNode {
1561 friend class SelectionDAG;
1562
1563 const ConstantInt *Value;
1564
1565 ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT)
1566 : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(),
1567 getSDVTList(VT)),
1568 Value(val) {
1569 ConstantSDNodeBits.IsOpaque = isOpaque;
1570 }
1571
1572public:
1573 const ConstantInt *getConstantIntValue() const { return Value; }
1574 const APInt &getAPIntValue() const { return Value->getValue(); }
1575 uint64_t getZExtValue() const { return Value->getZExtValue(); }
1576 int64_t getSExtValue() const { return Value->getSExtValue(); }
1577 uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) {
1578 return Value->getLimitedValue(Limit);
1579 }
1580
1581 bool isOne() const { return Value->isOne(); }
1582 bool isNullValue() const { return Value->isZero(); }
1583 bool isAllOnesValue() const { return Value->isMinusOne(); }
1584
1585 bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }
1586
1587 static bool classof(const SDNode *N) {
1588 return N->getOpcode() == ISD::Constant ||
1589 N->getOpcode() == ISD::TargetConstant;
1590 }
1591};
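
A typical pattern-matching sketch (assuming an SDValue Op from a combine; the
helper name is illustrative):

  #include "llvm/CodeGen/SelectionDAGNodes.h"
  using namespace llvm;

  static bool isNonOpaquePowerOf2(SDValue Op) {
    if (auto *C = dyn_cast<ConstantSDNode>(Op))  // works via simplify_type
      return !C->isOpaque() && C->getAPIntValue().isPowerOf2();
    return false;
  }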
1592
1593uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
1594 return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
1595}
1596
1597const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const {
1598 return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
1599}
1600
1601class ConstantFPSDNode : public SDNode {
1602 friend class SelectionDAG;
1603
1604 const ConstantFP *Value;
1605
1606 ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT)
1607 : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0,
1608 DebugLoc(), getSDVTList(VT)),
1609 Value(val) {}
1610
1611public:
1612 const APFloat& getValueAPF() const { return Value->getValueAPF(); }
1613 const ConstantFP *getConstantFPValue() const { return Value; }
1614
1615 /// Return true if the value is positive or negative zero.
1616 bool isZero() const { return Value->isZero(); }
1617
1618 /// Return true if the value is a NaN.
1619 bool isNaN() const { return Value->isNaN(); }
1620
1621 /// Return true if the value is an infinity
1622 bool isInfinity() const { return Value->isInfinity(); }
1623
1624 /// Return true if the value is negative.
1625 bool isNegative() const { return Value->isNegative(); }
1626
1627 /// We don't rely on operator== working on double values, as
1628 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
1629 /// As such, this method can be used to do an exact bit-for-bit comparison of
1630 /// two floating point values.
1631
1632 /// We leave the version with the double argument here because it's just so
1633 /// convenient to write "2.0" and the like. Without this function we'd
1634 /// have to duplicate its logic everywhere it's called.
1635 bool isExactlyValue(double V) const {
1636 return Value->getValueAPF().isExactlyValue(V);
1637 }
1638 bool isExactlyValue(const APFloat& V) const;
1639
1640 static bool isValueValidForType(EVT VT, const APFloat& Val);
1641
1642 static bool classof(const SDNode *N) {
1643 return N->getOpcode() == ISD::ConstantFP ||
1644 N->getOpcode() == ISD::TargetConstantFP;
1645 }
1646};
1647
1648/// Returns true if \p V is a constant integer zero.
1649bool isNullConstant(SDValue V);
1650
1651/// Returns true if \p V is an FP constant with a value of positive zero.
1652bool isNullFPConstant(SDValue V);
1653
1654/// Returns true if \p V is an integer constant with all bits set.
1655bool isAllOnesConstant(SDValue V);
1656
1657/// Returns true if \p V is a constant integer one.
1658bool isOneConstant(SDValue V);
1659
1660/// Return the non-bitcasted source operand of \p V if it exists.
1661/// If \p V is not a bitcasted value, it is returned as-is.
1662SDValue peekThroughBitcasts(SDValue V);
1663
1664/// Return the non-bitcasted and one-use source operand of \p V if it exists.
1665/// If \p V is not a bitcasted one-use value, it is returned as-is.
1666SDValue peekThroughOneUseBitcasts(SDValue V);
1667
1668/// Return the non-extracted vector source operand of \p V if it exists.
1669/// If \p V is not an extracted subvector, it is returned as-is.
1670SDValue peekThroughExtractSubvectors(SDValue V);
1671
1672/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
1673/// constant is canonicalized to be operand 1.
1674bool isBitwiseNot(SDValue V, bool AllowUndefs = false);
1675
1676/// Returns the SDNode if it is a constant splat BuildVector or constant int.
1677ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false,
1678 bool AllowTruncation = false);
1679
1680/// Returns the SDNode if it is a demanded constant splat BuildVector or
1681/// constant int.
1682ConstantSDNode *isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
1683 bool AllowUndefs = false,
1684 bool AllowTruncation = false);
1685
1686/// Returns the SDNode if it is a constant splat BuildVector or constant float.
1687ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false);
1688
1689/// Returns the SDNode if it is a demanded constant splat BuildVector or
1690/// constant float.
1691ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, const APInt &DemandedElts,
1692 bool AllowUndefs = false);
1693
1694/// Return true if the value is a constant 0 integer or a splatted vector of
1695/// a constant 0 integer (with no undefs by default).
1696/// Build vector implicit truncation is not an issue for null values.
1697bool isNullOrNullSplat(SDValue V, bool AllowUndefs = false);
1698
1699/// Return true if the value is a constant 1 integer or a splatted vector of a
1700/// constant 1 integer (with no undefs).
1701/// Does not permit build vector implicit truncation.
1702bool isOneOrOneSplat(SDValue V);
1703
1704/// Return true if the value is a constant -1 integer or a splatted vector of a
1705/// constant -1 integer (with no undefs).
1706/// Does not permit build vector implicit truncation.
1707bool isAllOnesOrAllOnesSplat(SDValue V);
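
A hedged sketch combining several of these helpers in the usual DAG-combine
style (assuming an SDValue V; the predicate name is illustrative):

  #include "llvm/CodeGen/SelectionDAGNodes.h"
  using namespace llvm;

  static bool isNotOfNullSplat(SDValue V) {
    if (!isBitwiseNot(V))           // matches (xor X, -1) forms
      return false;
    SDValue Src = peekThroughBitcasts(V.getOperand(0));
    return isNullOrNullSplat(Src);  // zero or splatted-zero vector
  }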
1708
1709class GlobalAddressSDNode : public SDNode {
1710 friend class SelectionDAG;
1711
1712 const GlobalValue *TheGlobal;
1713 int64_t Offset;
1714 unsigned TargetFlags;
1715
1716 GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
1717 const GlobalValue *GA, EVT VT, int64_t o,
1718 unsigned TF);
1719
1720public:
1721 const GlobalValue *getGlobal() const { return TheGlobal; }
1722 int64_t getOffset() const { return Offset; }
1723 unsigned getTargetFlags() const { return TargetFlags; }
1724 // Return the address space this GlobalAddress belongs to.
1725 unsigned getAddressSpace() const;
1726
1727 static bool classof(const SDNode *N) {
1728 return N->getOpcode() == ISD::GlobalAddress ||
1729 N->getOpcode() == ISD::TargetGlobalAddress ||
1730 N->getOpcode() == ISD::GlobalTLSAddress ||
1731 N->getOpcode() == ISD::TargetGlobalTLSAddress;
1732 }
1733};
1734
1735class FrameIndexSDNode : public SDNode {
1736 friend class SelectionDAG;
1737
1738 int FI;
1739
1740 FrameIndexSDNode(int fi, EVT VT, bool isTarg)
1741 : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
1742 0, DebugLoc(), getSDVTList(VT)), FI(fi) {
1743 }
1744
1745public:
1746 int getIndex() const { return FI; }
1747
1748 static bool classof(const SDNode *N) {
1749 return N->getOpcode() == ISD::FrameIndex ||
1750 N->getOpcode() == ISD::TargetFrameIndex;
1751 }
1752};
1753
1754/// This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate
1755/// the offset and size that are started/ended in the underlying FrameIndex.
1756class LifetimeSDNode : public SDNode {
1757 friend class SelectionDAG;
1758 int64_t Size;
1759 int64_t Offset; // -1 if offset is unknown.
1760
1761 LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
1762 SDVTList VTs, int64_t Size, int64_t Offset)
1763 : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {}
1764public:
1765 int64_t getFrameIndex() const {
1766 return cast<FrameIndexSDNode>(getOperand(1))->getIndex();
1767 }
1768
1769 bool hasOffset() const { return Offset >= 0; }
1770 int64_t getOffset() const {
1771    assert(hasOffset() && "offset is unknown");
1772 return Offset;
1773 }
1774 int64_t getSize() const {
1775    assert(hasOffset() && "offset is unknown");
1776 return Size;
1777 }
1778
1779 // Methods to support isa and dyn_cast
1780 static bool classof(const SDNode *N) {
1781 return N->getOpcode() == ISD::LIFETIME_START ||
1782 N->getOpcode() == ISD::LIFETIME_END;
1783 }
1784};
1785
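A minimal sketch (editorial) of the calling contract the asserts above imply, namely that Offset may be unknown (-1) and hasOffset() must be checked first; N is an assumed SDNode*:

if (const auto *LN = dyn_cast<LifetimeSDNode>(N))
  if (LN->hasOffset()) // Offset is -1 (unknown) otherwise
    dbgs() << "FI#" << LN->getFrameIndex() << " bytes [" << LN->getOffset()
           << ", " << LN->getOffset() + LN->getSize() << ")\n";
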
1786class JumpTableSDNode : public SDNode {
1787 friend class SelectionDAG;
1788
1789 int JTI;
1790 unsigned TargetFlags;
1791
1792 JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned TF)
1793 : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
1794 0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
1795 }
1796
1797public:
1798 int getIndex() const { return JTI; }
1799 unsigned getTargetFlags() const { return TargetFlags; }
1800
1801 static bool classof(const SDNode *N) {
1802 return N->getOpcode() == ISD::JumpTable ||
1803 N->getOpcode() == ISD::TargetJumpTable;
1804 }
1805};
1806
1807class ConstantPoolSDNode : public SDNode {
1808 friend class SelectionDAG;
1809
1810 union {
1811 const Constant *ConstVal;
1812 MachineConstantPoolValue *MachineCPVal;
1813 } Val;
1814 int Offset; // It's a MachineConstantPoolValue if top bit is set.
1815 unsigned Alignment; // Minimum alignment requirement of CP (not log2 value).
1816 unsigned TargetFlags;
1817
1818 ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
1819 unsigned Align, unsigned TF)
1820 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1821 DebugLoc(), getSDVTList(VT)), Offset(o), Alignment(Align),
1822 TargetFlags(TF) {
1823    assert(Offset >= 0 && "Offset is too large");
1824 Val.ConstVal = c;
1825 }
1826
1827 ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v,
1828 EVT VT, int o, unsigned Align, unsigned TF)
1829 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1830 DebugLoc(), getSDVTList(VT)), Offset(o), Alignment(Align),
1831 TargetFlags(TF) {
1832    assert(Offset >= 0 && "Offset is too large");
1833 Val.MachineCPVal = v;
1834    Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1);
1835 }
1836
1837public:
1838 bool isMachineConstantPoolEntry() const {
1839 return Offset < 0;
1840 }
1841
1842 const Constant *getConstVal() const {
1843    assert(!isMachineConstantPoolEntry() && "Wrong constantpool type");
1844 return Val.ConstVal;
1845 }
1846
1847 MachineConstantPoolValue *getMachineCPVal() const {
1848    assert(isMachineConstantPoolEntry() && "Wrong constantpool type");
1849 return Val.MachineCPVal;
1850 }
1851
1852 int getOffset() const {
1853    return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
1854 }
1855
1856 // Return the alignment of this constant pool object, which is either 0 (for
1857 // default alignment) or the desired value.
1858 unsigned getAlignment() const { return Alignment; }
1859 unsigned getTargetFlags() const { return TargetFlags; }
1860
1861 Type *getType() const;
1862
1863 static bool classof(const SDNode *N) {
1864 return N->getOpcode() == ISD::ConstantPool ||
1865 N->getOpcode() == ISD::TargetConstantPool;
1866 }
1867};
1868
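The Offset field doubles as a discriminator: the second constructor sets the sign bit to mark a MachineConstantPoolValue entry, which is why isMachineConstantPoolEntry() tests Offset < 0 and getOffset() masks that bit back off. A minimal standalone sketch (editorial) of the encoding:

#include <cassert>
#include <climits>

constexpr unsigned TagBit = 1u << (sizeof(unsigned) * CHAR_BIT - 1);

int tagAsMachineCPV(int Offset) { // mirrors the second constructor above
  assert(Offset >= 0 && "Offset is too large");
  return Offset | TagBit;
}
// For Offset = 12: the tagged value is negative (isMachineConstantPoolEntry),
// and (tagged & ~TagBit) == 12 recovers the plain offset.
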
1869/// Completely target-dependent object reference.
1870class TargetIndexSDNode : public SDNode {
1871 friend class SelectionDAG;
1872
1873 unsigned TargetFlags;
1874 int Index;
1875 int64_t Offset;
1876
1877public:
1878 TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned TF)
1879 : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
1880 TargetFlags(TF), Index(Idx), Offset(Ofs) {}
1881
1882 unsigned getTargetFlags() const { return TargetFlags; }
1883 int getIndex() const { return Index; }
1884 int64_t getOffset() const { return Offset; }
1885
1886 static bool classof(const SDNode *N) {
1887 return N->getOpcode() == ISD::TargetIndex;
1888 }
1889};
1890
1891class BasicBlockSDNode : public SDNode {
1892 friend class SelectionDAG;
1893
1894 MachineBasicBlock *MBB;
1895
1896 /// Debug info is meaningful and potentially useful here, but we create
1897 /// blocks out of order when they're jumped to, which makes it a bit
1898 /// harder. Let's see if we need it first.
1899 explicit BasicBlockSDNode(MachineBasicBlock *mbb)
1900 : SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb)
1901 {}
1902
1903public:
1904 MachineBasicBlock *getBasicBlock() const { return MBB; }
1905
1906 static bool classof(const SDNode *N) {
1907 return N->getOpcode() == ISD::BasicBlock;
1908 }
1909};
1910
1911/// A "pseudo-class" with methods for operating on BUILD_VECTORs.
1912class BuildVectorSDNode : public SDNode {
1913public:
1914 // These are constructed as SDNodes and then cast to BuildVectorSDNodes.
1915 explicit BuildVectorSDNode() = delete;
1916
1917 /// Check if this is a constant splat, and if so, find the
1918 /// smallest element size that splats the vector. If MinSplatBits is
1919 /// nonzero, the element size must be at least that large. Note that the
1920 /// splat element may be the entire vector (i.e., a one element vector).
1921 /// Returns the splat element value in SplatValue. Any undefined bits in
1922 /// that value are zero, and the corresponding bits in the SplatUndef mask
1923 /// are set. The SplatBitSize value is set to the splat element size in
1924 /// bits. HasAnyUndefs is set to true if any bits in the vector are
1925 /// undefined. isBigEndian describes the endianness of the target.
1926 bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
1927 unsigned &SplatBitSize, bool &HasAnyUndefs,
1928 unsigned MinSplatBits = 0,
1929 bool isBigEndian = false) const;
1930
1931 /// Returns the demanded splatted value or a null value if this is not a
1932 /// splat.
1933 ///
1934 /// The DemandedElts mask indicates the elements that must be in the splat.
1935 /// If passed a non-null UndefElements bitvector, it will resize it to match
1936 /// the vector width and set the bits where elements are undef.
1937 SDValue getSplatValue(const APInt &DemandedElts,
1938 BitVector *UndefElements = nullptr) const;
1939
1940 /// Returns the splatted value or a null value if this is not a splat.
1941 ///
1942 /// If passed a non-null UndefElements bitvector, it will resize it to match
1943 /// the vector width and set the bits where elements are undef.
1944 SDValue getSplatValue(BitVector *UndefElements = nullptr) const;
1945
1946 /// Returns the demanded splatted constant or null if this is not a constant
1947 /// splat.
1948 ///
1949 /// The DemandedElts mask indicates the elements that must be in the splat.
1950 /// If passed a non-null UndefElements bitvector, it will resize it to match
1951 /// the vector width and set the bits where elements are undef.
1952 ConstantSDNode *
1953 getConstantSplatNode(const APInt &DemandedElts,
1954 BitVector *UndefElements = nullptr) const;
1955
1956 /// Returns the splatted constant or null if this is not a constant
1957 /// splat.
1958 ///
1959 /// If passed a non-null UndefElements bitvector, it will resize it to match
1960 /// the vector width and set the bits where elements are undef.
1961 ConstantSDNode *
1962 getConstantSplatNode(BitVector *UndefElements = nullptr) const;
1963
1964 /// Returns the demanded splatted constant FP or null if this is not a
1965 /// constant FP splat.
1966 ///
1967 /// The DemandedElts mask indicates the elements that must be in the splat.
1968 /// If passed a non-null UndefElements bitvector, it will resize it to match
1969 /// the vector width and set the bits where elements are undef.
1970 ConstantFPSDNode *
1971 getConstantFPSplatNode(const APInt &DemandedElts,
1972 BitVector *UndefElements = nullptr) const;
1973
1974 /// Returns the splatted constant FP or null if this is not a constant
1975 /// FP splat.
1976 ///
1977 /// If passed a non-null UndefElements bitvector, it will resize it to match
1978 /// the vector width and set the bits where elements are undef.
1979 ConstantFPSDNode *
1980 getConstantFPSplatNode(BitVector *UndefElements = nullptr) const;
1981
1982 /// If this is a constant FP splat and the splatted constant FP is an
1983  /// exact power of 2, return the log base 2 integer value. Otherwise,
1984 /// return -1.
1985 ///
1986 /// The BitWidth specifies the necessary bit precision.
1987 int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
1988 uint32_t BitWidth) const;
1989
1990 bool isConstant() const;
1991
1992 static bool classof(const SDNode *N) {
1993 return N->getOpcode() == ISD::BUILD_VECTOR;
1994 }
1995};
1996
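A minimal sketch (editorial) of the isConstantSplat query described above; BV is an assumed BuildVectorSDNode* and the 8-bit width is an arbitrary choice:

APInt SplatValue, SplatUndef;
unsigned SplatBitSize;
bool HasAnyUndefs;
if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
                        /*MinSplatBits=*/8, /*isBigEndian=*/false) &&
    !HasAnyUndefs && SplatBitSize == 8) {
  uint8_t Imm = SplatValue.getZExtValue(); // the splatted byte, undef bits zero
  (void)Imm;
}
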
1997/// An SDNode that holds an arbitrary LLVM IR Value. This is
1998/// used when the SelectionDAG needs to make a simple reference to something
1999/// in the LLVM IR representation.
2000///
2001class SrcValueSDNode : public SDNode {
2002 friend class SelectionDAG;
2003
2004 const Value *V;
2005
2006 /// Create a SrcValue for a general value.
2007 explicit SrcValueSDNode(const Value *v)
2008 : SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}
2009
2010public:
2011 /// Return the contained Value.
2012 const Value *getValue() const { return V; }
2013
2014 static bool classof(const SDNode *N) {
2015 return N->getOpcode() == ISD::SRCVALUE;
2016 }
2017};
2018
2019class MDNodeSDNode : public SDNode {
2020 friend class SelectionDAG;
2021
2022 const MDNode *MD;
2023
2024 explicit MDNodeSDNode(const MDNode *md)
2025 : SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md)
2026 {}
2027
2028public:
2029 const MDNode *getMD() const { return MD; }
2030
2031 static bool classof(const SDNode *N) {
2032 return N->getOpcode() == ISD::MDNODE_SDNODE;
2033 }
2034};
2035
2036class RegisterSDNode : public SDNode {
2037 friend class SelectionDAG;
2038
2039 unsigned Reg;
2040
2041 RegisterSDNode(unsigned reg, EVT VT)
2042 : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {}
2043
2044public:
2045 unsigned getReg() const { return Reg; }
2046
2047 static bool classof(const SDNode *N) {
2048 return N->getOpcode() == ISD::Register;
2049 }
2050};
2051
2052class RegisterMaskSDNode : public SDNode {
2053 friend class SelectionDAG;
2054
2055 // The memory for RegMask is not owned by the node.
2056 const uint32_t *RegMask;
2057
2058 RegisterMaskSDNode(const uint32_t *mask)
2059 : SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)),
2060 RegMask(mask) {}
2061
2062public:
2063 const uint32_t *getRegMask() const { return RegMask; }
2064
2065 static bool classof(const SDNode *N) {
2066 return N->getOpcode() == ISD::RegisterMask;
2067 }
2068};
2069
2070class BlockAddressSDNode : public SDNode {
2071 friend class SelectionDAG;
2072
2073 const BlockAddress *BA;
2074 int64_t Offset;
2075 unsigned TargetFlags;
2076
2077 BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
2078 int64_t o, unsigned Flags)
2079 : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
2080 BA(ba), Offset(o), TargetFlags(Flags) {}
2081
2082public:
2083 const BlockAddress *getBlockAddress() const { return BA; }
2084 int64_t getOffset() const { return Offset; }
2085 unsigned getTargetFlags() const { return TargetFlags; }
2086
2087 static bool classof(const SDNode *N) {
2088 return N->getOpcode() == ISD::BlockAddress ||
2089 N->getOpcode() == ISD::TargetBlockAddress;
2090 }
2091};
2092
2093class LabelSDNode : public SDNode {
2094 friend class SelectionDAG;
2095
2096 MCSymbol *Label;
2097
2098 LabelSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, MCSymbol *L)
2099 : SDNode(Opcode, Order, dl, getSDVTList(MVT::Other)), Label(L) {
2100    assert(LabelSDNode::classof(this) && "not a label opcode");
2101 }
2102
2103public:
2104 MCSymbol *getLabel() const { return Label; }
2105
2106 static bool classof(const SDNode *N) {
2107 return N->getOpcode() == ISD::EH_LABEL ||
2108 N->getOpcode() == ISD::ANNOTATION_LABEL;
2109 }
2110};
2111
2112class ExternalSymbolSDNode : public SDNode {
2113 friend class SelectionDAG;
2114
2115 const char *Symbol;
2116 unsigned TargetFlags;
2117
2118 ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned TF, EVT VT)
2119 : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol, 0,
2120 DebugLoc(), getSDVTList(VT)),
2121 Symbol(Sym), TargetFlags(TF) {}
2122
2123public:
2124 const char *getSymbol() const { return Symbol; }
2125 unsigned getTargetFlags() const { return TargetFlags; }
2126
2127 static bool classof(const SDNode *N) {
2128 return N->getOpcode() == ISD::ExternalSymbol ||
2129 N->getOpcode() == ISD::TargetExternalSymbol;
2130 }
2131};
2132
2133class MCSymbolSDNode : public SDNode {
2134 friend class SelectionDAG;
2135
2136 MCSymbol *Symbol;
2137
2138 MCSymbolSDNode(MCSymbol *Symbol, EVT VT)
2139 : SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {}
2140
2141public:
2142 MCSymbol *getMCSymbol() const { return Symbol; }
2143
2144 static bool classof(const SDNode *N) {
2145 return N->getOpcode() == ISD::MCSymbol;
2146 }
2147};
2148
2149class CondCodeSDNode : public SDNode {
2150 friend class SelectionDAG;
2151
2152 ISD::CondCode Condition;
2153
2154 explicit CondCodeSDNode(ISD::CondCode Cond)
2155 : SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2156 Condition(Cond) {}
2157
2158public:
2159 ISD::CondCode get() const { return Condition; }
2160
2161 static bool classof(const SDNode *N) {
2162 return N->getOpcode() == ISD::CONDCODE;
2163 }
2164};
2165
2166/// This class is used to represent EVTs, which are used
2167/// to parameterize some operations.
2168class VTSDNode : public SDNode {
2169 friend class SelectionDAG;
2170
2171 EVT ValueType;
2172
2173 explicit VTSDNode(EVT VT)
2174 : SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2175 ValueType(VT) {}
2176
2177public:
2178 EVT getVT() const { return ValueType; }
2179
2180 static bool classof(const SDNode *N) {
2181 return N->getOpcode() == ISD::VALUETYPE;
2182 }
2183};
2184
2185/// Base class for LoadSDNode and StoreSDNode
2186class LSBaseSDNode : public MemSDNode {
2187public:
2188 LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
2189 SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
2190 MachineMemOperand *MMO)
2191 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2192 LSBaseSDNodeBits.AddressingMode = AM;
2193    assert(getAddressingMode() == AM && "Value truncated");
2194 }
2195
2196 const SDValue &getOffset() const {
2197 return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
2198 }
2199
2200 /// Return the addressing mode for this load or store:
2201 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2202 ISD::MemIndexedMode getAddressingMode() const {
2203 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2204 }
2205
2206 /// Return true if this is a pre/post inc/dec load/store.
2207 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2208
2209 /// Return true if this is NOT a pre/post inc/dec load/store.
2210 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2211
2212 static bool classof(const SDNode *N) {
2213 return N->getOpcode() == ISD::LOAD ||
2214 N->getOpcode() == ISD::STORE;
2215 }
2216};
2217
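A minimal sketch (editorial) of the common guard on the addressing mode above, as it might appear inside a combine that only handles the unindexed form; LSN is an assumed LSBaseSDNode*:

if (!LSN->isUnindexed())
  return SDValue(); // pre/post inc/dec forms are not handled here
// For unindexed ops the offset operand is undef by construction.
assert(LSN->getOffset().isUndef() && "unexpected offset on unindexed op");
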
2218/// This class is used to represent ISD::LOAD nodes.
2219class LoadSDNode : public LSBaseSDNode {
2220 friend class SelectionDAG;
2221
2222 LoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2223 ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
2224 MachineMemOperand *MMO)
2225 : LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) {
2226 LoadSDNodeBits.ExtTy = ETy;
2227    assert(readMem() && "Load MachineMemOperand is not a load!");
2228    assert(!writeMem() && "Load MachineMemOperand is a store!");
2229 }
2230
2231public:
2232 /// Return whether this is a plain node,
2233 /// or one of the varieties of value-extending loads.
2234 ISD::LoadExtType getExtensionType() const {
2235 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2236 }
2237
2238 const SDValue &getBasePtr() const { return getOperand(1); }
2239 const SDValue &getOffset() const { return getOperand(2); }
2240
2241 static bool classof(const SDNode *N) {
2242 return N->getOpcode() == ISD::LOAD;
2243 }
2244};
2245
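A minimal sketch (editorial) of dispatching on getExtensionType(); LD is an assumed LoadSDNode*:

switch (LD->getExtensionType()) {
case ISD::NON_EXTLOAD: break; // plain load, no extension
case ISD::EXTLOAD:     break; // target may extend with anything
case ISD::SEXTLOAD:    break; // high bits sign-extended from MemVT
case ISD::ZEXTLOAD:    break; // high bits zero-extended from MemVT
}
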
2246/// This class is used to represent ISD::STORE nodes.
2247class StoreSDNode : public LSBaseSDNode {
2248 friend class SelectionDAG;
2249
2250 StoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2251 ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
2252 MachineMemOperand *MMO)
2253 : LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) {
2254 StoreSDNodeBits.IsTruncating = isTrunc;
2255    assert(!readMem() && "Store MachineMemOperand is a load!");
2256    assert(writeMem() && "Store MachineMemOperand is not a store!");
2257 }
2258
2259public:
2260 /// Return true if the op does a truncation before store.
2261 /// For integers this is the same as doing a TRUNCATE and storing the result.
2262 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2263 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2264 void setTruncatingStore(bool Truncating) {
2265 StoreSDNodeBits.IsTruncating = Truncating;
2266 }
2267
2268 const SDValue &getValue() const { return getOperand(1); }
2269 const SDValue &getBasePtr() const { return getOperand(2); }
2270 const SDValue &getOffset() const { return getOperand(3); }
2271
2272 static bool classof(const SDNode *N) {
2273 return N->getOpcode() == ISD::STORE;
2274 }
2275};
2276
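A minimal sketch (editorial) of what isTruncatingStore() implies; ST is an assumed StoreSDNode*:

if (ST->isTruncatingStore()) {
  EVT FromVT = ST->getValue().getValueType(); // wider in-register type
  EVT ToVT = ST->getMemoryVT();               // narrower type in memory
  (void)FromVT; (void)ToVT; // equivalent to TRUNCATE/FP_ROUND, then store
}
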
2277/// This base class is used to represent MLOAD and MSTORE nodes
2278class MaskedLoadStoreSDNode : public MemSDNode {
2279public:
2280 friend class SelectionDAG;
2281
2282 MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order,
2283 const DebugLoc &dl, SDVTList VTs,
2284 ISD::MemIndexedMode AM, EVT MemVT,
2285 MachineMemOperand *MMO)
2286 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2287 LSBaseSDNodeBits.AddressingMode = AM;
2288    assert(getAddressingMode() == AM && "Value truncated");
2289 }
2290
2291 // MaskedLoadSDNode (Chain, ptr, offset, mask, passthru)
2292 // MaskedStoreSDNode (Chain, data, ptr, offset, mask)
2293 // Mask is a vector of i1 elements
2294 const SDValue &getBasePtr() const {
2295 return getOperand(getOpcode() == ISD::MLOAD ? 1 : 2);
2296 }
2297 const SDValue &getOffset() const {
2298 return getOperand(getOpcode() == ISD::MLOAD ? 2 : 3);
2299 }
2300 const SDValue &getMask() const {
2301 return getOperand(getOpcode() == ISD::MLOAD ? 3 : 4);
2302 }
2303
2304 /// Return the addressing mode for this load or store:
2305 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2306 ISD::MemIndexedMode getAddressingMode() const {
2307 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2308 }
2309
2310 /// Return true if this is a pre/post inc/dec load/store.
2311 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2312
2313 /// Return true if this is NOT a pre/post inc/dec load/store.
2314 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2315
2316 static bool classof(const SDNode *N) {
2317 return N->getOpcode() == ISD::MLOAD ||
2318 N->getOpcode() == ISD::MSTORE;
2319 }
2320};
2321
2322/// This class is used to represent an MLOAD node
2323class MaskedLoadSDNode : public MaskedLoadStoreSDNode {
2324public:
2325 friend class SelectionDAG;
2326
2327 MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2328 ISD::MemIndexedMode AM, ISD::LoadExtType ETy,
2329 bool IsExpanding, EVT MemVT, MachineMemOperand *MMO)
2330 : MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, VTs, AM, MemVT, MMO) {
2331 LoadSDNodeBits.ExtTy = ETy;
2332 LoadSDNodeBits.IsExpanding = IsExpanding;
2333 }
2334
2335 ISD::LoadExtType getExtensionType() const {
2336 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2337 }
2338
2339 const SDValue &getBasePtr() const { return getOperand(1); }
2340 const SDValue &getOffset() const { return getOperand(2); }
2341 const SDValue &getMask() const { return getOperand(3); }
2342 const SDValue &getPassThru() const { return getOperand(4); }
2343
2344 static bool classof(const SDNode *N) {
2345 return N->getOpcode() == ISD::MLOAD;
2346 }
2347
2348 bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
2349};
2350
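A minimal sketch (editorial) tying the accessors to the operand layout in the comments above; MLD is an assumed MaskedLoadSDNode*:

SDValue Ptr = MLD->getBasePtr();       // operand 1
SDValue Mask = MLD->getMask();         // operand 3, vector of i1
SDValue PassThru = MLD->getPassThru(); // operand 4, fills masked-off lanes
if (MLD->isExpandingLoad()) {
  // active elements are read contiguously and expanded into their lanes
}
(void)Ptr; (void)Mask; (void)PassThru;
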
2351/// This class is used to represent an MSTORE node
2352class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
2353public:
2354 friend class SelectionDAG;
2355
2356 MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2357 ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing,
2358 EVT MemVT, MachineMemOperand *MMO)
2359 : MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, AM, MemVT, MMO) {
2360 StoreSDNodeBits.IsTruncating = isTrunc;
2361 StoreSDNodeBits.IsCompressing = isCompressing;
2362 }
2363
2364 /// Return true if the op does a truncation before store.
2365 /// For integers this is the same as doing a TRUNCATE and storing the result.
2366 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2367 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2368
2369 /// Returns true if the op does a compression to the vector before storing.
2370 /// The node contiguously stores the active elements (integers or floats)
2371 /// in src (those with their respective bit set in writemask k) to unaligned
2372 /// memory at base_addr.
2373 bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
2374
2375 const SDValue &getValue() const { return getOperand(1); }
2376 const SDValue &getBasePtr() const { return getOperand(2); }
2377 const SDValue &getOffset() const { return getOperand(3); }
2378 const SDValue &getMask() const { return getOperand(4); }
2379
2380 static bool classof(const SDNode *N) {
2381 return N->getOpcode() == ISD::MSTORE;
2382 }
2383};
2384
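A minimal sketch (editorial) contrasting the two store flavours described above; MST is an assumed MaskedStoreSDNode*:

if (MST->isCompressingStore()) {
  // active lanes of getValue() are packed and written contiguously,
  // starting at getBasePtr(), possibly unaligned
} else if (MST->isTruncatingStore()) {
  // each active lane is truncated to the memory element type before storing
}
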
2385/// This is a base class used to represent
2386/// MGATHER and MSCATTER nodes
2387///
2388class MaskedGatherScatterSDNode : public MemSDNode {
2389public:
2390 friend class SelectionDAG;
2391
2392 MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
2393 const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2394 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2395 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2396 LSBaseSDNodeBits.AddressingMode = IndexType;
2397    assert(getIndexType() == IndexType && "Value truncated");
2398 }
2399
2400 /// How is Index applied to BasePtr when computing addresses.
2401 ISD::MemIndexType getIndexType() const {
2402 return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
2403 }
2404 bool isIndexScaled() const {
2405 return (getIndexType() == ISD::SIGNED_SCALED) ||
2406 (getIndexType() == ISD::UNSIGNED_SCALED);
2407 }
2408 bool isIndexSigned() const {
2409 return (getIndexType() == ISD::SIGNED_SCALED) ||
2410 (getIndexType() == ISD::SIGNED_UNSCALED);
2411 }
2412
2413  // In both nodes the mask is operand 2 and the base address is operand 3:
2414 // MaskedGatherSDNode (Chain, passthru, mask, base, index, scale)
2415 // MaskedScatterSDNode (Chain, value, mask, base, index, scale)
2416 // Mask is a vector of i1 elements
2417 const SDValue &getBasePtr() const { return getOperand(3); }
2418 const SDValue &getIndex() const { return getOperand(4); }
2419 const SDValue &getMask() const { return getOperand(2); }
2420 const SDValue &getScale() const { return getOperand(5); }
2421
2422 static bool classof(const SDNode *N) {
2423 return N->getOpcode() == ISD::MGATHER ||
2424 N->getOpcode() == ISD::MSCATTER;
2425 }
2426};
2427
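A minimal sketch (editorial) of the per-lane address computation the index-type queries describe; MGS is an assumed MaskedGatherScatterSDNode*:

// Address of lane i, conceptually:
//   Base + extend(Index[i]) * (isIndexScaled() ? Scale : 1)
// where extend() is sign- or zero-extension per isIndexSigned().
SDValue Base = MGS->getBasePtr();  // operand 3
SDValue Index = MGS->getIndex();   // operand 4
SDValue Scale = MGS->getScale();   // operand 5
bool Signed = MGS->isIndexSigned();
(void)Base; (void)Index; (void)Scale; (void)Signed;
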
2428/// This class is used to represent an MGATHER node
2429///
2430class MaskedGatherSDNode : public MaskedGatherScatterSDNode {