Bug Summary

File: llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1153, column 10
Called C++ object pointer is null
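
The checker fires when a member function is called through an object pointer that may be null on some path. A minimal sketch of the defect pattern (hypothetical code for illustration, not the LLVM source involved here):

  #include <cstdio>

  struct Node {
    int value() const { return 42; }
  };

  // May return null; the analyzer tracks this possibility.
  Node *lookup(bool found) {
    return found ? new Node() : nullptr;
  }

  int main() {
    Node *N = lookup(false);
    // Calling through N without a null check is the
    // "Called C++ object pointer is null" defect.
    std::printf("%d\n", N->value());
  }

The trace below follows a path through HexagonISelLowering.cpp that reaches the flagged call in SelectionDAGNodes.h, line 1153.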

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name HexagonISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-11/lib/clang/11.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/lib/Target/Hexagon -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/Hexagon -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/include -I /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-11/lib/clang/11.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/build-llvm/lib/Target/Hexagon -fdebug-prefix-map=/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347=. -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-03-09-184146-41876-1 -x c++ /build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp

/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp

1//===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the interfaces that Hexagon uses to lower LLVM code
10// into a selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "HexagonISelLowering.h"
15#include "Hexagon.h"
16#include "HexagonMachineFunctionInfo.h"
17#include "HexagonRegisterInfo.h"
18#include "HexagonSubtarget.h"
19#include "HexagonTargetMachine.h"
20#include "HexagonTargetObjectFile.h"
21#include "llvm/ADT/APInt.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringSwitch.h"
25#include "llvm/CodeGen/CallingConvLower.h"
26#include "llvm/CodeGen/MachineFrameInfo.h"
27#include "llvm/CodeGen/MachineFunction.h"
28#include "llvm/CodeGen/MachineMemOperand.h"
29#include "llvm/CodeGen/MachineRegisterInfo.h"
30#include "llvm/CodeGen/RuntimeLibcalls.h"
31#include "llvm/CodeGen/SelectionDAG.h"
32#include "llvm/CodeGen/TargetCallingConv.h"
33#include "llvm/CodeGen/ValueTypes.h"
34#include "llvm/IR/BasicBlock.h"
35#include "llvm/IR/CallingConv.h"
36#include "llvm/IR/DataLayout.h"
37#include "llvm/IR/DerivedTypes.h"
38#include "llvm/IR/Function.h"
39#include "llvm/IR/GlobalValue.h"
40#include "llvm/IR/InlineAsm.h"
41#include "llvm/IR/Instructions.h"
42#include "llvm/IR/IntrinsicInst.h"
43#include "llvm/IR/Intrinsics.h"
44#include "llvm/IR/IntrinsicsHexagon.h"
45#include "llvm/IR/Module.h"
46#include "llvm/IR/Type.h"
47#include "llvm/IR/Value.h"
48#include "llvm/MC/MCRegisterInfo.h"
49#include "llvm/Support/Casting.h"
50#include "llvm/Support/CodeGen.h"
51#include "llvm/Support/CommandLine.h"
52#include "llvm/Support/Debug.h"
53#include "llvm/Support/ErrorHandling.h"
54#include "llvm/Support/MathExtras.h"
55#include "llvm/Support/raw_ostream.h"
56#include "llvm/Target/TargetMachine.h"
57#include <algorithm>
58#include <cassert>
59#include <cstddef>
60#include <cstdint>
61#include <limits>
62#include <utility>
63
64using namespace llvm;
65
66#define DEBUG_TYPE "hexagon-lowering"
67
68static cl::opt<bool> EmitJumpTables("hexagon-emit-jump-tables",
69 cl::init(true), cl::Hidden,
70 cl::desc("Control jump table emission on Hexagon target"));
71
72static cl::opt<bool> EnableHexSDNodeSched("enable-hexagon-sdnode-sched",
73 cl::Hidden, cl::ZeroOrMore, cl::init(false),
74 cl::desc("Enable Hexagon SDNode scheduling"));
75
76static cl::opt<bool> EnableFastMath("ffast-math",
77 cl::Hidden, cl::ZeroOrMore, cl::init(false),
78 cl::desc("Enable Fast Math processing"));
79
80static cl::opt<int> MinimumJumpTables("minimum-jump-tables",
81 cl::Hidden, cl::ZeroOrMore, cl::init(5),
82 cl::desc("Set minimum jump tables"));
83
84static cl::opt<int> MaxStoresPerMemcpyCL("max-store-memcpy",
85 cl::Hidden, cl::ZeroOrMore, cl::init(6),
86 cl::desc("Max #stores to inline memcpy"));
87
88static cl::opt<int> MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os",
89 cl::Hidden, cl::ZeroOrMore, cl::init(4),
90 cl::desc("Max #stores to inline memcpy"));
91
92static cl::opt<int> MaxStoresPerMemmoveCL("max-store-memmove",
93 cl::Hidden, cl::ZeroOrMore, cl::init(6),
94 cl::desc("Max #stores to inline memmove"));
95
96static cl::opt<int> MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os",
97 cl::Hidden, cl::ZeroOrMore, cl::init(4),
98 cl::desc("Max #stores to inline memmove"));
99
100static cl::opt<int> MaxStoresPerMemsetCL("max-store-memset",
101 cl::Hidden, cl::ZeroOrMore, cl::init(8),
102 cl::desc("Max #stores to inline memset"));
103
104static cl::opt<int> MaxStoresPerMemsetOptSizeCL("max-store-memset-Os",
105 cl::Hidden, cl::ZeroOrMore, cl::init(4),
106 cl::desc("Max #stores to inline memset"));
107
108static cl::opt<bool> AlignLoads("hexagon-align-loads",
109 cl::Hidden, cl::init(false),
110 cl::desc("Rewrite unaligned loads as a pair of aligned loads"));
111
112
113namespace {
114
115 class HexagonCCState : public CCState {
116 unsigned NumNamedVarArgParams = 0;
117
118 public:
119 HexagonCCState(CallingConv::ID CC, bool IsVarArg, MachineFunction &MF,
120 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
121 unsigned NumNamedArgs)
122 : CCState(CC, IsVarArg, MF, locs, C),
123 NumNamedVarArgParams(NumNamedArgs) {}
124 unsigned getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
125 };
126
127} // end anonymous namespace
128
129
130// Implement calling convention for Hexagon.
131
132static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
133 CCValAssign::LocInfo &LocInfo,
134 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
135 static const MCPhysReg ArgRegs[] = {
136 Hexagon::R0, Hexagon::R1, Hexagon::R2,
137 Hexagon::R3, Hexagon::R4, Hexagon::R5
138 };
139 const unsigned NumArgRegs = array_lengthof(ArgRegs);
140 unsigned RegNum = State.getFirstUnallocated(ArgRegs);
141
142 // RegNum is an index into ArgRegs: skip a register if RegNum is odd.
143 if (RegNum != NumArgRegs && RegNum % 2 == 1)
144 State.AllocateReg(ArgRegs[RegNum]);
145
146 // Always return false here, as this function only makes sure that the first
147 // unallocated register has an even register number and does not actually
148 // allocate a register for the current argument.
149 return false;
150}
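
A worked illustration of the effect, assuming the allocation order of the ArgRegs table above (the signature is hypothetical):

  // void f(int a, long long b);
  //
  // 'a' is assigned R0, so the first unallocated register is R1 (index 1,
  // odd). CC_SkipOdd allocates R1 purely to skip it, and 'b' then lands in
  // the aligned pair R3:2 instead of straddling R2:R1.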
151
152#include "HexagonGenCallingConv.inc"
153
154
155SDValue
156HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
157 const {
158 return SDValue();
159}
160
161/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
162/// by "Src" to address "Dst" of size "Size". Alignment information is
163/// specified by the specific parameter attribute. The copy will be passed as
164/// a byval function parameter. Sometimes what we are copying is the end of a
165/// larger object, the part that does not fit in registers.
166static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
167 SDValue Chain, ISD::ArgFlagsTy Flags,
168 SelectionDAG &DAG, const SDLoc &dl) {
169 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
170 return DAG.getMemcpy(
171 Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
172 /*isVolatile=*/false, /*AlwaysInline=*/false,
173 /*isTailCall=*/false, MachinePointerInfo(), MachinePointerInfo());
174}
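
For context, a caller that could reach this helper might look like the sketch below (hypothetical, and it assumes the front end marks the aggregate byval; the lowering above then emits the stack copy as a single memcpy node):

  struct Big { int a[8]; };          // 32 bytes: does not fit in registers
  void callee(Big b);                // aggregate parameter, assumed byval
  void caller(Big &x) { callee(x); } // copy emitted via CreateCopyOfByValArgument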
175
176bool
177HexagonTargetLowering::CanLowerReturn(
178 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
179 const SmallVectorImpl<ISD::OutputArg> &Outs,
180 LLVMContext &Context) const {
181 SmallVector<CCValAssign, 16> RVLocs;
182 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
183
184 if (MF.getSubtarget<HexagonSubtarget>().useHVXOps())
185 return CCInfo.CheckReturn(Outs, RetCC_Hexagon_HVX);
186 return CCInfo.CheckReturn(Outs, RetCC_Hexagon);
187}
188
189// LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
190// passed by value, the function prototype is modified to return void and
191// the value is stored in memory pointed to by a pointer passed by the caller.
192SDValue
193HexagonTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
194 bool IsVarArg,
195 const SmallVectorImpl<ISD::OutputArg> &Outs,
196 const SmallVectorImpl<SDValue> &OutVals,
197 const SDLoc &dl, SelectionDAG &DAG) const {
198 // CCValAssign - represent the assignment of the return value to locations.
199 SmallVector<CCValAssign, 16> RVLocs;
200
201 // CCState - Info about the registers and stack slot.
202 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
203 *DAG.getContext());
204
205 // Analyze return values of ISD::RET
206 if (Subtarget.useHVXOps())
207 CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon_HVX);
208 else
209 CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);
210
211 SDValue Flag;
212 SmallVector<SDValue, 4> RetOps(1, Chain);
213
214 // Copy the result values into the output registers.
215 for (unsigned i = 0; i != RVLocs.size(); ++i) {
216 CCValAssign &VA = RVLocs[i];
217
218 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
219
220 // Guarantee that all emitted copies are stuck together with flags.
221 Flag = Chain.getValue(1);
222 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
223 }
224
225 RetOps[0] = Chain; // Update chain.
226
227 // Add the flag if we have it.
228 if (Flag.getNode())
229 RetOps.push_back(Flag);
230
231 return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, RetOps);
232}
233
234bool HexagonTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
235 // If the call itself is not marked as a tail call, don't emit it as one.
236 return CI->isTailCall();
237}
238
239Register HexagonTargetLowering::getRegisterByName(
240 const char* RegName, LLT VT, const MachineFunction &) const {
241 // Just support r19, the linux kernel uses it.
242 Register Reg = StringSwitch<Register>(RegName)
243 .Case("r0", Hexagon::R0)
244 .Case("r1", Hexagon::R1)
245 .Case("r2", Hexagon::R2)
246 .Case("r3", Hexagon::R3)
247 .Case("r4", Hexagon::R4)
248 .Case("r5", Hexagon::R5)
249 .Case("r6", Hexagon::R6)
250 .Case("r7", Hexagon::R7)
251 .Case("r8", Hexagon::R8)
252 .Case("r9", Hexagon::R9)
253 .Case("r10", Hexagon::R10)
254 .Case("r11", Hexagon::R11)
255 .Case("r12", Hexagon::R12)
256 .Case("r13", Hexagon::R13)
257 .Case("r14", Hexagon::R14)
258 .Case("r15", Hexagon::R15)
259 .Case("r16", Hexagon::R16)
260 .Case("r17", Hexagon::R17)
261 .Case("r18", Hexagon::R18)
262 .Case("r19", Hexagon::R19)
263 .Case("r20", Hexagon::R20)
264 .Case("r21", Hexagon::R21)
265 .Case("r22", Hexagon::R22)
266 .Case("r23", Hexagon::R23)
267 .Case("r24", Hexagon::R24)
268 .Case("r25", Hexagon::R25)
269 .Case("r26", Hexagon::R26)
270 .Case("r27", Hexagon::R27)
271 .Case("r28", Hexagon::R28)
272 .Case("r29", Hexagon::R29)
273 .Case("r30", Hexagon::R30)
274 .Case("r31", Hexagon::R31)
275 .Case("r1:0", Hexagon::D0)
276 .Case("r3:2", Hexagon::D1)
277 .Case("r5:4", Hexagon::D2)
278 .Case("r7:6", Hexagon::D3)
279 .Case("r9:8", Hexagon::D4)
280 .Case("r11:10", Hexagon::D5)
281 .Case("r13:12", Hexagon::D6)
282 .Case("r15:14", Hexagon::D7)
283 .Case("r17:16", Hexagon::D8)
284 .Case("r19:18", Hexagon::D9)
285 .Case("r21:20", Hexagon::D10)
286 .Case("r23:22", Hexagon::D11)
287 .Case("r25:24", Hexagon::D12)
288 .Case("r27:26", Hexagon::D13)
289 .Case("r29:28", Hexagon::D14)
290 .Case("r31:30", Hexagon::D15)
291 .Case("sp", Hexagon::R29)
292 .Case("fp", Hexagon::R30)
293 .Case("lr", Hexagon::R31)
294 .Case("p0", Hexagon::P0)
295 .Case("p1", Hexagon::P1)
296 .Case("p2", Hexagon::P2)
297 .Case("p3", Hexagon::P3)
298 .Case("sa0", Hexagon::SA0)
299 .Case("lc0", Hexagon::LC0)
300 .Case("sa1", Hexagon::SA1)
301 .Case("lc1", Hexagon::LC1)
302 .Case("m0", Hexagon::M0)
303 .Case("m1", Hexagon::M1)
304 .Case("usr", Hexagon::USR)
305 .Case("ugp", Hexagon::UGP)
306 .Default(Register());
307 if (Reg)
308 return Reg;
309
310 report_fatal_error("Invalid register name global variable");
311}
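
This hook is what makes named-register global variables work; as the comment notes, r19 is the one the Linux kernel relies on. A hedged sketch of the source-level usage (a GNU extension; reads compile to llvm.read_register, whose name argument is resolved through getRegisterByName, and an unrecognized name reaches report_fatal_error):

  // Hypothetical kernel-style global register variable:
  register unsigned long current asm("r19");

  unsigned long get_current(void) {
    return current;  // read of r19 via llvm.read_register
  }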
312
313/// LowerCallResult - Lower the result values of an ISD::CALL into the
314/// appropriate copies out of appropriate physical registers. This assumes that
315/// Chain/Glue are the input chain/glue to use, and that TheCall is the call
316/// being lowered. Returns a SDNode with the same number of values as the
317/// ISD::CALL.
318SDValue HexagonTargetLowering::LowerCallResult(
319 SDValue Chain, SDValue Glue, CallingConv::ID CallConv, bool IsVarArg,
320 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
321 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
322 const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const {
323 // Assign locations to each value returned by this call.
324 SmallVector<CCValAssign, 16> RVLocs;
325
326 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
327 *DAG.getContext());
328
329 if (Subtarget.useHVXOps())
330 CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon_HVX);
331 else
332 CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);
333
334 // Copy all of the result registers out of their specified physreg.
335 for (unsigned i = 0; i != RVLocs.size(); ++i) {
336 SDValue RetVal;
337 if (RVLocs[i].getValVT() == MVT::i1) {
338 // Return values of type MVT::i1 require special handling. The reason
339 // is that MVT::i1 is associated with the PredRegs register class, but
340 // values of that type are still returned in R0. Generate an explicit
341 // copy into a predicate register from R0, and treat the value of the
342 // predicate register as the call result.
343 auto &MRI = DAG.getMachineFunction().getRegInfo();
344 SDValue FR0 = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
345 MVT::i32, Glue);
346 // FR0 = (Value, Chain, Glue)
347 Register PredR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
348 SDValue TPR = DAG.getCopyToReg(FR0.getValue(1), dl, PredR,
349 FR0.getValue(0), FR0.getValue(2));
350 // TPR = (Chain, Glue)
351 // Don't glue this CopyFromReg, because it copies from a virtual
352 // register. If it is glued to the call, InstrEmitter will add it
353 // as an implicit def to the call (EmitMachineNode).
354 RetVal = DAG.getCopyFromReg(TPR.getValue(0), dl, PredR, MVT::i1);
355 Glue = TPR.getValue(1);
356 Chain = TPR.getValue(0);
357 } else {
358 RetVal = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
359 RVLocs[i].getValVT(), Glue);
360 Glue = RetVal.getValue(2);
361 Chain = RetVal.getValue(1);
362 }
363 InVals.push_back(RetVal.getValue(0));
364 }
365
366 return Chain;
367}
368
369/// LowerCall - Functions arguments are copied from virtual regs to
370/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
371SDValue
372HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
373 SmallVectorImpl<SDValue> &InVals) const {
374 SelectionDAG &DAG = CLI.DAG;
375 SDLoc &dl = CLI.DL;
376 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
377 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
378 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
379 SDValue Chain = CLI.Chain;
380 SDValue Callee = CLI.Callee;
381 CallingConv::ID CallConv = CLI.CallConv;
382 bool IsVarArg = CLI.IsVarArg;
383 bool DoesNotReturn = CLI.DoesNotReturn;
384
385 bool IsStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
386 MachineFunction &MF = DAG.getMachineFunction();
387 MachineFrameInfo &MFI = MF.getFrameInfo();
388 auto PtrVT = getPointerTy(MF.getDataLayout());
389
390 unsigned NumParams = CLI.CS.getInstruction()
391 ? CLI.CS.getFunctionType()->getNumParams()
392 : 0;
393 if (GlobalAddressSDNode *GAN = dyn_cast<GlobalAddressSDNode>(Callee))
394 Callee = DAG.getTargetGlobalAddress(GAN->getGlobal(), dl, MVT::i32);
395
396 // Linux ABI treats var-arg calls the same way as regular ones.
397 bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;
398
399 // Analyze operands of the call, assigning locations to each operand.
400 SmallVector<CCValAssign, 16> ArgLocs;
401 HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs, *DAG.getContext(),
402 NumParams);
403
404 if (Subtarget.useHVXOps())
405 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_HVX);
406 else
407 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
408
409 if (CLI.IsTailCall) {
410 bool StructAttrFlag = MF.getFunction().hasStructRetAttr();
411 CLI.IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
412 IsVarArg, IsStructRet, StructAttrFlag, Outs,
413 OutVals, Ins, DAG);
414 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
415 CCValAssign &VA = ArgLocs[i];
416 if (VA.isMemLoc()) {
417 CLI.IsTailCall = false;
418 break;
419 }
420 }
421 LLVM_DEBUG(dbgs() << (CLI.IsTailCall ? "Eligible for Tail Call\n"
422 : "Argument must be passed on stack. "
423 "Not eligible for Tail Call\n"));
424 }
425 // Get a count of how many bytes are to be pushed on the stack.
426 unsigned NumBytes = CCInfo.getNextStackOffset();
427 SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
428 SmallVector<SDValue, 8> MemOpChains;
429
430 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
431 SDValue StackPtr =
432 DAG.getCopyFromReg(Chain, dl, HRI.getStackRegister(), PtrVT);
433
434 bool NeedsArgAlign = false;
435 unsigned LargestAlignSeen = 0;
436 // Walk the register/memloc assignments, inserting copies/loads.
437 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
438 CCValAssign &VA = ArgLocs[i];
439 SDValue Arg = OutVals[i];
440 ISD::ArgFlagsTy Flags = Outs[i].Flags;
441 // Record if we need > 8 byte alignment on an argument.
442 bool ArgAlign = Subtarget.isHVXVectorType(VA.getValVT());
443 NeedsArgAlign |= ArgAlign;
444
445 // Promote the value if needed.
446 switch (VA.getLocInfo()) {
447 default:
448 // Loc info must be one of Full, BCvt, SExt, ZExt, or AExt.
449 llvm_unreachable("Unknown loc info!");
450 case CCValAssign::Full:
451 break;
452 case CCValAssign::BCvt:
453 Arg = DAG.getBitcast(VA.getLocVT(), Arg);
454 break;
455 case CCValAssign::SExt:
456 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
457 break;
458 case CCValAssign::ZExt:
459 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
460 break;
461 case CCValAssign::AExt:
462 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
463 break;
464 }
465
466 if (VA.isMemLoc()) {
467 unsigned LocMemOffset = VA.getLocMemOffset();
468 SDValue MemAddr = DAG.getConstant(LocMemOffset, dl,
469 StackPtr.getValueType());
470 MemAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, MemAddr);
471 if (ArgAlign)
472 LargestAlignSeen = std::max(LargestAlignSeen,
473 (unsigned)VA.getLocVT().getStoreSizeInBits() >> 3);
474 if (Flags.isByVal()) {
475 // The argument is a struct passed by value. According to LLVM, "Arg"
476 // is a pointer.
477 MemOpChains.push_back(CreateCopyOfByValArgument(Arg, MemAddr, Chain,
478 Flags, DAG, dl));
479 } else {
480 MachinePointerInfo LocPI = MachinePointerInfo::getStack(
481 DAG.getMachineFunction(), LocMemOffset);
482 SDValue S = DAG.getStore(Chain, dl, Arg, MemAddr, LocPI);
483 MemOpChains.push_back(S);
484 }
485 continue;
486 }
487
488 // Arguments that can be passed on register must be kept at RegsToPass
489 // vector.
490 if (VA.isRegLoc())
491 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
492 }
493
494 if (NeedsArgAlign && Subtarget.hasV60Ops()) {
495 LLVM_DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
496 unsigned VecAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
497 LargestAlignSeen = std::max(LargestAlignSeen, VecAlign);
498 MFI.ensureMaxAlignment(LargestAlignSeen);
499 }
500 // Transform all store nodes into one single node because all store
501 // nodes are independent of each other.
502 if (!MemOpChains.empty())
503 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
504
505 SDValue Glue;
506 if (!CLI.IsTailCall) {
507 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
508 Glue = Chain.getValue(1);
509 }
510
511 // Build a sequence of copy-to-reg nodes chained together with token
512 // chain and flag operands which copy the outgoing args into registers.
513 // The Glue is necessary since all emitted instructions must be
514 // stuck together.
515 if (!CLI.IsTailCall) {
516 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
517 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
518 RegsToPass[i].second, Glue);
519 Glue = Chain.getValue(1);
520 }
521 } else {
522 // For tail calls lower the arguments to the 'real' stack slot.
523 //
524 // Force all the incoming stack arguments to be loaded from the stack
525 // before any new outgoing arguments are stored to the stack, because the
526 // outgoing stack slots may alias the incoming argument stack slots, and
527 // the alias isn't otherwise explicit. This is slightly more conservative
528 // than necessary, because it means that each store effectively depends
529 // on every argument instead of just those arguments it would clobber.
530 //
531 // Do not flag preceding copytoreg stuff together with the following stuff.
532 Glue = SDValue();
533 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
534 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
535 RegsToPass[i].second, Glue);
536 Glue = Chain.getValue(1);
537 }
538 Glue = SDValue();
539 }
540
541 bool LongCalls = MF.getSubtarget<HexagonSubtarget>().useLongCalls();
542 unsigned Flags = LongCalls ? HexagonII::HMOTF_ConstExtended : 0;
543
544 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
545 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
546 // node so that legalize doesn't hack it.
547 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
548 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, PtrVT, 0, Flags);
549 } else if (ExternalSymbolSDNode *S =
550 dyn_cast<ExternalSymbolSDNode>(Callee)) {
551 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, Flags);
552 }
553
554 // Returns a chain & a flag for retval copy to use.
555 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
556 SmallVector<SDValue, 8> Ops;
557 Ops.push_back(Chain);
558 Ops.push_back(Callee);
559
560 // Add argument registers to the end of the list so that they are
561 // known live into the call.
562 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
563 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
564 RegsToPass[i].second.getValueType()));
565 }
566
567 const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallConv);
568 assert(Mask && "Missing call preserved mask for calling convention");
569 Ops.push_back(DAG.getRegisterMask(Mask));
570
571 if (Glue.getNode())
572 Ops.push_back(Glue);
573
574 if (CLI.IsTailCall) {
575 MFI.setHasTailCall();
576 return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
577 }
578
579 // Set this here because we need to know this for "hasFP" in frame lowering.
580 // The target-independent code calls getFrameRegister before setting it, and
581 // getFrameRegister uses hasFP to determine whether the function has FP.
582 MFI.setHasCalls(true);
583
584 unsigned OpCode = DoesNotReturn ? HexagonISD::CALLnr : HexagonISD::CALL;
585 Chain = DAG.getNode(OpCode, dl, NodeTys, Ops);
586 Glue = Chain.getValue(1);
587
588 // Create the CALLSEQ_END node.
589 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
590 DAG.getIntPtrConstant(0, dl, true), Glue, dl);
591 Glue = Chain.getValue(1);
592
593 // Handle result values, copying them out of physregs into vregs that we
594 // return.
595 return LowerCallResult(Chain, Glue, CallConv, IsVarArg, Ins, dl, DAG,
596 InVals, OutVals, Callee);
597}
598
599/// Returns true by value, base pointer and offset pointer and addressing
600/// mode by reference if this node can be combined with a load / store to
601/// form a post-indexed load / store.
602bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
603 SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM,
604 SelectionDAG &DAG) const {
605 LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(N);
606 if (!LSN)
607 return false;
608 EVT VT = LSN->getMemoryVT();
609 if (!VT.isSimple())
610 return false;
611 bool IsLegalType = VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
612 VT == MVT::i64 || VT == MVT::f32 || VT == MVT::f64 ||
613 VT == MVT::v2i16 || VT == MVT::v2i32 || VT == MVT::v4i8 ||
614 VT == MVT::v4i16 || VT == MVT::v8i8 ||
615 Subtarget.isHVXVectorType(VT.getSimpleVT());
616 if (!IsLegalType)
617 return false;
618
619 if (Op->getOpcode() != ISD::ADD)
620 return false;
621 Base = Op->getOperand(0);
622 Offset = Op->getOperand(1);
623 if (!isa<ConstantSDNode>(Offset.getNode()))
624 return false;
625 AM = ISD::POST_INC;
626
627 int32_t V = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
628 return Subtarget.getInstrInfo()->isValidAutoIncImm(VT, V);
629}
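
In other words, a separate pointer increment can be folded into the memory access itself. A hedged before/after sketch in Hexagon assembly (illustrative only; the immediate must pass isValidAutoIncImm for the access type):

  // Before combining:         After post-indexing (one instruction):
  //   r1 = memw(r0)             r1 = memw(r0++#4)
  //   r0 = add(r0, #4)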
630
631SDValue
632HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
633 MachineFunction &MF = DAG.getMachineFunction();
634 auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
635 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
636 unsigned LR = HRI.getRARegister();
637
638 if ((Op.getOpcode() != ISD::INLINEASM &&
639 Op.getOpcode() != ISD::INLINEASM_BR) || HMFI.hasClobberLR())
640 return Op;
641
642 unsigned NumOps = Op.getNumOperands();
643 if (Op.getOperand(NumOps-1).getValueType() == MVT::Glue)
644 --NumOps; // Ignore the flag operand.
645
646 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
647 unsigned Flags = cast<ConstantSDNode>(Op.getOperand(i))->getZExtValue();
648 unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
649 ++i; // Skip the ID value.
650
651 switch (InlineAsm::getKind(Flags)) {
652 default:
653 llvm_unreachable("Bad flags!");
654 case InlineAsm::Kind_RegUse:
655 case InlineAsm::Kind_Imm:
656 case InlineAsm::Kind_Mem:
657 i += NumVals;
658 break;
659 case InlineAsm::Kind_Clobber:
660 case InlineAsm::Kind_RegDef:
661 case InlineAsm::Kind_RegDefEarlyClobber: {
662 for (; NumVals; --NumVals, ++i) {
663 unsigned Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
664 if (Reg != LR)
665 continue;
666 HMFI.setHasClobberLR(true);
667 return Op;
668 }
669 break;
670 }
671 }
672 }
673
674 return Op;
675}
676
677// Need to transform ISD::PREFETCH into something that doesn't inherit
678// all of the properties of ISD::PREFETCH, specifically SDNPMayLoad and
679// SDNPMayStore.
680SDValue HexagonTargetLowering::LowerPREFETCH(SDValue Op,
681 SelectionDAG &DAG) const {
682 SDValue Chain = Op.getOperand(0);
683 SDValue Addr = Op.getOperand(1);
684 // Lower it to DCFETCH($reg, #0). A "pat" will try to merge the offset in,
685 // if the "reg" is fed by an "add".
686 SDLoc DL(Op);
687 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
688 return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
689}
690
691// Custom-handle ISD::READCYCLECOUNTER because the target-independent SDNode
692// is marked as having side-effects, while the register read on Hexagon does
693// not have any. TableGen refuses to accept the direct pattern from that node
694// to the A4_tfrcpp.
695SDValue HexagonTargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
696 SelectionDAG &DAG) const {
697 SDValue Chain = Op.getOperand(0);
698 SDLoc dl(Op);
699 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
700 return DAG.getNode(HexagonISD::READCYCLE, dl, VTs, Chain);
701}
702
703SDValue HexagonTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
704 SelectionDAG &DAG) const {
705 SDValue Chain = Op.getOperand(0);
706 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
707 // Lower the hexagon_prefetch builtin to DCFETCH, as above.
708 if (IntNo == Intrinsic::hexagon_prefetch) {
709 SDValue Addr = Op.getOperand(2);
710 SDLoc DL(Op);
711 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
712 return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
713 }
714 return SDValue();
715}
716
717SDValue
718HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
719 SelectionDAG &DAG) const {
720 SDValue Chain = Op.getOperand(0);
721 SDValue Size = Op.getOperand(1);
722 SDValue Align = Op.getOperand(2);
723 SDLoc dl(Op);
724
725 ConstantSDNode *AlignConst = dyn_cast<ConstantSDNode>(Align);
726 assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC");
727
728 unsigned A = AlignConst->getSExtValue();
729 auto &HFI = *Subtarget.getFrameLowering();
730 // "Zero" means natural stack alignment.
731 if (A == 0)
732 A = HFI.getStackAlignment();
733
734 LLVM_DEBUG({
735 dbgs() << __func__ << " Align: " << A << " Size: ";
736 Size.getNode()->dump(&DAG);
737 dbgs() << "\n";
738 });
739
740 SDValue AC = DAG.getConstant(A, dl, MVT::i32);
741 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
742 SDValue AA = DAG.getNode(HexagonISD::ALLOCA, dl, VTs, Chain, Size, AC);
743
744 DAG.ReplaceAllUsesOfValueWith(Op, AA);
745 return AA;
746}
747
748SDValue HexagonTargetLowering::LowerFormalArguments(
749 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
750 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
751 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
752 MachineFunction &MF = DAG.getMachineFunction();
753 MachineFrameInfo &MFI = MF.getFrameInfo();
754 MachineRegisterInfo &MRI = MF.getRegInfo();
755
756 // Linux ABI treats var-arg calls the same way as regular ones.
757 bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;
758
759 // Assign locations to all of the incoming arguments.
760 SmallVector<CCValAssign, 16> ArgLocs;
761 HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs,
762 *DAG.getContext(),
763 MF.getFunction().getFunctionType()->getNumParams());
764
765 if (Subtarget.useHVXOps())
766 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_HVX);
767 else
768 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);
769
770 // For LLVM, in the case when returning a struct by value (more than 8 bytes),
771 // the first argument is a pointer that points to the location on the caller's
772 // stack where the return value will be stored. For Hexagon, the location on
773 // the caller's stack is passed only when the struct size is smaller than (or
774 // equal to) 8 bytes. If not, no address is passed into the callee and
775 // the callee returns the result directly through R0/R1.
776 auto NextSingleReg = [] (const TargetRegisterClass &RC, unsigned Reg) {
777 switch (RC.getID()) {
778 case Hexagon::IntRegsRegClassID:
779 return Reg - Hexagon::R0 + 1;
780 case Hexagon::DoubleRegsRegClassID:
781 return (Reg - Hexagon::D0 + 1) * 2;
782 case Hexagon::HvxVRRegClassID:
783 return Reg - Hexagon::V0 + 1;
784 case Hexagon::HvxWRRegClassID:
785 return (Reg - Hexagon::W0 + 1) * 2;
786 }
787 llvm_unreachable("Unexpected register class");
788 };
789
790 auto &HFL = const_cast<HexagonFrameLowering&>(*Subtarget.getFrameLowering());
791 auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
792 HFL.FirstVarArgSavedReg = 0;
793 HMFI.setFirstNamedArgFrameIndex(-int(MFI.getNumFixedObjects()));
794
795 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
796 CCValAssign &VA = ArgLocs[i];
797 ISD::ArgFlagsTy Flags = Ins[i].Flags;
798 bool ByVal = Flags.isByVal();
799
800 // Arguments passed in registers:
801 // 1. 32- and 64-bit values and HVX vectors are passed directly,
802 // 2. Large structs are passed via an address, and the address is
803 // passed in a register.
804 if (VA.isRegLoc() && ByVal && Flags.getByValSize() <= 8)
805 llvm_unreachable("ByValSize must be bigger than 8 bytes");
806
807 bool InReg = VA.isRegLoc() &&
808 (!ByVal || (ByVal && Flags.getByValSize() > 8));
809
810 if (InReg) {
811 MVT RegVT = VA.getLocVT();
812 if (VA.getLocInfo() == CCValAssign::BCvt)
813 RegVT = VA.getValVT();
814
815 const TargetRegisterClass *RC = getRegClassFor(RegVT);
816 Register VReg = MRI.createVirtualRegister(RC);
817 SDValue Copy = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
818
819 // Treat values of type MVT::i1 specially: they are passed in
820 // registers of type i32, but they need to remain as values of
821 // type i1 for consistency of the argument lowering.
822 if (VA.getValVT() == MVT::i1) {
823 assert(RegVT.getSizeInBits() <= 32);
824 SDValue T = DAG.getNode(ISD::AND, dl, RegVT,
825 Copy, DAG.getConstant(1, dl, RegVT));
826 Copy = DAG.getSetCC(dl, MVT::i1, T, DAG.getConstant(0, dl, RegVT),
827 ISD::SETNE);
828 } else {
829#ifndef NDEBUG
830 unsigned RegSize = RegVT.getSizeInBits();
831 assert(RegSize == 32 || RegSize == 64 ||
832 Subtarget.isHVXVectorType(RegVT));
833#endif
834 }
835 InVals.push_back(Copy);
836 MRI.addLiveIn(VA.getLocReg(), VReg);
837 HFL.FirstVarArgSavedReg = NextSingleReg(*RC, VA.getLocReg());
838 } else {
839 assert(VA.isMemLoc() && "Argument should be passed in memory");
840
841 // If it's a byval parameter, then we need to compute the
842 // "real" size, not the size of the pointer.
843 unsigned ObjSize = Flags.isByVal()
844 ? Flags.getByValSize()
845 : VA.getLocVT().getStoreSizeInBits() / 8;
846
847 // Create the frame index object for this incoming parameter.
848 int Offset = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
849 int FI = MFI.CreateFixedObject(ObjSize, Offset, true);
850 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
851
852 if (Flags.isByVal()) {
853 // If it's a pass-by-value aggregate, then do not dereference the stack
854 // location. Instead, we should generate a reference to the stack
855 // location.
856 InVals.push_back(FIN);
857 } else {
858 SDValue L = DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
859 MachinePointerInfo::getFixedStack(MF, FI, 0));
860 InVals.push_back(L);
861 }
862 }
863 }
864
865 if (IsVarArg && Subtarget.isEnvironmentMusl()) {
866 for (int i = HFL.FirstVarArgSavedReg; i < 6; i++)
867 MRI.addLiveIn(Hexagon::R0+i);
868 }
869
870 if (IsVarArg && Subtarget.isEnvironmentMusl()) {
871 HMFI.setFirstNamedArgFrameIndex(HMFI.getFirstNamedArgFrameIndex() - 1);
872 HMFI.setLastNamedArgFrameIndex(-int(MFI.getNumFixedObjects()));
873
874 // Create Frame index for the start of register saved area.
875 int NumVarArgRegs = 6 - HFL.FirstVarArgSavedReg;
876 bool RequiresPadding = (NumVarArgRegs & 1);
877 int RegSaveAreaSizePlusPadding = RequiresPadding
878 ? (NumVarArgRegs + 1) * 4
879 : NumVarArgRegs * 4;
880
881 if (RegSaveAreaSizePlusPadding > 0) {
882 // The offset to saved register area should be 8 byte aligned.
883 int RegAreaStart = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset();
884 if (!(RegAreaStart % 8))
885 RegAreaStart = (RegAreaStart + 7) & -8;
886
887 int RegSaveAreaFrameIndex =
888 MFI.CreateFixedObject(RegSaveAreaSizePlusPadding, RegAreaStart, true);
889 HMFI.setRegSavedAreaStartFrameIndex(RegSaveAreaFrameIndex);
890
891 // This will point to the next argument passed via stack.
892 int Offset = RegAreaStart + RegSaveAreaSizePlusPadding;
893 int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
894 HMFI.setVarArgsFrameIndex(FI);
895 } else {
896 // This will point to the next argument passed via stack, when
897 // there is no saved register area.
898 int Offset = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset();
899 int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
900 HMFI.setRegSavedAreaStartFrameIndex(FI);
901 HMFI.setVarArgsFrameIndex(FI);
902 }
903 }
904
905
906 if (IsVarArg && !Subtarget.isEnvironmentMusl()) {
907 // This will point to the next argument passed via stack.
908 int Offset = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset();
909 int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
910 HMFI.setVarArgsFrameIndex(FI);
911 }
912
913 return Chain;
914}
915
916SDValue
917HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
918 // VASTART stores the address of the VarArgsFrameIndex slot into the
919 // memory location argument.
920 MachineFunction &MF = DAG.getMachineFunction();
921 HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
922 SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
923 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
924
925 if (!Subtarget.isEnvironmentMusl()) {
926 return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr, Op.getOperand(1),
927 MachinePointerInfo(SV));
928 }
929 auto &FuncInfo = *MF.getInfo<HexagonMachineFunctionInfo>();
930 auto &HFL = *Subtarget.getFrameLowering();
931 SDLoc DL(Op);
932 SmallVector<SDValue, 8> MemOps;
933
934 // Get frame index of va_list.
935 SDValue FIN = Op.getOperand(1);
936
937 // If first Vararg register is odd, add 4 bytes to start of
938 // saved register area to point to the first register location.
939 // This is because the saved register area has to be 8 byte aligned.
940 // In case of an odd start register, there will be 4 bytes of padding at
941 // the beginning of the saved register area. If all registers are used up,
942 // the following condition will handle it correctly.
943 SDValue SavedRegAreaStartFrameIndex =
944 DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(), MVT::i32);
945
946 auto PtrVT = getPointerTy(DAG.getDataLayout());
947
948 if (HFL.FirstVarArgSavedReg & 1)
949 SavedRegAreaStartFrameIndex =
950 DAG.getNode(ISD::ADD, DL, PtrVT,
951 DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(),
952 MVT::i32),
953 DAG.getIntPtrConstant(4, DL));
954
955 // Store the saved register area start pointer.
956 SDValue Store =
957 DAG.getStore(Op.getOperand(0), DL,
958 SavedRegAreaStartFrameIndex,
959 FIN, MachinePointerInfo(SV));
960 MemOps.push_back(Store);
961
962 // Store saved register area end pointer.
963 FIN = DAG.getNode(ISD::ADD, DL, PtrVT,
964 FIN, DAG.getIntPtrConstant(4, DL));
965 Store = DAG.getStore(Op.getOperand(0), DL,
966 DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),
967 PtrVT),
968 FIN, MachinePointerInfo(SV, 4));
969 MemOps.push_back(Store);
970
971 // Store overflow area pointer.
972 FIN = DAG.getNode(ISD::ADD, DL, PtrVT,
973 FIN, DAG.getIntPtrConstant(4, DL));
974 Store = DAG.getStore(Op.getOperand(0), DL,
975 DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),
976 PtrVT),
977 FIN, MachinePointerInfo(SV, 8));
978 MemOps.push_back(Store);
979
980 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
981}
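
Taken together, the three stores populate what is effectively a three-pointer va_list. An illustrative layout, assuming the musl Hexagon ABI handled above (the field names are invented for clarity, not taken from musl headers):

  struct hexagon_va_list {       // illustrative only
    void *reg_save_start;        // offset 0: first saved vararg register slot
    void *reg_save_end;          // offset 4: end of the register-save area
    void *overflow_area;         // offset 8: next on-stack (overflow) argument
  };                             // 12 bytes, matching the memcpy in LowerVACOPY below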
982
983SDValue
984HexagonTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
985 // Assert that the linux ABI is enabled for the current compilation.
986 assert(Subtarget.isEnvironmentMusl() && "Linux ABI should be enabled");
987 SDValue Chain = Op.getOperand(0);
988 SDValue DestPtr = Op.getOperand(1);
989 SDValue SrcPtr = Op.getOperand(2);
990 const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
991 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
992 SDLoc DL(Op);
993 // Size of the va_list is 12 bytes as it has 3 pointers. Therefore,
994 // we need to memcpy 12 bytes from one va_list to another.
995 return DAG.getMemcpy(Chain, DL, DestPtr, SrcPtr,
996 DAG.getIntPtrConstant(12, DL), Align(4),
997 /*isVolatile*/ false, false, false,
998 MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
999}
1000
1001SDValue HexagonTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
1002 const SDLoc &dl(Op);
1003 SDValue LHS = Op.getOperand(0);
1004 SDValue RHS = Op.getOperand(1);
1005 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
1006 MVT ResTy = ty(Op);
1007 MVT OpTy = ty(LHS);
1008
1009 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
1010 MVT ElemTy = OpTy.getVectorElementType();
1011 assert(ElemTy.isScalarInteger());
1012 MVT WideTy = MVT::getVectorVT(MVT::getIntegerVT(2*ElemTy.getSizeInBits()),
1013 OpTy.getVectorNumElements());
1014 return DAG.getSetCC(dl, ResTy,
1015 DAG.getSExtOrTrunc(LHS, SDLoc(LHS), WideTy),
1016 DAG.getSExtOrTrunc(RHS, SDLoc(RHS), WideTy), CC);
1017 }
1018
1019 // Treat all other vector types as legal.
1020 if (ResTy.isVector())
1021 return Op;
1022
1023 // Comparisons of short integers should use sign-extend, not zero-extend,
1024 // since we can represent small negative values in the compare instructions.
1025 // The LLVM default is to use zero-extend arbitrarily in these cases.
1026 auto isSExtFree = [this](SDValue N) {
1027 switch (N.getOpcode()) {
1028 case ISD::TRUNCATE: {
1029 // A sign-extend of a truncate of a sign-extend is free.
1030 SDValue Op = N.getOperand(0);
1031 if (Op.getOpcode() != ISD::AssertSext)
1032 return false;
1033 EVT OrigTy = cast<VTSDNode>(Op.getOperand(1))->getVT();
1034 unsigned ThisBW = ty(N).getSizeInBits();
1035 unsigned OrigBW = OrigTy.getSizeInBits();
1036 // The type that was sign-extended to get the AssertSext must be
1037 // narrower than the type of N (so that N has still the same value
1038 // as the original).
1039 return ThisBW >= OrigBW;
1040 }
1041 case ISD::LOAD:
1042 // We have sign-extended loads.
1043 return true;
1044 }
1045 return false;
1046 };
1047
1048 if (OpTy == MVT::i8 || OpTy == MVT::i16) {
1049 ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS);
1050 bool IsNegative = C && C->getAPIntValue().isNegative();
1051 if (IsNegative || isSExtFree(LHS) || isSExtFree(RHS))
1052 return DAG.getSetCC(dl, ResTy,
1053 DAG.getSExtOrTrunc(LHS, SDLoc(LHS), MVT::i32),
1054 DAG.getSExtOrTrunc(RHS, SDLoc(RHS), MVT::i32), CC);
1055 }
1056
1057 return SDValue();
1058}
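
A concrete instance of the sign- versus zero-extension point made in the comment above: the i8 bit pattern 0xFF widens to 255 under zero-extension but to -1 under sign-extension, so signed comparisons survive widening only in the sign-extended form. A small sketch:

  signed char x = -1;                  // bit pattern 0xFF
  bool s = int(x) < 0;                 // sign-extend: 0xFFFFFFFF -> true
  bool z = int((unsigned char)x) < 0;  // zero-extend: 0x000000FF -> false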
1059
1060SDValue
1061HexagonTargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
1062 SDValue PredOp = Op.getOperand(0);
1063 SDValue Op1 = Op.getOperand(1), Op2 = Op.getOperand(2);
1064 MVT OpTy = ty(Op1);
1065 const SDLoc &dl(Op);
1066
1067 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
1068 MVT ElemTy = OpTy.getVectorElementType();
1069 assert(ElemTy.isScalarInteger());
1070 MVT WideTy = MVT::getVectorVT(MVT::getIntegerVT(2*ElemTy.getSizeInBits()),
1071 OpTy.getVectorNumElements());
1072 // Generate (trunc (select (_, sext, sext))).
1073 return DAG.getSExtOrTrunc(
1074 DAG.getSelect(dl, WideTy, PredOp,
1075 DAG.getSExtOrTrunc(Op1, dl, WideTy),
1076 DAG.getSExtOrTrunc(Op2, dl, WideTy)),
1077 dl, OpTy);
1078 }
1079
1080 return SDValue();
1081}
1082
1083SDValue
1084HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
1085 EVT ValTy = Op.getValueType();
1086 ConstantPoolSDNode *CPN = cast<ConstantPoolSDNode>(Op);
1087 Constant *CVal = nullptr;
1088 bool isVTi1Type = false;
1089 if (auto *CV = dyn_cast<ConstantVector>(CPN->getConstVal())) {
1090 if (CV->getType()->getVectorElementType()->isIntegerTy(1)) {
1091 IRBuilder<> IRB(CV->getContext());
1092 SmallVector<Constant*, 128> NewConst;
1093 unsigned VecLen = CV->getNumOperands();
1094 assert(isPowerOf2_32(VecLen) &&
1095 "conversion only supported for pow2 VectorSize");
1096 for (unsigned i = 0; i < VecLen; ++i)
1097 NewConst.push_back(IRB.getInt8(CV->getOperand(i)->isZeroValue()));
1098
1099 CVal = ConstantVector::get(NewConst);
1100 isVTi1Type = true;
1101 }
1102 }
1103 unsigned Align = CPN->getAlignment();
1104 bool IsPositionIndependent = isPositionIndependent();
1105 unsigned char TF = IsPositionIndependent ? HexagonII::MO_PCREL : 0;
1106
1107 unsigned Offset = 0;
1108 SDValue T;
1109 if (CPN->isMachineConstantPoolEntry())
1110 T = DAG.getTargetConstantPool(CPN->getMachineCPVal(), ValTy, Align, Offset,
1111 TF);
1112 else if (isVTi1Type)
1113 T = DAG.getTargetConstantPool(CVal, ValTy, Align, Offset, TF);
1114 else
1115 T = DAG.getTargetConstantPool(CPN->getConstVal(), ValTy, Align, Offset, TF);
1116
1117 assert(cast<ConstantPoolSDNode>(T)->getTargetFlags() == TF &&
1118 "Inconsistent target flag encountered");
1119
1120 if (IsPositionIndependent)
1121 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), ValTy, T);
1122 return DAG.getNode(HexagonISD::CP, SDLoc(Op), ValTy, T);
1123}
1124
1125SDValue
1126HexagonTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
1127 EVT VT = Op.getValueType();
1128 int Idx = cast<JumpTableSDNode>(Op)->getIndex();
1129 if (isPositionIndependent()) {
1130 SDValue T = DAG.getTargetJumpTable(Idx, VT, HexagonII::MO_PCREL);
1131 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), VT, T);
1132 }
1133
1134 SDValue T = DAG.getTargetJumpTable(Idx, VT);
1135 return DAG.getNode(HexagonISD::JT, SDLoc(Op), VT, T);
1136}
1137
1138SDValue
1139HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
1140 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1141 MachineFunction &MF = DAG.getMachineFunction();
1142 MachineFrameInfo &MFI = MF.getFrameInfo();
1143 MFI.setReturnAddressIsTaken(true);
1144
1145 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1146 return SDValue();
1147
1148 EVT VT = Op.getValueType();
1149 SDLoc dl(Op);
1150 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1151 if (Depth) {
1152 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
1153 SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
1154 return DAG.getLoad(VT, dl, DAG.getEntryNode(),
1155 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
1156 MachinePointerInfo());
1157 }
1158
1159 // Return LR, which contains the return address. Mark it an implicit live-in.
1160 unsigned Reg = MF.addLiveIn(HRI.getRARegister(), getRegClassFor(MVT::i32));
1161 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
1162}
1163
1164SDValue
1165HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
1166 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1167 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
1168 MFI.setFrameAddressIsTaken(true);
1169
1170 EVT VT = Op.getValueType();
1171 SDLoc dl(Op);
1172 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1173 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
1174 HRI.getFrameRegister(), VT);
1175 while (Depth--)
1176 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
1177 MachinePointerInfo());
1178 return FrameAddr;
1179}
1180
1181SDValue
1182HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const {
1183 SDLoc dl(Op);
1184 return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
1185}
1186
1187SDValue
1188HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const {
1189 SDLoc dl(Op);
1190 auto *GAN = cast<GlobalAddressSDNode>(Op);
1191 auto PtrVT = getPointerTy(DAG.getDataLayout());
1192 auto *GV = GAN->getGlobal();
1193 int64_t Offset = GAN->getOffset();
1194
1195 auto &HLOF = *HTM.getObjFileLowering();
1196 Reloc::Model RM = HTM.getRelocationModel();
1197
1198 if (RM == Reloc::Static) {
1199 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset);
1200 const GlobalObject *GO = GV->getBaseObject();
1201 if (GO && Subtarget.useSmallData() && HLOF.isGlobalInSmallSection(GO, HTM))
1202 return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, GA);
1203 return DAG.getNode(HexagonISD::CONST32, dl, PtrVT, GA);
1204 }
1205
1206 bool UsePCRel = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
1207 if (UsePCRel) {
1208 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset,
1209 HexagonII::MO_PCREL);
1210 return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, GA);
1211 }
1212
1213 // Use GOT index.
1214 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
1215 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, HexagonII::MO_GOT);
1216 SDValue Off = DAG.getConstant(Offset, dl, MVT::i32);
1217 return DAG.getNode(HexagonISD::AT_GOT, dl, PtrVT, GOT, GA, Off);
1218}
1219
1220// Specifies that for loads and stores VT can be promoted to PromotedLdStVT.
1221SDValue
1222HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
1223 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
1224 SDLoc dl(Op);
1225 EVT PtrVT = getPointerTy(DAG.getDataLayout());
1226
1227 Reloc::Model RM = HTM.getRelocationModel();
1228 if (RM == Reloc::Static) {
1229 SDValue A = DAG.getTargetBlockAddress(BA, PtrVT);
1230 return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, A);
1231 }
1232
1233 SDValue A = DAG.getTargetBlockAddress(BA, PtrVT, 0, HexagonII::MO_PCREL);
1234 return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, A);
1235}
1236
1237SDValue
1238HexagonTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG)
1239 const {
1240 EVT PtrVT = getPointerTy(DAG.getDataLayout());
1241 SDValue GOTSym = DAG.getTargetExternalSymbol(HEXAGON_GOT_SYM_NAME, PtrVT,
1242 HexagonII::MO_PCREL);
1243 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), PtrVT, GOTSym);
1244}
1245
1246SDValue
1247HexagonTargetLowering::GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
1248 GlobalAddressSDNode *GA, SDValue Glue, EVT PtrVT, unsigned ReturnReg,
1249 unsigned char OperandFlags) const {
1250 MachineFunction &MF = DAG.getMachineFunction();
1251 MachineFrameInfo &MFI = MF.getFrameInfo();
1252 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1253 SDLoc dl(GA);
1254 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
1255 GA->getValueType(0),
1256 GA->getOffset(),
1257 OperandFlags);
1258 // Create the operands for the call. The operands should have the following:
1259 // 1. Chain SDValue
1260 // 2. Callee which in this case is the Global address value.
1261 // 3. Registers live into the call. In this case it is R0, as we
1262 // have just one argument to be passed.
1263 // 4. Glue.
1264 // Note: The order is important.
1265
1266 const auto &HRI = *Subtarget.getRegisterInfo();
1267 const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallingConv::C);
1268 assert(Mask && "Missing call preserved mask for calling convention");
1269 SDValue Ops[] = { Chain, TGA, DAG.getRegister(Hexagon::R0, PtrVT),
1270 DAG.getRegisterMask(Mask), Glue };
1271 Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, Ops);
1272
1273 // Inform MFI that function has calls.
1274 MFI.setAdjustsStack(true);
1275
1276 Glue = Chain.getValue(1);
1277 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Glue);
1278}
1279
1280//
1281 // Lower using the initial executable model for TLS addresses
1282//
1283SDValue
1284HexagonTargetLowering::LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
1285 SelectionDAG &DAG) const {
1286 SDLoc dl(GA);
1287 int64_t Offset = GA->getOffset();
1288 auto PtrVT = getPointerTy(DAG.getDataLayout());
1289
1290 // Get the thread pointer.
1291 SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT);
1292
1293 bool IsPositionIndependent = isPositionIndependent();
1294 unsigned char TF =
1295 IsPositionIndependent ? HexagonII::MO_IEGOT : HexagonII::MO_IE;
1296
1297 // First generate the TLS symbol address
1298 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT,
1299 Offset, TF);
1300
1301 SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);
1302
1303 if (IsPositionIndependent) {
1304 // Generate the GOT pointer in case of position independent code
1305 SDValue GOT = LowerGLOBAL_OFFSET_TABLE(Sym, DAG);
1306
1307 // Add the TLS symbol address to the GOT pointer. This gives a
1308 // GOT-relative relocation for the symbol.
1309 Sym = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym);
1310 }
1311
1312 // Load the offset value for the TLS symbol. This offset is relative
1313 // to the thread pointer.
1314 SDValue LoadOffset =
1315 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Sym, MachinePointerInfo());
1316
1317 // The address of the thread-local variable is the sum of the thread
1318 // pointer and the offset of the variable.
1319 return DAG.getNode(ISD::ADD, dl, PtrVT, TP, LoadOffset);
1320}
1321
1322//
1323// Lower using the local executable model for TLS addresses
1324//
1325SDValue
1326HexagonTargetLowering::LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
1327 SelectionDAG &DAG) const {
1328 SDLoc dl(GA);
1329 int64_t Offset = GA->getOffset();
1330 auto PtrVT = getPointerTy(DAG.getDataLayout());
1331
1332 // Get the thread pointer.
1333 SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT);
1334 // Generate the TLS symbol address
1335 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset,
1336 HexagonII::MO_TPREL);
1337 SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);
1338
1339 // The address of the thread-local variable is the sum of the thread
1340 // pointer and the offset of the variable.
1341 return DAG.getNode(ISD::ADD, dl, PtrVT, TP, Sym);
1342}
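[Editor's note] The difference between the two static TLS models above comes down to a single memory access. A plain-C++ sketch (Tp, TpRelOffset, and IEGotSlot are illustrative names, not LLVM or ABI symbols):

  // Local-exec: the thread-pointer-relative offset is known at link
  // time, so the address is a plain add (TP + Sym above).
  char *localExecAddr(char *Tp, long TpRelOffset) {
    return Tp + TpRelOffset;
  }

  // Initial-exec: the offset is first loaded from a GOT slot (the
  // DAG.getLoad of Sym above), then added to the thread pointer.
  char *initialExecAddr(char *Tp, const long *IEGotSlot) {
    return Tp + *IEGotSlot;
  }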
1343
1344//
1345// Lower using the general dynamic model for TLS addresses
1346//
1347SDValue
1348HexagonTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
1349 SelectionDAG &DAG) const {
1350 SDLoc dl(GA);
1351 int64_t Offset = GA->getOffset();
1352 auto PtrVT = getPointerTy(DAG.getDataLayout());
1353
1354 // First generate the TLS symbol address
1355 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset,
1356 HexagonII::MO_GDGOT);
1357
1358 // Then, generate the GOT pointer
1359 SDValue GOT = LowerGLOBAL_OFFSET_TABLE(TGA, DAG);
1360
1361 // Add the TLS symbol and the GOT pointer
1362 SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);
1363 SDValue Chain = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym);
1364
1365 // Copy over the argument to R0
1366 SDValue InFlag;
1367 Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, Hexagon::R0, Chain, InFlag);
1368 InFlag = Chain.getValue(1);
1369
1370 unsigned Flags =
1371 static_cast<const HexagonSubtarget &>(DAG.getSubtarget()).useLongCalls()
1372 ? HexagonII::MO_GDPLT | HexagonII::HMOTF_ConstExtended
1373 : HexagonII::MO_GDPLT;
1374
1375 return GetDynamicTLSAddr(DAG, Chain, GA, InFlag, PtrVT,
1376 Hexagon::R0, Flags);
1377}
1378
1379//
1380// Lower TLS addresses.
1381//
1382// For now, for dynamic models, we only support the general-dynamic model.
1383//
1384SDValue
1385HexagonTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1386 SelectionDAG &DAG) const {
1387 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
1388
1389 switch (HTM.getTLSModel(GA->getGlobal())) {
1390 case TLSModel::GeneralDynamic:
1391 case TLSModel::LocalDynamic:
1392 return LowerToTLSGeneralDynamicModel(GA, DAG);
1393 case TLSModel::InitialExec:
1394 return LowerToTLSInitialExecModel(GA, DAG);
1395 case TLSModel::LocalExec:
1396 return LowerToTLSLocalExecModel(GA, DAG);
1397 }
1398 llvm_unreachable("Bogus TLS model");
1399}
1400
1401//===----------------------------------------------------------------------===//
1402// TargetLowering Implementation
1403//===----------------------------------------------------------------------===//
1404
1405HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
1406 const HexagonSubtarget &ST)
1407 : TargetLowering(TM), HTM(static_cast<const HexagonTargetMachine&>(TM)),
1408 Subtarget(ST) {
1409 auto &HRI = *Subtarget.getRegisterInfo();
1410
1411 setPrefLoopAlignment(Align(16));
1412 setMinFunctionAlignment(Align(4));
1413 setPrefFunctionAlignment(Align(16));
1414 setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
1415 setBooleanContents(TargetLoweringBase::UndefinedBooleanContent);
1416 setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent);
1417
1418 setMaxAtomicSizeInBitsSupported(64);
1419 setMinCmpXchgSizeInBits(32);
1420
1421 if (EnableHexSDNodeSched)
1422 setSchedulingPreference(Sched::VLIW);
1423 else
1424 setSchedulingPreference(Sched::Source);
1425
1426 // Limits for inline expansion of memcpy/memmove/memset
1427 MaxStoresPerMemcpy = MaxStoresPerMemcpyCL;
1428 MaxStoresPerMemcpyOptSize = MaxStoresPerMemcpyOptSizeCL;
1429 MaxStoresPerMemmove = MaxStoresPerMemmoveCL;
1430 MaxStoresPerMemmoveOptSize = MaxStoresPerMemmoveOptSizeCL;
1431 MaxStoresPerMemset = MaxStoresPerMemsetCL;
1432 MaxStoresPerMemsetOptSize = MaxStoresPerMemsetOptSizeCL;
1433
1434 //
1435 // Set up register classes.
1436 //
1437
1438 addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);
1439 addRegisterClass(MVT::v2i1, &Hexagon::PredRegsRegClass); // bbbbaaaa
1440 addRegisterClass(MVT::v4i1, &Hexagon::PredRegsRegClass); // ddccbbaa
1441 addRegisterClass(MVT::v8i1, &Hexagon::PredRegsRegClass); // hgfedcba
1442 addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
1443 addRegisterClass(MVT::v2i16, &Hexagon::IntRegsRegClass);
1444 addRegisterClass(MVT::v4i8, &Hexagon::IntRegsRegClass);
1445 addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);
1446 addRegisterClass(MVT::v8i8, &Hexagon::DoubleRegsRegClass);
1447 addRegisterClass(MVT::v4i16, &Hexagon::DoubleRegsRegClass);
1448 addRegisterClass(MVT::v2i32, &Hexagon::DoubleRegsRegClass);
1449
1450 addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
1451 addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);
1452
1453 //
1454 // Handling of scalar operations.
1455 //
1456 // All operations default to "legal", except:
1457 // - indexed loads and stores (pre-/post-incremented),
1458 // - ANY_EXTEND_VECTOR_INREG, ATOMIC_CMP_SWAP_WITH_SUCCESS, CONCAT_VECTORS,
1459 // ConstantFP, DEBUGTRAP, FCEIL, FCOPYSIGN, FEXP, FEXP2, FFLOOR, FGETSIGN,
1460 // FLOG, FLOG2, FLOG10, FMAXNUM, FMINNUM, FNEARBYINT, FRINT, FROUND, TRAP,
1461 // FTRUNC, PREFETCH, SIGN_EXTEND_VECTOR_INREG, ZERO_EXTEND_VECTOR_INREG,
1462 // which default to "expand" for at least one type.
1463
1464 // Misc operations.
1465 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
1466 setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
1467 setOperationAction(ISD::TRAP, MVT::Other, Legal);
1468 setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
1469 setOperationAction(ISD::JumpTable, MVT::i32, Custom);
1470 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
1471 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
1472 setOperationAction(ISD::INLINEASM, MVT::Other, Custom);
1473 setOperationAction(ISD::INLINEASM_BR, MVT::Other, Custom);
1474 setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
1475 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
1476 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1477 setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
1478 setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
1479 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
1480 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
1481
1482 // Custom legalize GlobalAddress nodes into CONST32.
1483 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
1484 setOperationAction(ISD::GlobalAddress, MVT::i8, Custom);
1485 setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
1486
1487 // Hexagon needs to optimize cases with negative constants.
1488 setOperationAction(ISD::SETCC, MVT::i8, Custom);
1489 setOperationAction(ISD::SETCC, MVT::i16, Custom);
1490 setOperationAction(ISD::SETCC, MVT::v4i8, Custom);
1491 setOperationAction(ISD::SETCC, MVT::v2i16, Custom);
1492
1493 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1494 setOperationAction(ISD::VASTART, MVT::Other, Custom);
1495 setOperationAction(ISD::VAEND, MVT::Other, Expand);
1496 setOperationAction(ISD::VAARG, MVT::Other, Expand);
1497 if (Subtarget.isEnvironmentMusl())
1498 setOperationAction(ISD::VACOPY, MVT::Other, Custom);
1499 else
1500 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
1501
1502 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
1503 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
1504 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
1505
1506 if (EmitJumpTables)
1507 setMinimumJumpTableEntries(MinimumJumpTables);
1508 else
1509 setMinimumJumpTableEntries(std::numeric_limits<unsigned>::max());
1510 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1511
1512 setOperationAction(ISD::ABS, MVT::i32, Legal);
1513 setOperationAction(ISD::ABS, MVT::i64, Legal);
1514
1515 // Hexagon has A4_addp_c and A4_subp_c that take and generate a carry bit,
1516 // but they only operate on i64.
1517 for (MVT VT : MVT::integer_valuetypes()) {
1518 setOperationAction(ISD::UADDO, VT, Custom);
1519 setOperationAction(ISD::USUBO, VT, Custom);
1520 setOperationAction(ISD::SADDO, VT, Expand);
1521 setOperationAction(ISD::SSUBO, VT, Expand);
1522 setOperationAction(ISD::ADDCARRY, VT, Expand);
1523 setOperationAction(ISD::SUBCARRY, VT, Expand);
1524 }
1525 setOperationAction(ISD::ADDCARRY, MVT::i64, Custom);
1526 setOperationAction(ISD::SUBCARRY, MVT::i64, Custom);
1527
1528 setOperationAction(ISD::CTLZ, MVT::i8, Promote);
1529 setOperationAction(ISD::CTLZ, MVT::i16, Promote);
1530 setOperationAction(ISD::CTTZ, MVT::i8, Promote);
1531 setOperationAction(ISD::CTTZ, MVT::i16, Promote);
1532
1533 // Popcount can count # of 1s in i64 but returns i32.
1534 setOperationAction(ISD::CTPOP, MVT::i8, Promote);
1535 setOperationAction(ISD::CTPOP, MVT::i16, Promote);
1536 setOperationAction(ISD::CTPOP, MVT::i32, Promote);
1537 setOperationAction(ISD::CTPOP, MVT::i64, Legal);
1538
1539 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
1540 setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
1541 setOperationAction(ISD::BSWAP, MVT::i32, Legal);
1542 setOperationAction(ISD::BSWAP, MVT::i64, Legal);
1543
1544 setOperationAction(ISD::FSHL, MVT::i32, Legal);
1545 setOperationAction(ISD::FSHL, MVT::i64, Legal);
1546 setOperationAction(ISD::FSHR, MVT::i32, Legal);
1547 setOperationAction(ISD::FSHR, MVT::i64, Legal);
1548
1549 for (unsigned IntExpOp :
1550 {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM,
1551 ISD::SDIVREM, ISD::UDIVREM, ISD::ROTL, ISD::ROTR,
1552 ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS,
1553 ISD::SMUL_LOHI, ISD::UMUL_LOHI}) {
1554 for (MVT VT : MVT::integer_valuetypes())
1555 setOperationAction(IntExpOp, VT, Expand);
1556 }
1557
1558 for (unsigned FPExpOp :
1559 {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FSINCOS,
1560 ISD::FPOW, ISD::FCOPYSIGN}) {
1561 for (MVT VT : MVT::fp_valuetypes())
1562 setOperationAction(FPExpOp, VT, Expand);
1563 }
1564
1565 // No extending loads from i32.
1566 for (MVT VT : MVT::integer_valuetypes()) {
1567 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);
1568 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
1569 setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
1570 }
1571 // Turn FP truncstore into trunc + store.
1572 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1573 // Turn FP extload into load/fpextend.
1574 for (MVT VT : MVT::fp_valuetypes())
1575 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1576
1577 // Expand BR_CC and SELECT_CC for all integer and fp types.
1578 for (MVT VT : MVT::integer_valuetypes()) {
1579 setOperationAction(ISD::BR_CC, VT, Expand);
1580 setOperationAction(ISD::SELECT_CC, VT, Expand);
1581 }
1582 for (MVT VT : MVT::fp_valuetypes()) {
1583 setOperationAction(ISD::BR_CC, VT, Expand);
1584 setOperationAction(ISD::SELECT_CC, VT, Expand);
1585 }
1586 setOperationAction(ISD::BR_CC, MVT::Other, Expand);
1587
1588 //
1589 // Handling of vector operations.
1590 //
1591
1592 // Set the action for vector operations to "expand", then override it with
1593 // either "custom" or "legal" for specific cases.
1594 static const unsigned VectExpOps[] = {
1595 // Integer arithmetic:
1596 ISD::ADD, ISD::SUB, ISD::MUL, ISD::SDIV, ISD::UDIV,
1597 ISD::SREM, ISD::UREM, ISD::SDIVREM, ISD::UDIVREM, ISD::SADDO,
1598 ISD::UADDO, ISD::SSUBO, ISD::USUBO, ISD::SMUL_LOHI, ISD::UMUL_LOHI,
1599 // Logical/bit:
1600 ISD::AND, ISD::OR, ISD::XOR, ISD::ROTL, ISD::ROTR,
1601 ISD::CTPOP, ISD::CTLZ, ISD::CTTZ,
1602 // Floating point arithmetic/math functions:
1603 ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FMA, ISD::FDIV,
1604 ISD::FREM, ISD::FNEG, ISD::FABS, ISD::FSQRT, ISD::FSIN,
1605 ISD::FCOS, ISD::FPOW, ISD::FLOG, ISD::FLOG2,
1606 ISD::FLOG10, ISD::FEXP, ISD::FEXP2, ISD::FCEIL, ISD::FTRUNC,
1607 ISD::FRINT, ISD::FNEARBYINT, ISD::FROUND, ISD::FFLOOR,
1608 ISD::FMINNUM, ISD::FMAXNUM, ISD::FSINCOS,
1609 // Misc:
1610 ISD::BR_CC, ISD::SELECT_CC, ISD::ConstantPool,
1611 // Vector:
1612 ISD::BUILD_VECTOR, ISD::SCALAR_TO_VECTOR,
1613 ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT,
1614 ISD::EXTRACT_SUBVECTOR, ISD::INSERT_SUBVECTOR,
1615 ISD::CONCAT_VECTORS, ISD::VECTOR_SHUFFLE
1616 };
1617
1618 for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
1619 for (unsigned VectExpOp : VectExpOps)
1620 setOperationAction(VectExpOp, VT, Expand);
1621
1622 // Expand all extending loads and truncating stores:
1623 for (MVT TargetVT : MVT::fixedlen_vector_valuetypes()) {
1624 if (TargetVT == VT)
1625 continue;
1626 setLoadExtAction(ISD::EXTLOAD, TargetVT, VT, Expand);
1627 setLoadExtAction(ISD::ZEXTLOAD, TargetVT, VT, Expand);
1628 setLoadExtAction(ISD::SEXTLOAD, TargetVT, VT, Expand);
1629 setTruncStoreAction(VT, TargetVT, Expand);
1630 }
1631
1632 // Normalize all inputs to SELECT to be vectors of i32.
1633 if (VT.getVectorElementType() != MVT::i32) {
1634 MVT VT32 = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
1635 setOperationAction(ISD::SELECT, VT, Promote);
1636 AddPromotedToType(ISD::SELECT, VT, VT32);
1637 }
1638 setOperationAction(ISD::SRA, VT, Custom);
1639 setOperationAction(ISD::SHL, VT, Custom);
1640 setOperationAction(ISD::SRL, VT, Custom);
1641 }
1642
1643 // Extending loads from (native) vectors of i8 into (native) vectors of i16
1644 // are legal.
1645 setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, MVT::v2i8, Legal);
1646 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, MVT::v2i8, Legal);
1647 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, MVT::v2i8, Legal);
1648 setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, MVT::v4i8, Legal);
1649 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, MVT::v4i8, Legal);
1650 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, MVT::v4i8, Legal);
1651
1652 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal);
1653 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
1654 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
1655
1656 // Types natively supported:
1657 for (MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8,
1658 MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
1659 setOperationAction(ISD::BUILD_VECTOR, NativeVT, Custom);
1660 setOperationAction(ISD::EXTRACT_VECTOR_ELT, NativeVT, Custom);
1661 setOperationAction(ISD::INSERT_VECTOR_ELT, NativeVT, Custom);
1662 setOperationAction(ISD::EXTRACT_SUBVECTOR, NativeVT, Custom);
1663 setOperationAction(ISD::INSERT_SUBVECTOR, NativeVT, Custom);
1664 setOperationAction(ISD::CONCAT_VECTORS, NativeVT, Custom);
1665
1666 setOperationAction(ISD::ADD, NativeVT, Legal);
1667 setOperationAction(ISD::SUB, NativeVT, Legal);
1668 setOperationAction(ISD::MUL, NativeVT, Legal);
1669 setOperationAction(ISD::AND, NativeVT, Legal);
1670 setOperationAction(ISD::OR, NativeVT, Legal);
1671 setOperationAction(ISD::XOR, NativeVT, Legal);
1672 }
1673
1674 // Custom lower unaligned loads.
1675 // Also, for both loads and stores, verify the alignment of the address
1676 // in case it is a compile-time constant. This is a usability feature to
1677 // provide a meaningful error message to users.
1678 for (MVT VT : {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8,
1679 MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
1680 setOperationAction(ISD::LOAD, VT, Custom);
1681 setOperationAction(ISD::STORE, VT, Custom);
1682 }
1683
1684 for (MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32,
1685 MVT::v4i16}) {
1686 setCondCodeAction(ISD::SETNE, VT, Expand);
1687 setCondCodeAction(ISD::SETLE, VT, Expand);
1688 setCondCodeAction(ISD::SETGE, VT, Expand);
1689 setCondCodeAction(ISD::SETLT, VT, Expand);
1690 setCondCodeAction(ISD::SETULE, VT, Expand);
1691 setCondCodeAction(ISD::SETUGE, VT, Expand);
1692 setCondCodeAction(ISD::SETULT, VT, Expand);
1693 }
1694
1695 // Custom-lower bitcasts from i8 to v8i1.
1696 setOperationAction(ISD::BITCAST, MVT::i8, Custom);
1697 setOperationAction(ISD::SETCC, MVT::v2i16, Custom);
1698 setOperationAction(ISD::VSELECT, MVT::v4i8, Custom);
1699 setOperationAction(ISD::VSELECT, MVT::v2i16, Custom);
1700 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i8, Custom);
1701 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
1702 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
1703
1704 // V5+.
1705 setOperationAction(ISD::FMA, MVT::f64, Expand);
1706 setOperationAction(ISD::FADD, MVT::f64, Expand);
1707 setOperationAction(ISD::FSUB, MVT::f64, Expand);
1708 setOperationAction(ISD::FMUL, MVT::f64, Expand);
1709
1710 setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
1711 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
1712
1713 setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
1714 setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
1715 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
1716 setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
1717 setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
1718 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
1719 setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
1720 setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
1721 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
1722 setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
1723 setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
1724 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
1725
1726 // Handling of indexed loads/stores: default is "expand".
1727 //
1728 for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64,
1729 MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}) {
1730 setIndexedLoadAction(ISD::POST_INC, VT, Legal);
1731 setIndexedStoreAction(ISD::POST_INC, VT, Legal);
1732 }
1733
1734 // Subtarget-specific operation actions.
1735 //
1736 if (Subtarget.hasV60Ops()) {
1737 setOperationAction(ISD::ROTL, MVT::i32, Legal);
1738 setOperationAction(ISD::ROTL, MVT::i64, Legal);
1739 setOperationAction(ISD::ROTR, MVT::i32, Legal);
1740 setOperationAction(ISD::ROTR, MVT::i64, Legal);
1741 }
1742 if (Subtarget.hasV66Ops()) {
1743 setOperationAction(ISD::FADD, MVT::f64, Legal);
1744 setOperationAction(ISD::FSUB, MVT::f64, Legal);
1745 }
1746 if (Subtarget.hasV67Ops()) {
1747 setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
1748 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
1749 setOperationAction(ISD::FMUL, MVT::f64, Legal);
1750 }
1751
1752 setTargetDAGCombine(ISD::VSELECT);
1753
1754 if (Subtarget.useHVXOps())
1755 initializeHVXLowering();
1756
1757 computeRegisterProperties(&HRI);
1758
1759 //
1760 // Library calls for unsupported operations
1761 //
1762 bool FastMath = EnableFastMath;
1763
1764 setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
1765 setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
1766 setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
1767 setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
1768 setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
1769 setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
1770 setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
1771 setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
1772
1773 setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
1774 setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
1775 setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
1776 setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
1777 setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
1778 setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
1779
1780 // This is the only fast library function for sqrtd.
1781 if (FastMath)
1782 setLibcallName(RTLIB::SQRT_F64, "__hexagon_fast2_sqrtdf2");
1783
1784 // Prefix is: nothing for "slow-math",
1785 // "fast2_" for V5+ fast-math double-precision
1786 // (actually, keep fast-math and fast-math2 separate for now)
1787 if (FastMath) {
1788 setLibcallName(RTLIB::ADD_F64, "__hexagon_fast_adddf3");
1789 setLibcallName(RTLIB::SUB_F64, "__hexagon_fast_subdf3");
1790 setLibcallName(RTLIB::MUL_F64, "__hexagon_fast_muldf3");
1791 setLibcallName(RTLIB::DIV_F64, "__hexagon_fast_divdf3");
1792 setLibcallName(RTLIB::DIV_F32, "__hexagon_fast_divsf3");
1793 } else {
1794 setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
1795 setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
1796 setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
1797 setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
1798 setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
1799 }
1800
1801 if (FastMath)
1802 setLibcallName(RTLIB::SQRT_F32, "__hexagon_fast2_sqrtf");
1803 else
1804 setLibcallName(RTLIB::SQRT_F32, "__hexagon_sqrtf");
1805
1806 // These cause problems when the shift amount is non-constant.
1807 setLibcallName(RTLIB::SHL_I128, nullptr);
1808 setLibcallName(RTLIB::SRL_I128, nullptr);
1809 setLibcallName(RTLIB::SRA_I128, nullptr);
1810}
1811
1812const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
1813 switch ((HexagonISD::NodeType)Opcode) {
1814 case HexagonISD::ADDC: return "HexagonISD::ADDC";
1815 case HexagonISD::SUBC: return "HexagonISD::SUBC";
1816 case HexagonISD::ALLOCA: return "HexagonISD::ALLOCA";
1817 case HexagonISD::AT_GOT: return "HexagonISD::AT_GOT";
1818 case HexagonISD::AT_PCREL: return "HexagonISD::AT_PCREL";
1819 case HexagonISD::BARRIER: return "HexagonISD::BARRIER";
1820 case HexagonISD::CALL: return "HexagonISD::CALL";
1821 case HexagonISD::CALLnr: return "HexagonISD::CALLnr";
1822 case HexagonISD::CALLR: return "HexagonISD::CALLR";
1823 case HexagonISD::COMBINE: return "HexagonISD::COMBINE";
1824 case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP";
1825 case HexagonISD::CONST32: return "HexagonISD::CONST32";
1826 case HexagonISD::CP: return "HexagonISD::CP";
1827 case HexagonISD::DCFETCH: return "HexagonISD::DCFETCH";
1828 case HexagonISD::EH_RETURN: return "HexagonISD::EH_RETURN";
1829 case HexagonISD::TSTBIT: return "HexagonISD::TSTBIT";
1830 case HexagonISD::EXTRACTU: return "HexagonISD::EXTRACTU";
1831 case HexagonISD::INSERT: return "HexagonISD::INSERT";
1832 case HexagonISD::JT: return "HexagonISD::JT";
1833 case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
1834 case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
1835 case HexagonISD::VASL: return "HexagonISD::VASL";
1836 case HexagonISD::VASR: return "HexagonISD::VASR";
1837 case HexagonISD::VLSR: return "HexagonISD::VLSR";
1838 case HexagonISD::VSPLAT: return "HexagonISD::VSPLAT";
1839 case HexagonISD::VEXTRACTW: return "HexagonISD::VEXTRACTW";
1840 case HexagonISD::VINSERTW0: return "HexagonISD::VINSERTW0";
1841 case HexagonISD::VROR: return "HexagonISD::VROR";
1842 case HexagonISD::READCYCLE: return "HexagonISD::READCYCLE";
1843 case HexagonISD::PTRUE: return "HexagonISD::PTRUE";
1844 case HexagonISD::PFALSE: return "HexagonISD::PFALSE";
1845 case HexagonISD::VZERO: return "HexagonISD::VZERO";
1846 case HexagonISD::VSPLATW: return "HexagonISD::VSPLATW";
1847 case HexagonISD::D2P: return "HexagonISD::D2P";
1848 case HexagonISD::P2D: return "HexagonISD::P2D";
1849 case HexagonISD::V2Q: return "HexagonISD::V2Q";
1850 case HexagonISD::Q2V: return "HexagonISD::Q2V";
1851 case HexagonISD::QCAT: return "HexagonISD::QCAT";
1852 case HexagonISD::QTRUE: return "HexagonISD::QTRUE";
1853 case HexagonISD::QFALSE: return "HexagonISD::QFALSE";
1854 case HexagonISD::TYPECAST: return "HexagonISD::TYPECAST";
1855 case HexagonISD::VALIGN: return "HexagonISD::VALIGN";
1856 case HexagonISD::VALIGNADDR: return "HexagonISD::VALIGNADDR";
1857 case HexagonISD::OP_END: break;
1858 }
1859 return nullptr;
1860}
1861
1862void
1863HexagonTargetLowering::validateConstPtrAlignment(SDValue Ptr, const SDLoc &dl,
1864 unsigned NeedAlign) const {
1865 auto *CA = dyn_cast<ConstantSDNode>(Ptr);
1866 if (!CA)
1867 return;
1868 unsigned Addr = CA->getZExtValue();
1869 unsigned HaveAlign = Addr != 0 ? 1u << countTrailingZeros(Addr) : NeedAlign;
1870 if (HaveAlign < NeedAlign) {
1871 std::string ErrMsg;
1872 raw_string_ostream O(ErrMsg);
1873 O << "Misaligned constant address: " << format_hex(Addr, 10)
1874 << " has alignment " << HaveAlign
1875 << ", but the memory access requires " << NeedAlign;
1876 if (DebugLoc DL = dl.getDebugLoc())
1877 DL.print(O << ", at ");
1878 report_fatal_error(O.str());
1879 }
1880}
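[Editor's note] The alignment probe above uses a standard trick: the alignment of a nonzero constant address is the largest power of two dividing it, i.e. 1 shifted left by the number of trailing zero bits. A self-contained illustration (using the GCC/Clang builtin in place of LLVM's countTrailingZeros):

  #include <cstdint>

  unsigned alignmentOf(uint32_t Addr, unsigned NeedAlign) {
    // Address 0 is treated as sufficiently aligned, as in the code above.
    return Addr != 0 ? 1u << __builtin_ctz(Addr) : NeedAlign;
  }
  // alignmentOf(0x1004, 8) == 4, so an 8-byte access at 0x1004 is
  // misaligned and would take the report_fatal_error path.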
1881
1882// Bit-reverse Load Intrinsic: Check if the instruction is a bit reverse load
1883// intrinsic.
1884static bool isBrevLdIntrinsic(const Value *Inst) {
1885 unsigned ID = cast<IntrinsicInst>(Inst)->getIntrinsicID();
1886 return (ID == Intrinsic::hexagon_L2_loadrd_pbr ||
1887 ID == Intrinsic::hexagon_L2_loadri_pbr ||
1888 ID == Intrinsic::hexagon_L2_loadrh_pbr ||
1889 ID == Intrinsic::hexagon_L2_loadruh_pbr ||
1890 ID == Intrinsic::hexagon_L2_loadrb_pbr ||
1891 ID == Intrinsic::hexagon_L2_loadrub_pbr);
1892}
1893
1894// Bit-reverse Load Intrinsic: Crawl up and figure out the object from the
1895// previous instruction. So far we only handle bitcast, extractvalue, and
1896// bit-reverse load intrinsic instructions. Should we handle CGEP?
1897static Value *getBrevLdObject(Value *V) {
1898 if (Operator::getOpcode(V) == Instruction::ExtractValue ||
1899 Operator::getOpcode(V) == Instruction::BitCast)
1900 V = cast<Operator>(V)->getOperand(0);
1901 else if (isa<IntrinsicInst>(V) && isBrevLdIntrinsic(V))
1902 V = cast<Instruction>(V)->getOperand(0);
1903 return V;
1904}
1905
1906// Bit-reverse Load Intrinsic: For a PHI Node return either an incoming edge or
1907// a back edge. If the back edge comes from the intrinsic itself, the incoming
1908// edge is returned.
1909static Value *returnEdge(const PHINode *PN, Value *IntrBaseVal) {
1910 const BasicBlock *Parent = PN->getParent();
1911 int Idx = -1;
1912 for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i) {
1913 BasicBlock *Blk = PN->getIncomingBlock(i);
1914 // Determine if the back edge originates from the intrinsic.
1915 if (Blk == Parent) {
1916 Value *BackEdgeVal = PN->getIncomingValue(i);
1917 Value *BaseVal;
1918 // Loop until we return the same Value or we hit the IntrBaseVal.
1919 do {
1920 BaseVal = BackEdgeVal;
1921 BackEdgeVal = getBrevLdObject(BackEdgeVal);
1922 } while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal));
1923 // If the getBrevLdObject returns IntrBaseVal, we should return the
1924 // incoming edge.
1925 if (IntrBaseVal == BackEdgeVal)
1926 continue;
1927 Idx = i;
1928 break;
1929 } else // Set the node to incoming edge.
1930 Idx = i;
1931 }
1932 assert(Idx >= 0 && "Unexpected index to incoming argument in PHI");
1933 return PN->getIncomingValue(Idx);
1934}
1935
1936// Bit-reverse Load Intrinsic: Figure out the underlying object the base
1937// pointer points to, for the bit-reverse load intrinsic. Setting this in
1938// the memoperand might help alias analysis figure out the dependencies.
1939static Value *getUnderLyingObjectForBrevLdIntr(Value *V) {
1940 Value *IntrBaseVal = V;
1941 Value *BaseVal;
1942 // Loop until we return the same Value, which means we have either
1943 // figured out the object or hit a PHI.
1944 do {
1945 BaseVal = V;
1946 V = getBrevLdObject(V);
1947 } while (BaseVal != V);
1948
1949 // Identify the object from PHINode.
1950 if (const PHINode *PN = dyn_cast<PHINode>(V))
1951 return returnEdge(PN, IntrBaseVal);
1952 // For non-PHI nodes, the object is the last value returned by getBrevLdObject.
1953 else
1954 return V;
1955}
1956
1957/// Given an intrinsic, checks if on the target the intrinsic will need to map
1958/// to a MemIntrinsicNode (touches memory). If this is the case, it returns
1959/// true and stores the intrinsic information into the IntrinsicInfo that was
1960/// passed to the function.
1961bool HexagonTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
1962 const CallInst &I,
1963 MachineFunction &MF,
1964 unsigned Intrinsic) const {
1965 switch (Intrinsic) {
1966 case Intrinsic::hexagon_L2_loadrd_pbr:
1967 case Intrinsic::hexagon_L2_loadri_pbr:
1968 case Intrinsic::hexagon_L2_loadrh_pbr:
1969 case Intrinsic::hexagon_L2_loadruh_pbr:
1970 case Intrinsic::hexagon_L2_loadrb_pbr:
1971 case Intrinsic::hexagon_L2_loadrub_pbr: {
1972 Info.opc = ISD::INTRINSIC_W_CHAIN;
1973 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
1974 auto &Cont = I.getCalledFunction()->getParent()->getContext();
1975 // The intrinsic function call is of the form { ElTy, i8* }
1976 // @llvm.hexagon.L2.loadXX.pbr(i8*, i32). The pointer and memory access type
1977 // should be derived from ElTy.
1978 Type *ElTy = I.getCalledFunction()->getReturnType()->getStructElementType(0);
1979 Info.memVT = MVT::getVT(ElTy);
1980 llvm::Value *BasePtrVal = I.getOperand(0);
1981 Info.ptrVal = getUnderLyingObjectForBrevLdIntr(BasePtrVal);
1982 // The offset value comes through the Modifier register. For now, assume the
1983 // offset is 0.
1984 Info.offset = 0;
1985 Info.align =
1986 MaybeAlign(DL.getABITypeAlignment(Info.memVT.getTypeForEVT(Cont)));
1987 Info.flags = MachineMemOperand::MOLoad;
1988 return true;
1989 }
1990 case Intrinsic::hexagon_V6_vgathermw:
1991 case Intrinsic::hexagon_V6_vgathermw_128B:
1992 case Intrinsic::hexagon_V6_vgathermh:
1993 case Intrinsic::hexagon_V6_vgathermh_128B:
1994 case Intrinsic::hexagon_V6_vgathermhw:
1995 case Intrinsic::hexagon_V6_vgathermhw_128B:
1996 case Intrinsic::hexagon_V6_vgathermwq:
1997 case Intrinsic::hexagon_V6_vgathermwq_128B:
1998 case Intrinsic::hexagon_V6_vgathermhq:
1999 case Intrinsic::hexagon_V6_vgathermhq_128B:
2000 case Intrinsic::hexagon_V6_vgathermhwq:
2001 case Intrinsic::hexagon_V6_vgathermhwq_128B: {
2002 const Module &M = *I.getParent()->getParent()->getParent();
2003 Info.opc = ISD::INTRINSIC_W_CHAIN;
2004 Type *VecTy = I.getArgOperand(1)->getType();
2005 Info.memVT = MVT::getVT(VecTy);
2006 Info.ptrVal = I.getArgOperand(0);
2007 Info.offset = 0;
2008 Info.align =
2009 MaybeAlign(M.getDataLayout().getTypeAllocSizeInBits(VecTy) / 8);
2010 Info.flags = MachineMemOperand::MOLoad |
2011 MachineMemOperand::MOStore |
2012 MachineMemOperand::MOVolatile;
2013 return true;
2014 }
2015 default:
2016 break;
2017 }
2018 return false;
2019}
2020
2021bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
2022 return X.getValueType().isScalarInteger(); // 'tstbit'
2023}
2024
2025bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
2026 return isTruncateFree(EVT::getEVT(Ty1), EVT::getEVT(Ty2));
2027}
2028
2029bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
2030 if (!VT1.isSimple() || !VT2.isSimple())
2031 return false;
2032 return VT1.getSimpleVT() == MVT::i64 && VT2.getSimpleVT() == MVT::i32;
2033}
2034
2035bool HexagonTargetLowering::isFMAFasterThanFMulAndFAdd(
2036 const MachineFunction &MF, EVT VT) const {
2037 return isOperationLegalOrCustom(ISD::FMA, VT);
2038}
2039
2040// Should we expand the build vector with shuffles?
2041bool HexagonTargetLowering::shouldExpandBuildVectorWithShuffles(EVT VT,
2042 unsigned DefinedValues) const {
2043 return false;
2044}
2045
2046bool HexagonTargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask,
2047 EVT VT) const {
2048 return true;
2049}
2050
2051TargetLoweringBase::LegalizeTypeAction
2052HexagonTargetLowering::getPreferredVectorAction(MVT VT) const {
2053 unsigned VecLen = VT.getVectorNumElements();
2054 MVT ElemTy = VT.getVectorElementType();
2055
2056 if (VecLen == 1 || VT.isScalableVector())
2057 return TargetLoweringBase::TypeScalarizeVector;
2058
2059 if (Subtarget.useHVXOps()) {
2060 unsigned HwLen = Subtarget.getVectorLength();
2061 // If the size of VT is at least half of the hardware vector width,
2062 // widen the vector. Note: the threshold was not selected in
2063 // any scientific way.
2064 ArrayRef<MVT> Tys = Subtarget.getHVXElementTypes();
2065 if (llvm::find(Tys, ElemTy) != Tys.end()) {
2066 unsigned HwWidth = 8*HwLen;
2067 unsigned VecWidth = VT.getSizeInBits();
2068 if (VecWidth >= HwWidth/2 && VecWidth < HwWidth)
2069 return TargetLoweringBase::TypeWidenVector;
2070 }
2071 // Split vectors of i1 that correspond to (byte) vector pairs.
2072 if (ElemTy == MVT::i1 && VecLen == 2*HwLen)
2073 return TargetLoweringBase::TypeSplitVector;
2074 }
2075
2076 // Always widen (remaining) vectors of i1.
2077 if (ElemTy == MVT::i1)
2078 return TargetLoweringBase::TypeWidenVector;
2079
2080 return TargetLoweringBase::TypeSplitVector;
2081}
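[Editor's note] To make the HVX widening threshold concrete, here is the predicate in isolation with a worked example (the helper name is ours; HwWidth = 512 assumes the 64-byte HVX mode):

  bool widenInsteadOfSplit(unsigned VecWidth, unsigned HwWidth) {
    return VecWidth >= HwWidth / 2 && VecWidth < HwWidth;
  }
  // With HwWidth = 512: a 256-bit v8i32 is widened to a full HVX
  // vector, while a 128-bit v4i32 misses the threshold and is split.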
2082
2083std::pair<SDValue, int>
2084HexagonTargetLowering::getBaseAndOffset(SDValue Addr) const {
2085 if (Addr.getOpcode() == ISD::ADD) {
2086 SDValue Op1 = Addr.getOperand(1);
2087 if (auto *CN = dyn_cast<const ConstantSDNode>(Op1.getNode()))
2088 return { Addr.getOperand(0), CN->getSExtValue() };
2089 }
2090 return { Addr, 0 };
2091}
2092
2093// Lower a vector shuffle (V1, V2, V3). V1 and V2 are the two vectors
2094// to select data from, V3 is the permutation.
2095SDValue
2096HexagonTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
2097 const {
2098 const auto *SVN = cast<ShuffleVectorSDNode>(Op);
2099 ArrayRef<int> AM = SVN->getMask();
2100 assert(AM.size() <= 8 && "Unexpected shuffle mask");
2101 unsigned VecLen = AM.size();
2102
2103 MVT VecTy = ty(Op);
2104 assert(!Subtarget.isHVXVectorType(VecTy, true) &&
2105        "HVX shuffles should be legal");
2106 assert(VecTy.getSizeInBits() <= 64 && "Unexpected vector length");
2107
2108 SDValue Op0 = Op.getOperand(0);
2109 SDValue Op1 = Op.getOperand(1);
2110 const SDLoc &dl(Op);
2111
2112 // If the inputs are not the same as the output, bail. This is not an
2113 // error situation, but complicates the handling and the default expansion
2114 // (into BUILD_VECTOR) should be adequate.
2115 if (ty(Op0) != VecTy || ty(Op1) != VecTy)
2116 return SDValue();
2117
2118 // Normalize the mask so that the first non-negative index comes from
2119 // the first operand.
2120 SmallVector<int,8> Mask(AM.begin(), AM.end());
2121 unsigned F = llvm::find_if(AM, [](int M) { return M >= 0; }) - AM.data();
2122 if (F == AM.size())
2123 return DAG.getUNDEF(VecTy);
2124 if (AM[F] >= int(VecLen)) {
2125 ShuffleVectorSDNode::commuteMask(Mask);
2126 std::swap(Op0, Op1);
2127 }
2128
2129 // Express the shuffle mask in terms of bytes.
2130 SmallVector<int,8> ByteMask;
2131 unsigned ElemBytes = VecTy.getVectorElementType().getSizeInBits() / 8;
2132 for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
2133 int M = Mask[i];
2134 if (M < 0) {
2135 for (unsigned j = 0; j != ElemBytes; ++j)
2136 ByteMask.push_back(-1);
2137 } else {
2138 for (unsigned j = 0; j != ElemBytes; ++j)
2139 ByteMask.push_back(M*ElemBytes + j);
2140 }
2141 }
2142 assert(ByteMask.size() <= 8);
2143
2144 // All non-undef (non-negative) indexes are well within [0..127], so they
2145 // fit in a single byte. Build two 64-bit words:
2146 // - MaskIdx where each byte is the corresponding index (for non-negative
2147 // indexes), and 0xFF for negative indexes, and
2148 // - MaskUnd that has 0xFF for each negative index.
2149 uint64_t MaskIdx = 0;
2150 uint64_t MaskUnd = 0;
2151 for (unsigned i = 0, e = ByteMask.size(); i != e; ++i) {
2152 unsigned S = 8*i;
2153 uint64_t M = ByteMask[i] & 0xFF;
2154 if (M == 0xFF)
2155 MaskUnd |= M << S;
2156 MaskIdx |= M << S;
2157 }
2158
2159 if (ByteMask.size() == 4) {
2160 // Identity.
2161 if (MaskIdx == (0x03020100 | MaskUnd))
2162 return Op0;
2163 // Byte swap.
2164 if (MaskIdx == (0x00010203 | MaskUnd)) {
2165 SDValue T0 = DAG.getBitcast(MVT::i32, Op0);
2166 SDValue T1 = DAG.getNode(ISD::BSWAP, dl, MVT::i32, T0);
2167 return DAG.getBitcast(VecTy, T1);
2168 }
2169
2170 // Byte packs.
2171 SDValue Concat10 = DAG.getNode(HexagonISD::COMBINE, dl,
2172 typeJoin({ty(Op1), ty(Op0)}), {Op1, Op0});
2173 if (MaskIdx == (0x06040200 | MaskUnd))
2174 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat10}, DAG);
2175 if (MaskIdx == (0x07050301 | MaskUnd))
2176 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat10}, DAG);
2177
2178 SDValue Concat01 = DAG.getNode(HexagonISD::COMBINE, dl,
2179 typeJoin({ty(Op0), ty(Op1)}), {Op0, Op1});
2180 if (MaskIdx == (0x02000604 | MaskUnd))
2181 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat01}, DAG);
2182 if (MaskIdx == (0x03010705 | MaskUnd))
2183 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat01}, DAG);
2184 }
2185
2186 if (ByteMask.size() == 8) {
2187 // Identity.
2188 if (MaskIdx == (0x0706050403020100ull | MaskUnd))
2189 return Op0;
2190 // Byte swap.
2191 if (MaskIdx == (0x0001020304050607ull | MaskUnd)) {
2192 SDValue T0 = DAG.getBitcast(MVT::i64, Op0);
2193 SDValue T1 = DAG.getNode(ISD::BSWAP, dl, MVT::i64, T0);
2194 return DAG.getBitcast(VecTy, T1);
2195 }
2196
2197 // Halfword picks.
2198 if (MaskIdx == (0x0d0c050409080100ull | MaskUnd))
2199 return getInstr(Hexagon::S2_shuffeh, dl, VecTy, {Op1, Op0}, DAG);
2200 if (MaskIdx == (0x0f0e07060b0a0302ull | MaskUnd))
2201 return getInstr(Hexagon::S2_shuffoh, dl, VecTy, {Op1, Op0}, DAG);
2202 if (MaskIdx == (0x0d0c090805040100ull | MaskUnd))
2203 return getInstr(Hexagon::S2_vtrunewh, dl, VecTy, {Op1, Op0}, DAG);
2204 if (MaskIdx == (0x0f0e0b0a07060302ull | MaskUnd))
2205 return getInstr(Hexagon::S2_vtrunowh, dl, VecTy, {Op1, Op0}, DAG);
2206 if (MaskIdx == (0x0706030205040100ull | MaskUnd)) {
2207 VectorPair P = opSplit(Op0, dl, DAG);
2208 return getInstr(Hexagon::S2_packhl, dl, VecTy, {P.second, P.first}, DAG);
2209 }
2210
2211 // Byte packs.
2212 if (MaskIdx == (0x0e060c040a020800ull | MaskUnd))
2213 return getInstr(Hexagon::S2_shuffeb, dl, VecTy, {Op1, Op0}, DAG);
2214 if (MaskIdx == (0x0f070d050b030901ull | MaskUnd))
2215 return getInstr(Hexagon::S2_shuffob, dl, VecTy, {Op1, Op0}, DAG);
2216 }
2217
2218 return SDValue();
2219}
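[Editor's note] The magic constants compared against MaskIdx above are just byte indexes packed little-endian into a word, with 0xFF in undef lanes. A standalone sketch of the encoding (the helper is ours, not LLVM API):

  #include <cstdint>
  #include <vector>

  uint64_t packByteMask(const std::vector<int> &ByteMask) {
    uint64_t MaskIdx = 0;
    for (unsigned i = 0, e = ByteMask.size(); i != e; ++i)
      MaskIdx |= (uint64_t(ByteMask[i]) & 0xFF) << (8 * i);
    return MaskIdx;
  }
  // packByteMask({0, 1, 2, 3}) == 0x03020100 -- the 4-byte identity;
  // packByteMask({3, 2, 1, 0}) == 0x00010203 -- the byte swap.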
2220
2221// Create a Hexagon-specific node for shifting a vector by an integer.
2222SDValue
2223HexagonTargetLowering::getVectorShiftByInt(SDValue Op, SelectionDAG &DAG)
2224 const {
2225 if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode())) {
2226 if (SDValue S = BVN->getSplatValue()) {
2227 unsigned NewOpc;
2228 switch (Op.getOpcode()) {
2229 case ISD::SHL:
2230 NewOpc = HexagonISD::VASL;
2231 break;
2232 case ISD::SRA:
2233 NewOpc = HexagonISD::VASR;
2234 break;
2235 case ISD::SRL:
2236 NewOpc = HexagonISD::VLSR;
2237 break;
2238 default:
2239 llvm_unreachable("Unexpected shift opcode");
2240 }
2241 return DAG.getNode(NewOpc, SDLoc(Op), ty(Op), Op.getOperand(0), S);
2242 }
2243 }
2244
2245 return SDValue();
2246}
2247
2248SDValue
2249HexagonTargetLowering::LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const {
2250 return getVectorShiftByInt(Op, DAG);
2251}
2252
2253SDValue
2254HexagonTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
2255 if (isa<ConstantSDNode>(Op.getOperand(1).getNode()))
2256 return Op;
2257 return SDValue();
2258}
2259
2260SDValue
2261HexagonTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
2262 MVT ResTy = ty(Op);
2263 SDValue InpV = Op.getOperand(0);
2264 MVT InpTy = ty(InpV);
2265 assert(ResTy.getSizeInBits() == InpTy.getSizeInBits());
2266 const SDLoc &dl(Op);
2267
2268 // Handle conversion from i8 to v8i1.
2269 if (InpTy == MVT::i8) {
2270 if (ResTy == MVT::v8i1) {
2271 SDValue Sc = DAG.getBitcast(tyScalar(InpTy), InpV);
2272 SDValue Ext = DAG.getZExtOrTrunc(Sc, dl, MVT::i32);
2273 return getInstr(Hexagon::C2_tfrrp, dl, ResTy, Ext, DAG);
2274 }
2275 return SDValue();
2276 }
2277
2278 return Op;
2279}
2280
2281bool
2282HexagonTargetLowering::getBuildVectorConstInts(ArrayRef<SDValue> Values,
2283 MVT VecTy, SelectionDAG &DAG,
2284 MutableArrayRef<ConstantInt*> Consts) const {
2285 MVT ElemTy = VecTy.getVectorElementType();
2286 unsigned ElemWidth = ElemTy.getSizeInBits();
2287 IntegerType *IntTy = IntegerType::get(*DAG.getContext(), ElemWidth);
2288 bool AllConst = true;
2289
2290 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
2291 SDValue V = Values[i];
2292 if (V.isUndef()) {
2293 Consts[i] = ConstantInt::get(IntTy, 0);
2294 continue;
2295 }
2296 // Make sure to always cast to IntTy.
2297 if (auto *CN = dyn_cast<ConstantSDNode>(V.getNode())) {
2298 const ConstantInt *CI = CN->getConstantIntValue();
2299 Consts[i] = ConstantInt::get(IntTy, CI->getValue().getSExtValue());
2300 } else if (auto *CN = dyn_cast<ConstantFPSDNode>(V.getNode())) {
2301 const ConstantFP *CF = CN->getConstantFPValue();
2302 APInt A = CF->getValueAPF().bitcastToAPInt();
2303 Consts[i] = ConstantInt::get(IntTy, A.getZExtValue());
2304 } else {
2305 AllConst = false;
2306 }
2307 }
2308 return AllConst;
2309}
2310
2311SDValue
2312HexagonTargetLowering::buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl,
2313 MVT VecTy, SelectionDAG &DAG) const {
2314 MVT ElemTy = VecTy.getVectorElementType();
2315 assert(VecTy.getVectorNumElements() == Elem.size());
2316
2317 SmallVector<ConstantInt*,4> Consts(Elem.size());
2318 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
2319
2320 unsigned First, Num = Elem.size();
2321 for (First = 0; First != Num; ++First)
2322 if (!isUndef(Elem[First]))
2323 break;
2324 if (First == Num)
2325 return DAG.getUNDEF(VecTy);
2326
2327 if (AllConst &&
2328 llvm::all_of(Consts, [](ConstantInt *CI) { return CI->isZero(); }))
2329 return getZero(dl, VecTy, DAG);
2330
2331 if (ElemTy == MVT::i16) {
2332 assert(Elem.size() == 2);
2333 if (AllConst) {
2334 uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) |
2335 Consts[1]->getZExtValue() << 16;
2336 return DAG.getBitcast(MVT::v2i16, DAG.getConstant(V, dl, MVT::i32));
2337 }
2338 SDValue N = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32,
2339 {Elem[1], Elem[0]}, DAG);
2340 return DAG.getBitcast(MVT::v2i16, N);
2341 }
2342
2343 if (ElemTy == MVT::i8) {
2344 // First try generating a constant.
2345 if (AllConst) {
2346      int32_t V = (Consts[0]->getZExtValue() & 0xFF) |
2347                  (Consts[1]->getZExtValue() & 0xFF) << 8 |
2348                  (Consts[2]->getZExtValue() & 0xFF) << 16 |
2349                  Consts[3]->getZExtValue() << 24;
2350 return DAG.getBitcast(MVT::v4i8, DAG.getConstant(V, dl, MVT::i32));
2351 }
2352
2353 // Then try splat.
2354 bool IsSplat = true;
2355 for (unsigned i = 0; i != Num; ++i) {
2356 if (i == First)
2357 continue;
2358 if (Elem[i] == Elem[First] || isUndef(Elem[i]))
2359 continue;
2360 IsSplat = false;
2361 break;
2362 }
2363 if (IsSplat) {
2364 // Legalize the operand to VSPLAT.
2365 SDValue Ext = DAG.getZExtOrTrunc(Elem[First], dl, MVT::i32);
2366 return DAG.getNode(HexagonISD::VSPLAT, dl, VecTy, Ext);
2367 }
2368
2369 // Generate
2370 // (zxtb(Elem[0]) | (zxtb(Elem[1]) << 8)) |
2371 // (zxtb(Elem[2]) | (zxtb(Elem[3]) << 8)) << 16
2372 assert(Elem.size() == 4);
2373 SDValue Vs[4];
2374 for (unsigned i = 0; i != 4; ++i) {
2375 Vs[i] = DAG.getZExtOrTrunc(Elem[i], dl, MVT::i32);
2376 Vs[i] = DAG.getZeroExtendInReg(Vs[i], dl, MVT::i8);
2377 }
2378 SDValue S8 = DAG.getConstant(8, dl, MVT::i32);
2379 SDValue T0 = DAG.getNode(ISD::SHL, dl, MVT::i32, {Vs[1], S8});
2380 SDValue T1 = DAG.getNode(ISD::SHL, dl, MVT::i32, {Vs[3], S8});
2381 SDValue B0 = DAG.getNode(ISD::OR, dl, MVT::i32, {Vs[0], T0});
2382 SDValue B1 = DAG.getNode(ISD::OR, dl, MVT::i32, {Vs[2], T1});
2383
2384 SDValue R = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {B1, B0}, DAG);
2385 return DAG.getBitcast(MVT::v4i8, R);
2386 }
2387
2388#ifndef NDEBUG
2389 dbgs() << "VecTy: " << EVT(VecTy).getEVTString() << '\n';
2390#endif
2391 llvm_unreachable("Unexpected vector element type");
2392}
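[Editor's note] In the all-constant v2i16 and v4i8 paths above, the whole vector literal collapses to a single i32 materialization. A worked example of the packing arithmetic (helper names are ours):

  #include <cstdint>

  uint32_t packV2I16(uint16_t Lo, uint16_t Hi) {
    return uint32_t(Lo) | (uint32_t(Hi) << 16);
  }

  uint32_t packV4I8(uint8_t B0, uint8_t B1, uint8_t B2, uint8_t B3) {
    return uint32_t(B0) | (uint32_t(B1) << 8) |
           (uint32_t(B2) << 16) | (uint32_t(B3) << 24);
  }
  // packV2I16(0x1234, 0x5678) == 0x56781234; bitcast back to v2i16,
  // lane 0 holds 0x1234 and lane 1 holds 0x5678.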
2393
2394SDValue
2395HexagonTargetLowering::buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl,
2396 MVT VecTy, SelectionDAG &DAG) const {
2397 MVT ElemTy = VecTy.getVectorElementType();
2398 assert(VecTy.getVectorNumElements() == Elem.size());
2399
2400 SmallVector<ConstantInt*,8> Consts(Elem.size());
2401 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
2402
2403 unsigned First, Num = Elem.size();
2404 for (First = 0; First != Num; ++First)
2405 if (!isUndef(Elem[First]))
2406 break;
2407 if (First == Num)
2408 return DAG.getUNDEF(VecTy);
2409
2410 if (AllConst &&
2411 llvm::all_of(Consts, [](ConstantInt *CI) { return CI->isZero(); }))
2412 return getZero(dl, VecTy, DAG);
2413
2414 // First try splat if possible.
2415 if (ElemTy == MVT::i16) {
2416 bool IsSplat = true;
2417 for (unsigned i = 0; i != Num; ++i) {
2418 if (i == First)
2419 continue;
2420 if (Elem[i] == Elem[First] || isUndef(Elem[i]))
2421 continue;
2422 IsSplat = false;
2423 break;
2424 }
2425 if (IsSplat) {
2426 // Legalize the operand to VSPLAT.
2427 SDValue Ext = DAG.getZExtOrTrunc(Elem[First], dl, MVT::i32);
2428 return DAG.getNode(HexagonISD::VSPLAT, dl, VecTy, Ext);
2429 }
2430 }
2431
2432 // Then try constant.
2433 if (AllConst) {
2434 uint64_t Val = 0;
2435 unsigned W = ElemTy.getSizeInBits();
2436 uint64_t Mask = (ElemTy == MVT::i8) ? 0xFFull
2437 : (ElemTy == MVT::i16) ? 0xFFFFull : 0xFFFFFFFFull;
2438 for (unsigned i = 0; i != Num; ++i)
2439 Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() & Mask);
2440 SDValue V0 = DAG.getConstant(Val, dl, MVT::i64);
2441 return DAG.getBitcast(VecTy, V0);
2442 }
2443
2444 // Build two 32-bit vectors and concatenate.
2445 MVT HalfTy = MVT::getVectorVT(ElemTy, Num/2);
2446 SDValue L = (ElemTy == MVT::i32)
2447 ? Elem[0]
2448 : buildVector32(Elem.take_front(Num/2), dl, HalfTy, DAG);
2449 SDValue H = (ElemTy == MVT::i32)
2450 ? Elem[1]
2451 : buildVector32(Elem.drop_front(Num/2), dl, HalfTy, DAG);
2452 return DAG.getNode(HexagonISD::COMBINE, dl, VecTy, {H, L});
2453}
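[Editor's note] The all-constant branch above folds every lane into one i64 with the (Val << W) | ... loop. A worked instance for v4i16 (standalone, not LLVM code):

  #include <cstdint>

  uint64_t packV4I16(const uint16_t Lanes[4]) {
    uint64_t Val = 0;
    for (unsigned i = 0; i != 4; ++i)  // same Num-1-i ordering as above
      Val = (Val << 16) | Lanes[4 - 1 - i];
    return Val;
  }
  // For lanes {1, 2, 3, 4} this yields 0x0004000300020001; after the
  // bitcast to v4i16, lane 0 sits in the low halfword.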
2454
2455SDValue
2456HexagonTargetLowering::extractVector(SDValue VecV, SDValue IdxV,
2457 const SDLoc &dl, MVT ValTy, MVT ResTy,
2458 SelectionDAG &DAG) const {
2459 MVT VecTy = ty(VecV);
2460 assert(!ValTy.isVector() ||
2461        VecTy.getVectorElementType() == ValTy.getVectorElementType());
2462 unsigned VecWidth = VecTy.getSizeInBits();
2463 unsigned ValWidth = ValTy.getSizeInBits();
2464 unsigned ElemWidth = VecTy.getVectorElementType().getSizeInBits();
2465 assert((VecWidth % ElemWidth) == 0);
2466 auto *IdxN = dyn_cast<ConstantSDNode>(IdxV);
2467
2468 // Special case for v{8,4,2}i1 (the only boolean vectors legal in Hexagon
2469 // without any coprocessors).
2470 if (ElemWidth == 1) {
2471 assert(VecWidth == VecTy.getVectorNumElements() && "Sanity failure");
2472 assert(VecWidth == 8 || VecWidth == 4 || VecWidth == 2);
2473 // Check if this is an extract of the lowest bit.
2474 if (IdxN) {
2475 // Extracting the lowest bit is a no-op, but it changes the type,
2476 // so it must be kept as an operation to avoid errors related to
2477 // type mismatches.
2478 if (IdxN->isNullValue() && ValTy.getSizeInBits() == 1)
2479 return DAG.getNode(HexagonISD::TYPECAST, dl, MVT::i1, VecV);
2480 }
2481
2482 // If the value extracted is a single bit, use tstbit.
2483 if (ValWidth == 1) {
2484 SDValue A0 = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);
2485 SDValue M0 = DAG.getConstant(8 / VecWidth, dl, MVT::i32);
2486 SDValue I0 = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, M0);
2487 return DAG.getNode(HexagonISD::TSTBIT, dl, MVT::i1, A0, I0);
2488 }
2489
2490 // Each bool vector (v2i1, v4i1, v8i1) always occupies 8 bits in
2491 // a predicate register. The elements of the vector are repeated
2492 // in the register (if necessary) so that the total number is 8.
2493 // The extracted subvector will need to be expanded in such a way.
2494 unsigned Scale = VecWidth / ValWidth;
2495
2496 // Generate (p2d VecV) >> 8*Idx to move the interesting bytes to
2497 // position 0.
2498 assert(ty(IdxV) == MVT::i32);
2499 unsigned VecRep = 8 / VecWidth;
2500 SDValue S0 = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
2501 DAG.getConstant(8*VecRep, dl, MVT::i32));
2502 SDValue T0 = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, VecV);
2503 SDValue T1 = DAG.getNode(ISD::SRL, dl, MVT::i64, T0, S0);
2504 while (Scale > 1) {
2505 // The longest possible subvector is at most 32 bits, so it is always
2506 // contained in the low subregister.
2507 T1 = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, T1);
2508 T1 = expandPredicate(T1, dl, DAG);
2509 Scale /= 2;
2510 }
2511
2512 return DAG.getNode(HexagonISD::D2P, dl, ResTy, T1);
2513 }
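A hedged walk-through of the subvector path above, with shapes assumed for illustration (extracting a v2i1 subvector at index Idx from a v4i1 vector):

// VecWidth = 4, ValWidth = 2  =>  Scale = 2, VecRep = 8/4 = 2
//   T0 = P2D(VecV)                  ; 64-bit expansion of the predicate
//   S0 = Idx * 8*VecRep = Idx * 16  ; move the wanted bytes to position 0
//   T1 = T0 >> S0
// One iteration of the while-loop then re-expands the low half:
//   T1 = expandPredicate(lo32(T1))  ; Scale: 2 -> 1
//   result = D2P(T1)                ; back to a v2i1 predicate register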
2514
2515 assert(VecWidth == 32 || VecWidth == 64);
2516
2517 // Cast everything to scalar integer types.
2518 MVT ScalarTy = tyScalar(VecTy);
2519 VecV = DAG.getBitcast(ScalarTy, VecV);
2520
2521 SDValue WidthV = DAG.getConstant(ValWidth, dl, MVT::i32);
2522 SDValue ExtV;
2523
2524 if (IdxN) {
2525 unsigned Off = IdxN->getZExtValue() * ElemWidth;
2526 if (VecWidth == 64 && ValWidth == 32) {
2527 assert(Off == 0 || Off == 32);
2528 unsigned SubIdx = Off == 0 ? Hexagon::isub_lo : Hexagon::isub_hi;
2529 ExtV = DAG.getTargetExtractSubreg(SubIdx, dl, MVT::i32, VecV);
2530 } else if (Off == 0 && (ValWidth % 8) == 0) {
2531 ExtV = DAG.getZeroExtendInReg(VecV, dl, tyScalar(ValTy));
2532 } else {
2533 SDValue OffV = DAG.getConstant(Off, dl, MVT::i32);
2534 // The return type of EXTRACTU must be the same as the type of the
2535 // input vector.
2536 ExtV = DAG.getNode(HexagonISD::EXTRACTU, dl, ScalarTy,
2537 {VecV, WidthV, OffV});
2538 }
2539 } else {
2540 if (ty(IdxV) != MVT::i32)
2541 IdxV = DAG.getZExtOrTrunc(IdxV, dl, MVT::i32);
2542 SDValue OffV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
2543 DAG.getConstant(ElemWidth, dl, MVT::i32));
2544 ExtV = DAG.getNode(HexagonISD::EXTRACTU, dl, ScalarTy,
2545 {VecV, WidthV, OffV});
2546 }
2547
2548 // Cast ExtV to the requested result type.
2549 ExtV = DAG.getZExtOrTrunc(ExtV, dl, tyScalar(ResTy));
2550 ExtV = DAG.getBitcast(ResTy, ExtV);
2551 return ExtV;
2552}
2553
2554SDValue
2555HexagonTargetLowering::insertVector(SDValue VecV, SDValue ValV, SDValue IdxV,
2556 const SDLoc &dl, MVT ValTy,
2557 SelectionDAG &DAG) const {
2558 MVT VecTy = ty(VecV);
2559 if (VecTy.getVectorElementType() == MVT::i1) {
9: Taking false branch
2560 MVT ValTy = ty(ValV);
2561 assert(ValTy.getVectorElementType() == MVT::i1);
2562 SDValue ValR = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, ValV);
2563 unsigned VecLen = VecTy.getVectorNumElements();
2564 unsigned Scale = VecLen / ValTy.getVectorNumElements();
2565 assert(Scale > 1);
2566
2567 for (unsigned R = Scale; R > 1; R /= 2) {
2568 ValR = contractPredicate(ValR, dl, DAG);
2569 ValR = DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64,
2570 DAG.getUNDEF(MVT::i32), ValR);
2571 }
2572 // The longest possible subvector is at most 32 bits, so it is always
2573 // contained in the low subregister.
2574 ValR = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, ValR);
2575
2576 unsigned ValBytes = 64 / Scale;
2577 SDValue Width = DAG.getConstant(ValBytes*8, dl, MVT::i32);
2578 SDValue Idx = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
2579 DAG.getConstant(8, dl, MVT::i32));
2580 SDValue VecR = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, VecV);
2581 SDValue Ins = DAG.getNode(HexagonISD::INSERT, dl, MVT::i32,
2582 {VecR, ValR, Width, Idx});
2583 return DAG.getNode(HexagonISD::D2P, dl, VecTy, Ins);
2584 }
2585
2586 unsigned VecWidth = VecTy.getSizeInBits();
2587 unsigned ValWidth = ValTy.getSizeInBits();
2588 assert(VecWidth == 32 || VecWidth == 64);
10: Assuming 'VecWidth' is not equal to 32
11: Assuming 'VecWidth' is equal to 64
12: '?' condition is true
2589 assert((VecWidth % ValWidth) == 0);
13: Assuming the condition is true
14: '?' condition is true
2590
2591 // Cast everything to scalar integer types.
2592 MVT ScalarTy = MVT::getIntegerVT(VecWidth);
2593 // The actual type of ValV may be different than ValTy (which is related
2594 // to the vector type).
2595 unsigned VW = ty(ValV).getSizeInBits();
2596 ValV = DAG.getBitcast(MVT::getIntegerVT(VW), ValV);
2597 VecV = DAG.getBitcast(ScalarTy, VecV);
2598 if (VW != VecWidth)
14.1: 'VW' is equal to 'VecWidth'
15: Taking false branch
2599 ValV = DAG.getAnyExtOrTrunc(ValV, dl, ScalarTy);
2600
2601 SDValue WidthV = DAG.getConstant(ValWidth, dl, MVT::i32);
2602 SDValue InsV;
2603
2604 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(IdxV)) {
16: Assuming 'C' is null
17: Assuming pointer value is null
18: Taking false branch
2605 unsigned W = C->getZExtValue() * ValWidth;
2606 SDValue OffV = DAG.getConstant(W, dl, MVT::i32);
2607 InsV = DAG.getNode(HexagonISD::INSERT, dl, ScalarTy,
2608 {VecV, ValV, WidthV, OffV});
2609 } else {
2610 if (ty(IdxV) != MVT::i32)
19: Value assigned to 'Op.Node'
20: Calling 'HexagonTargetLowering::ty'
2611 IdxV = DAG.getZExtOrTrunc(IdxV, dl, MVT::i32);
2612 SDValue OffV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, WidthV);
2613 InsV = DAG.getNode(HexagonISD::INSERT, dl, ScalarTy,
2614 {VecV, ValV, WidthV, OffV});
2615 }
2616
2617 return DAG.getNode(ISD::BITCAST, dl, VecTy, InsV);
2618}
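Summarizing the analyzer's path through this function: step 16 assumes the dyn_cast at line 2604 returns null, step 17 additionally assumes the node pointer inside IdxV is null, and step 20 then calls ty(IdxV) at line 2610, where that null node would be dereferenced. A minimal defensive sketch, purely illustrative and not a claim about the intended fix:

// Hypothetical guard ahead of the non-constant-index branch:
// if (!IdxV.getNode())
//   return SDValue(); // bail out instead of dereferencing a null node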
2619
2620SDValue
2621HexagonTargetLowering::expandPredicate(SDValue Vec32, const SDLoc &dl,
2622 SelectionDAG &DAG) const {
2623 assert(ty(Vec32).getSizeInBits() == 32);
2624 if (isUndef(Vec32))
2625 return DAG.getUNDEF(MVT::i64);
2626 return getInstr(Hexagon::S2_vsxtbh, dl, MVT::i64, {Vec32}, DAG);
2627}
2628
2629SDValue
2630HexagonTargetLowering::contractPredicate(SDValue Vec64, const SDLoc &dl,
2631 SelectionDAG &DAG) const {
2632 assert(ty(Vec64).getSizeInBits() == 64);
2633 if (isUndef(Vec64))
2634 return DAG.getUNDEF(MVT::i32);
2635 return getInstr(Hexagon::S2_vtrunehb, dl, MVT::i32, {Vec64}, DAG);
2636}
2637
2638SDValue
2639HexagonTargetLowering::getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG)
2640 const {
2641 if (Ty.isVector()) {
2642 assert(Ty.isInteger() && "Only integer vectors are supported here");
2643 unsigned W = Ty.getSizeInBits();
2644 if (W <= 64)
2645 return DAG.getBitcast(Ty, DAG.getConstant(0, dl, MVT::getIntegerVT(W)));
2646 return DAG.getNode(HexagonISD::VZERO, dl, Ty);
2647 }
2648
2649 if (Ty.isInteger())
2650 return DAG.getConstant(0, dl, Ty);
2651 if (Ty.isFloatingPoint())
2652 return DAG.getConstantFP(0.0, dl, Ty);
2653 llvm_unreachable("Invalid type for zero");
2654}
2655
2656SDValue
2657HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
2658 MVT VecTy = ty(Op);
2659 unsigned BW = VecTy.getSizeInBits();
2660 const SDLoc &dl(Op);
2661 SmallVector<SDValue,8> Ops;
2662 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i)
2663 Ops.push_back(Op.getOperand(i));
2664
2665 if (BW == 32)
2666 return buildVector32(Ops, dl, VecTy, DAG);
2667 if (BW == 64)
2668 return buildVector64(Ops, dl, VecTy, DAG);
2669
2670 if (VecTy == MVT::v8i1 || VecTy == MVT::v4i1 || VecTy == MVT::v2i1) {
2671 // Check if this is a special case of all-0 or all-1.
2672 bool All0 = true, All1 = true;
2673 for (SDValue P : Ops) {
2674 auto *CN = dyn_cast<ConstantSDNode>(P.getNode());
2675 if (CN == nullptr) {
2676 All0 = All1 = false;
2677 break;
2678 }
2679 uint32_t C = CN->getZExtValue();
2680 All0 &= (C == 0);
2681 All1 &= (C == 1);
2682 }
2683 if (All0)
2684 return DAG.getNode(HexagonISD::PFALSE, dl, VecTy);
2685 if (All1)
2686 return DAG.getNode(HexagonISD::PTRUE, dl, VecTy);
2687
2688 // For each i1 element in the resulting predicate register, put 1
2689 // shifted by the index of the element into a general-purpose register,
2690 // then or them together and transfer it back into a predicate register.
2691 SDValue Rs[8];
2692 SDValue Z = getZero(dl, MVT::i32, DAG);
2693 // Always produce 8 bits, repeat inputs if necessary.
2694 unsigned Rep = 8 / VecTy.getVectorNumElements();
2695 for (unsigned i = 0; i != 8; ++i) {
2696 SDValue S = DAG.getConstant(1ull << i, dl, MVT::i32);
2697 Rs[i] = DAG.getSelect(dl, MVT::i32, Ops[i/Rep], S, Z);
2698 }
2699 for (ArrayRef<SDValue> A(Rs); A.size() != 1; A = A.drop_back(A.size()/2)) {
2700 for (unsigned i = 0, e = A.size()/2; i != e; ++i)
2701 Rs[i] = DAG.getNode(ISD::OR, dl, MVT::i32, Rs[2*i], Rs[2*i+1]);
2702 }
2703 // Move the value directly to a predicate register.
2704 return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Rs[0]}, DAG);
2705 }
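A small worked example of the select-and-or scheme (inputs assumed; not part of the source): building v4i1 from elements e0..e3:

// Rep = 8 / 4 = 2, so each input element feeds two adjacent bits:
//   Rs[i] = Ops[i/2] ? (1u << i) : 0      for i = 0..7
// The reduction loop ORs adjacent pairs until one word remains, so
//   Rs[0] = (Rs[0]|Rs[1]) | (Rs[2]|Rs[3]) | (Rs[4]|Rs[5]) | (Rs[6]|Rs[7])
//   result = C2_tfrrp(Rs[0])              ; move to a predicate register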
2706
2707 return SDValue();
2708}
2709
2710SDValue
2711HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
2712 SelectionDAG &DAG) const {
2713 MVT VecTy = ty(Op);
2714 const SDLoc &dl(Op);
2715 if (VecTy.getSizeInBits() == 64) {
2716 assert(Op.getNumOperands() == 2);
2717 return DAG.getNode(HexagonISD::COMBINE, dl, VecTy, Op.getOperand(1),
2718 Op.getOperand(0));
2719 }
2720
2721 MVT ElemTy = VecTy.getVectorElementType();
2722 if (ElemTy == MVT::i1) {
2723 assert(VecTy == MVT::v2i1 || VecTy == MVT::v4i1 || VecTy == MVT::v8i1);
2724 MVT OpTy = ty(Op.getOperand(0));
2725 // Scale is how many times the operands need to be contracted to match
2726 // the representation in the target register.
2727 unsigned Scale = VecTy.getVectorNumElements() / OpTy.getVectorNumElements();
2728 assert(Scale == Op.getNumOperands() && Scale > 1);
2729
2730 // First, convert all bool vectors to integers, then generate pairwise
2731 // inserts to form values of doubled length. Up until there are only
2732 // two values left to concatenate, all of these values will fit in a
2733 // 32-bit integer, so keep them as i32 to use 32-bit inserts.
2734 SmallVector<SDValue,4> Words[2];
2735 unsigned IdxW = 0;
2736
2737 for (SDValue P : Op.getNode()->op_values()) {
2738 SDValue W = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, P);
2739 for (unsigned R = Scale; R > 1; R /= 2) {
2740 W = contractPredicate(W, dl, DAG);
2741 W = DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64,
2742 DAG.getUNDEF(MVT::i32), W);
2743 }
2744 W = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, W);
2745 Words[IdxW].push_back(W);
2746 }
2747
2748 while (Scale > 2) {
2749 SDValue WidthV = DAG.getConstant(64 / Scale, dl, MVT::i32);
2750 Words[IdxW ^ 1].clear();
2751
2752 for (unsigned i = 0, e = Words[IdxW].size(); i != e; i += 2) {
2753 SDValue W0 = Words[IdxW][i], W1 = Words[IdxW][i+1];
2754 // Insert W1 into W0 right next to the significant bits of W0.
2755 SDValue T = DAG.getNode(HexagonISD::INSERT, dl, MVT::i32,
2756 {W0, W1, WidthV, WidthV});
2757 Words[IdxW ^ 1].push_back(T);
2758 }
2759 IdxW ^= 1;
2760 Scale /= 2;
2761 }
2762
2763 // Another sanity check. At this point there should only be two words
2764 // left, and Scale should be 2.
2765 assert(Scale == 2 && Words[IdxW].size() == 2);
2766
2767 SDValue WW = DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64,
2768 Words[IdxW][1], Words[IdxW][0]);
2769 return DAG.getNode(HexagonISD::D2P, dl, VecTy, WW);
2770 }
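For concreteness, a hedged trace of the loops above (operand shapes assumed): concatenating four v2i1 operands into v8i1, so Scale = 4:

// Each operand goes through P2D and two contraction rounds, leaving
// 64/Scale = 16 significant bits in each collected i32 word.
// One pass of the while-loop (WidthV = 16) pairs the words up:
//   T = INSERT(W0, W1, 16, 16)   ; W1 lands just above W0's 16 bits
// leaving two words of 32 significant bits each (Scale = 2); COMBINE
// then joins them into an i64 and D2P yields the v8i1 result.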
2771
2772 return SDValue();
2773}
2774
2775SDValue
2776HexagonTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
2777 SelectionDAG &DAG) const {
2778 SDValue Vec = Op.getOperand(0);
2779 MVT ElemTy = ty(Vec).getVectorElementType();
2780 return extractVector(Vec, Op.getOperand(1), SDLoc(Op), ElemTy, ty(Op), DAG);
2781}
2782
2783SDValue
2784HexagonTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
2785 SelectionDAG &DAG) const {
2786 return extractVector(Op.getOperand(0), Op.getOperand(1), SDLoc(Op),
2787 ty(Op), ty(Op), DAG);
2788}
2789
2790SDValue
2791HexagonTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
2792 SelectionDAG &DAG) const {
2793 return insertVector(Op.getOperand(0), Op.getOperand(1), Op.getOperand(2),
8: Calling 'HexagonTargetLowering::insertVector'
2794 SDLoc(Op), ty(Op).getVectorElementType(), DAG);
2795}
2796
2797SDValue
2798HexagonTargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
2799 SelectionDAG &DAG) const {
2800 SDValue ValV = Op.getOperand(1);
2801 return insertVector(Op.getOperand(0), ValV, Op.getOperand(2),
2802 SDLoc(Op), ty(ValV), DAG);
2803}
2804
2805bool
2806HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
2807 // Assuming the caller does not have either a signext or zeroext modifier, and
2808 // only one value is accepted, any reasonable truncation is allowed.
2809 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
2810 return false;
2811
2812 // FIXME: in principle up to 64-bit could be made safe, but it would be very
2813 // fragile at the moment: any support for multiple value returns would be
2814 // liable to disallow tail calls involving i64 -> iN truncation in many cases.
2815 return Ty1->getPrimitiveSizeInBits() <= 32;
2816}
2817
2818SDValue
2819HexagonTargetLowering::LowerLoad(SDValue Op, SelectionDAG &DAG) const {
2820 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
2821 unsigned ClaimAlign = LN->getAlignment();
2822 validateConstPtrAlignment(LN->getBasePtr(), SDLoc(Op), ClaimAlign);
2823 // Call LowerUnalignedLoad for all loads, it recognizes loads that
2824 // don't need extra aligning.
2825 return LowerUnalignedLoad(Op, DAG);
2826}
2827
2828SDValue
2829HexagonTargetLowering::LowerStore(SDValue Op, SelectionDAG &DAG) const {
2830 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
2831 unsigned ClaimAlign = SN->getAlignment();
2832 SDValue Ptr = SN->getBasePtr();
2833 const SDLoc &dl(Op);
2834 validateConstPtrAlignment(Ptr, dl, ClaimAlign);
2835
2836 MVT StoreTy = SN->getMemoryVT().getSimpleVT();
2837 unsigned NeedAlign = Subtarget.getTypeAlignment(StoreTy);
2838 if (ClaimAlign < NeedAlign)
2839 return expandUnalignedStore(SN, DAG);
2840 return Op;
2841}
2842
2843SDValue
2844HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG)
2845 const {
2846 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
2847 MVT LoadTy = ty(Op);
2848 unsigned NeedAlign = Subtarget.getTypeAlignment(LoadTy);
2849 unsigned HaveAlign = LN->getAlignment();
2850 if (HaveAlign >= NeedAlign)
2851 return Op;
2852
2853 const SDLoc &dl(Op);
2854 const DataLayout &DL = DAG.getDataLayout();
2855 LLVMContext &Ctx = *DAG.getContext();
2856
2857 // If the load aligning is disabled or the load can be broken up into two
2858 // smaller legal loads, do the default (target-independent) expansion.
2859 bool DoDefault = false;
2860 // Handle it in the default way if this is an indexed load.
2861 if (!LN->isUnindexed())
2862 DoDefault = true;
2863
2864 if (!AlignLoads) {
2865 if (allowsMemoryAccessForAlignment(Ctx, DL, LN->getMemoryVT(),
2866 *LN->getMemOperand()))
2867 return Op;
2868 DoDefault = true;
2869 }
2870 if (!DoDefault && (2 * HaveAlign) == NeedAlign) {
2871 // The PartTy is the equivalent of "getLoadableTypeOfSize(HaveAlign)".
2872 MVT PartTy = HaveAlign <= 8 ? MVT::getIntegerVT(8 * HaveAlign)
2873 : MVT::getVectorVT(MVT::i8, HaveAlign);
2874 DoDefault =
2875 allowsMemoryAccessForAlignment(Ctx, DL, PartTy, *LN->getMemOperand());
2876 }
2877 if (DoDefault) {
2878 std::pair<SDValue, SDValue> P = expandUnalignedLoad(LN, DAG);
2879 return DAG.getMergeValues({P.first, P.second}, dl);
2880 }
2881
2882 // The code below generates two loads, both aligned as NeedAlign, and
2883 // with the distance of NeedAlign between them. For that to cover the
2884 // bits that need to be loaded (and without overlapping), the size of
2885 // the loads should be equal to NeedAlign. This is true for all loadable
2886 // types, but add an assertion in case something changes in the future.
2887 assert(LoadTy.getSizeInBits() == 8*NeedAlign);
2888
2889 unsigned LoadLen = NeedAlign;
2890 SDValue Base = LN->getBasePtr();
2891 SDValue Chain = LN->getChain();
2892 auto BO = getBaseAndOffset(Base);
2893 unsigned BaseOpc = BO.first.getOpcode();
2894 if (BaseOpc == HexagonISD::VALIGNADDR && BO.second % LoadLen == 0)
2895 return Op;
2896
2897 if (BO.second % LoadLen != 0) {
2898 BO.first = DAG.getNode(ISD::ADD, dl, MVT::i32, BO.first,
2899 DAG.getConstant(BO.second % LoadLen, dl, MVT::i32));
2900 BO.second -= BO.second % LoadLen;
2901 }
2902 SDValue BaseNoOff = (BaseOpc != HexagonISD::VALIGNADDR)
2903 ? DAG.getNode(HexagonISD::VALIGNADDR, dl, MVT::i32, BO.first,
2904 DAG.getConstant(NeedAlign, dl, MVT::i32))
2905 : BO.first;
2906 SDValue Base0 = DAG.getMemBasePlusOffset(BaseNoOff, BO.second, dl);
2907 SDValue Base1 = DAG.getMemBasePlusOffset(BaseNoOff, BO.second+LoadLen, dl);
2908
2909 MachineMemOperand *WideMMO = nullptr;
2910 if (MachineMemOperand *MMO = LN->getMemOperand()) {
2911 MachineFunction &MF = DAG.getMachineFunction();
2912 WideMMO = MF.getMachineMemOperand(MMO->getPointerInfo(), MMO->getFlags(),
2913 2*LoadLen, LoadLen, MMO->getAAInfo(), MMO->getRanges(),
2914 MMO->getSyncScopeID(), MMO->getOrdering(),
2915 MMO->getFailureOrdering());
2916 }
2917
2918 SDValue Load0 = DAG.getLoad(LoadTy, dl, Chain, Base0, WideMMO);
2919 SDValue Load1 = DAG.getLoad(LoadTy, dl, Chain, Base1, WideMMO);
2920
2921 SDValue Aligned = DAG.getNode(HexagonISD::VALIGN, dl, LoadTy,
2922 {Load1, Load0, BaseNoOff.getOperand(0)});
2923 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2924 Load0.getValue(1), Load1.getValue(1));
2925 SDValue M = DAG.getMergeValues({Aligned, NewChain}, dl);
2926 return M;
2927}
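A numeric sketch of the two-load scheme (addresses assumed for illustration): loading an 8-byte vector from unaligned address A = 0x1006 with NeedAlign = 8:

// BaseNoOff = VALIGNADDR(A, 8)         ; A & -8 == 0x1000
// Load0 <- [0x1000], Load1 <- [0x1008] ; two aligned, adjacent loads
// Aligned = VALIGN(Load1, Load0, A)    ; shifts the concatenated pair so
//                                      ; the bytes starting at 0x1006 line up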
2928
2929SDValue
2930HexagonTargetLowering::LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const {
2931 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
2932 auto *CY = dyn_cast<ConstantSDNode>(Y);
2933 if (!CY)
2934 return SDValue();
2935
2936 const SDLoc &dl(Op);
2937 SDVTList VTs = Op.getNode()->getVTList();
2938 assert(VTs.NumVTs == 2);
2939 assert(VTs.VTs[1] == MVT::i1);
2940 unsigned Opc = Op.getOpcode();
2941
2942 if (CY) {
2943 uint32_t VY = CY->getZExtValue();
2944 assert(VY != 0 && "This should have been folded");
2945 // X +/- 1
2946 if (VY != 1)
2947 return SDValue();
2948
2949 if (Opc == ISD::UADDO) {
2950 SDValue Op = DAG.getNode(ISD::ADD, dl, VTs.VTs[0], {X, Y});
2951 SDValue Ov = DAG.getSetCC(dl, MVT::i1, Op, getZero(dl, ty(Op), DAG),
2952 ISD::SETEQ);
2953 return DAG.getMergeValues({Op, Ov}, dl);
2954 }
2955 if (Opc == ISD::USUBO) {
2956 SDValue Op = DAG.getNode(ISD::SUB, dl, VTs.VTs[0], {X, Y});
2957 SDValue Ov = DAG.getSetCC(dl, MVT::i1, Op,
2958 DAG.getConstant(-1, dl, ty(Op)), ISD::SETEQ);
2959 return DAG.getMergeValues({Op, Ov}, dl);
2960 }
2961 }
2962
2963 return SDValue();
2964}
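The two SETEQ tests rely on Y being exactly 1, which the early return guarantees; stated as equations (a sketch of the reasoning, not new code):

// unsigned X:  X + 1 overflows   <=>  X + 1 == 0   (wrapped past the max)
// unsigned X:  X - 1 underflows  <=>  X - 1 == -1  (X was 0)

so comparing the result against 0 (UADDO) or -1 (USUBO) computes exactly the carry or borrow bit.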
2965
2966SDValue
2967HexagonTargetLowering::LowerAddSubCarry(SDValue Op, SelectionDAG &DAG) const {
2968 const SDLoc &dl(Op);
2969 unsigned Opc = Op.getOpcode();
2970 SDValue X = Op.getOperand(0), Y = Op.getOperand(1), C = Op.getOperand(2);
2971
2972 if (Opc == ISD::ADDCARRY)
2973 return DAG.getNode(HexagonISD::ADDC, dl, Op.getNode()->getVTList(),
2974 { X, Y, C });
2975
2976 EVT CarryTy = C.getValueType();
2977 SDValue SubC = DAG.getNode(HexagonISD::SUBC, dl, Op.getNode()->getVTList(),
2978 { X, Y, DAG.getLogicalNOT(dl, C, CarryTy) });
2979 SDValue Out[] = { SubC.getValue(0),
2980 DAG.getLogicalNOT(dl, SubC.getValue(1), CarryTy) };
2981 return DAG.getMergeValues(Out, dl);
2982}
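Why the double negation in the SUBCARRY path: HexagonISD::SUBC computes (X, Y, Cin) -> (X + ~Y + Cin, Cout), i.e. it consumes and produces a carry, while ISD::SUBCARRY traffics in a borrow. Since borrow is the logical NOT of carry on both sides:

// subcarry(X, Y, B) == SUBC(X, Y, !B), with output borrow = !Cout

which is precisely what the two getLogicalNOT calls implement.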
2983
2984SDValue
2985HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
2986 SDValue Chain = Op.getOperand(0);
2987 SDValue Offset = Op.getOperand(1);
2988 SDValue Handler = Op.getOperand(2);
2989 SDLoc dl(Op);
2990 auto PtrVT = getPointerTy(DAG.getDataLayout());
2991
2992 // Mark function as containing a call to EH_RETURN.
2993 HexagonMachineFunctionInfo *FuncInfo =
2994 DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>();
2995 FuncInfo->setHasEHReturn();
2996
2997 unsigned OffsetReg = Hexagon::R28;
2998
2999 SDValue StoreAddr =
3000 DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getRegister(Hexagon::R30, PtrVT),
3001 DAG.getIntPtrConstant(4, dl));
3002 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
3003 Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset);
3004
3005 // Not needed; we already use it as an explicit input to EH_RETURN.
3006 // MF.getRegInfo().addLiveOut(OffsetReg);
3007
3008 return DAG.getNode(HexagonISD::EH_RETURN, dl, MVT::Other, Chain);
3009}
3010
3011SDValue
3012HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3013 unsigned Opc = Op.getOpcode();
3014
3015 // Handle INLINEASM first.
3016 if (Opc == ISD::INLINEASM || Opc == ISD::INLINEASM_BR)
1: Assuming 'Opc' is not equal to INLINEASM
2: Assuming 'Opc' is not equal to INLINEASM_BR
3: Taking false branch
3017 return LowerINLINEASM(Op, DAG);
3018
3019 if (isHvxOperation(Op)) {
4: Assuming the condition is false
5: Taking false branch
3020 // If HVX lowering returns nothing, try the default lowering.
3021 if (SDValue V = LowerHvxOperation(Op, DAG))
3022 return V;
3023 }
3024
3025 switch (Opc) {
6: Control jumps to 'case INSERT_VECTOR_ELT:' at line 3035
3026 default:
3027#ifndef NDEBUG
3028 Op.getNode()->dumpr(&DAG);
3029 if (Opc > HexagonISD::OP_BEGIN && Opc < HexagonISD::OP_END)
3030 errs() << "Error: check for a non-legal type in this operation\n";
3031#endif
3032 llvm_unreachable("Should not custom lower this!");
3033 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
3034 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG);
3035 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
7: Calling 'HexagonTargetLowering::LowerINSERT_VECTOR_ELT'
3036 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
3037 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
3038 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
3039 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
3040 case ISD::BITCAST: return LowerBITCAST(Op, DAG);
3041 case ISD::LOAD: return LowerLoad(Op, DAG);
3042 case ISD::STORE: return LowerStore(Op, DAG);
3043 case ISD::UADDO:
3044 case ISD::USUBO: return LowerUAddSubO(Op, DAG);
3045 case ISD::ADDCARRY:
3046 case ISD::SUBCARRY: return LowerAddSubCarry(Op, DAG);
3047 case ISD::SRA:
3048 case ISD::SHL:
3049 case ISD::SRL: return LowerVECTOR_SHIFT(Op, DAG);
3050 case ISD::ROTL: return LowerROTL(Op, DAG);
3051 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3052 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
3053 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
3054 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
3055 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
3056 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3057 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
3058 case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG);
3059 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3060 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
3061 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
3062 case ISD::VASTART: return LowerVASTART(Op, DAG);
3063 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
3064 case ISD::SETCC: return LowerSETCC(Op, DAG);
3065 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
3066 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3067 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
3068 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG);
3069 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG);
3070 break;
3071 }
3072
3073 return SDValue();
3074}
3075
3076void
3077HexagonTargetLowering::LowerOperationWrapper(SDNode *N,
3078 SmallVectorImpl<SDValue> &Results,
3079 SelectionDAG &DAG) const {
3080 if (isHvxOperation(N)) {
3081 LowerHvxOperationWrapper(N, Results, DAG);
3082 if (!Results.empty())
3083 return;
3084 }
3085
3086 // We are only custom-lowering stores to verify the alignment of the
3087 // address if it is a compile-time constant. Since a store can be modified
3088 // during type-legalization (the value being stored may need legalization),
3089 // return empty Results here to indicate that we don't really make any
3090 // changes in the custom lowering.
3091 if (N->getOpcode() != ISD::STORE)
3092 return TargetLowering::LowerOperationWrapper(N, Results, DAG);
3093}
3094
3095void
3096HexagonTargetLowering::ReplaceNodeResults(SDNode *N,
3097 SmallVectorImpl<SDValue> &Results,
3098 SelectionDAG &DAG) const {
3099 if (isHvxOperation(N)) {
3100 ReplaceHvxNodeResults(N, Results, DAG);
3101 if (!Results.empty())
3102 return;
3103 }
3104
3105 const SDLoc &dl(N);
3106 switch (N->getOpcode()) {
3107 case ISD::SRL:
3108 case ISD::SRA:
3109 case ISD::SHL:
3110 return;
3111 case ISD::BITCAST:
3112 // Handle a bitcast from v8i1 to i8.
3113 if (N->getValueType(0) == MVT::i8) {
3114 SDValue P = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32,
3115 N->getOperand(0), DAG);
3116 SDValue T = DAG.getAnyExtOrTrunc(P, dl, MVT::i8);
3117 Results.push_back(T);
3118 }
3119 break;
3120 }
3121}
3122
3123SDValue
3124HexagonTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
3125 const {
3126 SDValue Op(N, 0);
3127 if (isHvxOperation(Op)) {
3128 if (SDValue V = PerformHvxDAGCombine(N, DCI))
3129 return V;
3130 return SDValue();
3131 }
3132
3133 const SDLoc &dl(Op);
3134 unsigned Opc = Op.getOpcode();
3135
3136 if (Opc == HexagonISD::P2D) {
3137 SDValue P = Op.getOperand(0);
3138 switch (P.getOpcode()) {
3139 case HexagonISD::PTRUE:
3140 return DCI.DAG.getConstant(-1, dl, ty(Op));
3141 case HexagonISD::PFALSE:
3142 return getZero(dl, ty(Op), DCI.DAG);
3143 default:
3144 break;
3145 }
3146 } else if (Opc == ISD::VSELECT) {
3147 // This is pretty much duplicated in HexagonISelLoweringHVX...
3148 //
3149 // (vselect (xor x, ptrue), v0, v1) -> (vselect x, v1, v0)
3150 SDValue Cond = Op.getOperand(0);
3151 if (Cond->getOpcode() == ISD::XOR) {
3152 SDValue C0 = Cond.getOperand(0), C1 = Cond.getOperand(1);
3153 if (C1->getOpcode() == HexagonISD::PTRUE) {
3154 SDValue VSel = DCI.DAG.getNode(ISD::VSELECT, dl, ty(Op), C0,
3155 Op.getOperand(2), Op.getOperand(1));
3156 return VSel;
3157 }
3158 }
3159 }
3160
3161 return SDValue();
3162}
3163
3164/// Returns relocation base for the given PIC jumptable.
3165SDValue
3166HexagonTargetLowering::getPICJumpTableRelocBase(SDValue Table,
3167 SelectionDAG &DAG) const {
3168 int Idx = cast<JumpTableSDNode>(Table)->getIndex();
3169 EVT VT = Table.getValueType();
3170 SDValue T = DAG.getTargetJumpTable(Idx, VT, HexagonII::MO_PCREL);
3171 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Table), VT, T);
3172}
3173
3174//===----------------------------------------------------------------------===//
3175// Inline Assembly Support
3176//===----------------------------------------------------------------------===//
3177
3178TargetLowering::ConstraintType
3179HexagonTargetLowering::getConstraintType(StringRef Constraint) const {
3180 if (Constraint.size() == 1) {
3181 switch (Constraint[0]) {
3182 case 'q':
3183 case 'v':
3184 if (Subtarget.useHVXOps())
3185 return C_RegisterClass;
3186 break;
3187 case 'a':
3188 return C_RegisterClass;
3189 default:
3190 break;
3191 }
3192 }
3193 return TargetLowering::getConstraintType(Constraint);
3194}
3195
3196std::pair<unsigned, const TargetRegisterClass*>
3197HexagonTargetLowering::getRegForInlineAsmConstraint(
3198 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
3199
3200 if (Constraint.size() == 1) {
3201 switch (Constraint[0]) {
3202 case 'r': // R0-R31
3203 switch (VT.SimpleTy) {
3204 default:
3205 return {0u, nullptr};
3206 case MVT::i1:
3207 case MVT::i8:
3208 case MVT::i16:
3209 case MVT::i32:
3210 case MVT::f32:
3211 return {0u, &Hexagon::IntRegsRegClass};
3212 case MVT::i64:
3213 case MVT::f64:
3214 return {0u, &Hexagon::DoubleRegsRegClass};
3215 }
3216 break;
3217 case 'a': // M0-M1
3218 if (VT != MVT::i32)
3219 return {0u, nullptr};
3220 return {0u, &Hexagon::ModRegsRegClass};
3221 case 'q': // q0-q3
3222 switch (VT.getSizeInBits()) {
3223 default:
3224 return {0u, nullptr};
3225 case 64:
3226 case 128:
3227 return {0u, &Hexagon::HvxQRRegClass};
3228 }
3229 break;
3230 case 'v': // V0-V31
3231 switch (VT.getSizeInBits()) {
3232 default:
3233 return {0u, nullptr};
3234 case 512:
3235 return {0u, &Hexagon::HvxVRRegClass};
3236 case 1024:
3237 if (Subtarget.hasV60Ops() && Subtarget.useHVX128BOps())
3238 return {0u, &Hexagon::HvxVRRegClass};
3239 return {0u, &Hexagon::HvxWRRegClass};
3240 case 2048:
3241 return {0u, &Hexagon::HvxWRRegClass};
3242 }
3243 break;
3244 default:
3245 return {0u, nullptr};
3246 }
3247 }
3248
3249 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3250}
3251
3252/// isFPImmLegal - Returns true if the target can instruction select the
3253/// specified FP immediate natively. If false, the legalizer will
3254/// materialize the FP immediate as a load from a constant pool.
3255bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
3256 bool ForCodeSize) const {
3257 return true;
3258}
3259
3260/// isLegalAddressingMode - Return true if the addressing mode represented by
3261/// AM is legal for this target, for a load/store of the specified type.
3262bool HexagonTargetLowering::isLegalAddressingMode(const DataLayout &DL,
3263 const AddrMode &AM, Type *Ty,
3264 unsigned AS, Instruction *I) const {
3265 if (Ty->isSized()) {
3266 // When LSR detects uses of the same base address to access different
3267 // types (e.g. unions), it will assume a conservative type for these
3268 // uses:
3269 // LSR Use: Kind=Address of void in addrspace(4294967295), ...
3270 // The type Ty passed here would then be "void". Skip the alignment
3271 // checks, but do not return false right away, since that confuses
3272 // LSR into crashing.
3273 unsigned A = DL.getABITypeAlignment(Ty);
3274 // The base offset must be a multiple of the alignment.
3275 if ((AM.BaseOffs % A) != 0)
3276 return false;
3277 // The shifted offset must fit in 11 bits.
3278 if (!isInt<11>(AM.BaseOffs >> Log2_32(A)))
3279 return false;
3280 }
3281
3282 // No global is ever allowed as a base.
3283 if (AM.BaseGV)
3284 return false;
3285
3286 int Scale = AM.Scale;
3287 if (Scale < 0)
3288 Scale = -Scale;
3289 switch (Scale) {
3290 case 0: // No scale reg, "r+i", "r", or just "i".
3291 break;
3292 default: // No scaled addressing mode.
3293 return false;
3294 }
3295 return true;
3296}
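Worked numbers for the offset checks above (an i32 access assumed, so A = 4 and Log2_32(A) = 2): isInt<11> accepts shifted offsets in [-1024, 1023], i.e. byte offsets that are multiples of 4 in [-4096, 4092]:

// BaseOffs = 4092: 4092 % 4 == 0, 4092 >> 2 == 1023, isInt<11> -> true
// BaseOffs = 4096: 4096 >> 2 == 1024, outside [-1024, 1023]   -> false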
3297
3298/// Return true if folding a constant offset with the given GlobalAddress is
3299/// legal. It is frequently not legal in PIC relocation models.
3300bool HexagonTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA)
3301 const {
3302 return HTM.getRelocationModel() == Reloc::Static;
3303}
3304
3305/// isLegalICmpImmediate - Return true if the specified immediate is legal
3306/// icmp immediate, that is the target has icmp instructions which can compare
3307/// a register against the immediate without having to materialize the
3308/// immediate into a register.
3309bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
3310 return Imm >= -512 && Imm <= 511;
3311}
3312
3313/// IsEligibleForTailCallOptimization - Check whether the call is eligible
3314/// for tail call optimization. Targets which want to do tail call
3315/// optimization should implement this function.
3316bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
3317 SDValue Callee,
3318 CallingConv::ID CalleeCC,
3319 bool IsVarArg,
3320 bool IsCalleeStructRet,
3321 bool IsCallerStructRet,
3322 const SmallVectorImpl<ISD::OutputArg> &Outs,
3323 const SmallVectorImpl<SDValue> &OutVals,
3324 const SmallVectorImpl<ISD::InputArg> &Ins,
3325 SelectionDAG& DAG) const {
3326 const Function &CallerF = DAG.getMachineFunction().getFunction();
3327 CallingConv::ID CallerCC = CallerF.getCallingConv();
3328 bool CCMatch = CallerCC == CalleeCC;
3329
3330 // ***************************************************************************
3331 // Look for obvious safe cases to perform tail call optimization that do not
3332 // require ABI changes.
3333 // ***************************************************************************
3334
3335 // If this is a tail call via a function pointer, then don't do it!
3336 if (!isa<GlobalAddressSDNode>(Callee) &&
3337 !isa<ExternalSymbolSDNode>(Callee)) {
3338 return false;
3339 }
3340
3341 // Do not optimize if the calling conventions do not match and the conventions
3342 // used are not C or Fast.
3343 if (!CCMatch) {
3344 bool R = (CallerCC == CallingConv::C || CallerCC == CallingConv::Fast);
3345 bool E = (CalleeCC == CallingConv::C || CalleeCC == CallingConv::Fast);
3346 // If R & E, then ok.
3347 if (!R || !E)
3348 return false;
3349 }
3350
3351 // Do not tail call optimize vararg calls.
3352 if (IsVarArg)
3353 return false;
3354
3355 // Also avoid tail call optimization if either caller or callee uses struct
3356 // return semantics.
3357 if (IsCalleeStructRet || IsCallerStructRet)
3358 return false;
3359
3360 // In addition to the cases above, we also disable Tail Call Optimization if
3361 // the calling convention requires that at least one outgoing argument go
3362 // on the stack. We cannot check that here because at this point that
3363 // information is not available.
3364 return true;
3365}
3366
3367/// Returns the target specific optimal type for load and store operations as
3368/// a result of memset, memcpy, and memmove lowering.
3369///
3370 /// If DstAlign is zero, any destination alignment can satisfy the
3371 /// constraint. Similarly, if SrcAlign is zero, it means there isn't
3372/// a need to check it against alignment requirement, probably because the
3373/// source does not need to be loaded. If 'IsMemset' is true, that means it's
3374/// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
3375/// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
3376/// does not need to be loaded. It returns EVT::Other if the type should be
3377/// determined using generic target-independent logic.
3378EVT HexagonTargetLowering::getOptimalMemOpType(
3379 const MemOp &Op, const AttributeList &FuncAttributes) const {
3380 if (Op.size() >= 8 && Op.isAligned(Align(8)))
3381 return MVT::i64;
3382 if (Op.size() >= 4 && Op.isAligned(Align(4)))
3383 return MVT::i32;
3384 if (Op.size() >= 2 && Op.isAligned(Align(2)))
3385 return MVT::i16;
3386 return MVT::Other;
3387}
3388
3389bool HexagonTargetLowering::allowsMemoryAccess(LLVMContext &Context,
3390 const DataLayout &DL, EVT VT, unsigned AddrSpace, unsigned Alignment,
3391 MachineMemOperand::Flags Flags, bool *Fast) const {
3392 MVT SVT = VT.getSimpleVT();
3393 if (Subtarget.isHVXVectorType(SVT, true))
3394 return allowsHvxMemoryAccess(SVT, Alignment, Flags, Fast);
3395 return TargetLoweringBase::allowsMemoryAccess(
3396 Context, DL, VT, AddrSpace, Alignment, Flags, Fast);
3397}
3398
3399bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(
3400 EVT VT, unsigned AddrSpace, unsigned Alignment,
3401 MachineMemOperand::Flags Flags, bool *Fast) const {
3402 MVT SVT = VT.getSimpleVT();
3403 if (Subtarget.isHVXVectorType(SVT, true))
3404 return allowsHvxMisalignedMemoryAccesses(SVT, Alignment, Flags, Fast);
3405 if (Fast)
3406 *Fast = false;
3407 return false;
3408}
3409
3410std::pair<const TargetRegisterClass*, uint8_t>
3411HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
3412 MVT VT) const {
3413 if (Subtarget.isHVXVectorType(VT, true)) {
3414 unsigned BitWidth = VT.getSizeInBits();
3415 unsigned VecWidth = Subtarget.getVectorLength() * 8;
3416
3417 if (VT.getVectorElementType() == MVT::i1)
3418 return std::make_pair(&Hexagon::HvxQRRegClass, 1);
3419 if (BitWidth == VecWidth)
3420 return std::make_pair(&Hexagon::HvxVRRegClass, 1);
3421 assert(BitWidth == 2 * VecWidth);
3422 return std::make_pair(&Hexagon::HvxWRRegClass, 1);
3423 }
3424
3425 return TargetLowering::findRepresentativeClass(TRI, VT);
3426}
3427
3428bool HexagonTargetLowering::shouldReduceLoadWidth(SDNode *Load,
3429 ISD::LoadExtType ExtTy, EVT NewVT) const {
3430 // TODO: This may be worth removing. Check regression tests for diffs.
3431 if (!TargetLoweringBase::shouldReduceLoadWidth(Load, ExtTy, NewVT))
3432 return false;
3433
3434 auto *L = cast<LoadSDNode>(Load);
3435 std::pair<SDValue,int> BO = getBaseAndOffset(L->getBasePtr());
3436 // Small-data object, do not shrink.
3437 if (BO.first.getOpcode() == HexagonISD::CONST32_GP)
3438 return false;
3439 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(BO.first)) {
3440 auto &HTM = static_cast<const HexagonTargetMachine&>(getTargetMachine());
3441 const auto *GO = dyn_cast_or_null<const GlobalObject>(GA->getGlobal());
3442 return !GO || !HTM.getObjFileLowering()->isGlobalInSmallSection(GO, HTM);
3443 }
3444 return true;
3445}
3446
3447Value *HexagonTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
3448 AtomicOrdering Ord) const {
3449 BasicBlock *BB = Builder.GetInsertBlock();
3450 Module *M = BB->getParent()->getParent();
3451 auto PT = cast<PointerType>(Addr->getType());
3452 Type *Ty = PT->getElementType();
3453 unsigned SZ = Ty->getPrimitiveSizeInBits();
3454 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
3455 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
3456 : Intrinsic::hexagon_L4_loadd_locked;
3457 Function *Fn = Intrinsic::getDeclaration(M, IntID);
3458
3459 PointerType *NewPtrTy
3460 = Builder.getIntNTy(SZ)->getPointerTo(PT->getAddressSpace());
3461 Addr = Builder.CreateBitCast(Addr, NewPtrTy);
3462
3463 Value *Call = Builder.CreateCall(Fn, Addr, "larx");
3464
3465 return Builder.CreateBitCast(Call, Ty);
3466}
3467
3468/// Perform a store-conditional operation to Addr. Return the status of the
3469/// store. This should be 0 if the store succeeded, non-zero otherwise.
3470Value *HexagonTargetLowering::emitStoreConditional(IRBuilder<> &Builder,
3471 Value *Val, Value *Addr, AtomicOrdering Ord) const {
3472 BasicBlock *BB = Builder.GetInsertBlock();
3473 Module *M = BB->getParent()->getParent();
3474 Type *Ty = Val->getType();
3475 unsigned SZ = Ty->getPrimitiveSizeInBits();
3476
3477 Type *CastTy = Builder.getIntNTy(SZ);
3478 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
3479 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
3480 : Intrinsic::hexagon_S4_stored_locked;
3481 Function *Fn = Intrinsic::getDeclaration(M, IntID);
3482
3483 unsigned AS = Addr->getType()->getPointerAddressSpace();
3484 Addr = Builder.CreateBitCast(Addr, CastTy->getPointerTo(AS));
3485 Val = Builder.CreateBitCast(Val, CastTy);
3486
3487 Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx");
3488 Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");
3489 Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext()));
3490 return Ext;
3491}
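Together with shouldExpandAtomicCmpXchgInIR below, these two hooks let the generic atomic-expansion pass turn a 32-bit cmpxchg into a load-locked/store-conditional loop. A hedged sketch of the expansion's shape (control flow only; not the pass's literal output):

// loop:
//   old = emitLoadLinked(addr)               ; L2_loadw_locked
//   if (old != expected) goto done
//   status = emitStoreConditional(new, addr) ; S2_storew_locked, 0 == success
//   if (status != 0) goto loop
// done: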
3492
3493TargetLowering::AtomicExpansionKind
3494HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
3495 // Do not expand loads and stores that don't exceed 64 bits.
3496 return LI->getType()->getPrimitiveSizeInBits() > 64
3497 ? AtomicExpansionKind::LLOnly
3498 : AtomicExpansionKind::None;
3499}
3500
3501bool HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
3502 // Do not expand loads and stores that don't exceed 64 bits.
3503 return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64;
3504}
3505
3506TargetLowering::AtomicExpansionKind
3507HexagonTargetLowering::shouldExpandAtomicCmpXchgInIR(
3508 AtomicCmpXchgInst *AI) const {
3509 const DataLayout &DL = AI->getModule()->getDataLayout();
3510 unsigned Size = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
3511 if (Size >= 4 && Size <= 8)
3512 return AtomicExpansionKind::LLSC;
3513 return AtomicExpansionKind::None;
3514}

/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/lib/Target/Hexagon/HexagonISelLowering.h

1//===-- HexagonISelLowering.h - Hexagon DAG Lowering Interface --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that Hexagon uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H
15#define LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H
16
17#include "Hexagon.h"
18#include "llvm/ADT/StringRef.h"
19#include "llvm/CodeGen/ISDOpcodes.h"
20#include "llvm/CodeGen/SelectionDAGNodes.h"
21#include "llvm/CodeGen/TargetLowering.h"
22#include "llvm/CodeGen/ValueTypes.h"
23#include "llvm/IR/CallingConv.h"
24#include "llvm/IR/InlineAsm.h"
25#include "llvm/Support/MachineValueType.h"
26#include <cstdint>
27#include <utility>
28
29namespace llvm {
30
31namespace HexagonISD {
32
33 enum NodeType : unsigned {
34 OP_BEGIN = ISD::BUILTIN_OP_END,
35
36 CONST32 = OP_BEGIN,
37 CONST32_GP, // For marking data present in GP.
38 ADDC, // Add with carry: (X, Y, Cin) -> (X+Y, Cout).
39 SUBC, // Sub with carry: (X, Y, Cin) -> (X+~Y+Cin, Cout).
40 ALLOCA,
41
42 AT_GOT, // Index in GOT.
43 AT_PCREL, // Offset relative to PC.
44
45 CALL, // Function call.
46 CALLnr, // Function call that does not return.
47 CALLR,
48
49 RET_FLAG, // Return with a flag operand.
50 BARRIER, // Memory barrier.
51 JT, // Jump table.
52 CP, // Constant pool.
53
54 COMBINE,
55 VSPLAT, // Generic splat, selection depends on argument/return
56 // types.
57 VASL,
58 VASR,
59 VLSR,
60
61 TSTBIT,
62 INSERT,
63 EXTRACTU,
64 VEXTRACTW,
65 VINSERTW0,
66 VROR,
67 TC_RETURN,
68 EH_RETURN,
69 DCFETCH,
70 READCYCLE,
71 PTRUE,
72 PFALSE,
73 D2P, // Convert 8-byte value to 8-bit predicate register. [*]
74 P2D, // Convert 8-bit predicate register to 8-byte value. [*]
75 V2Q, // Convert HVX vector to a vector predicate reg. [*]
76 Q2V, // Convert vector predicate to an HVX vector. [*]
77 // [*] The equivalence is defined as "Q <=> (V != 0)",
78 // where the != operation compares bytes.
79 // Note: V != 0 is implemented as V >u 0.
80 QCAT,
81 QTRUE,
82 QFALSE,
83 VZERO,
84 VSPLATW, // HVX splat of a 32-bit word with an arbitrary result type.
85 TYPECAST, // No-op that's used to convert between different legal
86 // types in a register.
87 VALIGN, // Align two vectors (in Op0, Op1) to one that would have
88 // been loaded from address in Op2.
89 VALIGNADDR, // Align vector address: Op0 & -Op1, except when it is
90 // an address in a vector load, then it's a no-op.
91 OP_END
92 };
93
94} // end namespace HexagonISD
95
96 class HexagonSubtarget;
97
98 class HexagonTargetLowering : public TargetLowering {
99 int VarArgsFrameOffset; // Frame offset to start of varargs area.
100 const HexagonTargetMachine &HTM;
101 const HexagonSubtarget &Subtarget;
102
103 bool CanReturnSmallStruct(const Function* CalleeFn, unsigned& RetSize)
104 const;
105
106 public:
107 explicit HexagonTargetLowering(const TargetMachine &TM,
108 const HexagonSubtarget &ST);
109
110 bool isHVXVectorType(MVT Ty) const;
111
112 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
113 /// for tail call optimization. Targets which want to do tail call
114 /// optimization should implement this function.
115 bool IsEligibleForTailCallOptimization(SDValue Callee,
116 CallingConv::ID CalleeCC, bool isVarArg, bool isCalleeStructRet,
117 bool isCallerStructRet, const SmallVectorImpl<ISD::OutputArg> &Outs,
118 const SmallVectorImpl<SDValue> &OutVals,
119 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG& DAG) const;
120
121 bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
122 MachineFunction &MF,
123 unsigned Intrinsic) const override;
124
125 bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
126 bool isTruncateFree(EVT VT1, EVT VT2) const override;
127
128 bool isCheapToSpeculateCttz() const override { return true; }
129 bool isCheapToSpeculateCtlz() const override { return true; }
130 bool isCtlzFast() const override { return true; }
131
132 bool hasBitTest(SDValue X, SDValue Y) const override;
133
134 bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
135
136 /// Return true if an FMA operation is faster than a pair of mul and add
137 /// instructions. fmuladd intrinsics will be expanded to FMAs when this
138 /// method returns true (and FMAs are legal), otherwise fmuladd is
139 /// expanded to mul + add.
140 bool isFMAFasterThanFMulAndFAdd(const MachineFunction &,
141 EVT) const override;
142
143 // Should we expand the build vector with shuffles?
144 bool shouldExpandBuildVectorWithShuffles(EVT VT,
145 unsigned DefinedValues) const override;
146
147 bool isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const override;
148 TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
149 const override;
150
151 SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
152 void LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
153 SelectionDAG &DAG) const override;
154 void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
155 SelectionDAG &DAG) const override;
156
157 const char *getTargetNodeName(unsigned Opcode) const override;
158
159 SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
160 SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
161 SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
162 SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
163 SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
164 SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
165 SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
166 SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const;
167 SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const;
168 SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
169 SDValue LowerANY_EXTEND(SDValue Op, SelectionDAG &DAG) const;
170 SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const;
171 SDValue LowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const;
172 SDValue LowerLoad(SDValue Op, SelectionDAG &DAG) const;
173 SDValue LowerStore(SDValue Op, SelectionDAG &DAG) const;
174 SDValue LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) const;
175 SDValue LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const;
176 SDValue LowerAddSubCarry(SDValue Op, SelectionDAG &DAG) const;
177
178 SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
179 SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
180 SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
181 SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
182 SDValue LowerEH_LABEL(SDValue Op, SelectionDAG &DAG) const;
183 SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
184 SDValue
185 LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
186 const SmallVectorImpl<ISD::InputArg> &Ins,
187 const SDLoc &dl, SelectionDAG &DAG,
188 SmallVectorImpl<SDValue> &InVals) const override;
189 SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const;
190 SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
191 SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
192 SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
193 SelectionDAG &DAG) const;
194 SDValue LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
195 SelectionDAG &DAG) const;
196 SDValue LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
197 SelectionDAG &DAG) const;
198 SDValue GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
199 GlobalAddressSDNode *GA, SDValue InFlag, EVT PtrVT,
200 unsigned ReturnReg, unsigned char OperandFlags) const;
201 SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
202
203 SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
204 SmallVectorImpl<SDValue> &InVals) const override;
205 SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
206 CallingConv::ID CallConv, bool isVarArg,
207 const SmallVectorImpl<ISD::InputArg> &Ins,
208 const SDLoc &dl, SelectionDAG &DAG,
209 SmallVectorImpl<SDValue> &InVals,
210 const SmallVectorImpl<SDValue> &OutVals,
211 SDValue Callee) const;
212
213 SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
214 SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
215 SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
216 SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const;
217 SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
218
219 bool CanLowerReturn(CallingConv::ID CallConv,
220 MachineFunction &MF, bool isVarArg,
221 const SmallVectorImpl<ISD::OutputArg> &Outs,
222 LLVMContext &Context) const override;
223
224 SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
225 const SmallVectorImpl<ISD::OutputArg> &Outs,
226 const SmallVectorImpl<SDValue> &OutVals,
227 const SDLoc &dl, SelectionDAG &DAG) const override;
228
229 SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
230
231 bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
232
233 Register getRegisterByName(const char* RegName, LLT VT,
234 const MachineFunction &MF) const override;
235
236 /// If a physical register, this returns the register that receives the
237 /// exception address on entry to an EH pad.
238 unsigned
239 getExceptionPointerRegister(const Constant *PersonalityFn) const override {
240 return Hexagon::R0;
241 }
242
243 /// If a physical register, this returns the register that receives the
244 /// exception typeid on entry to a landing pad.
245 unsigned
246 getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
247 return Hexagon::R1;
248 }
249
250 SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
251 SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
252 SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
253 SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
254
255 EVT getSetCCResultType(const DataLayout &, LLVMContext &C,
256 EVT VT) const override {
257 if (!VT.isVector())
258 return MVT::i1;
259 else
260 return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
261 }
262
263 bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
264 SDValue &Base, SDValue &Offset,
265 ISD::MemIndexedMode &AM,
266 SelectionDAG &DAG) const override;
267
268 ConstraintType getConstraintType(StringRef Constraint) const override;
269
270 std::pair<unsigned, const TargetRegisterClass *>
271 getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
272 StringRef Constraint, MVT VT) const override;
273
274 unsigned
275 getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
276 if (ConstraintCode == "o")
277 return InlineAsm::Constraint_o;
278 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
279 }
280
281 // Intrinsics
282 SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
283 SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
284 /// isLegalAddressingMode - Return true if the addressing mode represented
285 /// by AM is legal for this target, for a load/store of the specified type.
286 /// The type may be VoidTy, in which case only return true if the addressing
287 /// mode is legal for a load/store of any legal type.
288 /// TODO: Handle pre/postinc as well.
289 bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
290 Type *Ty, unsigned AS,
291 Instruction *I = nullptr) const override;
292 /// Return true if folding a constant offset with the given GlobalAddress
293 /// is legal. It is frequently not legal in PIC relocation models.
294 bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
295
296 bool isFPImmLegal(const APFloat &Imm, EVT VT,
297 bool ForCodeSize) const override;
298
299 /// isLegalICmpImmediate - Return true if the specified immediate is a legal
300 /// icmp immediate, that is, the target has icmp instructions which can
301 /// compare a register against the immediate without having to materialize
302 /// the immediate into a register.
303 bool isLegalICmpImmediate(int64_t Imm) const override;
304
305 EVT getOptimalMemOpType(const MemOp &Op,
306 const AttributeList &FuncAttributes) const override;
307
308 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
309 unsigned AddrSpace, unsigned Alignment, MachineMemOperand::Flags Flags,
310 bool *Fast) const override;
311
312 bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
313 unsigned Alignment, MachineMemOperand::Flags Flags, bool *Fast)
314 const override;
315
316 /// Returns relocation base for the given PIC jumptable.
317 SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG)
318 const override;
319
320 bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
321 EVT NewVT) const override;
322
323 // Handling of atomic RMW instructions.
324 Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
325 AtomicOrdering Ord) const override;
326 Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
327 Value *Addr, AtomicOrdering Ord) const override;
328 AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
329 bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
330 AtomicExpansionKind
331 shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
332
333 AtomicExpansionKind
334 shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override {
335 return AtomicExpansionKind::LLSC;
336 }
337
338 private:
339 void initializeHVXLowering();
340 void validateConstPtrAlignment(SDValue Ptr, const SDLoc &dl,
341 unsigned NeedAlign) const;
342
343 std::pair<SDValue,int> getBaseAndOffset(SDValue Addr) const;
344
345 bool getBuildVectorConstInts(ArrayRef<SDValue> Values, MVT VecTy,
346 SelectionDAG &DAG,
347 MutableArrayRef<ConstantInt*> Consts) const;
348 SDValue buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl, MVT VecTy,
349 SelectionDAG &DAG) const;
350 SDValue buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl, MVT VecTy,
351 SelectionDAG &DAG) const;
352 SDValue extractVector(SDValue VecV, SDValue IdxV, const SDLoc &dl,
353 MVT ValTy, MVT ResTy, SelectionDAG &DAG) const;
354 SDValue insertVector(SDValue VecV, SDValue ValV, SDValue IdxV,
355 const SDLoc &dl, MVT ValTy, SelectionDAG &DAG) const;
356 SDValue expandPredicate(SDValue Vec32, const SDLoc &dl,
357 SelectionDAG &DAG) const;
358 SDValue contractPredicate(SDValue Vec64, const SDLoc &dl,
359 SelectionDAG &DAG) const;
360 SDValue getVectorShiftByInt(SDValue Op, SelectionDAG &DAG) const;
361
362 bool isUndef(SDValue Op) const {
363 if (Op.isMachineOpcode())
364 return Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
365 return Op.getOpcode() == ISD::UNDEF;
366 }
367 SDValue getInstr(unsigned MachineOpc, const SDLoc &dl, MVT Ty,
368 ArrayRef<SDValue> Ops, SelectionDAG &DAG) const {
369 SDNode *N = DAG.getMachineNode(MachineOpc, dl, Ty, Ops);
370 return SDValue(N, 0);
371 }
372 SDValue getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG) const;
373
374 using VectorPair = std::pair<SDValue, SDValue>;
375 using TypePair = std::pair<MVT, MVT>;
376
377 SDValue getInt(unsigned IntId, MVT ResTy, ArrayRef<SDValue> Ops,
378 const SDLoc &dl, SelectionDAG &DAG) const;
379
380 MVT ty(SDValue Op) const {
381 return Op.getValueType().getSimpleVT();
21: Calling 'SDValue::getValueType'
382 }
383 TypePair ty(const VectorPair &Ops) const {
384 return { Ops.first.getValueType().getSimpleVT(),
385 Ops.second.getValueType().getSimpleVT() };
386 }
387 MVT tyScalar(MVT Ty) const {
388 if (!Ty.isVector())
389 return Ty;
390 return MVT::getIntegerVT(Ty.getSizeInBits());
391 }
392 MVT tyVector(MVT Ty, MVT ElemTy) const {
393 if (Ty.isVector() && Ty.getVectorElementType() == ElemTy)
394 return Ty;
395 unsigned TyWidth = Ty.getSizeInBits();
396 unsigned ElemWidth = ElemTy.getSizeInBits();
397 assert((TyWidth % ElemWidth) == 0);
398 return MVT::getVectorVT(ElemTy, TyWidth/ElemWidth);
399 }
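// Editorial sketch (not in the original header): tyScalar and tyVector
// reinterpret a type's total bit width. Under the definitions above:
//   tyScalar(MVT::v4i16)          --> MVT::i64  (64 bits as one integer)
//   tyVector(MVT::v4i16, MVT::i8) --> MVT::v8i8 (64 bits as 8 x i8)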
400
401 MVT typeJoin(const TypePair &Tys) const;
402 TypePair typeSplit(MVT Ty) const;
403 MVT typeExtElem(MVT VecTy, unsigned Factor) const;
404 MVT typeTruncElem(MVT VecTy, unsigned Factor) const;
405
406 SDValue opJoin(const VectorPair &Ops, const SDLoc &dl,
407 SelectionDAG &DAG) const;
408 VectorPair opSplit(SDValue Vec, const SDLoc &dl, SelectionDAG &DAG) const;
409 SDValue opCastElem(SDValue Vec, MVT ElemTy, SelectionDAG &DAG) const;
410
411 bool allowsHvxMemoryAccess(MVT VecTy, unsigned Alignment,
412 MachineMemOperand::Flags Flags, bool *Fast) const;
413 bool allowsHvxMisalignedMemoryAccesses(MVT VecTy, unsigned Align,
414 MachineMemOperand::Flags Flags, bool *Fast) const;
415
416 bool isHvxSingleTy(MVT Ty) const;
417 bool isHvxPairTy(MVT Ty) const;
418 bool isHvxBoolTy(MVT Ty) const;
419 SDValue convertToByteIndex(SDValue ElemIdx, MVT ElemTy,
420 SelectionDAG &DAG) const;
421 SDValue getIndexInWord32(SDValue Idx, MVT ElemTy, SelectionDAG &DAG) const;
422 SDValue getByteShuffle(const SDLoc &dl, SDValue Op0, SDValue Op1,
423 ArrayRef<int> Mask, SelectionDAG &DAG) const;
424
425 SDValue buildHvxVectorReg(ArrayRef<SDValue> Values, const SDLoc &dl,
426 MVT VecTy, SelectionDAG &DAG) const;
427 SDValue buildHvxVectorPred(ArrayRef<SDValue> Values, const SDLoc &dl,
428 MVT VecTy, SelectionDAG &DAG) const;
429 SDValue createHvxPrefixPred(SDValue PredV, const SDLoc &dl,
430 unsigned BitBytes, bool ZeroFill,
431 SelectionDAG &DAG) const;
432 SDValue extractHvxElementReg(SDValue VecV, SDValue IdxV, const SDLoc &dl,
433 MVT ResTy, SelectionDAG &DAG) const;
434 SDValue extractHvxElementPred(SDValue VecV, SDValue IdxV, const SDLoc &dl,
435 MVT ResTy, SelectionDAG &DAG) const;
436 SDValue insertHvxElementReg(SDValue VecV, SDValue IdxV, SDValue ValV,
437 const SDLoc &dl, SelectionDAG &DAG) const;
438 SDValue insertHvxElementPred(SDValue VecV, SDValue IdxV, SDValue ValV,
439 const SDLoc &dl, SelectionDAG &DAG) const;
440 SDValue extractHvxSubvectorReg(SDValue VecV, SDValue IdxV, const SDLoc &dl,
441 MVT ResTy, SelectionDAG &DAG) const;
442 SDValue extractHvxSubvectorPred(SDValue VecV, SDValue IdxV, const SDLoc &dl,
443 MVT ResTy, SelectionDAG &DAG) const;
444 SDValue insertHvxSubvectorReg(SDValue VecV, SDValue SubV, SDValue IdxV,
445 const SDLoc &dl, SelectionDAG &DAG) const;
446 SDValue insertHvxSubvectorPred(SDValue VecV, SDValue SubV, SDValue IdxV,
447 const SDLoc &dl, SelectionDAG &DAG) const;
448 SDValue extendHvxVectorPred(SDValue VecV, const SDLoc &dl, MVT ResTy,
449 bool ZeroExt, SelectionDAG &DAG) const;
450 SDValue compressHvxPred(SDValue VecQ, const SDLoc &dl, MVT ResTy,
451 SelectionDAG &DAG) const;
452
453 SDValue LowerHvxBuildVector(SDValue Op, SelectionDAG &DAG) const;
454 SDValue LowerHvxConcatVectors(SDValue Op, SelectionDAG &DAG) const;
455 SDValue LowerHvxExtractElement(SDValue Op, SelectionDAG &DAG) const;
456 SDValue LowerHvxInsertElement(SDValue Op, SelectionDAG &DAG) const;
457 SDValue LowerHvxExtractSubvector(SDValue Op, SelectionDAG &DAG) const;
458 SDValue LowerHvxInsertSubvector(SDValue Op, SelectionDAG &DAG) const;
459 SDValue LowerHvxBitcast(SDValue Op, SelectionDAG &DAG) const;
460 SDValue LowerHvxAnyExt(SDValue Op, SelectionDAG &DAG) const;
461 SDValue LowerHvxSignExt(SDValue Op, SelectionDAG &DAG) const;
462 SDValue LowerHvxZeroExt(SDValue Op, SelectionDAG &DAG) const;
463 SDValue LowerHvxCttz(SDValue Op, SelectionDAG &DAG) const;
464 SDValue LowerHvxMul(SDValue Op, SelectionDAG &DAG) const;
465 SDValue LowerHvxMulh(SDValue Op, SelectionDAG &DAG) const;
466 SDValue LowerHvxSetCC(SDValue Op, SelectionDAG &DAG) const;
467 SDValue LowerHvxExtend(SDValue Op, SelectionDAG &DAG) const;
468 SDValue LowerHvxShift(SDValue Op, SelectionDAG &DAG) const;
469 SDValue LowerHvxIntrinsic(SDValue Op, SelectionDAG &DAG) const;
470 SDValue LowerHvxStore(SDValue Op, SelectionDAG &DAG) const;
471 SDValue HvxVecPredBitcastComputation(SDValue Op, SelectionDAG &DAG) const;
472
473 SDValue SplitHvxPairOp(SDValue Op, SelectionDAG &DAG) const;
474 SDValue SplitHvxMemOp(SDValue Op, SelectionDAG &DAG) const;
475
476 std::pair<const TargetRegisterClass*, uint8_t>
477 findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT)
478 const override;
479
480 bool isHvxOperation(SDValue Op) const;
481 bool isHvxOperation(SDNode *N) const;
482 SDValue LowerHvxOperation(SDValue Op, SelectionDAG &DAG) const;
483 void LowerHvxOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
484 SelectionDAG &DAG) const;
485 void ReplaceHvxNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
486 SelectionDAG &DAG) const;
487 SDValue PerformHvxDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
488 };
489
490} // end namespace llvm
491
492#endif // LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H

/build/llvm-toolchain-snapshot-11~++20200309111110+2c36c23f347/llvm/include/llvm/CodeGen/SelectionDAGNodes.h

1//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SDNode class and derived classes, which are used to
10// represent the nodes and operations present in a SelectionDAG. These nodes
11// and operations are machine code level operations, with some similarities to
12// the GCC RTL representation.
13//
14// Clients should include the SelectionDAG.h file instead of this file directly.
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19#define LLVM_CODEGEN_SELECTIONDAGNODES_H
20
21#include "llvm/ADT/APFloat.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/FoldingSet.h"
25#include "llvm/ADT/GraphTraits.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/ilist_node.h"
29#include "llvm/ADT/iterator.h"
30#include "llvm/ADT/iterator_range.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/ValueTypes.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/DebugLoc.h"
36#include "llvm/IR/Instruction.h"
37#include "llvm/IR/Instructions.h"
38#include "llvm/IR/Metadata.h"
39#include "llvm/IR/Operator.h"
40#include "llvm/Support/AlignOf.h"
41#include "llvm/Support/AtomicOrdering.h"
42#include "llvm/Support/Casting.h"
43#include "llvm/Support/ErrorHandling.h"
44#include "llvm/Support/MachineValueType.h"
45#include "llvm/Support/TypeSize.h"
46#include <algorithm>
47#include <cassert>
48#include <climits>
49#include <cstddef>
50#include <cstdint>
51#include <cstring>
52#include <iterator>
53#include <string>
54#include <tuple>
55
56namespace llvm {
57
58class APInt;
59class Constant;
60template <typename T> struct DenseMapInfo;
61class GlobalValue;
62class MachineBasicBlock;
63class MachineConstantPoolValue;
64class MCSymbol;
65class raw_ostream;
66class SDNode;
67class SelectionDAG;
68class Type;
69class Value;
70
71void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
72 bool force = false);
73
74/// This represents a list of ValueType's that has been intern'd by
75/// a SelectionDAG. Instances of this simple value class are returned by
76/// SelectionDAG::getVTList(...).
77///
78struct SDVTList {
79 const EVT *VTs;
80 unsigned int NumVTs;
81};
82
83namespace ISD {
84
85 /// Node predicates
86
87 /// If N is a BUILD_VECTOR node whose elements are all the same constant or
88 /// undefined, return true and return the constant value in \p SplatValue.
89 bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
90
91 /// Return true if the specified node is a BUILD_VECTOR where all of the
92 /// elements are ~0 or undef.
93 bool isBuildVectorAllOnes(const SDNode *N);
94
95 /// Return true if the specified node is a BUILD_VECTOR where all of the
96 /// elements are 0 or undef.
97 bool isBuildVectorAllZeros(const SDNode *N);
98
99 /// Return true if the specified node is a BUILD_VECTOR node of all
100 /// ConstantSDNode or undef.
101 bool isBuildVectorOfConstantSDNodes(const SDNode *N);
102
103 /// Return true if the specified node is a BUILD_VECTOR node of all
104 /// ConstantFPSDNode or undef.
105 bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
106
107 /// Return true if the node has at least one operand and all operands of the
108 /// specified node are ISD::UNDEF.
109 bool allOperandsUndef(const SDNode *N);
110
111} // end namespace ISD
112
113//===----------------------------------------------------------------------===//
114/// Unlike LLVM values, Selection DAG nodes may return multiple
115/// values as the result of a computation. Many nodes return multiple values,
116/// from loads (which define a token and a return value) to ADDC (which returns
117/// a result and a carry value), to calls (which may return an arbitrary number
118/// of values).
119///
120/// As such, each use of a SelectionDAG computation must indicate the node that
121/// computes it as well as which return value to use from that node. This pair
122/// of information is represented with the SDValue value type.
123///
124class SDValue {
125 friend struct DenseMapInfo<SDValue>;
126
127 SDNode *Node = nullptr; // The node defining the value we are using.
128 unsigned ResNo = 0; // Which return value of the node we are using.
129
130public:
131 SDValue() = default;
132 SDValue(SDNode *node, unsigned resno);
133
134 /// get the index which selects a specific result in the SDNode
135 unsigned getResNo() const { return ResNo; }
136
137 /// get the SDNode which holds the desired result
138 SDNode *getNode() const { return Node; }
139
140 /// set the SDNode
141 void setNode(SDNode *N) { Node = N; }
142
143 inline SDNode *operator->() const { return Node; }
144
145 bool operator==(const SDValue &O) const {
146 return Node == O.Node && ResNo == O.ResNo;
147 }
148 bool operator!=(const SDValue &O) const {
149 return !operator==(O);
150 }
151 bool operator<(const SDValue &O) const {
152 return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
153 }
154 explicit operator bool() const {
155 return Node != nullptr;
156 }
157
158 SDValue getValue(unsigned R) const {
159 return SDValue(Node, R);
160 }
161
162 /// Return true if this node is an operand of N.
163 bool isOperandOf(const SDNode *N) const;
164
165 /// Return the ValueType of the referenced return value.
166 inline EVT getValueType() const;
167
168 /// Return the simple ValueType of the referenced return value.
169 MVT getSimpleValueType() const {
170 return getValueType().getSimpleVT();
171 }
172
173 /// Returns the size of the value in bits.
174 ///
175 /// If the value type is a scalable vector type, the scalable property will
176 /// be set and the runtime size will be a positive integer multiple of the
177 /// base size.
178 TypeSize getValueSizeInBits() const {
179 return getValueType().getSizeInBits();
180 }
181
182 TypeSize getScalarValueSizeInBits() const {
183 return getValueType().getScalarType().getSizeInBits();
184 }
185
186 // Forwarding methods - These forward to the corresponding methods in SDNode.
187 inline unsigned getOpcode() const;
188 inline unsigned getNumOperands() const;
189 inline const SDValue &getOperand(unsigned i) const;
190 inline uint64_t getConstantOperandVal(unsigned i) const;
191 inline const APInt &getConstantOperandAPInt(unsigned i) const;
192 inline bool isTargetMemoryOpcode() const;
193 inline bool isTargetOpcode() const;
194 inline bool isMachineOpcode() const;
195 inline bool isUndef() const;
196 inline unsigned getMachineOpcode() const;
197 inline const DebugLoc &getDebugLoc() const;
198 inline void dump() const;
199 inline void dump(const SelectionDAG *G) const;
200 inline void dumpr() const;
201 inline void dumpr(const SelectionDAG *G) const;
202
203 /// Return true if this operand (which must be a chain) reaches the
204 /// specified operand without crossing any side-effecting instructions.
205 /// In practice, this looks through token factors and non-volatile loads.
206 /// In order to remain efficient, this only
207 /// looks a couple of nodes in; it does not do an exhaustive search.
208 bool reachesChainWithoutSideEffects(SDValue Dest,
209 unsigned Depth = 2) const;
210
211 /// Return true if there are no nodes using value ResNo of Node.
212 inline bool use_empty() const;
213
214 /// Return true if there is exactly one node using value ResNo of Node.
215 inline bool hasOneUse() const;
216};
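A minimal usage sketch (editorial addition, not part of the header): an SDValue is just this (node, result-number) pair, and a default-constructed SDValue carries a null node, so the forwarding calls declared above must not be made through it unguarded:

  SDValue V;                      // Node == nullptr, ResNo == 0
  if (V)                          // explicit operator bool: Node != nullptr
    EVT VT = V.getValueType();    // safe only on this guarded path
  SDValue R1 = V.getValue(1);     // same node, result #1; fine to construct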
217
218template<> struct DenseMapInfo<SDValue> {
219 static inline SDValue getEmptyKey() {
220 SDValue V;
221 V.ResNo = -1U;
222 return V;
223 }
224
225 static inline SDValue getTombstoneKey() {
226 SDValue V;
227 V.ResNo = -2U;
228 return V;
229 }
230
231 static unsigned getHashValue(const SDValue &Val) {
232 return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
233 (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
234 }
235
236 static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
237 return LHS == RHS;
238 }
239};
240
241/// Allow casting operators to work directly on
242/// SDValues as if they were SDNode*'s.
243template<> struct simplify_type<SDValue> {
244 using SimpleType = SDNode *;
245
246 static SimpleType getSimplifiedValue(SDValue &Val) {
247 return Val.getNode();
248 }
249};
250template<> struct simplify_type<const SDValue> {
251 using SimpleType = /*const*/ SDNode *;
252
253 static SimpleType getSimplifiedValue(const SDValue &Val) {
254 return Val.getNode();
255 }
256};
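These two specializations are what let LLVM's casting templates accept an SDValue wherever an SDNode* is expected. A hedged example of the resulting idiom (assuming Op holds a non-null node):

  void inspect(SDValue Op) {
    // dyn_cast sees through SDValue via simplify_type.
    if (auto *C = dyn_cast<ConstantSDNode>(Op))
      (void)C->getZExtValue();    // the node behind Op is a ConstantSDNode
  }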
257
258/// Represents a use of a SDNode. This class holds an SDValue,
259/// which records the SDNode being used and the result number, a
260/// pointer to the SDNode using the value, and Next and Prev pointers,
261/// which link together all the uses of an SDNode.
262///
263class SDUse {
264 /// Val - The value being used.
265 SDValue Val;
266 /// User - The user of this value.
267 SDNode *User = nullptr;
268 /// Prev, Next - Pointers to the uses list of the SDNode referred by
269 /// this operand.
270 SDUse **Prev = nullptr;
271 SDUse *Next = nullptr;
272
273public:
274 SDUse() = default;
275 SDUse(const SDUse &U) = delete;
276 SDUse &operator=(const SDUse &) = delete;
277
278 /// Normally SDUse will just implicitly convert to an SDValue that it holds.
279 operator const SDValue&() const { return Val; }
280
281 /// If implicit conversion to SDValue doesn't work, the get() method returns
282 /// the SDValue.
283 const SDValue &get() const { return Val; }
284
285 /// This returns the SDNode that contains this Use.
286 SDNode *getUser() { return User; }
287
288 /// Get the next SDUse in the use list.
289 SDUse *getNext() const { return Next; }
290
291 /// Convenience function for get().getNode().
292 SDNode *getNode() const { return Val.getNode(); }
293 /// Convenience function for get().getResNo().
294 unsigned getResNo() const { return Val.getResNo(); }
295 /// Convenience function for get().getValueType().
296 EVT getValueType() const { return Val.getValueType(); }
297
298 /// Convenience function for get().operator==
299 bool operator==(const SDValue &V) const {
300 return Val == V;
301 }
302
303 /// Convenience function for get().operator!=
304 bool operator!=(const SDValue &V) const {
305 return Val != V;
306 }
307
308 /// Convenience function for get().operator<
309 bool operator<(const SDValue &V) const {
310 return Val < V;
311 }
312
313private:
314 friend class SelectionDAG;
315 friend class SDNode;
316 // TODO: unfriend HandleSDNode once we fix its operand handling.
317 friend class HandleSDNode;
318
319 void setUser(SDNode *p) { User = p; }
320
321 /// Remove this use from its existing use list, assign it the
322 /// given value, and add it to the new value's node's use list.
323 inline void set(const SDValue &V);
324 /// Like set, but only supports initializing a newly-allocated
325 /// SDUse with a non-null value.
326 inline void setInitial(const SDValue &V);
327 /// Like set, but only sets the Node portion of the value,
328 /// leaving the ResNo portion unmodified.
329 inline void setNode(SDNode *N);
330
331 void addToList(SDUse **List) {
332 Next = *List;
333 if (Next) Next->Prev = &Next;
334 Prev = List;
335 *List = this;
336 }
337
338 void removeFromList() {
339 *Prev = Next;
340 if (Next) Next->Prev = Prev;
341 }
342};
343
344/// simplify_type specializations - Allow casting operators to work directly on
345/// SDValues as if they were SDNode*'s.
346template<> struct simplify_type<SDUse> {
347 using SimpleType = SDNode *;
348
349 static SimpleType getSimplifiedValue(SDUse &Val) {
350 return Val.getNode();
351 }
352};
353
354/// These are IR-level optimization flags that may be propagated to SDNodes.
355 /// TODO: This data structure should be shared by the IR optimizer and the
356 /// backend.
357struct SDNodeFlags {
358private:
359 // This bit is used to determine if the flags are in a defined state.
360 // Flag bits can only be masked out during intersection if the masking flags
361 // are defined.
362 bool AnyDefined : 1;
363
364 bool NoUnsignedWrap : 1;
365 bool NoSignedWrap : 1;
366 bool Exact : 1;
367 bool NoNaNs : 1;
368 bool NoInfs : 1;
369 bool NoSignedZeros : 1;
370 bool AllowReciprocal : 1;
371 bool VectorReduction : 1;
372 bool AllowContract : 1;
373 bool ApproximateFuncs : 1;
374 bool AllowReassociation : 1;
375
376 // We assume instructions do not raise floating-point exceptions by default,
377 // and only those marked explicitly may do so. We could choose to represent
378 // this via a positive "FPExcept" flags like on the MI level, but having a
379 // negative "NoFPExcept" flag here (that defaults to true) makes the flag
380 // intersection logic more straightforward.
381 bool NoFPExcept : 1;
382
383public:
384 /// Default constructor turns off all optimization flags.
385 SDNodeFlags()
386 : AnyDefined(false), NoUnsignedWrap(false), NoSignedWrap(false),
387 Exact(false), NoNaNs(false), NoInfs(false),
388 NoSignedZeros(false), AllowReciprocal(false), VectorReduction(false),
389 AllowContract(false), ApproximateFuncs(false),
390 AllowReassociation(false), NoFPExcept(false) {}
391
392 /// Propagate the fast-math-flags from an IR FPMathOperator.
393 void copyFMF(const FPMathOperator &FPMO) {
394 setNoNaNs(FPMO.hasNoNaNs());
395 setNoInfs(FPMO.hasNoInfs());
396 setNoSignedZeros(FPMO.hasNoSignedZeros());
397 setAllowReciprocal(FPMO.hasAllowReciprocal());
398 setAllowContract(FPMO.hasAllowContract());
399 setApproximateFuncs(FPMO.hasApproxFunc());
400 setAllowReassociation(FPMO.hasAllowReassoc());
401 }
402
403 /// Sets the state of the flags to the defined state.
404 void setDefined() { AnyDefined = true; }
405 /// Returns true if the flags are in a defined state.
406 bool isDefined() const { return AnyDefined; }
407
408 // These are mutators for each flag.
409 void setNoUnsignedWrap(bool b) {
410 setDefined();
411 NoUnsignedWrap = b;
412 }
413 void setNoSignedWrap(bool b) {
414 setDefined();
415 NoSignedWrap = b;
416 }
417 void setExact(bool b) {
418 setDefined();
419 Exact = b;
420 }
421 void setNoNaNs(bool b) {
422 setDefined();
423 NoNaNs = b;
424 }
425 void setNoInfs(bool b) {
426 setDefined();
427 NoInfs = b;
428 }
429 void setNoSignedZeros(bool b) {
430 setDefined();
431 NoSignedZeros = b;
432 }
433 void setAllowReciprocal(bool b) {
434 setDefined();
435 AllowReciprocal = b;
436 }
437 void setVectorReduction(bool b) {
438 setDefined();
439 VectorReduction = b;
440 }
441 void setAllowContract(bool b) {
442 setDefined();
443 AllowContract = b;
444 }
445 void setApproximateFuncs(bool b) {
446 setDefined();
447 ApproximateFuncs = b;
448 }
449 void setAllowReassociation(bool b) {
450 setDefined();
451 AllowReassociation = b;
452 }
453 void setNoFPExcept(bool b) {
454 setDefined();
455 NoFPExcept = b;
456 }
457
458 // These are accessors for each flag.
459 bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
460 bool hasNoSignedWrap() const { return NoSignedWrap; }
461 bool hasExact() const { return Exact; }
462 bool hasNoNaNs() const { return NoNaNs; }
463 bool hasNoInfs() const { return NoInfs; }
464 bool hasNoSignedZeros() const { return NoSignedZeros; }
465 bool hasAllowReciprocal() const { return AllowReciprocal; }
466 bool hasVectorReduction() const { return VectorReduction; }
467 bool hasAllowContract() const { return AllowContract; }
468 bool hasApproximateFuncs() const { return ApproximateFuncs; }
469 bool hasAllowReassociation() const { return AllowReassociation; }
470 bool hasNoFPExcept() const { return NoFPExcept; }
471
472 /// Clear any flags in this flag set that aren't also set in Flags.
473 /// If the given Flags are undefined then don't do anything.
474 void intersectWith(const SDNodeFlags Flags) {
475 if (!Flags.isDefined())
476 return;
477 NoUnsignedWrap &= Flags.NoUnsignedWrap;
478 NoSignedWrap &= Flags.NoSignedWrap;
479 Exact &= Flags.Exact;
480 NoNaNs &= Flags.NoNaNs;
481 NoInfs &= Flags.NoInfs;
482 NoSignedZeros &= Flags.NoSignedZeros;
483 AllowReciprocal &= Flags.AllowReciprocal;
484 VectorReduction &= Flags.VectorReduction;
485 AllowContract &= Flags.AllowContract;
486 ApproximateFuncs &= Flags.ApproximateFuncs;
487 AllowReassociation &= Flags.AllowReassociation;
488 NoFPExcept &= Flags.NoFPExcept;
489 }
490};
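A short sketch (editorial, with made-up flag choices) of the intersection semantics defined above: a flag survives only if it is set on both sides, and an undefined right-hand side leaves the left side untouched:

  SDNodeFlags A, B;
  A.setNoNaNs(true);               // A becomes 'defined'
  B.setNoNaNs(true);
  B.setExact(true);
  A.intersectWith(B);              // A.hasNoNaNs() stays true; Exact stays false
  A.intersectWith(SDNodeFlags());  // RHS undefined: no-op on A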
491
492/// Represents one node in the SelectionDAG.
493///
494class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
495private:
496 /// The operation that this node performs.
497 int16_t NodeType;
498
499protected:
500 // We define a set of mini-helper classes to help us interpret the bits in our
501 // SubclassData. These are designed to fit within a uint16_t so they pack
502 // with NodeType.
503
504#if defined(_AIX) && (!defined(__GNUC__) || defined(__ibmxl__))
505// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
506// and give the `pack` pragma push semantics.
507#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")
508#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")
509#else
510#define BEGIN_TWO_BYTE_PACK()
511#define END_TWO_BYTE_PACK()
512#endif
513
514BEGIN_TWO_BYTE_PACK()
515 class SDNodeBitfields {
516 friend class SDNode;
517 friend class MemIntrinsicSDNode;
518 friend class MemSDNode;
519 friend class SelectionDAG;
520
521 uint16_t HasDebugValue : 1;
522 uint16_t IsMemIntrinsic : 1;
523 uint16_t IsDivergent : 1;
524 };
525 enum { NumSDNodeBits = 3 };
526
527 class ConstantSDNodeBitfields {
528 friend class ConstantSDNode;
529
530 uint16_t : NumSDNodeBits;
531
532 uint16_t IsOpaque : 1;
533 };
534
535 class MemSDNodeBitfields {
536 friend class MemSDNode;
537 friend class MemIntrinsicSDNode;
538 friend class AtomicSDNode;
539
540 uint16_t : NumSDNodeBits;
541
542 uint16_t IsVolatile : 1;
543 uint16_t IsNonTemporal : 1;
544 uint16_t IsDereferenceable : 1;
545 uint16_t IsInvariant : 1;
546 };
547 enum { NumMemSDNodeBits = NumSDNodeBits + 4 };
548
549 class LSBaseSDNodeBitfields {
550 friend class LSBaseSDNode;
551 friend class MaskedLoadStoreSDNode;
552 friend class MaskedGatherScatterSDNode;
553
554 uint16_t : NumMemSDNodeBits;
555
556 // This storage is shared between disparate class hierarchies to hold an
557 // enumeration specific to the class hierarchy in use.
558 // LSBaseSDNode => enum ISD::MemIndexedMode
559 // MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode
560 // MaskedGatherScatterSDNode => enum ISD::MemIndexType
561 uint16_t AddressingMode : 3;
562 };
563 enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };
564
565 class LoadSDNodeBitfields {
566 friend class LoadSDNode;
567 friend class MaskedLoadSDNode;
568
569 uint16_t : NumLSBaseSDNodeBits;
570
571 uint16_t ExtTy : 2; // enum ISD::LoadExtType
572 uint16_t IsExpanding : 1;
573 };
574
575 class StoreSDNodeBitfields {
576 friend class StoreSDNode;
577 friend class MaskedStoreSDNode;
578
579 uint16_t : NumLSBaseSDNodeBits;
580
581 uint16_t IsTruncating : 1;
582 uint16_t IsCompressing : 1;
583 };
584
585 union {
586 char RawSDNodeBits[sizeof(uint16_t)];
587 SDNodeBitfields SDNodeBits;
588 ConstantSDNodeBitfields ConstantSDNodeBits;
589 MemSDNodeBitfields MemSDNodeBits;
590 LSBaseSDNodeBitfields LSBaseSDNodeBits;
591 LoadSDNodeBitfields LoadSDNodeBits;
592 StoreSDNodeBitfields StoreSDNodeBits;
593 };
594END_TWO_BYTE_PACK()
595#undef BEGIN_TWO_BYTE_PACK
596#undef END_TWO_BYTE_PACK
597
598 // RawSDNodeBits must cover the entirety of the union. This means that all of
599 // the union's members must have size <= RawSDNodeBits. We write the RHS as
600 // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
601 static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
602 static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
603 static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
604 static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
605 static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
606 static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
607
608private:
609 friend class SelectionDAG;
610 // TODO: unfriend HandleSDNode once we fix its operand handling.
611 friend class HandleSDNode;
612
613 /// Unique id per SDNode in the DAG.
614 int NodeId = -1;
615
616 /// The values that are used by this operation.
617 SDUse *OperandList = nullptr;
618
619 /// The types of the values this node defines. SDNodes may
620 /// define multiple values simultaneously.
621 const EVT *ValueList;
622
623 /// List of uses for this SDNode.
624 SDUse *UseList = nullptr;
625
626 /// The number of entries in the Operand/Value list.
627 unsigned short NumOperands = 0;
628 unsigned short NumValues;
629
630 // The ordering of the SDNodes. It roughly corresponds to the ordering of the
631 // original LLVM instructions.
632 // This is used for turning off scheduling, because we'll forgo
633 // the normal scheduling algorithms and output the instructions according to
634 // this ordering.
635 unsigned IROrder;
636
637 /// Source line information.
638 DebugLoc debugLoc;
639
640 /// Return a pointer to the specified value type.
641 static const EVT *getValueTypeList(EVT VT);
642
643 SDNodeFlags Flags;
644
645public:
646 /// Unique and persistent id per SDNode in the DAG.
647 /// Used for debug printing.
648 uint16_t PersistentId;
649
650 //===--------------------------------------------------------------------===//
651 // Accessors
652 //
653
654 /// Return the SelectionDAG opcode value for this node. For
655 /// pre-isel nodes (those for which isMachineOpcode returns false), these
656 /// are the opcode values in the ISD and <target>ISD namespaces. For
657 /// post-isel opcodes, see getMachineOpcode.
658 unsigned getOpcode() const { return (unsigned short)NodeType; }
659
660 /// Test if this node has a target-specific opcode (in the
661 /// \<target\>ISD namespace).
662 bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
663
664 /// Test if this node has a target-specific opcode that may raise
665 /// FP exceptions (in the \<target\>ISD namespace and greater than
666 /// FIRST_TARGET_STRICTFP_OPCODE). Note that all target memory
667 /// opcodes are currently automatically considered to possibly raise
668 /// FP exceptions as well.
669 bool isTargetStrictFPOpcode() const {
670 return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE;
671 }
672
673 /// Test if this node has a target-specific
674 /// memory-referencing opcode (in the \<target\>ISD namespace and
675 /// greater than FIRST_TARGET_MEMORY_OPCODE).
676 bool isTargetMemoryOpcode() const {
677 return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
678 }
679
680 /// Return true if the node is an ISD::UNDEF node.
681 bool isUndef() const { return NodeType == ISD::UNDEF; }
682
683 /// Test if this node is a memory intrinsic (with valid pointer information).
684 /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
685 /// non-memory intrinsics (with chains) that are not really instances of
686 /// MemSDNode. For such nodes, we need some extra state to determine the
687 /// proper classof relationship.
688 bool isMemIntrinsic() const {
689 return (NodeType == ISD::INTRINSIC_W_CHAIN ||
690 NodeType == ISD::INTRINSIC_VOID) &&
691 SDNodeBits.IsMemIntrinsic;
692 }
693
694 /// Test if this node is a strict floating point pseudo-op.
695 bool isStrictFPOpcode() {
696 switch (NodeType) {
697 default:
698 return false;
699 case ISD::STRICT_FP16_TO_FP:
700 case ISD::STRICT_FP_TO_FP16:
701#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
702 case ISD::STRICT_##DAGN:
703#include "llvm/IR/ConstrainedOps.def"
704 return true;
705 }
706 }
707
708 /// Test if this node has a post-isel opcode, directly
709 /// corresponding to a MachineInstr opcode.
710 bool isMachineOpcode() const { return NodeType < 0; }
711
712 /// This may only be called if isMachineOpcode returns
713 /// true. It returns the MachineInstr opcode value that the node's opcode
714 /// corresponds to.
715 unsigned getMachineOpcode() const {
716 assert(isMachineOpcode() && "Not a MachineInstr opcode!");
717 return ~NodeType;
718 }
719
720 bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
721 void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }
722
723 bool isDivergent() const { return SDNodeBits.IsDivergent; }
724
725 /// Return true if there are no uses of this node.
726 bool use_empty() const { return UseList == nullptr; }
727
728 /// Return true if there is exactly one use of this node.
729 bool hasOneUse() const {
730 return !use_empty() && std::next(use_begin()) == use_end();
731 }
732
733 /// Return the number of uses of this node. This method takes
734 /// time proportional to the number of uses.
735 size_t use_size() const { return std::distance(use_begin(), use_end()); }
736
737 /// Return the unique node id.
738 int getNodeId() const { return NodeId; }
739
740 /// Set unique node id.
741 void setNodeId(int Id) { NodeId = Id; }
742
743 /// Return the node ordering.
744 unsigned getIROrder() const { return IROrder; }
745
746 /// Set the node ordering.
747 void setIROrder(unsigned Order) { IROrder = Order; }
748
749 /// Return the source location info.
750 const DebugLoc &getDebugLoc() const { return debugLoc; }
751
752 /// Set source location info. Try to avoid this, putting
753 /// it in the constructor is preferable.
754 void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
755
756 /// This class provides iterator support for SDUse
757 /// operands that use a specific SDNode.
758 class use_iterator
759 : public std::iterator<std::forward_iterator_tag, SDUse, ptrdiff_t> {
760 friend class SDNode;
761
762 SDUse *Op = nullptr;
763
764 explicit use_iterator(SDUse *op) : Op(op) {}
765
766 public:
767 using reference = std::iterator<std::forward_iterator_tag,
768 SDUse, ptrdiff_t>::reference;
769 using pointer = std::iterator<std::forward_iterator_tag,
770 SDUse, ptrdiff_t>::pointer;
771
772 use_iterator() = default;
773 use_iterator(const use_iterator &I) : Op(I.Op) {}
774
775 bool operator==(const use_iterator &x) const {
776 return Op == x.Op;
777 }
778 bool operator!=(const use_iterator &x) const {
779 return !operator==(x);
780 }
781
782 /// Return true if this iterator is at the end of uses list.
783 bool atEnd() const { return Op == nullptr; }
784
785 // Iterator traversal: forward iteration only.
786 use_iterator &operator++() { // Preincrement
787 assert(Op && "Cannot increment end iterator!");
788 Op = Op->getNext();
789 return *this;
790 }
791
792 use_iterator operator++(int) { // Postincrement
793 use_iterator tmp = *this; ++*this; return tmp;
794 }
795
796 /// Retrieve a pointer to the current user node.
797 SDNode *operator*() const {
798 assert(Op && "Cannot dereference end iterator!");
799 return Op->getUser();
800 }
801
802 SDNode *operator->() const { return operator*(); }
803
804 SDUse &getUse() const { return *Op; }
805
806 /// Retrieve the operand # of this use in its user.
807 unsigned getOperandNo() const {
808 assert(Op && "Cannot dereference end iterator!");
809 return (unsigned)(Op - Op->getUser()->OperandList);
810 }
811 };
812
813 /// Provide iteration support to walk over all uses of an SDNode.
814 use_iterator use_begin() const {
815 return use_iterator(UseList);
816 }
817
818 static use_iterator use_end() { return use_iterator(nullptr); }
819
820 inline iterator_range<use_iterator> uses() {
821 return make_range(use_begin(), use_end());
822 }
823 inline iterator_range<use_iterator> uses() const {
824 return make_range(use_begin(), use_end());
825 }
826
827 /// Return true if there are exactly NUSES uses of the indicated value.
828 /// This method ignores uses of other values defined by this operation.
829 bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;
830
831 /// Return true if there are any use of the indicated value.
832 /// This method ignores uses of other values defined by this operation.
833 bool hasAnyUseOfValue(unsigned Value) const;
834
835 /// Return true if this node is the only use of N.
836 bool isOnlyUserOf(const SDNode *N) const;
837
838 /// Return true if this node is an operand of N.
839 bool isOperandOf(const SDNode *N) const;
840
841 /// Return true if this node is a predecessor of N.
842 /// NOTE: Implemented on top of hasPredecessor and every bit as
843 /// expensive. Use carefully.
844 bool isPredecessorOf(const SDNode *N) const {
845 return N->hasPredecessor(this);
846 }
847
848 /// Return true if N is a predecessor of this node.
849 /// N is either an operand of this node, or can be reached by recursively
850 /// traversing up the operands.
851 /// NOTE: This is an expensive method. Use it carefully.
852 bool hasPredecessor(const SDNode *N) const;
853
854 /// Returns true if N is a predecessor of any node in Worklist. This
855 /// helper keeps Visited and Worklist sets externally to allow union
856 /// searches to be performed in parallel, caching of results across
857 /// queries, and incremental addition to Worklist. Stops early if N is
858 /// found but will resume. Remember to clear Visited and Worklists
859 /// if DAG changes. MaxSteps gives a maximum number of nodes to visit before
860 /// giving up. The TopologicalPrune flag signals that positive NodeIds are
861 /// topologically ordered (Operands have strictly smaller node id) and search
862 /// can be pruned leveraging this.
863 static bool hasPredecessorHelper(const SDNode *N,
864 SmallPtrSetImpl<const SDNode *> &Visited,
865 SmallVectorImpl<const SDNode *> &Worklist,
866 unsigned int MaxSteps = 0,
867 bool TopologicalPrune = false) {
868 SmallVector<const SDNode *, 8> DeferredNodes;
869 if (Visited.count(N))
870 return true;
871
872 // Node Ids are assigned in three places: as a topological
873 // ordering (> 0), during legalization (values set to
874 // 0), and for new nodes (set to -1). If N has a topological id then we
875 // know that all nodes with smaller ids cannot be
876 // successors and we need not check them. Filter out all nodes
877 // that can't be matches. We add them to the worklist before exit
878 // in case of multiple calls. Note that during selection the topological
879 // id may be violated if a node's predecessor is selected before it. We
880 // mark this at selection by negating the id of unselected successors
881 // and restricting topological pruning to positive ids.
882
883 int NId = N->getNodeId();
884 // If we invalidated the Id, reconstruct the original NId.
885 if (NId < -1)
886 NId = -(NId + 1);
887
888 bool Found = false;
889 while (!Worklist.empty()) {
890 const SDNode *M = Worklist.pop_back_val();
891 int MId = M->getNodeId();
892 if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
893 (MId > 0) && (MId < NId)) {
894 DeferredNodes.push_back(M);
895 continue;
896 }
897 for (const SDValue &OpV : M->op_values()) {
898 SDNode *Op = OpV.getNode();
899 if (Visited.insert(Op).second)
900 Worklist.push_back(Op);
901 if (Op == N)
902 Found = true;
903 }
904 if (Found)
905 break;
906 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
907 break;
908 }
909 // Push deferred nodes back on worklist.
910 Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
911 // If we bailed early, conservatively return found.
912 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
913 return true;
914 return Found;
915 }
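  // Editorial sketch (not in the original header) of the intended call
  // pattern: the caller owns Visited and Worklist, seeds the worklist, and
  // may reuse both across queries on the same (unchanged) DAG:
  //   SmallPtrSet<const SDNode *, 16> Visited;
  //   SmallVector<const SDNode *, 16> Worklist;
  //   Worklist.push_back(Root);     // Root: hypothetical starting SDNode*
  //   bool Precedes = SDNode::hasPredecessorHelper(N, Visited, Worklist);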
916
917 /// Return true if all the users of N are contained in Nodes.
918 /// NOTE: Requires at least one match, but doesn't require them all.
919 static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);
920
921 /// Return the number of values used by this operation.
922 unsigned getNumOperands() const { return NumOperands; }
923
924 /// Return the maximum number of operands that a SDNode can hold.
925 static constexpr size_t getMaxNumOperands() {
926 return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
927 }
928
929 /// Helper method returns the integer value of a ConstantSDNode operand.
930 inline uint64_t getConstantOperandVal(unsigned Num) const;
931
932 /// Helper method returns the APInt of a ConstantSDNode operand.
933 inline const APInt &getConstantOperandAPInt(unsigned Num) const;
934
935 const SDValue &getOperand(unsigned Num) const {
936 assert(Num < NumOperands && "Invalid child # of SDNode!");
937 return OperandList[Num];
938 }
939
940 using op_iterator = SDUse *;
941
942 op_iterator op_begin() const { return OperandList; }
943 op_iterator op_end() const { return OperandList+NumOperands; }
944 ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
945
946 /// Iterator for directly iterating over the operand SDValue's.
947 struct value_op_iterator
948 : iterator_adaptor_base<value_op_iterator, op_iterator,
949 std::random_access_iterator_tag, SDValue,
950 ptrdiff_t, value_op_iterator *,
951 value_op_iterator *> {
952 explicit value_op_iterator(SDUse *U = nullptr)
953 : iterator_adaptor_base(U) {}
954
955 const SDValue &operator*() const { return I->get(); }
956 };
957
958 iterator_range<value_op_iterator> op_values() const {
959 return make_range(value_op_iterator(op_begin()),
960 value_op_iterator(op_end()));
961 }
962
963 SDVTList getVTList() const {
964 SDVTList X = { ValueList, NumValues };
965 return X;
966 }
967
968 /// If this node has a glue operand, return the node
969 /// to which the glue operand points. Otherwise return NULL.
970 SDNode *getGluedNode() const {
971 if (getNumOperands() != 0 &&
972 getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
973 return getOperand(getNumOperands()-1).getNode();
974 return nullptr;
975 }
976
977 /// If this node has a glue value with a user, return
978 /// the user (there is at most one). Otherwise return NULL.
979 SDNode *getGluedUser() const {
980 for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
981 if (UI.getUse().get().getValueType() == MVT::Glue)
982 return *UI;
983 return nullptr;
984 }
985
986 const SDNodeFlags getFlags() const { return Flags; }
987 void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }
988
989 /// Clear any flags in this node that aren't also set in Flags.
990 /// If Flags is not in a defined state then this has no effect.
991 void intersectFlagsWith(const SDNodeFlags Flags);
992
993 /// Return the number of values defined/returned by this operator.
994 unsigned getNumValues() const { return NumValues; }
995
996 /// Return the type of a specified result.
997 EVT getValueType(unsigned ResNo) const {
998 assert(ResNo < NumValues && "Illegal result number!");
999 return ValueList[ResNo];
1000 }
1001
1002 /// Return the type of a specified result as a simple type.
1003 MVT getSimpleValueType(unsigned ResNo) const {
1004 return getValueType(ResNo).getSimpleVT();
1005 }
1006
1007 /// Returns MVT::getSizeInBits(getValueType(ResNo)).
1008 ///
1009 /// If the value type is a scalable vector type, the scalable property will
1010 /// be set and the runtime size will be a positive integer multiple of the
1011 /// base size.
1012 TypeSize getValueSizeInBits(unsigned ResNo) const {
1013 return getValueType(ResNo).getSizeInBits();
1014 }
1015
1016 using value_iterator = const EVT *;
1017
1018 value_iterator value_begin() const { return ValueList; }
1019 value_iterator value_end() const { return ValueList+NumValues; }
1020 iterator_range<value_iterator> values() const {
1021 return llvm::make_range(value_begin(), value_end());
1022 }
1023
1024 /// Return the opcode of this operation for printing.
1025 std::string getOperationName(const SelectionDAG *G = nullptr) const;
1026 static const char* getIndexedModeName(ISD::MemIndexedMode AM);
1027 void print_types(raw_ostream &OS, const SelectionDAG *G) const;
1028 void print_details(raw_ostream &OS, const SelectionDAG *G) const;
1029 void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
1030 void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
1031
1032 /// Print a SelectionDAG node and all children down to
1033 /// the leaves. The given SelectionDAG allows target-specific nodes
1034 /// to be printed in human-readable form. Unlike printr, this will
1035 /// print the whole DAG, including children that appear multiple
1036 /// times.
1037 ///
1038 void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
1039
1040 /// Print a SelectionDAG node and children up to
1041 /// depth "depth." The given SelectionDAG allows target-specific
1042 /// nodes to be printed in human-readable form. Unlike printr, this
1043 /// will print children that appear multiple times wherever they are
1044 /// used.
1045 ///
1046 void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
1047 unsigned depth = 100) const;
1048
1049 /// Dump this node, for debugging.
1050 void dump() const;
1051
1052 /// Dump (recursively) this node and its use-def subgraph.
1053 void dumpr() const;
1054
1055 /// Dump this node, for debugging.
1056 /// The given SelectionDAG allows target-specific nodes to be printed
1057 /// in human-readable form.
1058 void dump(const SelectionDAG *G) const;
1059
1060 /// Dump (recursively) this node and its use-def subgraph.
1061 /// The given SelectionDAG allows target-specific nodes to be printed
1062 /// in human-readable form.
1063 void dumpr(const SelectionDAG *G) const;
1064
1065 /// printrFull to dbgs(). The given SelectionDAG allows
1066 /// target-specific nodes to be printed in human-readable form.
1067 /// Unlike dumpr, this will print the whole DAG, including children
1068 /// that appear multiple times.
1069 void dumprFull(const SelectionDAG *G = nullptr) const;
1070
1071 /// printrWithDepth to dbgs(). The given
1072 /// SelectionDAG allows target-specific nodes to be printed in
1073 /// human-readable form. Unlike dumpr, this will print children
1074 /// that appear multiple times wherever they are used.
1075 ///
1076 void dumprWithDepth(const SelectionDAG *G = nullptr,
1077 unsigned depth = 100) const;
1078
1079 /// Gather unique data for the node.
1080 void Profile(FoldingSetNodeID &ID) const;
1081
1082 /// This method should only be used by the SDUse class.
1083 void addUse(SDUse &U) { U.addToList(&UseList); }
1084
1085protected:
1086 static SDVTList getSDVTList(EVT VT) {
1087 SDVTList Ret = { getValueTypeList(VT), 1 };
1088 return Ret;
1089 }
1090
1091 /// Create an SDNode.
1092 ///
1093 /// SDNodes are created without any operands, and never own the operand
1094 /// storage. To add operands, see SelectionDAG::createOperands.
1095 SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
1096 : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
1097 IROrder(Order), debugLoc(std::move(dl)) {
1098 memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
1099 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
1100 assert(NumValues == VTs.NumVTs &&
1101 "NumValues wasn't wide enough for its operands!");
1102 }
1103
1104 /// Release the operands and set this node to have zero operands.
1105 void DropOperands();
1106};
1107
1108/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
1109/// into SDNode creation functions.
1110/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
1111/// from the original Instruction, and IROrder is the ordinal position of
1112/// the instruction.
1113/// When an SDNode is created after the DAG is being built, both DebugLoc and
1114/// the IROrder are propagated from the original SDNode.
1115 /// So the SDLoc class provides two constructors besides the default one, one to
1116/// be used by the DAGBuilder, the other to be used by others.
1117class SDLoc {
1118private:
1119 DebugLoc DL;
1120 int IROrder = 0;
1121
1122public:
1123 SDLoc() = default;
1124 SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
1125 SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
1126 SDLoc(const Instruction *I, int Order) : IROrder(Order) {
1127 assert(Order >= 0 && "bad IROrder");
1128 if (I)
1129 DL = I->getDebugLoc();
1130 }
1131
1132 unsigned getIROrder() const { return IROrder; }
1133 const DebugLoc &getDebugLoc() const { return DL; }
1134};
1135
1136// Define inline functions from the SDValue class.
1137
1138inline SDValue::SDValue(SDNode *node, unsigned resno)
1139 : Node(node), ResNo(resno) {
1140 // Explicitly check for !ResNo to avoid use-after-free, because there are
1141 // callers that use SDValue(N, 0) with a deleted N to indicate successful
1142 // combines.
1143 assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&
1144 "Invalid result number for the given node!");
1145 assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.");
1146}
1147
1148inline unsigned SDValue::getOpcode() const {
1149 return Node->getOpcode();
1150}
1151
1152inline EVT SDValue::getValueType() const {
1153 return Node->getValueType(ResNo);
22: Called C++ object pointer is null
1154}
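This forwarder is where the analyzer's path ends: it dereferences Node unconditionally, so any SDValue whose node is null, such as the empty SDValue() a lowering hook returns to signal "no result", must not reach it. A defensive sketch of the call-site pattern that avoids the warning (tryLowerSomething is a hypothetical helper):

  if (SDValue Res = tryLowerSomething(Op))  // operator bool checks Node
    EVT VT = Res.getValueType();            // Node is known non-null here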
1155
1156inline unsigned SDValue::getNumOperands() const {
1157 return Node->getNumOperands();
1158}
1159
1160inline const SDValue &SDValue::getOperand(unsigned i) const {
1161 return Node->getOperand(i);
1162}
1163
1164inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
1165 return Node->getConstantOperandVal(i);
1166}
1167
1168inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
1169 return Node->getConstantOperandAPInt(i);
1170}
1171
1172inline bool SDValue::isTargetOpcode() const {
1173 return Node->isTargetOpcode();
1174}
1175
1176inline bool SDValue::isTargetMemoryOpcode() const {
1177 return Node->isTargetMemoryOpcode();
1178}
1179
1180inline bool SDValue::isMachineOpcode() const {
1181 return Node->isMachineOpcode();
1182}
1183
1184inline unsigned SDValue::getMachineOpcode() const {
1185 return Node->getMachineOpcode();
1186}
1187
1188inline bool SDValue::isUndef() const {
1189 return Node->isUndef();
1190}
1191
1192inline bool SDValue::use_empty() const {
1193 return !Node->hasAnyUseOfValue(ResNo);
1194}
1195
1196inline bool SDValue::hasOneUse() const {
1197 return Node->hasNUsesOfValue(1, ResNo);
1198}
1199
1200inline const DebugLoc &SDValue::getDebugLoc() const {
1201 return Node->getDebugLoc();
1202}
1203
1204inline void SDValue::dump() const {
1205 return Node->dump();
1206}
1207
1208inline void SDValue::dump(const SelectionDAG *G) const {
1209 return Node->dump(G);
1210}
1211
1212inline void SDValue::dumpr() const {
1213 return Node->dumpr();
1214}
1215
1216inline void SDValue::dumpr(const SelectionDAG *G) const {
1217 return Node->dumpr(G);
1218}
1219
1220// Define inline functions from the SDUse class.
1221
1222inline void SDUse::set(const SDValue &V) {
1223 if (Val.getNode()) removeFromList();
1224 Val = V;
1225 if (V.getNode()) V.getNode()->addUse(*this);
1226}
1227
1228inline void SDUse::setInitial(const SDValue &V) {
1229 Val = V;
1230 V.getNode()->addUse(*this);
1231}
1232
1233inline void SDUse::setNode(SDNode *N) {
1234 if (Val.getNode()) removeFromList();
1235 Val.setNode(N);
1236 if (N) N->addUse(*this);
1237}
1238
1239/// This class is used to form a handle around another node that
1240/// is persistent and is updated across invocations of replaceAllUsesWith on its
1241/// operand. This node should be directly created by end-users and not added to
1242/// the AllNodes list.
1243class HandleSDNode : public SDNode {
1244 SDUse Op;
1245
1246public:
1247 explicit HandleSDNode(SDValue X)
1248 : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
1249 // HandleSDNodes are never inserted into the DAG, so they won't be
1250 // auto-numbered. Use ID 65535 as a sentinel.
1251 PersistentId = 0xffff;
1252
1253 // Manually set up the operand list. This node type is special in that it's
1254 // always stack allocated and SelectionDAG does not manage its operands.
1255 // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
1256 // be so special.
1257 Op.setUser(this);
1258 Op.setInitial(X);
1259 NumOperands = 1;
1260 OperandList = &Op;
1261 }
1262 ~HandleSDNode();
1263
1264 const SDValue &getValue() const { return Op; }
1265};
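// A minimal usage sketch (hypothetical caller code, per the class comment
// above): HandleSDNode pins a value across DAG mutations that may invoke
// replaceAllUsesWith, so the caller can re-read the possibly-replaced value:
//
//   HandleSDNode Handle(Chain);  // stack-allocated; never added to AllNodes
//   // ... run a step that may RAUW the node wrapped by Chain ...
//   Chain = Handle.getValue();   // observes the replacement, if any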
1266
1267class AddrSpaceCastSDNode : public SDNode {
1268private:
1269 unsigned SrcAddrSpace;
1270 unsigned DestAddrSpace;
1271
1272public:
1273 AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
1274 unsigned SrcAS, unsigned DestAS);
1275
1276 unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
1277 unsigned getDestAddressSpace() const { return DestAddrSpace; }
1278
1279 static bool classof(const SDNode *N) {
1280 return N->getOpcode() == ISD::ADDRSPACECAST;
1281 }
1282};
1283
1284/// This is an abstract virtual class for memory operations.
1285class MemSDNode : public SDNode {
1286private:
1287 // VT of in-memory value.
1288 EVT MemoryVT;
1289
1290protected:
1291 /// Memory reference information.
1292 MachineMemOperand *MMO;
1293
1294public:
1295 MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
1296 EVT memvt, MachineMemOperand *MMO);
1297
1298 bool readMem() const { return MMO->isLoad(); }
1299 bool writeMem() const { return MMO->isStore(); }
1300
1301 /// Returns the alignment of the memory access.
1302 unsigned getOriginalAlignment() const {
1303 return MMO->getBaseAlignment();
1304 }
1305 unsigned getAlignment() const {
1306 return MMO->getAlignment();
1307 }
1308
1309 /// Return the SubclassData value, without HasDebugValue. This contains an
1310 /// encoding of the volatile flag, as well as bits used by subclasses. This
1311 /// function should only be used to compute a FoldingSetNodeID value.
1312 /// The HasDebugValue bit is masked out because the CSE map needs to match
1313 /// nodes with debug info against nodes without it. The same applies to the
1314 /// IsDivergent bit.
1315 unsigned getRawSubclassData() const {
1316 uint16_t Data;
1317 union {
1318 char RawSDNodeBits[sizeof(uint16_t)];
1319 SDNodeBitfields SDNodeBits;
1320 };
1321 memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
1322 SDNodeBits.HasDebugValue = 0;
1323 SDNodeBits.IsDivergent = false;
1324 memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
1325 return Data;
1326 }
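// Note on the implementation above: the union-plus-memcpy sequence is the
// strict-aliasing-safe way to reinterpret the raw bitfield storage. It copies
// the two bytes in, clears the HasDebugValue and IsDivergent bits so they
// cannot perturb CSE matching, then copies the bytes back out as a uint16_t.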
1327
1328 bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
1329 bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
1330 bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
1331 bool isInvariant() const { return MemSDNodeBits.IsInvariant; }
1332
1333 // Returns the offset from the location of the access.
1334 int64_t getSrcValueOffset() const { return MMO->getOffset(); }
1335
1336 /// Returns the AA info that describes the dereference.
1337 AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }
1338
1339 /// Returns the Ranges that describes the dereference.
1340 const MDNode *getRanges() const { return MMO->getRanges(); }
1341
1342 /// Returns the synchronization scope ID for this memory operation.
1343 SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }
1344
1345 /// Return the atomic ordering requirements for this memory operation. For
1346 /// cmpxchg atomic operations, return the atomic ordering requirements when
1347 /// store occurs.
1348 AtomicOrdering getOrdering() const { return MMO->getOrdering(); }
1349
1350 /// Return true if the memory operation ordering is Unordered or higher.
1351 bool isAtomic() const { return MMO->isAtomic(); }
1352
1353 /// Returns true if the memory operation doesn't imply any ordering
1354 /// constraints on surrounding memory operations beyond the normal memory
1355 /// aliasing rules.
1356 bool isUnordered() const { return MMO->isUnordered(); }
1357
1358 /// Returns true if the memory operation is neither atomic nor volatile.
1359 bool isSimple() const { return !isAtomic() && !isVolatile(); }
1360
1361 /// Return the type of the in-memory value.
1362 EVT getMemoryVT() const { return MemoryVT; }
1363
1364 /// Return a MachineMemOperand object describing the memory
1365 /// reference performed by operation.
1366 MachineMemOperand *getMemOperand() const { return MMO; }
1367
1368 const MachinePointerInfo &getPointerInfo() const {
1369 return MMO->getPointerInfo();
1370 }
1371
1372 /// Return the address space for the associated pointer
1373 unsigned getAddressSpace() const {
1374 return getPointerInfo().getAddrSpace();
1375 }
1376
1377 /// Update this MemSDNode's MachineMemOperand information
1378 /// to reflect the alignment of NewMMO, if it has a greater alignment.
1379 /// This must only be used when the new alignment applies to all users of
1380 /// this MachineMemOperand.
1381 void refineAlignment(const MachineMemOperand *NewMMO) {
1382 MMO->refineAlignment(NewMMO);
1383 }
1384
1385 const SDValue &getChain() const { return getOperand(0); }
1386 const SDValue &getBasePtr() const {
1387 return getOperand(getOpcode() == ISD::STORE ? 2 : 1);
1388 }
1389
1390 // Methods to support isa and dyn_cast
1391 static bool classof(const SDNode *N) {
1392 // For some targets, we lower some target intrinsics to a MemIntrinsicNode
1393 // with either an intrinsic or a target opcode.
1394 return N->getOpcode() == ISD::LOAD ||
1395 N->getOpcode() == ISD::STORE ||
1396 N->getOpcode() == ISD::PREFETCH ||
1397 N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1398 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1399 N->getOpcode() == ISD::ATOMIC_SWAP ||
1400 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1401 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1402 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1403 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1404 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1405 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1406 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1407 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1408 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1409 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1410 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1411 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1412 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1413 N->getOpcode() == ISD::ATOMIC_LOAD ||
1414 N->getOpcode() == ISD::ATOMIC_STORE ||
1415 N->getOpcode() == ISD::MLOAD ||
1416 N->getOpcode() == ISD::MSTORE ||
1417 N->getOpcode() == ISD::MGATHER ||
1418 N->getOpcode() == ISD::MSCATTER ||
1419 N->isMemIntrinsic() ||
1420 N->isTargetMemoryOpcode();
1421 }
1422};
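// A minimal sketch (hypothetical caller code): classof() above lets
// isa<>/dyn_cast<> treat all of these opcodes uniformly as memory operations,
// e.g. when a transformation must respect volatility and atomicity:
//
//   if (auto *MemN = dyn_cast<MemSDNode>(N)) {
//     if (!MemN->isSimple())              // atomic or volatile access
//       return false;                     // illustrative: stay conservative
//     unsigned Align = MemN->getAlignment();
//     // ...
//   }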
1423
1424/// This is an SDNode representing atomic operations.
1425class AtomicSDNode : public MemSDNode {
1426public:
1427 AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
1428 EVT MemVT, MachineMemOperand *MMO)
1429 : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
1430 assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) ||
1431 MMO->isAtomic()) && "then why are we using an AtomicSDNode?");
1432 }
1433
1434 const SDValue &getBasePtr() const { return getOperand(1); }
1435 const SDValue &getVal() const { return getOperand(2); }
1436
1437 /// Returns true if this SDNode represents cmpxchg atomic operation, false
1438 /// otherwise.
1439 bool isCompareAndSwap() const {
1440 unsigned Op = getOpcode();
1441 return Op == ISD::ATOMIC_CMP_SWAP ||
1442 Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
1443 }
1444
1445 /// For cmpxchg atomic operations, return the atomic ordering requirements
1446 /// when store does not occur.
1447 AtomicOrdering getFailureOrdering() const {
1448 assert(isCompareAndSwap() && "Must be cmpxchg operation");
1449 return MMO->getFailureOrdering();
1450 }
1451
1452 // Methods to support isa and dyn_cast
1453 static bool classof(const SDNode *N) {
1454 return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1455 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1456 N->getOpcode() == ISD::ATOMIC_SWAP ||
1457 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1458 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1459 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1460 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1461 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1462 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1463 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1464 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1465 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1466 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1467 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1468 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1469 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1470 N->getOpcode() == ISD::ATOMIC_LOAD ||
1471 N->getOpcode() == ISD::ATOMIC_STORE;
1472 }
1473};
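// A minimal sketch (hypothetical caller code): getFailureOrdering() asserts
// isCompareAndSwap(), so callers test that predicate first:
//
//   if (auto *A = dyn_cast<AtomicSDNode>(N)) {
//     AtomicOrdering Success = A->getOrdering();
//     AtomicOrdering Failure =
//         A->isCompareAndSwap() ? A->getFailureOrdering() : Success;
//     // ...
//   }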
1474
1475/// This SDNode is used for target intrinsics that touch
1476/// memory and need an associated MachineMemOperand. Its opcode may be
1477/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
1478/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
1479class MemIntrinsicSDNode : public MemSDNode {
1480public:
1481 MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
1482 SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
1483 : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
1484 SDNodeBits.IsMemIntrinsic = true;
1485 }
1486
1487 // Methods to support isa and dyn_cast
1488 static bool classof(const SDNode *N) {
1489 // We lower some target intrinsics to their target opcode early, so a
1490 // node with a target opcode can be of this class.
1491 return N->isMemIntrinsic() ||
1492 N->getOpcode() == ISD::PREFETCH ||
1493 N->isTargetMemoryOpcode();
1494 }
1495};
1496
1497/// This SDNode is used to implement the code generator
1498/// support for the llvm IR shufflevector instruction. It combines elements
1499/// from two input vectors into a new input vector, with the selection and
1500/// ordering of elements determined by an array of integers, referred to as
1501/// the shuffle mask. For input vectors of width N, mask indices of 0..N-1
1502 /// refer to elements from the LHS input; indices from N to 2N-1 refer to the RHS.
1503/// An index of -1 is treated as undef, such that the code generator may put
1504/// any value in the corresponding element of the result.
1505class ShuffleVectorSDNode : public SDNode {
1506 // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
1507 // is freed when the SelectionDAG object is destroyed.
1508 const int *Mask;
1509
1510protected:
1511 friend class SelectionDAG;
1512
1513 ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
1514 : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}
1515
1516public:
1517 ArrayRef<int> getMask() const {
1518 EVT VT = getValueType(0);
1519 return makeArrayRef(Mask, VT.getVectorNumElements());
1520 }
1521
1522 int getMaskElt(unsigned Idx) const {
1523 assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!");
1524 return Mask[Idx];
1525 }
1526
1527 bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
1528
1529 int getSplatIndex() const {
1530 assert(isSplat() && "Cannot get splat index for non-splat!");
1531 EVT VT = getValueType(0);
1532 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1533 if (Mask[i] >= 0)
1534 return Mask[i];
1535
1536 // We can choose any index value here and be correct because all elements
1537 // are undefined. Return 0 for better potential for callers to simplify.
1538 return 0;
1539 }
1540
1541 static bool isSplatMask(const int *Mask, EVT VT);
1542
1543 /// Change values in a shuffle permute mask assuming
1544 /// the two vector operands have swapped position.
1545 static void commuteMask(MutableArrayRef<int> Mask) {
1546 unsigned NumElems = Mask.size();
1547 for (unsigned i = 0; i != NumElems; ++i) {
1548 int idx = Mask[i];
1549 if (idx < 0)
1550 continue;
1551 else if (idx < (int)NumElems)
1552 Mask[i] = idx + NumElems;
1553 else
1554 Mask[i] = idx - NumElems;
1555 }
1556 }
1557
1558 static bool classof(const SDNode *N) {
1559 return N->getOpcode() == ISD::VECTOR_SHUFFLE;
1560 }
1561};
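// A worked example of the mask semantics described above (illustrative
// values): for two <4 x i32> inputs, the mask {0, 5, -1, 3} selects LHS[0],
// RHS[1] (index 5 - 4), an undef element, and LHS[3].
//
//   SmallVector<int, 4> Mask = {0, 5, -1, 3};
//   ShuffleVectorSDNode::commuteMask(Mask);  // Mask becomes {4, 1, -1, 7}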
1562
1563class ConstantSDNode : public SDNode {
1564 friend class SelectionDAG;
1565
1566 const ConstantInt *Value;
1567
1568 ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT)
1569 : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(),
1570 getSDVTList(VT)),
1571 Value(val) {
1572 ConstantSDNodeBits.IsOpaque = isOpaque;
1573 }
1574
1575public:
1576 const ConstantInt *getConstantIntValue() const { return Value; }
1577 const APInt &getAPIntValue() const { return Value->getValue(); }
1578 uint64_t getZExtValue() const { return Value->getZExtValue(); }
1579 int64_t getSExtValue() const { return Value->getSExtValue(); }
1580 uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) {
1581 return Value->getLimitedValue(Limit);
1582 }
1583
1584 bool isOne() const { return Value->isOne(); }
1585 bool isNullValue() const { return Value->isZero(); }
1586 bool isAllOnesValue() const { return Value->isMinusOne(); }
1587
1588 bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }
1589
1590 static bool classof(const SDNode *N) {
1591 return N->getOpcode() == ISD::Constant ||
1592 N->getOpcode() == ISD::TargetConstant;
1593 }
1594};
1595
1596uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
1597 return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
1598}
1599
1600const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const {
1601 return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
1602}
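// A minimal sketch (hypothetical caller code): getConstantOperandVal() uses
// cast<>, which asserts when the operand is not a ConstantSDNode; guarded
// callers use dyn_cast<> instead:
//
//   if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
//     uint64_t Imm = C->getZExtValue();  // equals N->getConstantOperandVal(1)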
1603
1604class ConstantFPSDNode : public SDNode {
1605 friend class SelectionDAG;
1606
1607 const ConstantFP *Value;
1608
1609 ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT)
1610 : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0,
1611 DebugLoc(), getSDVTList(VT)),
1612 Value(val) {}
1613
1614public:
1615 const APFloat& getValueAPF() const { return Value->getValueAPF(); }
1616 const ConstantFP *getConstantFPValue() const { return Value; }
1617
1618 /// Return true if the value is positive or negative zero.
1619 bool isZero() const { return Value->isZero(); }
1620
1621 /// Return true if the value is a NaN.
1622 bool isNaN() const { return Value->isNaN(); }
1623
1624 /// Return true if the value is an infinity
1625 bool isInfinity() const { return Value->isInfinity(); }
1626
1627 /// Return true if the value is negative.
1628 bool isNegative() const { return Value->isNegative(); }
1629
1630 /// We don't rely on operator== working on double values, as
1631 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
1632 /// As such, this method can be used to do an exact bit-for-bit comparison of
1633 /// two floating point values.
1634
1635 /// We leave the version with the double argument here because it's just so
1636 /// convenient to write "2.0" and the like. Without this function we'd
1637 /// have to duplicate its logic everywhere it's called.
1638 bool isExactlyValue(double V) const {
1639 return Value->getValueAPF().isExactlyValue(V);
1640 }
1641 bool isExactlyValue(const APFloat& V) const;
1642
1643 static bool isValueValidForType(EVT VT, const APFloat& Val);
1644
1645 static bool classof(const SDNode *N) {
1646 return N->getOpcode() == ISD::ConstantFP ||
1647 N->getOpcode() == ISD::TargetConstantFP;
1648 }
1649};
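// A minimal sketch (hypothetical caller code) of the bit-exact matching
// described above, which sidesteps the pitfalls of FP operator==:
//
//   if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
//     if (CFP->isExactlyValue(2.0))  // bit-for-bit; -0.0 does not match 0.0
//       /* fold the 2.0 case */;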
1650
1651/// Returns true if \p V is a constant integer zero.
1652bool isNullConstant(SDValue V);
1653
1654/// Returns true if \p V is an FP constant with a value of positive zero.
1655bool isNullFPConstant(SDValue V);
1656
1657/// Returns true if \p V is an integer constant with all bits set.
1658bool isAllOnesConstant(SDValue V);
1659
1660/// Returns true if \p V is a constant integer one.
1661bool isOneConstant(SDValue V);
1662
1663/// Return the non-bitcasted source operand of \p V if it exists.
1664/// If \p V is not a bitcasted value, it is returned as-is.
1665SDValue peekThroughBitcasts(SDValue V);
1666
1667/// Return the non-bitcasted and one-use source operand of \p V if it exists.
1668/// If \p V is not a bitcasted one-use value, it is returned as-is.
1669SDValue peekThroughOneUseBitcasts(SDValue V);
1670
1671/// Return the non-extracted vector source operand of \p V if it exists.
1672/// If \p V is not an extracted subvector, it is returned as-is.
1673SDValue peekThroughExtractSubvectors(SDValue V);
1674
1675/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
1676/// constant is canonicalized to be operand 1.
1677bool isBitwiseNot(SDValue V, bool AllowUndefs = false);
1678
1679/// Returns the SDNode if it is a constant splat BuildVector or constant int.
1680ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false,
1681 bool AllowTruncation = false);
1682
1683/// Returns the SDNode if it is a demanded constant splat BuildVector or
1684/// constant int.
1685ConstantSDNode *isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
1686 bool AllowUndefs = false,
1687 bool AllowTruncation = false);
1688
1689/// Returns the SDNode if it is a constant splat BuildVector or constant float.
1690ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false);
1691
1692/// Returns the SDNode if it is a demanded constant splat BuildVector or
1693/// constant float.
1694ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, const APInt &DemandedElts,
1695 bool AllowUndefs = false);
1696
1697/// Return true if the value is a constant 0 integer or a splatted vector of
1698/// a constant 0 integer (with no undefs by default).
1699/// Build vector implicit truncation is not an issue for null values.
1700bool isNullOrNullSplat(SDValue V, bool AllowUndefs = false);
1701
1702/// Return true if the value is a constant 1 integer or a splatted vector of a
1703/// constant 1 integer (with no undefs).
1704/// Does not permit build vector implicit truncation.
1705bool isOneOrOneSplat(SDValue V);
1706
1707/// Return true if the value is a constant -1 integer or a splatted vector of a
1708/// constant -1 integer (with no undefs).
1709/// Does not permit build vector implicit truncation.
1710bool isAllOnesOrAllOnesSplat(SDValue V);
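// A minimal sketch (hypothetical DAG-combine code) composing the helpers
// above:
//
//   SDValue RHS = peekThroughBitcasts(N->getOperand(1));
//   if (isAllOnesOrAllOnesSplat(RHS))
//     return N->getOperand(0);                 // illustrative: x & -1 --> x
//   if (ConstantSDNode *C = isConstOrConstSplat(RHS)) {
//     const APInt &Val = C->getAPIntValue();   // uniform scalar/splat value
//     // ...
//   }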
1711
1712class GlobalAddressSDNode : public SDNode {
1713 friend class SelectionDAG;
1714
1715 const GlobalValue *TheGlobal;
1716 int64_t Offset;
1717 unsigned TargetFlags;
1718
1719 GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
1720 const GlobalValue *GA, EVT VT, int64_t o,
1721 unsigned TF);
1722
1723public:
1724 const GlobalValue *getGlobal() const { return TheGlobal; }
1725 int64_t getOffset() const { return Offset; }
1726 unsigned getTargetFlags() const { return TargetFlags; }
1727 // Return the address space this GlobalAddress belongs to.
1728 unsigned getAddressSpace() const;
1729
1730 static bool classof(const SDNode *N) {
1731 return N->getOpcode() == ISD::GlobalAddress ||
1732 N->getOpcode() == ISD::TargetGlobalAddress ||
1733 N->getOpcode() == ISD::GlobalTLSAddress ||
1734 N->getOpcode() == ISD::TargetGlobalTLSAddress;
1735 }
1736};
1737
1738class FrameIndexSDNode : public SDNode {
1739 friend class SelectionDAG;
1740
1741 int FI;
1742
1743 FrameIndexSDNode(int fi, EVT VT, bool isTarg)
1744 : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
1745 0, DebugLoc(), getSDVTList(VT)), FI(fi) {
1746 }
1747
1748public:
1749 int getIndex() const { return FI; }
1750
1751 static bool classof(const SDNode *N) {
1752 return N->getOpcode() == ISD::FrameIndex ||
1753 N->getOpcode() == ISD::TargetFrameIndex;
1754 }
1755};
1756
1757/// This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate
1758 /// the offset and size that are started/ended in the underlying FrameIndex.
1759class LifetimeSDNode : public SDNode {
1760 friend class SelectionDAG;
1761 int64_t Size;
1762 int64_t Offset; // -1 if offset is unknown.
1763
1764 LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
1765 SDVTList VTs, int64_t Size, int64_t Offset)
1766 : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {}
1767public:
1768 int64_t getFrameIndex() const {
1769 return cast<FrameIndexSDNode>(getOperand(1))->getIndex();
1770 }
1771
1772 bool hasOffset() const { return Offset >= 0; }
1773 int64_t getOffset() const {
1774 assert(hasOffset() && "offset is unknown");
1775 return Offset;
1776 }
1777 int64_t getSize() const {
1778 assert(hasOffset() && "offset is unknown");
1779 return Size;
1780 }
1781
1782 // Methods to support isa and dyn_cast
1783 static bool classof(const SDNode *N) {
1784 return N->getOpcode() == ISD::LIFETIME_START ||
1785 N->getOpcode() == ISD::LIFETIME_END;
1786 }
1787};
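// A minimal sketch (hypothetical caller code): getOffset() and getSize()
// assert hasOffset(), so callers check it first:
//
//   if (const auto *LN = dyn_cast<LifetimeSDNode>(N))
//     if (LN->hasOffset()) {
//       int64_t Off = LN->getOffset();
//       int64_t Sz  = LN->getSize();
//     }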
1788
1789class JumpTableSDNode : public SDNode {
1790 friend class SelectionDAG;
1791
1792 int JTI;
1793 unsigned TargetFlags;
1794
1795 JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned TF)
1796 : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
1797 0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
1798 }
1799
1800public:
1801 int getIndex() const { return JTI; }
1802 unsigned getTargetFlags() const { return TargetFlags; }
1803
1804 static bool classof(const SDNode *N) {
1805 return N->getOpcode() == ISD::JumpTable ||
1806 N->getOpcode() == ISD::TargetJumpTable;
1807 }
1808};
1809
1810class ConstantPoolSDNode : public SDNode {
1811 friend class SelectionDAG;
1812
1813 union {
1814 const Constant *ConstVal;
1815 MachineConstantPoolValue *MachineCPVal;
1816 } Val;
1817 int Offset; // It's a MachineConstantPoolValue if top bit is set.
1818 unsigned Alignment; // Minimum alignment requirement of CP (not log2 value).
1819 unsigned TargetFlags;
1820
1821 ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
1822 unsigned Align, unsigned TF)
1823 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1824 DebugLoc(), getSDVTList(VT)), Offset(o), Alignment(Align),
1825 TargetFlags(TF) {
1826 assert(Offset >= 0 && "Offset is too large");
1827 Val.ConstVal = c;
1828 }
1829
1830 ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v,
1831 EVT VT, int o, unsigned Align, unsigned TF)
1832 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1833 DebugLoc(), getSDVTList(VT)), Offset(o), Alignment(Align),
1834 TargetFlags(TF) {
1835 assert(Offset >= 0 && "Offset is too large");
1836 Val.MachineCPVal = v;
1837 Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1);
1838 }
1839
1840public:
1841 bool isMachineConstantPoolEntry() const {
1842 return Offset < 0;
1843 }
1844
1845 const Constant *getConstVal() const {
1846 assert(!isMachineConstantPoolEntry() && "Wrong constantpool type");
1847 return Val.ConstVal;
1848 }
1849
1850 MachineConstantPoolValue *getMachineCPVal() const {
1851 assert(isMachineConstantPoolEntry() && "Wrong constantpool type");
1852 return Val.MachineCPVal;
1853 }
1854
1855 int getOffset() const {
1856 return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
1857 }
1858
1859 // Return the alignment of this constant pool object, which is either 0 (for
1860 // default alignment) or the desired value.
1861 unsigned getAlignment() const { return Alignment; }
1862 unsigned getTargetFlags() const { return TargetFlags; }
1863
1864 Type *getType() const;
1865
1866 static bool classof(const SDNode *N) {
1867 return N->getOpcode() == ISD::ConstantPool ||
1868 N->getOpcode() == ISD::TargetConstantPool;
1869 }
1870};
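// A worked example of the Offset tag bit above (illustrative, assuming
// 32-bit unsigned): a plain Constant entry with offset 16 stores Offset ==
// 0x00000010. A MachineConstantPoolValue entry ORs in bit 31, giving
// 0x80000010, which is negative as a signed int, so
// isMachineConstantPoolEntry() returns true; getOffset() masks the tag bit
// back off and recovers 16.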
1871
1872/// Completely target-dependent object reference.
1873class TargetIndexSDNode : public SDNode {
1874 friend class SelectionDAG;
1875
1876 unsigned TargetFlags;
1877 int Index;
1878 int64_t Offset;
1879
1880public:
1881 TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned TF)
1882 : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
1883 TargetFlags(TF), Index(Idx), Offset(Ofs) {}
1884
1885 unsigned getTargetFlags() const { return TargetFlags; }