Bug Summary

File: llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
Warning: line 3299, column 20
1st function call argument is an uninitialized value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name ARMISelDAGToDAG.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Target/ARM -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Target/ARM -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include 
-internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Target/ARM -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-08-28-193554-24367-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp

/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp

1//===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines an instruction selector for the ARM target.
10//
11//===----------------------------------------------------------------------===//
12
13#include "ARM.h"
14#include "ARMBaseInstrInfo.h"
15#include "ARMTargetMachine.h"
16#include "MCTargetDesc/ARMAddressingModes.h"
17#include "Utils/ARMBaseInfo.h"
18#include "llvm/ADT/APSInt.h"
19#include "llvm/ADT/StringSwitch.h"
20#include "llvm/CodeGen/MachineFrameInfo.h"
21#include "llvm/CodeGen/MachineFunction.h"
22#include "llvm/CodeGen/MachineInstrBuilder.h"
23#include "llvm/CodeGen/MachineRegisterInfo.h"
24#include "llvm/CodeGen/SelectionDAG.h"
25#include "llvm/CodeGen/SelectionDAGISel.h"
26#include "llvm/CodeGen/TargetLowering.h"
27#include "llvm/IR/CallingConv.h"
28#include "llvm/IR/Constants.h"
29#include "llvm/IR/DerivedTypes.h"
30#include "llvm/IR/Function.h"
31#include "llvm/IR/Intrinsics.h"
32#include "llvm/IR/IntrinsicsARM.h"
33#include "llvm/IR/LLVMContext.h"
34#include "llvm/Support/CommandLine.h"
35#include "llvm/Support/Debug.h"
36#include "llvm/Support/ErrorHandling.h"
37#include "llvm/Target/TargetOptions.h"
38
39using namespace llvm;
40
41#define DEBUG_TYPE"arm-isel" "arm-isel"
42
43static cl::opt<bool>
44DisableShifterOp("disable-shifter-op", cl::Hidden,
45 cl::desc("Disable isel of shifter-op"),
46 cl::init(false));
47
48//===--------------------------------------------------------------------===//
49/// ARMDAGToDAGISel - ARM specific code to select ARM machine
50/// instructions for SelectionDAG operations.
51///
52namespace {
53
54class ARMDAGToDAGISel : public SelectionDAGISel {
55 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
56 /// make the right decision when generating code for different targets.
57 const ARMSubtarget *Subtarget;
58
59public:
60 explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm, CodeGenOpt::Level OptLevel)
61 : SelectionDAGISel(tm, OptLevel) {}
62
63 bool runOnMachineFunction(MachineFunction &MF) override {
64 // Reset the subtarget each time through.
65 Subtarget = &MF.getSubtarget<ARMSubtarget>();
66 SelectionDAGISel::runOnMachineFunction(MF);
67 return true;
68 }
69
70 StringRef getPassName() const override { return "ARM Instruction Selection"; }
71
72 void PreprocessISelDAG() override;
73
74 /// getI32Imm - Return a target constant of type i32 with the specified
75 /// value.
76 inline SDValue getI32Imm(unsigned Imm, const SDLoc &dl) {
77 return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
78 }
79
80 void Select(SDNode *N) override;
81
82 /// Return true as some complex patterns, like those that call
83 /// canExtractShiftFromMul can modify the DAG inplace.
84 bool ComplexPatternFuncMutatesDAG() const override { return true; }
85
86 bool hasNoVMLxHazardUse(SDNode *N) const;
87 bool isShifterOpProfitable(const SDValue &Shift,
88 ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
89 bool SelectRegShifterOperand(SDValue N, SDValue &A,
90 SDValue &B, SDValue &C,
91 bool CheckProfitability = true);
92 bool SelectImmShifterOperand(SDValue N, SDValue &A,
93 SDValue &B, bool CheckProfitability = true);
94 bool SelectShiftRegShifterOperand(SDValue N, SDValue &A, SDValue &B,
95 SDValue &C) {
96 // Don't apply the profitability check
97 return SelectRegShifterOperand(N, A, B, C, false);
98 }
99 bool SelectShiftImmShifterOperand(SDValue N, SDValue &A, SDValue &B) {
100 // Don't apply the profitability check
101 return SelectImmShifterOperand(N, A, B, false);
102 }
103 bool SelectShiftImmShifterOperandOneUse(SDValue N, SDValue &A, SDValue &B) {
104 if (!N.hasOneUse())
105 return false;
106 return SelectImmShifterOperand(N, A, B, false);
107 }
108
109 bool SelectAddLikeOr(SDNode *Parent, SDValue N, SDValue &Out);
110
111 bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
112 bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);
113
114 bool SelectCMOVPred(SDValue N, SDValue &Pred, SDValue &Reg) {
115 const ConstantSDNode *CN = cast<ConstantSDNode>(N);
116 Pred = CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(N), MVT::i32);
117 Reg = CurDAG->getRegister(ARM::CPSR, MVT::i32);
118 return true;
119 }
120
121 bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
122 SDValue &Offset, SDValue &Opc);
123 bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
124 SDValue &Offset, SDValue &Opc);
125 bool SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
126 SDValue &Offset, SDValue &Opc);
127 bool SelectAddrOffsetNone(SDValue N, SDValue &Base);
128 bool SelectAddrMode3(SDValue N, SDValue &Base,
129 SDValue &Offset, SDValue &Opc);
130 bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
131 SDValue &Offset, SDValue &Opc);
132 bool IsAddressingMode5(SDValue N, SDValue &Base, SDValue &Offset, bool FP16);
133 bool SelectAddrMode5(SDValue N, SDValue &Base, SDValue &Offset);
134 bool SelectAddrMode5FP16(SDValue N, SDValue &Base, SDValue &Offset);
135 bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,SDValue &Align);
136 bool SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset);
137
138 bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);
139
140 // Thumb Addressing Modes:
141 bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
142 bool SelectThumbAddrModeRRSext(SDValue N, SDValue &Base, SDValue &Offset);
143 bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
144 SDValue &OffImm);
145 bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
146 SDValue &OffImm);
147 bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
148 SDValue &OffImm);
149 bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
150 SDValue &OffImm);
151 bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);
152 template <unsigned Shift>
153 bool SelectTAddrModeImm7(SDValue N, SDValue &Base, SDValue &OffImm);
154
155 // Thumb 2 Addressing Modes:
156 bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
157 template <unsigned Shift>
158 bool SelectT2AddrModeImm8(SDValue N, SDValue &Base, SDValue &OffImm);
159 bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
160 SDValue &OffImm);
161 bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
162 SDValue &OffImm);
163 template <unsigned Shift>
164 bool SelectT2AddrModeImm7Offset(SDNode *Op, SDValue N, SDValue &OffImm);
165 bool SelectT2AddrModeImm7Offset(SDNode *Op, SDValue N, SDValue &OffImm,
166 unsigned Shift);
167 template <unsigned Shift>
168 bool SelectT2AddrModeImm7(SDValue N, SDValue &Base, SDValue &OffImm);
169 bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
170 SDValue &OffReg, SDValue &ShImm);
171 bool SelectT2AddrModeExclusive(SDValue N, SDValue &Base, SDValue &OffImm);
172
173 template<int Min, int Max>
174 bool SelectImmediateInRange(SDValue N, SDValue &OffImm);
175
176 inline bool is_so_imm(unsigned Imm) const {
177 return ARM_AM::getSOImmVal(Imm) != -1;
178 }
179
180 inline bool is_so_imm_not(unsigned Imm) const {
181 return ARM_AM::getSOImmVal(~Imm) != -1;
182 }
183
184 inline bool is_t2_so_imm(unsigned Imm) const {
185 return ARM_AM::getT2SOImmVal(Imm) != -1;
186 }
187
188 inline bool is_t2_so_imm_not(unsigned Imm) const {
189 return ARM_AM::getT2SOImmVal(~Imm) != -1;
190 }
191
192 // Include the pieces autogenerated from the target description.
193#include "ARMGenDAGISel.inc"
194
195private:
196 void transferMemOperands(SDNode *Src, SDNode *Dst);
197
198 /// Indexed (pre/post inc/dec) load matching code for ARM.
199 bool tryARMIndexedLoad(SDNode *N);
200 bool tryT1IndexedLoad(SDNode *N);
201 bool tryT2IndexedLoad(SDNode *N);
202 bool tryMVEIndexedLoad(SDNode *N);
203 bool tryFMULFixed(SDNode *N, SDLoc dl);
204 bool tryFP_TO_INT(SDNode *N, SDLoc dl);
205 bool transformFixedFloatingPointConversion(SDNode *N, SDNode *FMul,
206 bool IsUnsigned,
207 bool FixedToFloat);
208
209 /// SelectVLD - Select NEON load intrinsics. NumVecs should be
210 /// 1, 2, 3 or 4. The opcode arrays specify the instructions used for
211 /// loads of D registers and even subregs and odd subregs of Q registers.
212 /// For NumVecs <= 2, QOpcodes1 is not used.
213 void SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
214 const uint16_t *DOpcodes, const uint16_t *QOpcodes0,
215 const uint16_t *QOpcodes1);
216
217 /// SelectVST - Select NEON store intrinsics. NumVecs should
218 /// be 1, 2, 3 or 4. The opcode arrays specify the instructions used for
219 /// stores of D registers and even subregs and odd subregs of Q registers.
220 /// For NumVecs <= 2, QOpcodes1 is not used.
221 void SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
222 const uint16_t *DOpcodes, const uint16_t *QOpcodes0,
223 const uint16_t *QOpcodes1);
224
225 /// SelectVLDSTLane - Select NEON load/store lane intrinsics. NumVecs should
226 /// be 2, 3 or 4. The opcode arrays specify the instructions used for
227 /// load/store of D registers and Q registers.
228 void SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
229 unsigned NumVecs, const uint16_t *DOpcodes,
230 const uint16_t *QOpcodes);
231
232 /// Helper functions for setting up clusters of MVE predication operands.
233 template <typename SDValueVector>
234 void AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
235 SDValue PredicateMask);
236 template <typename SDValueVector>
237 void AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
238 SDValue PredicateMask, SDValue Inactive);
239
240 template <typename SDValueVector>
241 void AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc);
242 template <typename SDValueVector>
243 void AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc, EVT InactiveTy);
244
245 /// SelectMVE_WB - Select MVE writeback load/store intrinsics.
246 void SelectMVE_WB(SDNode *N, const uint16_t *Opcodes, bool Predicated);
247
248 /// SelectMVE_LongShift - Select MVE 64-bit scalar shift intrinsics.
249 void SelectMVE_LongShift(SDNode *N, uint16_t Opcode, bool Immediate,
250 bool HasSaturationOperand);
251
252 /// SelectMVE_VADCSBC - Select MVE vector add/sub-with-carry intrinsics.
253 void SelectMVE_VADCSBC(SDNode *N, uint16_t OpcodeWithCarry,
254 uint16_t OpcodeWithNoCarry, bool Add, bool Predicated);
255
256 /// SelectMVE_VSHLC - Select MVE intrinsics for a shift that carries between
257 /// vector lanes.
258 void SelectMVE_VSHLC(SDNode *N, bool Predicated);
259
260 /// Select long MVE vector reductions with two vector operands
261 /// Stride is the number of vector element widths the instruction can operate
262 /// on:
263 /// 2 for long non-rounding variants, vml{a,s}ldav[a][x]: [i16, i32]
264 /// 1 for long rounding variants: vrml{a,s}ldavh[a][x]: [i32]
265 /// Stride is used when addressing the OpcodesS array which contains multiple
266 /// opcodes for each element width.
267 /// TySize is the index into the list of element types listed above
268 void SelectBaseMVE_VMLLDAV(SDNode *N, bool Predicated,
269 const uint16_t *OpcodesS, const uint16_t *OpcodesU,
270 size_t Stride, size_t TySize);
271
272 /// Select a 64-bit MVE vector reduction with two vector operands
273 /// arm_mve_vmlldava_[predicated]
274 void SelectMVE_VMLLDAV(SDNode *N, bool Predicated, const uint16_t *OpcodesS,
275 const uint16_t *OpcodesU);
276 /// Select a 72-bit MVE vector rounding reduction with two vector operands
277 /// int_arm_mve_vrmlldavha[_predicated]
278 void SelectMVE_VRMLLDAVH(SDNode *N, bool Predicated, const uint16_t *OpcodesS,
279 const uint16_t *OpcodesU);
280
281 /// SelectMVE_VLD - Select MVE interleaving load intrinsics. NumVecs
282 /// should be 2 or 4. The opcode array specifies the instructions
283 /// used for 8, 16 and 32-bit lane sizes respectively, and each
284 /// pointer points to a set of NumVecs sub-opcodes used for the
285 /// different stages (e.g. VLD20 versus VLD21) of each load family.
286 void SelectMVE_VLD(SDNode *N, unsigned NumVecs,
287 const uint16_t *const *Opcodes, bool HasWriteback);
288
289 /// SelectMVE_VxDUP - Select MVE incrementing-dup instructions. Opcodes is an
290 /// array of 3 elements for the 8, 16 and 32-bit lane sizes.
291 void SelectMVE_VxDUP(SDNode *N, const uint16_t *Opcodes,
292 bool Wrapping, bool Predicated);
293
294 /// Select SelectCDE_CXxD - Select CDE dual-GPR instruction (one of CX1D,
295 /// CX1DA, CX2D, CX2DA, CX3, CX3DA).
296 /// \arg \c NumExtraOps number of extra operands besides the coprocossor,
297 /// the accumulator and the immediate operand, i.e. 0
298 /// for CX1*, 1 for CX2*, 2 for CX3*
299 /// \arg \c HasAccum whether the instruction has an accumulator operand
300 void SelectCDE_CXxD(SDNode *N, uint16_t Opcode, size_t NumExtraOps,
301 bool HasAccum);
302
303 /// SelectVLDDup - Select NEON load-duplicate intrinsics. NumVecs
304 /// should be 1, 2, 3 or 4. The opcode array specifies the instructions used
305 /// for loading D registers.
306 void SelectVLDDup(SDNode *N, bool IsIntrinsic, bool isUpdating,
307 unsigned NumVecs, const uint16_t *DOpcodes,
308 const uint16_t *QOpcodes0 = nullptr,
309 const uint16_t *QOpcodes1 = nullptr);
310
311 /// Try to select SBFX/UBFX instructions for ARM.
312 bool tryV6T2BitfieldExtractOp(SDNode *N, bool isSigned);
313
314 bool tryInsertVectorElt(SDNode *N);
315
316 // Select special operations if node forms integer ABS pattern
317 bool tryABSOp(SDNode *N);
318
319 bool tryReadRegister(SDNode *N);
320 bool tryWriteRegister(SDNode *N);
321
322 bool tryInlineAsm(SDNode *N);
323
324 void SelectCMPZ(SDNode *N, bool &SwitchEQNEToPLMI);
325
326 void SelectCMP_SWAP(SDNode *N);
327
328 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
329 /// inline asm expressions.
330 bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
331 std::vector<SDValue> &OutOps) override;
332
333 // Form pairs of consecutive R, S, D, or Q registers.
334 SDNode *createGPRPairNode(EVT VT, SDValue V0, SDValue V1);
335 SDNode *createSRegPairNode(EVT VT, SDValue V0, SDValue V1);
336 SDNode *createDRegPairNode(EVT VT, SDValue V0, SDValue V1);
337 SDNode *createQRegPairNode(EVT VT, SDValue V0, SDValue V1);
338
339 // Form sequences of 4 consecutive S, D, or Q registers.
340 SDNode *createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
341 SDNode *createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
342 SDNode *createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
343
344 // Get the alignment operand for a NEON VLD or VST instruction.
345 SDValue GetVLDSTAlign(SDValue Align, const SDLoc &dl, unsigned NumVecs,
346 bool is64BitVector);
347
348 /// Checks if N is a multiplication by a constant where we can extract out a
349 /// power of two from the constant so that it can be used in a shift, but only
350 /// if it simplifies the materialization of the constant. Returns true if it
351 /// is, and assigns to PowerOfTwo the power of two that should be extracted
352 /// out and to NewMulConst the new constant to be multiplied by.
353 bool canExtractShiftFromMul(const SDValue &N, unsigned MaxShift,
354 unsigned &PowerOfTwo, SDValue &NewMulConst) const;
355
356 /// Replace N with M in CurDAG, in a way that also ensures that M gets
357 /// selected when N would have been selected.
358 void replaceDAGValue(const SDValue &N, SDValue M);
359};
360}
361
362/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
363/// operand. If so Imm will receive the 32-bit value.
364static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
365 if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
366 Imm = cast<ConstantSDNode>(N)->getZExtValue();
367 return true;
368 }
369 return false;
370}
371
372// isInt32Immediate - This method tests to see if a constant operand.
373// If so Imm will receive the 32 bit value.
374static bool isInt32Immediate(SDValue N, unsigned &Imm) {
375 return isInt32Immediate(N.getNode(), Imm);
376}
377
378// isOpcWithIntImmediate - This method tests to see if the node is a specific
379// opcode and that it has a immediate integer right operand.
380// If so Imm will receive the 32 bit value.
381static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
382 return N->getOpcode() == Opc &&
383 isInt32Immediate(N->getOperand(1).getNode(), Imm);
384}
385
386/// Check whether a particular node is a constant value representable as
387/// (N * Scale) where (N in [\p RangeMin, \p RangeMax).
388///
389/// \param ScaledConstant [out] - On success, the pre-scaled constant value.
390static bool isScaledConstantInRange(SDValue Node, int Scale,
391 int RangeMin, int RangeMax,
392 int &ScaledConstant) {
393 assert(Scale > 0 && "Invalid scale!")(static_cast <bool> (Scale > 0 && "Invalid scale!"
) ? void (0) : __assert_fail ("Scale > 0 && \"Invalid scale!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 393, __extension__ __PRETTY_FUNCTION__))
;
394
395 // Check that this is a constant.
396 const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
397 if (!C)
398 return false;
399
400 ScaledConstant = (int) C->getZExtValue();
401 if ((ScaledConstant % Scale) != 0)
402 return false;
403
404 ScaledConstant /= Scale;
405 return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
406}
407
408void ARMDAGToDAGISel::PreprocessISelDAG() {
409 if (!Subtarget->hasV6T2Ops())
410 return;
411
412 bool isThumb2 = Subtarget->isThumb();
413 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
414 E = CurDAG->allnodes_end(); I != E; ) {
415 SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.
416
417 if (N->getOpcode() != ISD::ADD)
418 continue;
419
420 // Look for (add X1, (and (srl X2, c1), c2)) where c2 is constant with
421 // leading zeros, followed by consecutive set bits, followed by 1 or 2
422 // trailing zeros, e.g. 1020.
423 // Transform the expression to
424 // (add X1, (shl (and (srl X2, c1), (c2>>tz)), tz)) where tz is the number
425 // of trailing zeros of c2. The left shift would be folded as an shifter
426 // operand of 'add' and the 'and' and 'srl' would become a bits extraction
427 // node (UBFX).
428
429 SDValue N0 = N->getOperand(0);
430 SDValue N1 = N->getOperand(1);
431 unsigned And_imm = 0;
432 if (!isOpcWithIntImmediate(N1.getNode(), ISD::AND, And_imm)) {
433 if (isOpcWithIntImmediate(N0.getNode(), ISD::AND, And_imm))
434 std::swap(N0, N1);
435 }
436 if (!And_imm)
437 continue;
438
439 // Check if the AND mask is an immediate of the form: 000.....1111111100
440 unsigned TZ = countTrailingZeros(And_imm);
441 if (TZ != 1 && TZ != 2)
442 // Be conservative here. Shifter operands aren't always free. e.g. On
443 // Swift, left shifter operand of 1 / 2 for free but others are not.
444 // e.g.
445 // ubfx r3, r1, #16, #8
446 // ldr.w r3, [r0, r3, lsl #2]
447 // vs.
448 // mov.w r9, #1020
449 // and.w r2, r9, r1, lsr #14
450 // ldr r2, [r0, r2]
451 continue;
452 And_imm >>= TZ;
453 if (And_imm & (And_imm + 1))
454 continue;
455
456 // Look for (and (srl X, c1), c2).
457 SDValue Srl = N1.getOperand(0);
458 unsigned Srl_imm = 0;
459 if (!isOpcWithIntImmediate(Srl.getNode(), ISD::SRL, Srl_imm) ||
460 (Srl_imm <= 2))
461 continue;
462
463 // Make sure first operand is not a shifter operand which would prevent
464 // folding of the left shift.
465 SDValue CPTmp0;
466 SDValue CPTmp1;
467 SDValue CPTmp2;
468 if (isThumb2) {
469 if (SelectImmShifterOperand(N0, CPTmp0, CPTmp1))
470 continue;
471 } else {
472 if (SelectImmShifterOperand(N0, CPTmp0, CPTmp1) ||
473 SelectRegShifterOperand(N0, CPTmp0, CPTmp1, CPTmp2))
474 continue;
475 }
476
477 // Now make the transformation.
478 Srl = CurDAG->getNode(ISD::SRL, SDLoc(Srl), MVT::i32,
479 Srl.getOperand(0),
480 CurDAG->getConstant(Srl_imm + TZ, SDLoc(Srl),
481 MVT::i32));
482 N1 = CurDAG->getNode(ISD::AND, SDLoc(N1), MVT::i32,
483 Srl,
484 CurDAG->getConstant(And_imm, SDLoc(Srl), MVT::i32));
485 N1 = CurDAG->getNode(ISD::SHL, SDLoc(N1), MVT::i32,
486 N1, CurDAG->getConstant(TZ, SDLoc(Srl), MVT::i32));
487 CurDAG->UpdateNodeOperands(N, N0, N1);
488 }
489}
490
491/// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
492/// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
493/// least on current ARM implementations) which should be avoidded.
494bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
495 if (OptLevel == CodeGenOpt::None)
496 return true;
497
498 if (!Subtarget->hasVMLxHazards())
499 return true;
500
501 if (!N->hasOneUse())
502 return false;
503
504 SDNode *Use = *N->use_begin();
505 if (Use->getOpcode() == ISD::CopyToReg)
506 return true;
507 if (Use->isMachineOpcode()) {
508 const ARMBaseInstrInfo *TII = static_cast<const ARMBaseInstrInfo *>(
509 CurDAG->getSubtarget().getInstrInfo());
510
511 const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
512 if (MCID.mayStore())
513 return true;
514 unsigned Opcode = MCID.getOpcode();
515 if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
516 return true;
517 // vmlx feeding into another vmlx. We actually want to unfold
518 // the use later in the MLxExpansion pass. e.g.
519 // vmla
520 // vmla (stall 8 cycles)
521 //
522 // vmul (5 cycles)
523 // vadd (5 cycles)
524 // vmla
525 // This adds up to about 18 - 19 cycles.
526 //
527 // vmla
528 // vmul (stall 4 cycles)
529 // vadd adds up to about 14 cycles.
530 return TII->isFpMLxInstruction(Opcode);
531 }
532
533 return false;
534}
535
536bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
537 ARM_AM::ShiftOpc ShOpcVal,
538 unsigned ShAmt) {
539 if (!Subtarget->isLikeA9() && !Subtarget->isSwift())
540 return true;
541 if (Shift.hasOneUse())
542 return true;
543 // R << 2 is free.
544 return ShOpcVal == ARM_AM::lsl &&
545 (ShAmt == 2 || (Subtarget->isSwift() && ShAmt == 1));
546}
547
548bool ARMDAGToDAGISel::canExtractShiftFromMul(const SDValue &N,
549 unsigned MaxShift,
550 unsigned &PowerOfTwo,
551 SDValue &NewMulConst) const {
552 assert(N.getOpcode() == ISD::MUL)(static_cast <bool> (N.getOpcode() == ISD::MUL) ? void (
0) : __assert_fail ("N.getOpcode() == ISD::MUL", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 552, __extension__ __PRETTY_FUNCTION__))
;
553 assert(MaxShift > 0)(static_cast <bool> (MaxShift > 0) ? void (0) : __assert_fail
("MaxShift > 0", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 553, __extension__ __PRETTY_FUNCTION__))
;
554
555 // If the multiply is used in more than one place then changing the constant
556 // will make other uses incorrect, so don't.
557 if (!N.hasOneUse()) return false;
558 // Check if the multiply is by a constant
559 ConstantSDNode *MulConst = dyn_cast<ConstantSDNode>(N.getOperand(1));
560 if (!MulConst) return false;
561 // If the constant is used in more than one place then modifying it will mean
562 // we need to materialize two constants instead of one, which is a bad idea.
563 if (!MulConst->hasOneUse()) return false;
564 unsigned MulConstVal = MulConst->getZExtValue();
565 if (MulConstVal == 0) return false;
566
567 // Find the largest power of 2 that MulConstVal is a multiple of
568 PowerOfTwo = MaxShift;
569 while ((MulConstVal % (1 << PowerOfTwo)) != 0) {
570 --PowerOfTwo;
571 if (PowerOfTwo == 0) return false;
572 }
573
574 // Only optimise if the new cost is better
575 unsigned NewMulConstVal = MulConstVal / (1 << PowerOfTwo);
576 NewMulConst = CurDAG->getConstant(NewMulConstVal, SDLoc(N), MVT::i32);
577 unsigned OldCost = ConstantMaterializationCost(MulConstVal, Subtarget);
578 unsigned NewCost = ConstantMaterializationCost(NewMulConstVal, Subtarget);
579 return NewCost < OldCost;
580}
581
582void ARMDAGToDAGISel::replaceDAGValue(const SDValue &N, SDValue M) {
583 CurDAG->RepositionNode(N.getNode()->getIterator(), M.getNode());
584 ReplaceUses(N, M);
585}
586
587bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
588 SDValue &BaseReg,
589 SDValue &Opc,
590 bool CheckProfitability) {
591 if (DisableShifterOp)
592 return false;
593
594 // If N is a multiply-by-constant and it's profitable to extract a shift and
595 // use it in a shifted operand do so.
596 if (N.getOpcode() == ISD::MUL) {
597 unsigned PowerOfTwo = 0;
598 SDValue NewMulConst;
599 if (canExtractShiftFromMul(N, 31, PowerOfTwo, NewMulConst)) {
600 HandleSDNode Handle(N);
601 SDLoc Loc(N);
602 replaceDAGValue(N.getOperand(1), NewMulConst);
603 BaseReg = Handle.getValue();
604 Opc = CurDAG->getTargetConstant(
605 ARM_AM::getSORegOpc(ARM_AM::lsl, PowerOfTwo), Loc, MVT::i32);
606 return true;
607 }
608 }
609
610 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
611
612 // Don't match base register only case. That is matched to a separate
613 // lower complexity pattern with explicit register operand.
614 if (ShOpcVal == ARM_AM::no_shift) return false;
615
616 BaseReg = N.getOperand(0);
617 unsigned ShImmVal = 0;
618 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
619 if (!RHS) return false;
620 ShImmVal = RHS->getZExtValue() & 31;
621 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
622 SDLoc(N), MVT::i32);
623 return true;
624}
625
626bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
627 SDValue &BaseReg,
628 SDValue &ShReg,
629 SDValue &Opc,
630 bool CheckProfitability) {
631 if (DisableShifterOp)
632 return false;
633
634 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
635
636 // Don't match base register only case. That is matched to a separate
637 // lower complexity pattern with explicit register operand.
638 if (ShOpcVal == ARM_AM::no_shift) return false;
639
640 BaseReg = N.getOperand(0);
641 unsigned ShImmVal = 0;
642 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
643 if (RHS) return false;
644
645 ShReg = N.getOperand(1);
646 if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
647 return false;
648 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
649 SDLoc(N), MVT::i32);
650 return true;
651}
652
653// Determine whether an ISD::OR's operands are suitable to turn the operation
654// into an addition, which often has more compact encodings.
655bool ARMDAGToDAGISel::SelectAddLikeOr(SDNode *Parent, SDValue N, SDValue &Out) {
656 assert(Parent->getOpcode() == ISD::OR && "unexpected parent")(static_cast <bool> (Parent->getOpcode() == ISD::OR &&
"unexpected parent") ? void (0) : __assert_fail ("Parent->getOpcode() == ISD::OR && \"unexpected parent\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 656, __extension__ __PRETTY_FUNCTION__))
;
657 Out = N;
658 return CurDAG->haveNoCommonBitsSet(N, Parent->getOperand(1));
659}
660
661
/// Select an ARM addrmode_imm12 operand: a base register plus a signed
/// 12-bit immediate offset.  Never fails — anything that doesn't match a
/// known pattern becomes "base only" with offset 0.
bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
                                          SDValue &Base,
                                          SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
      return true;
    }

    // Look through ARMISD::Wrapper, except when it wraps a target address
    // kind that must be selected by its own dedicated patterns.
    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
        N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
      Base = N.getOperand(0);
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  // ADD/SUB (or add-like OR) of a constant: fold the constant if it fits.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getSExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC > -0x1000 && RHSC < 0x1000) { // 12 bits
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
  return true;
}
712
713
714
/// Select an addrmode2 register-offset operand: R +/- (possibly shifted) R.
/// Simple R +/- imm12 addresses are deliberately rejected so they can be
/// selected as LDRi12 instead.  NOTE: may mutate the DAG via
/// replaceDAGValue() when extracting a shift from a multiply.
bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  // On A9/Swift only fold a single-use MUL; otherwise the extra addressing
  // work isn't profitable.
  if (N.getOpcode() == ISD::MUL &&
      ((!Subtarget->isLikeA9() && !Subtarget->isSwift()) || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          SDLoc(N), MVT::i32);
          return true;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ISD::ADD.
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave simple R +/- imm12 operands for LDRi12
  if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) // 12 bits.
      return false;
  }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub:ARM_AM::add;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
        N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
            dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  // If Offset is a multiply-by-constant and it's profitable to extract a shift
  // and use it in a shifted operand do so.
  if (Offset.getOpcode() == ISD::MUL && N.hasOneUse()) {
    unsigned PowerOfTwo = 0;
    SDValue NewMulConst;
    if (canExtractShiftFromMul(Offset, 31, PowerOfTwo, NewMulConst)) {
      // Handle keeps Offset alive across the DAG mutation below.
      HandleSDNode Handle(Offset);
      replaceDAGValue(Offset.getOperand(1), NewMulConst);
      Offset = Handle.getValue();
      ShAmt = PowerOfTwo;
      ShOpcVal = ARM_AM::lsl;
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  SDLoc(N), MVT::i32);
  return true;
}
822
/// Select the register (possibly shifted) offset form of addrmode2 for a
/// pre/post-indexed load/store.  Rejects small constants, which are better
/// served by the immediate forms.
bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                               SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  // Constants that fit in 12 bits go through the *OffsetImm forms instead.
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val))
    return false;

  Offset = N;
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
  unsigned ShAmt = 0;
  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
        Offset = N.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  SDLoc(N), MVT::i32);
  return true;
}
858
859bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
860 SDValue &Offset, SDValue &Opc) {
861 unsigned Opcode = Op->getOpcode();
862 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
863 ? cast<LoadSDNode>(Op)->getAddressingMode()
864 : cast<StoreSDNode>(Op)->getAddressingMode();
865 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
866 ? ARM_AM::add : ARM_AM::sub;
867 int Val;
868 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
869 if (AddSub == ARM_AM::sub) Val *= -1;
870 Offset = CurDAG->getRegister(0, MVT::i32);
871 Opc = CurDAG->getTargetConstant(Val, SDLoc(Op), MVT::i32);
872 return true;
873 }
874
875 return false;
876}
877
878
879bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
880 SDValue &Offset, SDValue &Opc) {
881 unsigned Opcode = Op->getOpcode();
882 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
883 ? cast<LoadSDNode>(Op)->getAddressingMode()
884 : cast<StoreSDNode>(Op)->getAddressingMode();
885 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
886 ? ARM_AM::add : ARM_AM::sub;
887 int Val;
888 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
889 Offset = CurDAG->getRegister(0, MVT::i32);
890 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
891 ARM_AM::no_shift),
892 SDLoc(Op), MVT::i32);
893 return true;
894 }
895
896 return false;
897}
898
/// Select an address with no offset: the node itself is the base address.
/// Always succeeds.
bool ARMDAGToDAGISel::SelectAddrOffsetNone(SDValue N, SDValue &Base) {
  Base = N;
  return true;
}
903
/// Select an addrmode3 operand (halfword/signed-byte/doubleword accesses):
/// base register plus either a register offset or an 8-bit immediate.
/// Never fails.
bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
                                      SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::SUB) {
    // X - C is canonicalize to X + -C, no need to handle it here.
    Base = N.getOperand(0);
    Offset = N.getOperand(1);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0), SDLoc(N),
                                    MVT::i32);
    return true;
  }

  // Not base+constant: base only, offset register 0.
  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), SDLoc(N),
                                    MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                              -256 + 1, 256, RHSC)) { // 8 bits.
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    }
    Offset = CurDAG->getRegister(0, MVT::i32);

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC), SDLoc(N),
                                    MVT::i32);
    return true;
  }

  // Constant too large for imm8: fall back to a register offset.
  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), SDLoc(N),
                                  MVT::i32);
  return true;
}
957
/// Select the offset operand of a pre/post-indexed addrmode3 load/store:
/// either an 8-bit immediate or a register.  Never fails.
bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 8 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), SDLoc(Op),
                                    MVT::i32);
    return true;
  }

  // Not a small constant: use a register offset.
  Offset = N;
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), SDLoc(Op),
                                  MVT::i32);
  return true;
}
979
/// Shared worker for addrmode5 (VFP: imm8 scaled by 4) and addrmode5fp16
/// (imm8 scaled by 2).  Never fails — unmatched addresses become
/// "base only" with offset 0.
bool ARMDAGToDAGISel::IsAddressingMode5(SDValue N, SDValue &Base, SDValue &Offset,
                                        bool FP16) {
  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
               N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
               N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
      // Look through the wrapper except for address kinds that need their
      // own selection patterns.
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                       SDLoc(N), MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  const int Scale = FP16 ? 2 : 4;

  if (isScaledConstantInRange(N.getOperand(1), Scale, -255, 256, RHSC)) {
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    }

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }

    if (FP16)
      Offset = CurDAG->getTargetConstant(ARM_AM::getAM5FP16Opc(AddSub, RHSC),
                                         SDLoc(N), MVT::i32);
    else
      Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
                                         SDLoc(N), MVT::i32);

    return true;
  }

  // Constant out of range: base only, offset 0.
  Base = N;

  if (FP16)
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5FP16Opc(ARM_AM::add, 0),
                                       SDLoc(N), MVT::i32);
  else
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                       SDLoc(N), MVT::i32);

  return true;
}
1038
/// Select an addrmode5 operand (VFP loads/stores, imm8 scaled by 4).
bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
                                      SDValue &Base, SDValue &Offset) {
  return IsAddressingMode5(N, Base, Offset, /*FP16=*/ false);
}
1043
/// Select an addrmode5fp16 operand (FP16 loads/stores, imm8 scaled by 2).
bool ARMDAGToDAGISel::SelectAddrMode5FP16(SDValue N,
                                          SDValue &Base, SDValue &Offset) {
  return IsAddressingMode5(N, Base, Offset, /*FP16=*/ true);
}
1048
/// Select an addrmode6 operand (NEON loads/stores): the address itself plus
/// an alignment hint derived from the parent memory node.  Always succeeds.
bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
                                      SDValue &Align) {
  Addr = N;

  unsigned Alignment = 0;

  MemSDNode *MemN = cast<MemSDNode>(Parent);

  if (isa<LSBaseSDNode>(MemN) ||
      ((MemN->getOpcode() == ARMISD::VST1_UPD ||
        MemN->getOpcode() == ARMISD::VLD1_UPD) &&
       MemN->getConstantOperandVal(MemN->getNumOperands() - 1) == 1)) {
    // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
    // The maximum alignment is equal to the memory size being referenced.
    unsigned MMOAlign = MemN->getAlignment();
    unsigned MemSize = MemN->getMemoryVT().getSizeInBits() / 8;
    if (MMOAlign >= MemSize && MemSize > 1)
      Alignment = MemSize;
  } else {
    // All other uses of addrmode6 are for intrinsics. For now just record
    // the raw alignment value; it will be refined later based on the legal
    // alignment operands for the intrinsic.
    Alignment = MemN->getAlignment();
  }

  Align = CurDAG->getTargetConstant(Alignment, SDLoc(N), MVT::i32);
  return true;
}
1077
1078bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N,
1079 SDValue &Offset) {
1080 LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
1081 ISD::MemIndexedMode AM = LdSt->getAddressingMode();
1082 if (AM != ISD::POST_INC)
1083 return false;
1084 Offset = N;
1085 if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N)) {
1086 if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())
1087 Offset = CurDAG->getRegister(0, MVT::i32);
1088 }
1089 return true;
1090}
1091
1092bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
1093 SDValue &Offset, SDValue &Label) {
1094 if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
1095 Offset = N.getOperand(0);
1096 SDValue N1 = N.getOperand(1);
1097 Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
1098 SDLoc(N), MVT::i32);
1099 return true;
1100 }
1101
1102 return false;
1103}
1104
1105
1106//===----------------------------------------------------------------------===//
1107// Thumb Addressing Modes
1108//===----------------------------------------------------------------------===//
1109
1110static bool shouldUseZeroOffsetLdSt(SDValue N) {
1111 // Negative numbers are difficult to materialise in thumb1. If we are
1112 // selecting the add of a negative, instead try to select ri with a zero
1113 // offset, so create the add node directly which will become a sub.
1114 if (N.getOpcode() != ISD::ADD)
1115 return false;
1116
1117 // Look for an imm which is not legal for ld/st, but is legal for sub.
1118 if (auto C = dyn_cast<ConstantSDNode>(N.getOperand(1)))
1119 return C->getSExtValue() < 0 && C->getSExtValue() >= -255;
1120
1121 return false;
1122}
1123
1124bool ARMDAGToDAGISel::SelectThumbAddrModeRRSext(SDValue N, SDValue &Base,
1125 SDValue &Offset) {
1126 if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
1127 ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
1128 if (!NC || !NC->isNullValue())
1129 return false;
1130
1131 Base = Offset = N;
1132 return true;
1133 }
1134
1135 Base = N.getOperand(0);
1136 Offset = N.getOperand(1);
1137 return true;
1138}
1139
1140bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N, SDValue &Base,
1141 SDValue &Offset) {
1142 if (shouldUseZeroOffsetLdSt(N))
1143 return false; // Select ri instead
1144 return SelectThumbAddrModeRRSext(N, Base, Offset);
1145}
1146
/// Select a Thumb1 base + scaled-imm5 address.  Scale is the access size
/// (1, 2 or 4).  Fails when a register-offset or ri-with-sub selection is
/// preferable.
bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
                                          SDValue &Base, SDValue &OffImm) {
  // Adds of small negative immediates: select base + 0 so the add itself is
  // emitted (it becomes a sub).
  if (shouldUseZeroOffsetLdSt(N)) {
    Base = N;
    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::ADD) {
      return false; // We want to select register offset instead
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
               N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
               N.getOperand(0).getOpcode() != ISD::TargetConstantPool &&
               N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
      Base = N.getOperand(0);
    } else {
      Base = N;
    }

    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  // If the RHS is + imm5 * scale, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
    Base = N.getOperand(0);
    OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
    return true;
  }

  // Offset is too large, so use register offset instead.
  return false;
}
1184
/// Select a Thumb1 base + imm5 address scaled by 4 (word accesses).
bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
}
1190
/// Select a Thumb1 base + imm5 address scaled by 2 (halfword accesses).
bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
}
1196
/// Select a Thumb1 base + imm5 address scaled by 1 (byte accesses).
bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
}
1202
/// Select a Thumb SP-relative address: frame index + imm8 scaled by 4.
/// May raise the alignment of the referenced stack object to 4 as a side
/// effect so the scaled offset is valid.
bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    // Only multiples of 4 are allowed for the offset, so the frame object
    // alignment must be at least 4.
    MachineFrameInfo &MFI = MF->getFrameInfo();
    if (MFI.getObjectAlign(FI) < Align(4))
      MFI.setObjectAlignment(FI, Align(4));
    Base = CurDAG->getTargetFrameIndex(
        FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;

  if (N.getOperand(0).getOpcode() == ISD::FrameIndex) {
    // If the RHS is + imm8 * scale, fold into addr mode.
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
      Base = N.getOperand(0);
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      // Make sure the offset is inside the object, or we might fail to
      // allocate an emergency spill slot. (An out-of-range access is UB, but
      // it could show up anyway.)
      MachineFrameInfo &MFI = MF->getFrameInfo();
      if (RHSC * 4 < MFI.getObjectSize(FI)) {
        // For LHS+RHS to result in an offset that's a multiple of 4 the object
        // indexed by the LHS must be 4-byte aligned.
        if (!MFI.isFixedObjectIndex(FI) && MFI.getObjectAlign(FI) < Align(4))
          MFI.setObjectAlignment(FI, Align(4));
        if (MFI.getObjectAlign(FI) >= Align(4)) {
          Base = CurDAG->getTargetFrameIndex(
              FI, TLI->getPointerTy(CurDAG->getDataLayout()));
          OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
          return true;
        }
      }
    }
  }

  return false;
}
1248
/// Select a Thumb base + signed 7-bit immediate scaled by (1 << Shift).
/// Never fails — falls back to base + 0.
template <unsigned Shift>
bool ARMDAGToDAGISel::SelectTAddrModeImm7(SDValue N, SDValue &Base,
                                          SDValue &OffImm) {
  if (N.getOpcode() == ISD::SUB || CurDAG->isBaseWithConstantOffset(N)) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), 1 << Shift, -0x7f, 0x80,
                                RHSC)) {
      Base = N.getOperand(0);
      // SUB folds as the negated offset.
      if (N.getOpcode() == ISD::SUB)
        RHSC = -RHSC;
      OffImm =
          CurDAG->getTargetConstant(RHSC * (1 << Shift), SDLoc(N), MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
  return true;
}
1270
1271
1272//===----------------------------------------------------------------------===//
1273// Thumb 2 Addressing Modes
1274//===----------------------------------------------------------------------===//
1275
1276
/// Select a Thumb2 base + unsigned 12-bit immediate address (t2LDRi12 style).
/// Fails when the constant-pool or imm8 forms should be used instead.
bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
        N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::TargetConstantPool)
        return false; // We want to select t2LDRpci instead.
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    if (SelectT2AddrModeImm8(N, Base, OffImm))
      // Let t2LDRi8 handle (R - imm8).
      return false;

    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
  return true;
}
1332
/// Select a Thumb2 base + signed 8-bit immediate scaled by (1 << Shift).
/// Never fails — falls back to base + 0.
template <unsigned Shift>
bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  if (N.getOpcode() == ISD::SUB || CurDAG->isBaseWithConstantOffset(N)) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), 1 << Shift, -255, 256, RHSC)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }

      // SUB folds as the negated offset.
      if (N.getOpcode() == ISD::SUB)
        RHSC = -RHSC;
      OffImm =
          CurDAG->getTargetConstant(RHSC * (1 << Shift), SDLoc(N), MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
  return true;
}
1359
/// Select a Thumb2 base + negative 8-bit immediate (t2LDRi8 style).
/// Only matches strictly negative offsets in [-255, 0).
bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
                                           SDValue &Base, SDValue &OffImm) {
  // Match simple R - imm8 operands.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getSExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
      return true;
    }
  }

  return false;
}
1386
1387bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
1388 SDValue &OffImm){
1389 unsigned Opcode = Op->getOpcode();
1390 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
1391 ? cast<LoadSDNode>(Op)->getAddressingMode()
1392 : cast<StoreSDNode>(Op)->getAddressingMode();
1393 int RHSC;
1394 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
1395 OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
1396 ? CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32)
1397 : CurDAG->getTargetConstant(-RHSC, SDLoc(N), MVT::i32);
1398 return true;
1399 }
1400
1401 return false;
1402}
1403
/// Select an MVE base + signed 7-bit immediate scaled by (1 << Shift).
/// Never fails — falls back to base + 0.
template <unsigned Shift>
bool ARMDAGToDAGISel::SelectT2AddrModeImm7(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  if (N.getOpcode() == ISD::SUB || CurDAG->isBaseWithConstantOffset(N)) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), 1 << Shift, -0x7f, 0x80,
                                RHSC)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }

      // SUB folds as the negated offset.
      if (N.getOpcode() == ISD::SUB)
        RHSC = -RHSC;
      OffImm =
          CurDAG->getTargetConstant(RHSC * (1 << Shift), SDLoc(N), MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
  return true;
}
1431
/// Template entry point used by TableGen patterns; forwards the compile-time
/// Shift to the runtime-parameterised overload below.
template <unsigned Shift>
bool ARMDAGToDAGISel::SelectT2AddrModeImm7Offset(SDNode *Op, SDValue N,
                                                 SDValue &OffImm) {
  return SelectT2AddrModeImm7Offset(Op, N, OffImm, Shift);
}
1437
/// Select a 7-bit immediate offset, scaled by (1 << Shift), for an MVE
/// pre/post-indexed (possibly masked) load/store.  Decrementing modes
/// produce the negated immediate.
bool ARMDAGToDAGISel::SelectT2AddrModeImm7Offset(SDNode *Op, SDValue N,
                                                 SDValue &OffImm,
                                                 unsigned Shift) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM;
  switch (Opcode) {
  case ISD::LOAD:
    AM = cast<LoadSDNode>(Op)->getAddressingMode();
    break;
  case ISD::STORE:
    AM = cast<StoreSDNode>(Op)->getAddressingMode();
    break;
  case ISD::MLOAD:
    AM = cast<MaskedLoadSDNode>(Op)->getAddressingMode();
    break;
  case ISD::MSTORE:
    AM = cast<MaskedStoreSDNode>(Op)->getAddressingMode();
    break;
  default:
    llvm_unreachable("Unexpected Opcode for Imm7Offset");
  }

  int RHSC;
  // 7 bit constant, shifted by Shift.
  if (isScaledConstantInRange(N, 1 << Shift, 0, 0x80, RHSC)) {
    OffImm =
        ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
            ? CurDAG->getTargetConstant(RHSC * (1 << Shift), SDLoc(N), MVT::i32)
            : CurDAG->getTargetConstant(-RHSC * (1 << Shift), SDLoc(N),
                                        MVT::i32);
    return true;
  }
  return false;
}
1472
1473template <int Min, int Max>
1474bool ARMDAGToDAGISel::SelectImmediateInRange(SDValue N, SDValue &OffImm) {
1475 int Val;
1476 if (isScaledConstantInRange(N, 1, Min, Max, Val)) {
1477 OffImm = CurDAG->getTargetConstant(Val, SDLoc(N), MVT::i32);
1478 return true;
1479 }
1480 return false;
1481}
1482
/// Select a Thumb2 shifted-register address: (R + R) or (R + (R << [1,2,3])).
/// Immediates that fit t2LDRi12/t2LDRi8 are rejected so those forms win.
/// NOTE: may mutate the DAG via replaceDAGValue() when extracting a shift
/// from a multiply.
bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
                                            SDValue &Base,
                                            SDValue &OffReg, SDValue &ShImm) {
  // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
      return false;
    else if (RHSC < 0 && RHSC >= -255) // 8 bits
      return false;
  }

  // Look for (R + R) or (R + (R << [1,2,3])).
  unsigned ShAmt = 0;
  Base   = N.getOperand(0);
  OffReg = N.getOperand(1);

  // Swap if it is ((R << c) + R).
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg.getOpcode());
  if (ShOpcVal != ARM_AM::lsl) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(Base.getOpcode());
    if (ShOpcVal == ARM_AM::lsl)
      std::swap(Base, OffReg);
  }

  if (ShOpcVal == ARM_AM::lsl) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      // Thumb2 soreg encodes shift amounts of at most 3.
      if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
        OffReg = OffReg.getOperand(0);
      else {
        ShAmt = 0;
      }
    }
  }

  // If OffReg is a multiply-by-constant and it's profitable to extract a shift
  // and use it in a shifted operand do so.
  if (OffReg.getOpcode() == ISD::MUL && N.hasOneUse()) {
    unsigned PowerOfTwo = 0;
    SDValue NewMulConst;
    if (canExtractShiftFromMul(OffReg, 3, PowerOfTwo, NewMulConst)) {
      // Handle keeps OffReg alive across the DAG mutation below.
      HandleSDNode Handle(OffReg);
      replaceDAGValue(OffReg.getOperand(1), NewMulConst);
      OffReg = Handle.getValue();
      ShAmt = PowerOfTwo;
    }
  }

  ShImm = CurDAG->getTargetConstant(ShAmt, SDLoc(N), MVT::i32);

  return true;
}
1542
/// Select the base + scaled-imm8 address used by ldrex/strex.  Must always
/// succeed, so anything unmatchable degrades to base + 0.
bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
                                                SDValue &OffImm) {
  // This *must* succeed since it's used for the irreplaceable ldrex and strex
  // instructions.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);

  if (N.getOpcode() != ISD::ADD || !CurDAG->isBaseWithConstantOffset(N))
    return true;

  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!RHS)
    return true;

  // The encoded offset is the word-scaled constant; it must be word-aligned
  // and at most 1020.
  uint32_t RHSC = (int)RHS->getZExtValue();
  if (RHSC > 1020 || RHSC % 4 != 0)
    return true;

  Base = N.getOperand(0);
  if (Base.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(Base)->getIndex();
    Base = CurDAG->getTargetFrameIndex(
        FI, TLI->getPointerTy(CurDAG->getDataLayout()));
  }

  OffImm = CurDAG->getTargetConstant(RHSC/4, SDLoc(N), MVT::i32);
  return true;
}
1571
1572//===--------------------------------------------------------------------===//
1573
/// getAL - Returns a ARMCC::AL immediate node (the "always" condition code
/// used as the default predicate operand on predicated instructions).
static inline SDValue getAL(SelectionDAG *CurDAG, const SDLoc &dl) {
  return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, dl, MVT::i32);
}
1578
/// Copy the MachineMemOperand of the DAG memory node N onto the newly
/// created machine node Result so alias/volatility info survives selection.
void ARMDAGToDAGISel::transferMemOperands(SDNode *N, SDNode *Result) {
  MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(Result), {MemOp});
}
1583
/// Try to select a pre/post-indexed ARM-mode load, picking the LDR variant
/// from the loaded type, extension kind, and the addressing mode that
/// matches the offset.  Replaces N on success.
bool ARMDAGToDAGISel::tryARMIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return false;

  EVT LoadedVT = LD->getMemoryVT();
  SDValue Offset, AMOpc;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  // i32: immediate pre, immediate post, then register forms.
  if (LoadedVT == MVT::i32 && isPre &&
      SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = ARM::LDR_PRE_IMM;
    Match = true;
  } else if (LoadedVT == MVT::i32 && !isPre &&
             SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = ARM::LDR_POST_IMM;
    Match = true;
  } else if (LoadedVT == MVT::i32 &&
             SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = isPre ? ARM::LDR_PRE_REG : ARM::LDR_POST_REG;
    Match = true;

  } else if (LoadedVT == MVT::i16 &&
             SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
    // i16 uses addrmode3; sign- vs zero-extending picks LDRSH vs LDRH.
    Match = true;
    Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
                 ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
                 : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
  } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
    if (LD->getExtensionType() == ISD::SEXTLOAD) {
      // Signed byte loads only exist in addrmode3.
      if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
      }
    } else {
      // Unsigned byte loads mirror the i32 addrmode2 selection above.
      if (isPre &&
          SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = ARM::LDRB_PRE_IMM;
      } else if (!isPre &&
                 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = ARM::LDRB_POST_IMM;
      } else if (SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRB_PRE_REG : ARM::LDRB_POST_REG;
      }
    }
  }

  if (Match) {
    if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
      // The PRE_IMM forms take no separate offset-register operand.
      SDValue Chain = LD->getChain();
      SDValue Base = LD->getBasePtr();
      SDValue Ops[]= { Base, AMOpc, getAL(CurDAG, SDLoc(N)),
                       CurDAG->getRegister(0, MVT::i32), Chain };
      SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
                                           MVT::Other, Ops);
      transferMemOperands(N, New);
      ReplaceNode(N, New);
      return true;
    } else {
      SDValue Chain = LD->getChain();
      SDValue Base = LD->getBasePtr();
      SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG, SDLoc(N)),
                       CurDAG->getRegister(0, MVT::i32), Chain };
      SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
                                           MVT::Other, Ops);
      transferMemOperands(N, New);
      ReplaceNode(N, New);
      return true;
    }
  }

  return false;
}
1662
1663bool ARMDAGToDAGISel::tryT1IndexedLoad(SDNode *N) {
1664 LoadSDNode *LD = cast<LoadSDNode>(N);
1665 EVT LoadedVT = LD->getMemoryVT();
1666 ISD::MemIndexedMode AM = LD->getAddressingMode();
1667 if (AM != ISD::POST_INC || LD->getExtensionType() != ISD::NON_EXTLOAD ||
1668 LoadedVT.getSimpleVT().SimpleTy != MVT::i32)
1669 return false;
1670
1671 auto *COffs = dyn_cast<ConstantSDNode>(LD->getOffset());
1672 if (!COffs || COffs->getZExtValue() != 4)
1673 return false;
1674
1675 // A T1 post-indexed load is just a single register LDM: LDM r0!, {r1}.
1676 // The encoding of LDM is not how the rest of ISel expects a post-inc load to
1677 // look however, so we use a pseudo here and switch it for a tLDMIA_UPD after
1678 // ISel.
1679 SDValue Chain = LD->getChain();
1680 SDValue Base = LD->getBasePtr();
1681 SDValue Ops[]= { Base, getAL(CurDAG, SDLoc(N)),
1682 CurDAG->getRegister(0, MVT::i32), Chain };
1683 SDNode *New = CurDAG->getMachineNode(ARM::tLDR_postidx, SDLoc(N), MVT::i32,
1684 MVT::i32, MVT::Other, Ops);
1685 transferMemOperands(N, New);
1686 ReplaceNode(N, New);
1687 return true;
1688}
1689
1690bool ARMDAGToDAGISel::tryT2IndexedLoad(SDNode *N) {
1691 LoadSDNode *LD = cast<LoadSDNode>(N);
1692 ISD::MemIndexedMode AM = LD->getAddressingMode();
1693 if (AM == ISD::UNINDEXED)
1694 return false;
1695
1696 EVT LoadedVT = LD->getMemoryVT();
1697 bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
1698 SDValue Offset;
1699 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1700 unsigned Opcode = 0;
1701 bool Match = false;
1702 if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
1703 switch (LoadedVT.getSimpleVT().SimpleTy) {
1704 case MVT::i32:
1705 Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
1706 break;
1707 case MVT::i16:
1708 if (isSExtLd)
1709 Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
1710 else
1711 Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
1712 break;
1713 case MVT::i8:
1714 case MVT::i1:
1715 if (isSExtLd)
1716 Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
1717 else
1718 Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
1719 break;
1720 default:
1721 return false;
1722 }
1723 Match = true;
1724 }
1725
1726 if (Match) {
1727 SDValue Chain = LD->getChain();
1728 SDValue Base = LD->getBasePtr();
1729 SDValue Ops[]= { Base, Offset, getAL(CurDAG, SDLoc(N)),
1730 CurDAG->getRegister(0, MVT::i32), Chain };
1731 SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
1732 MVT::Other, Ops);
1733 transferMemOperands(N, New);
1734 ReplaceNode(N, New);
1735 return true;
1736 }
1737
1738 return false;
1739}
1740
1741bool ARMDAGToDAGISel::tryMVEIndexedLoad(SDNode *N) {
1742 EVT LoadedVT;
1743 unsigned Opcode = 0;
1744 bool isSExtLd, isPre;
1745 Align Alignment;
1746 ARMVCC::VPTCodes Pred;
1747 SDValue PredReg;
1748 SDValue Chain, Base, Offset;
1749
1750 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
1751 ISD::MemIndexedMode AM = LD->getAddressingMode();
1752 if (AM == ISD::UNINDEXED)
1753 return false;
1754 LoadedVT = LD->getMemoryVT();
1755 if (!LoadedVT.isVector())
1756 return false;
1757
1758 Chain = LD->getChain();
1759 Base = LD->getBasePtr();
1760 Offset = LD->getOffset();
1761 Alignment = LD->getAlign();
1762 isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
1763 isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1764 Pred = ARMVCC::None;
1765 PredReg = CurDAG->getRegister(0, MVT::i32);
1766 } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
1767 ISD::MemIndexedMode AM = LD->getAddressingMode();
1768 if (AM == ISD::UNINDEXED)
1769 return false;
1770 LoadedVT = LD->getMemoryVT();
1771 if (!LoadedVT.isVector())
1772 return false;
1773
1774 Chain = LD->getChain();
1775 Base = LD->getBasePtr();
1776 Offset = LD->getOffset();
1777 Alignment = LD->getAlign();
1778 isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
1779 isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1780 Pred = ARMVCC::Then;
1781 PredReg = LD->getMask();
1782 } else
1783 llvm_unreachable("Expected a Load or a Masked Load!")::llvm::llvm_unreachable_internal("Expected a Load or a Masked Load!"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 1783)
;
1784
1785 // We allow LE non-masked loads to change the type (for example use a vldrb.8
1786 // as opposed to a vldrw.32). This can allow extra addressing modes or
1787 // alignments for what is otherwise an equivalent instruction.
1788 bool CanChangeType = Subtarget->isLittle() && !isa<MaskedLoadSDNode>(N);
1789
1790 SDValue NewOffset;
1791 if (Alignment >= Align(2) && LoadedVT == MVT::v4i16 &&
1792 SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 1)) {
1793 if (isSExtLd)
1794 Opcode = isPre ? ARM::MVE_VLDRHS32_pre : ARM::MVE_VLDRHS32_post;
1795 else
1796 Opcode = isPre ? ARM::MVE_VLDRHU32_pre : ARM::MVE_VLDRHU32_post;
1797 } else if (LoadedVT == MVT::v8i8 &&
1798 SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 0)) {
1799 if (isSExtLd)
1800 Opcode = isPre ? ARM::MVE_VLDRBS16_pre : ARM::MVE_VLDRBS16_post;
1801 else
1802 Opcode = isPre ? ARM::MVE_VLDRBU16_pre : ARM::MVE_VLDRBU16_post;
1803 } else if (LoadedVT == MVT::v4i8 &&
1804 SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 0)) {
1805 if (isSExtLd)
1806 Opcode = isPre ? ARM::MVE_VLDRBS32_pre : ARM::MVE_VLDRBS32_post;
1807 else
1808 Opcode = isPre ? ARM::MVE_VLDRBU32_pre : ARM::MVE_VLDRBU32_post;
1809 } else if (Alignment >= Align(4) &&
1810 (CanChangeType || LoadedVT == MVT::v4i32 ||
1811 LoadedVT == MVT::v4f32) &&
1812 SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 2))
1813 Opcode = isPre ? ARM::MVE_VLDRWU32_pre : ARM::MVE_VLDRWU32_post;
1814 else if (Alignment >= Align(2) &&
1815 (CanChangeType || LoadedVT == MVT::v8i16 ||
1816 LoadedVT == MVT::v8f16) &&
1817 SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 1))
1818 Opcode = isPre ? ARM::MVE_VLDRHU16_pre : ARM::MVE_VLDRHU16_post;
1819 else if ((CanChangeType || LoadedVT == MVT::v16i8) &&
1820 SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 0))
1821 Opcode = isPre ? ARM::MVE_VLDRBU8_pre : ARM::MVE_VLDRBU8_post;
1822 else
1823 return false;
1824
1825 SDValue Ops[] = {Base, NewOffset,
1826 CurDAG->getTargetConstant(Pred, SDLoc(N), MVT::i32), PredReg,
1827 Chain};
1828 SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
1829 N->getValueType(0), MVT::Other, Ops);
1830 transferMemOperands(N, New);
1831 ReplaceUses(SDValue(N, 0), SDValue(New, 1));
1832 ReplaceUses(SDValue(N, 1), SDValue(New, 0));
1833 ReplaceUses(SDValue(N, 2), SDValue(New, 2));
1834 CurDAG->RemoveDeadNode(N);
1835 return true;
1836}
1837
1838/// Form a GPRPair pseudo register from a pair of GPR regs.
1839SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) {
1840 SDLoc dl(V0.getNode());
1841 SDValue RegClass =
1842 CurDAG->getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
1843 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::gsub_0, dl, MVT::i32);
1844 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::gsub_1, dl, MVT::i32);
1845 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1846 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1847}
1848
1849/// Form a D register from a pair of S registers.
1850SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1851 SDLoc dl(V0.getNode());
1852 SDValue RegClass =
1853 CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, dl, MVT::i32);
1854 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, dl, MVT::i32);
1855 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, dl, MVT::i32);
1856 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1857 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1858}
1859
1860/// Form a quad register from a pair of D registers.
1861SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1862 SDLoc dl(V0.getNode());
1863 SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, dl,
1864 MVT::i32);
1865 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, dl, MVT::i32);
1866 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, dl, MVT::i32);
1867 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1868 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1869}
1870
1871/// Form 4 consecutive D registers from a pair of Q registers.
1872SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1873 SDLoc dl(V0.getNode());
1874 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, dl,
1875 MVT::i32);
1876 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, dl, MVT::i32);
1877 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, dl, MVT::i32);
1878 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1879 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1880}
1881
1882/// Form 4 consecutive S registers.
1883SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
1884 SDValue V2, SDValue V3) {
1885 SDLoc dl(V0.getNode());
1886 SDValue RegClass =
1887 CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, dl, MVT::i32);
1888 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, dl, MVT::i32);
1889 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, dl, MVT::i32);
1890 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, dl, MVT::i32);
1891 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, dl, MVT::i32);
1892 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1893 V2, SubReg2, V3, SubReg3 };
1894 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1895}
1896
1897/// Form 4 consecutive D registers.
1898SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
1899 SDValue V2, SDValue V3) {
1900 SDLoc dl(V0.getNode());
1901 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, dl,
1902 MVT::i32);
1903 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, dl, MVT::i32);
1904 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, dl, MVT::i32);
1905 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, dl, MVT::i32);
1906 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, dl, MVT::i32);
1907 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1908 V2, SubReg2, V3, SubReg3 };
1909 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1910}
1911
1912/// Form 4 consecutive Q registers.
1913SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
1914 SDValue V2, SDValue V3) {
1915 SDLoc dl(V0.getNode());
1916 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, dl,
1917 MVT::i32);
1918 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, dl, MVT::i32);
1919 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, dl, MVT::i32);
1920 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, dl, MVT::i32);
1921 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, dl, MVT::i32);
1922 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1923 V2, SubReg2, V3, SubReg3 };
1924 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1925}
1926
1927/// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
1928/// of a NEON VLD or VST instruction. The supported values depend on the
1929/// number of registers being loaded.
1930SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, const SDLoc &dl,
1931 unsigned NumVecs, bool is64BitVector) {
1932 unsigned NumRegs = NumVecs;
1933 if (!is64BitVector && NumVecs < 3)
1934 NumRegs *= 2;
1935
1936 unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
1937 if (Alignment >= 32 && NumRegs == 4)
1938 Alignment = 32;
1939 else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
1940 Alignment = 16;
1941 else if (Alignment >= 8)
1942 Alignment = 8;
1943 else
1944 Alignment = 0;
1945
1946 return CurDAG->getTargetConstant(Alignment, dl, MVT::i32);
1947}
1948
1949static bool isVLDfixed(unsigned Opc)
1950{
1951 switch (Opc) {
1952 default: return false;
1953 case ARM::VLD1d8wb_fixed : return true;
1954 case ARM::VLD1d16wb_fixed : return true;
1955 case ARM::VLD1d64Qwb_fixed : return true;
1956 case ARM::VLD1d32wb_fixed : return true;
1957 case ARM::VLD1d64wb_fixed : return true;
1958 case ARM::VLD1d8TPseudoWB_fixed : return true;
1959 case ARM::VLD1d16TPseudoWB_fixed : return true;
1960 case ARM::VLD1d32TPseudoWB_fixed : return true;
1961 case ARM::VLD1d64TPseudoWB_fixed : return true;
1962 case ARM::VLD1d8QPseudoWB_fixed : return true;
1963 case ARM::VLD1d16QPseudoWB_fixed : return true;
1964 case ARM::VLD1d32QPseudoWB_fixed : return true;
1965 case ARM::VLD1d64QPseudoWB_fixed : return true;
1966 case ARM::VLD1q8wb_fixed : return true;
1967 case ARM::VLD1q16wb_fixed : return true;
1968 case ARM::VLD1q32wb_fixed : return true;
1969 case ARM::VLD1q64wb_fixed : return true;
1970 case ARM::VLD1DUPd8wb_fixed : return true;
1971 case ARM::VLD1DUPd16wb_fixed : return true;
1972 case ARM::VLD1DUPd32wb_fixed : return true;
1973 case ARM::VLD1DUPq8wb_fixed : return true;
1974 case ARM::VLD1DUPq16wb_fixed : return true;
1975 case ARM::VLD1DUPq32wb_fixed : return true;
1976 case ARM::VLD2d8wb_fixed : return true;
1977 case ARM::VLD2d16wb_fixed : return true;
1978 case ARM::VLD2d32wb_fixed : return true;
1979 case ARM::VLD2q8PseudoWB_fixed : return true;
1980 case ARM::VLD2q16PseudoWB_fixed : return true;
1981 case ARM::VLD2q32PseudoWB_fixed : return true;
1982 case ARM::VLD2DUPd8wb_fixed : return true;
1983 case ARM::VLD2DUPd16wb_fixed : return true;
1984 case ARM::VLD2DUPd32wb_fixed : return true;
1985 case ARM::VLD2DUPq8OddPseudoWB_fixed: return true;
1986 case ARM::VLD2DUPq16OddPseudoWB_fixed: return true;
1987 case ARM::VLD2DUPq32OddPseudoWB_fixed: return true;
1988 }
1989}
1990
1991static bool isVSTfixed(unsigned Opc)
1992{
1993 switch (Opc) {
1994 default: return false;
1995 case ARM::VST1d8wb_fixed : return true;
1996 case ARM::VST1d16wb_fixed : return true;
1997 case ARM::VST1d32wb_fixed : return true;
1998 case ARM::VST1d64wb_fixed : return true;
1999 case ARM::VST1q8wb_fixed : return true;
2000 case ARM::VST1q16wb_fixed : return true;
2001 case ARM::VST1q32wb_fixed : return true;
2002 case ARM::VST1q64wb_fixed : return true;
2003 case ARM::VST1d8TPseudoWB_fixed : return true;
2004 case ARM::VST1d16TPseudoWB_fixed : return true;
2005 case ARM::VST1d32TPseudoWB_fixed : return true;
2006 case ARM::VST1d64TPseudoWB_fixed : return true;
2007 case ARM::VST1d8QPseudoWB_fixed : return true;
2008 case ARM::VST1d16QPseudoWB_fixed : return true;
2009 case ARM::VST1d32QPseudoWB_fixed : return true;
2010 case ARM::VST1d64QPseudoWB_fixed : return true;
2011 case ARM::VST2d8wb_fixed : return true;
2012 case ARM::VST2d16wb_fixed : return true;
2013 case ARM::VST2d32wb_fixed : return true;
2014 case ARM::VST2q8PseudoWB_fixed : return true;
2015 case ARM::VST2q16PseudoWB_fixed : return true;
2016 case ARM::VST2q32PseudoWB_fixed : return true;
2017 }
2018}
2019
// Get the register stride update opcode of a VLD/VST instruction that
// is otherwise equivalent to the given fixed stride updating instruction.
// Opcodes without a register-stride twin (e.g. VLD3/VLD4 forms) are returned
// unchanged.
static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
  assert((isVLDfixed(Opc) || isVSTfixed(Opc))
         && "Incorrect fixed stride updating instruction.");
  switch (Opc) {
  default: break;
  case ARM::VLD1d8wb_fixed: return ARM::VLD1d8wb_register;
  case ARM::VLD1d16wb_fixed: return ARM::VLD1d16wb_register;
  case ARM::VLD1d32wb_fixed: return ARM::VLD1d32wb_register;
  case ARM::VLD1d64wb_fixed: return ARM::VLD1d64wb_register;
  case ARM::VLD1q8wb_fixed: return ARM::VLD1q8wb_register;
  case ARM::VLD1q16wb_fixed: return ARM::VLD1q16wb_register;
  case ARM::VLD1q32wb_fixed: return ARM::VLD1q32wb_register;
  case ARM::VLD1q64wb_fixed: return ARM::VLD1q64wb_register;
  case ARM::VLD1d64Twb_fixed: return ARM::VLD1d64Twb_register;
  case ARM::VLD1d64Qwb_fixed: return ARM::VLD1d64Qwb_register;
  case ARM::VLD1d8TPseudoWB_fixed: return ARM::VLD1d8TPseudoWB_register;
  case ARM::VLD1d16TPseudoWB_fixed: return ARM::VLD1d16TPseudoWB_register;
  case ARM::VLD1d32TPseudoWB_fixed: return ARM::VLD1d32TPseudoWB_register;
  case ARM::VLD1d64TPseudoWB_fixed: return ARM::VLD1d64TPseudoWB_register;
  case ARM::VLD1d8QPseudoWB_fixed: return ARM::VLD1d8QPseudoWB_register;
  case ARM::VLD1d16QPseudoWB_fixed: return ARM::VLD1d16QPseudoWB_register;
  case ARM::VLD1d32QPseudoWB_fixed: return ARM::VLD1d32QPseudoWB_register;
  case ARM::VLD1d64QPseudoWB_fixed: return ARM::VLD1d64QPseudoWB_register;
  case ARM::VLD1DUPd8wb_fixed : return ARM::VLD1DUPd8wb_register;
  case ARM::VLD1DUPd16wb_fixed : return ARM::VLD1DUPd16wb_register;
  case ARM::VLD1DUPd32wb_fixed : return ARM::VLD1DUPd32wb_register;
  case ARM::VLD1DUPq8wb_fixed : return ARM::VLD1DUPq8wb_register;
  case ARM::VLD1DUPq16wb_fixed : return ARM::VLD1DUPq16wb_register;
  case ARM::VLD1DUPq32wb_fixed : return ARM::VLD1DUPq32wb_register;
  case ARM::VLD2DUPq8OddPseudoWB_fixed: return ARM::VLD2DUPq8OddPseudoWB_register;
  case ARM::VLD2DUPq16OddPseudoWB_fixed: return ARM::VLD2DUPq16OddPseudoWB_register;
  case ARM::VLD2DUPq32OddPseudoWB_fixed: return ARM::VLD2DUPq32OddPseudoWB_register;

  case ARM::VST1d8wb_fixed: return ARM::VST1d8wb_register;
  case ARM::VST1d16wb_fixed: return ARM::VST1d16wb_register;
  case ARM::VST1d32wb_fixed: return ARM::VST1d32wb_register;
  case ARM::VST1d64wb_fixed: return ARM::VST1d64wb_register;
  case ARM::VST1q8wb_fixed: return ARM::VST1q8wb_register;
  case ARM::VST1q16wb_fixed: return ARM::VST1q16wb_register;
  case ARM::VST1q32wb_fixed: return ARM::VST1q32wb_register;
  case ARM::VST1q64wb_fixed: return ARM::VST1q64wb_register;
  case ARM::VST1d8TPseudoWB_fixed: return ARM::VST1d8TPseudoWB_register;
  case ARM::VST1d16TPseudoWB_fixed: return ARM::VST1d16TPseudoWB_register;
  case ARM::VST1d32TPseudoWB_fixed: return ARM::VST1d32TPseudoWB_register;
  case ARM::VST1d64TPseudoWB_fixed: return ARM::VST1d64TPseudoWB_register;
  case ARM::VST1d8QPseudoWB_fixed: return ARM::VST1d8QPseudoWB_register;
  case ARM::VST1d16QPseudoWB_fixed: return ARM::VST1d16QPseudoWB_register;
  case ARM::VST1d32QPseudoWB_fixed: return ARM::VST1d32QPseudoWB_register;
  case ARM::VST1d64QPseudoWB_fixed: return ARM::VST1d64QPseudoWB_register;

  case ARM::VLD2d8wb_fixed: return ARM::VLD2d8wb_register;
  case ARM::VLD2d16wb_fixed: return ARM::VLD2d16wb_register;
  case ARM::VLD2d32wb_fixed: return ARM::VLD2d32wb_register;
  case ARM::VLD2q8PseudoWB_fixed: return ARM::VLD2q8PseudoWB_register;
  case ARM::VLD2q16PseudoWB_fixed: return ARM::VLD2q16PseudoWB_register;
  case ARM::VLD2q32PseudoWB_fixed: return ARM::VLD2q32PseudoWB_register;

  case ARM::VST2d8wb_fixed: return ARM::VST2d8wb_register;
  case ARM::VST2d16wb_fixed: return ARM::VST2d16wb_register;
  case ARM::VST2d32wb_fixed: return ARM::VST2d32wb_register;
  case ARM::VST2q8PseudoWB_fixed: return ARM::VST2q8PseudoWB_register;
  case ARM::VST2q16PseudoWB_fixed: return ARM::VST2q16PseudoWB_register;
  case ARM::VST2q32PseudoWB_fixed: return ARM::VST2q32PseudoWB_register;

  case ARM::VLD2DUPd8wb_fixed: return ARM::VLD2DUPd8wb_register;
  case ARM::VLD2DUPd16wb_fixed: return ARM::VLD2DUPd16wb_register;
  case ARM::VLD2DUPd32wb_fixed: return ARM::VLD2DUPd32wb_register;
  }
  return Opc; // If not one we handle, return it unchanged.
}
2092
2093/// Returns true if the given increment is a Constant known to be equal to the
2094/// access size performed by a NEON load/store. This means the "[rN]!" form can
2095/// be used.
2096static bool isPerfectIncrement(SDValue Inc, EVT VecTy, unsigned NumVecs) {
2097 auto C = dyn_cast<ConstantSDNode>(Inc);
2098 return C && C->getZExtValue() == VecTy.getSizeInBits() / 8 * NumVecs;
2099}
2100
/// Select a NEON vector load (VLD1-VLD4, intrinsic or updating form) into a
/// machine node. DOpcodes/QOpcodes0 are opcode tables indexed by element
/// size; QOpcodes1 holds the second ("odd subregs") instruction used when a
/// quad-register VLD3/VLD4 must be split into two loads.
void ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                                const uint16_t *DOpcodes,
                                const uint16_t *QOpcodes0,
                                const uint16_t *QOpcodes1) {
  assert(Subtarget->hasNEON());
  assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
  SDLoc dl(N);

  SDValue MemAddr, Align;
  bool IsIntrinsic = !isUpdating; // By coincidence, all supported updating
                                  // nodes are not intrinsics.
  unsigned AddrOpIdx = IsIntrinsic ? 2 : 1;
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return;

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);
  bool is64BitVector = VT.is64BitVector();
  // Clamp Align to what the instruction encoding can represent.
  Align = GetVLDSTAlign(Align, dl, NumVecs, is64BitVector);

  // Map the element size to an index into the opcode tables.
  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4f16:
  case MVT::v4bf16:
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8f16:
  case MVT::v8bf16:
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2f64:
  case MVT::v2i64: OpcodeIndex = 3; break;
  }

  // For multi-vector loads the machine node produces one wide super-register
  // (v<N>i64); the individual vectors are extracted as subregisters below.
  EVT ResTy;
  if (NumVecs == 1)
    ResTy = VT;
  else {
    unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
    if (!is64BitVector)
      ResTyElts *= 2;
    ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
  }
  std::vector<EVT> ResTys;
  ResTys.push_back(ResTy);
  if (isUpdating)
    ResTys.push_back(MVT::i32); // Writeback (updated address) result.
  ResTys.push_back(MVT::Other); // Chain result.

  SDValue Pred = getAL(CurDAG, dl);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  SDNode *VLd;
  SmallVector<SDValue, 7> Ops;

  // Double registers and VLD1/VLD2 quad registers are directly supported.
  if (is64BitVector || NumVecs <= 2) {
    unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
                    QOpcodes0[OpcodeIndex]);
    Ops.push_back(MemAddr);
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      bool IsImmUpdate = isPerfectIncrement(Inc, VT, NumVecs);
      if (!IsImmUpdate) {
        // We use a VLD1 for v1i64 even if the pseudo says vld2/3/4, so
        // check for the opcode rather than the number of vector elements.
        if (isVLDfixed(Opc))
          Opc = getVLDSTRegisterUpdateOpcode(Opc);
        Ops.push_back(Inc);
      // VLD1/VLD2 fixed increment does not need Reg0 so only include it in
      // the operands if not such an opcode.
      } else if (!isVLDfixed(Opc))
        Ops.push_back(Reg0);
    }
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  } else {
    // Otherwise, quad registers are loaded with two separate instructions,
    // where one loads the even registers and the other loads the odd registers.
    EVT AddrTy = MemAddr.getValueType();

    // Load the even subregs. This is always an updating load, so that it
    // provides the address to the second load for the odd subregs.
    SDValue ImplDef =
      SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
    const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
    SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
                                          ResTy, AddrTy, MVT::Other, OpsA);
    Chain = SDValue(VLdA, 2);

    // Load the odd subregs.
    Ops.push_back(SDValue(VLdA, 1)); // Address produced by the first load.
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      assert(isa<ConstantSDNode>(Inc.getNode()) &&
             "only constant post-increment update allowed for VLD3/4");
      (void)Inc;
      Ops.push_back(Reg0);
    }
    Ops.push_back(SDValue(VLdA, 0)); // Tied even-subregs value.
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys, Ops);
  }

  // Transfer memoperands.
  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(VLd), {MemOp});

  if (NumVecs == 1) {
    ReplaceNode(N, VLd);
    return;
  }

  // Extract out the subregisters.
  SDValue SuperReg = SDValue(VLd, 0);
  static_assert(ARM::dsub_7 == ARM::dsub_0 + 7 &&
                    ARM::qsub_3 == ARM::qsub_0 + 3,
                "Unexpected subreg numbering");
  unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
  CurDAG->RemoveDeadNode(N);
}
2242
2243void ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
2244 const uint16_t *DOpcodes,
2245 const uint16_t *QOpcodes0,
2246 const uint16_t *QOpcodes1) {
2247 assert(Subtarget->hasNEON())(static_cast <bool> (Subtarget->hasNEON()) ? void (0
) : __assert_fail ("Subtarget->hasNEON()", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 2247, __extension__ __PRETTY_FUNCTION__))
;
2248 assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range")(static_cast <bool> (NumVecs >= 1 && NumVecs
<= 4 && "VST NumVecs out-of-range") ? void (0) : __assert_fail
("NumVecs >= 1 && NumVecs <= 4 && \"VST NumVecs out-of-range\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 2248, __extension__ __PRETTY_FUNCTION__))
;
2249 SDLoc dl(N);
2250
2251 SDValue MemAddr, Align;
2252 bool IsIntrinsic = !isUpdating; // By coincidence, all supported updating
2253 // nodes are not intrinsics.
2254 unsigned AddrOpIdx = IsIntrinsic ? 2 : 1;
2255 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
2256 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
2257 return;
2258
2259 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2260
2261 SDValue Chain = N->getOperand(0);
2262 EVT VT = N->getOperand(Vec0Idx).getValueType();
2263 bool is64BitVector = VT.is64BitVector();
2264 Align = GetVLDSTAlign(Align, dl, NumVecs, is64BitVector);
2265
2266 unsigned OpcodeIndex;
2267 switch (VT.getSimpleVT().SimpleTy) {
2268 default: llvm_unreachable("unhandled vst type")::llvm::llvm_unreachable_internal("unhandled vst type", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 2268)
;
2269 // Double-register operations:
2270 case MVT::v8i8: OpcodeIndex = 0; break;
2271 case MVT::v4f16:
2272 case MVT::v4bf16:
2273 case MVT::v4i16: OpcodeIndex = 1; break;
2274 case MVT::v2f32:
2275 case MVT::v2i32: OpcodeIndex = 2; break;
2276 case MVT::v1i64: OpcodeIndex = 3; break;
2277 // Quad-register operations:
2278 case MVT::v16i8: OpcodeIndex = 0; break;
2279 case MVT::v8f16:
2280 case MVT::v8bf16:
2281 case MVT::v8i16: OpcodeIndex = 1; break;
2282 case MVT::v4f32:
2283 case MVT::v4i32: OpcodeIndex = 2; break;
2284 case MVT::v2f64:
2285 case MVT::v2i64: OpcodeIndex = 3; break;
2286 }
2287
2288 std::vector<EVT> ResTys;
2289 if (isUpdating)
2290 ResTys.push_back(MVT::i32);
2291 ResTys.push_back(MVT::Other);
2292
2293 SDValue Pred = getAL(CurDAG, dl);
2294 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2295 SmallVector<SDValue, 7> Ops;
2296
2297 // Double registers and VST1/VST2 quad registers are directly supported.
2298 if (is64BitVector || NumVecs <= 2) {
2299 SDValue SrcReg;
2300 if (NumVecs == 1) {
2301 SrcReg = N->getOperand(Vec0Idx);
2302 } else if (is64BitVector) {
2303 // Form a REG_SEQUENCE to force register allocation.
2304 SDValue V0 = N->getOperand(Vec0Idx + 0);
2305 SDValue V1 = N->getOperand(Vec0Idx + 1);
2306 if (NumVecs == 2)
2307 SrcReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
2308 else {
2309 SDValue V2 = N->getOperand(Vec0Idx + 2);
2310 // If it's a vst3, form a quad D-register and leave the last part as
2311 // an undef.
2312 SDValue V3 = (NumVecs == 3)
2313 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
2314 : N->getOperand(Vec0Idx + 3);
2315 SrcReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
2316 }
2317 } else {
2318 // Form a QQ register.
2319 SDValue Q0 = N->getOperand(Vec0Idx);
2320 SDValue Q1 = N->getOperand(Vec0Idx + 1);
2321 SrcReg = SDValue(createQRegPairNode(MVT::v4i64, Q0, Q1), 0);
2322 }
2323
2324 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
2325 QOpcodes0[OpcodeIndex]);
2326 Ops.push_back(MemAddr);
2327 Ops.push_back(Align);
2328 if (isUpdating) {
2329 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2330 bool IsImmUpdate = isPerfectIncrement(Inc, VT, NumVecs);
2331 if (!IsImmUpdate) {
2332 // We use a VST1 for v1i64 even if the pseudo says VST2/3/4, so
2333 // check for the opcode rather than the number of vector elements.
2334 if (isVSTfixed(Opc))
2335 Opc = getVLDSTRegisterUpdateOpcode(Opc);
2336 Ops.push_back(Inc);
2337 }
2338 // VST1/VST2 fixed increment does not need Reg0 so only include it in
2339 // the operands if not such an opcode.
2340 else if (!isVSTfixed(Opc))
2341 Ops.push_back(Reg0);
2342 }
2343 Ops.push_back(SrcReg);
2344 Ops.push_back(Pred);
2345 Ops.push_back(Reg0);
2346 Ops.push_back(Chain);
2347 SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2348
2349 // Transfer memoperands.
2350 CurDAG->setNodeMemRefs(cast<MachineSDNode>(VSt), {MemOp});
2351
2352 ReplaceNode(N, VSt);
2353 return;
2354 }
2355
2356 // Otherwise, quad registers are stored with two separate instructions,
2357 // where one stores the even registers and the other stores the odd registers.
2358
2359 // Form the QQQQ REG_SEQUENCE.
2360 SDValue V0 = N->getOperand(Vec0Idx + 0);
2361 SDValue V1 = N->getOperand(Vec0Idx + 1);
2362 SDValue V2 = N->getOperand(Vec0Idx + 2);
2363 SDValue V3 = (NumVecs == 3)
2364 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2365 : N->getOperand(Vec0Idx + 3);
2366 SDValue RegSeq = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
2367
2368 // Store the even D registers. This is always an updating store, so that it
2369 // provides the address to the second store for the odd subregs.
2370 const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain };
2371 SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
2372 MemAddr.getValueType(),
2373 MVT::Other, OpsA);
2374 CurDAG->setNodeMemRefs(cast<MachineSDNode>(VStA), {MemOp});
2375 Chain = SDValue(VStA, 1);
2376
2377 // Store the odd D registers.
2378 Ops.push_back(SDValue(VStA, 0));
2379 Ops.push_back(Align);
2380 if (isUpdating) {
2381 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2382 assert(isa<ConstantSDNode>(Inc.getNode()) &&(static_cast <bool> (isa<ConstantSDNode>(Inc.getNode
()) && "only constant post-increment update allowed for VST3/4"
) ? void (0) : __assert_fail ("isa<ConstantSDNode>(Inc.getNode()) && \"only constant post-increment update allowed for VST3/4\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 2383, __extension__ __PRETTY_FUNCTION__))
2383 "only constant post-increment update allowed for VST3/4")(static_cast <bool> (isa<ConstantSDNode>(Inc.getNode
()) && "only constant post-increment update allowed for VST3/4"
) ? void (0) : __assert_fail ("isa<ConstantSDNode>(Inc.getNode()) && \"only constant post-increment update allowed for VST3/4\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 2383, __extension__ __PRETTY_FUNCTION__))
;
2384 (void)Inc;
2385 Ops.push_back(Reg0);
2386 }
2387 Ops.push_back(RegSeq);
2388 Ops.push_back(Pred);
2389 Ops.push_back(Reg0);
2390 Ops.push_back(Chain);
2391 SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
2392 Ops);
2393 CurDAG->setNodeMemRefs(cast<MachineSDNode>(VStB), {MemOp});
2394 ReplaceNode(N, VStB);
2395}
2396
// Select a NEON per-lane load/store (vld2lane..vld4lane / vst2lane..vst4lane)
// intrinsic or its post-increment updating form. The NumVecs source/dest
// vectors are bundled into one super-register (D-pair, Q-pair, or quad) so a
// single machine instruction can access one lane of each. DOpcodes/QOpcodes
// are indexed by element size (see OpcodeIndex switch below). The operand
// push order below must match the machine instruction definitions exactly.
void ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
                                      unsigned NumVecs,
                                      const uint16_t *DOpcodes,
                                      const uint16_t *QOpcodes) {
  assert(Subtarget->hasNEON());
  assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
  SDLoc dl(N);

  SDValue MemAddr, Align;
  bool IsIntrinsic = !isUpdating; // By coincidence, all supported updating
                                  // nodes are not intrinsics.
  unsigned AddrOpIdx = IsIntrinsic ? 2 : 1;
  unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return;

  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();

  SDValue Chain = N->getOperand(0);
  // The lane index is the operand immediately after the NumVecs vectors.
  unsigned Lane =
    cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
  EVT VT = N->getOperand(Vec0Idx).getValueType();
  bool is64BitVector = VT.is64BitVector();

  // Clamp the encoded alignment to what the instruction can express; a
  // 3-vector access never takes an alignment operand (hence the != 3 guard).
  unsigned Alignment = 0;
  if (NumVecs != 3) {
    Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
    unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
    if (Alignment > NumBytes)
      Alignment = NumBytes;
    if (Alignment < 8 && Alignment < NumBytes)
      Alignment = 0;
    // Alignment must be a power of two; make sure of that.
    Alignment = (Alignment & -Alignment);
    if (Alignment == 1)
      Alignment = 0;
  }
  Align = CurDAG->getTargetConstant(Alignment, dl, MVT::i32);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld/vst lane type");
  // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4f16:
  case MVT::v4bf16:
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  // Quad-register operations:
  case MVT::v8f16:
  case MVT::v8bf16:
  case MVT::v8i16: OpcodeIndex = 0; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 1; break;
  }

  // Loads produce one wide i64-vector covering the whole super-register
  // (rounded up to 4 for the 3-vector case), then MVT::i32 for the updated
  // pointer if post-incrementing, then the chain.
  std::vector<EVT> ResTys;
  if (IsLoad) {
    unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
    if (!is64BitVector)
      ResTyElts *= 2;
    ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(),
                                      MVT::i64, ResTyElts));
  }
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG, dl);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);

  SmallVector<SDValue, 8> Ops;
  Ops.push_back(MemAddr);
  Ops.push_back(Align);
  if (isUpdating) {
    SDValue Inc = N->getOperand(AddrOpIdx + 1);
    bool IsImmUpdate =
        isPerfectIncrement(Inc, VT.getVectorElementType(), NumVecs);
    // Reg0 in the increment slot selects the fixed-increment form.
    Ops.push_back(IsImmUpdate ? Reg0 : Inc);
  }

  // Form the super-register holding all NumVecs input vectors; a vld3/vst3
  // fills the fourth slot of the quad with IMPLICIT_DEF.
  SDValue SuperReg;
  SDValue V0 = N->getOperand(Vec0Idx + 0);
  SDValue V1 = N->getOperand(Vec0Idx + 1);
  if (NumVecs == 2) {
    if (is64BitVector)
      SuperReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
    else
      SuperReg = SDValue(createQRegPairNode(MVT::v4i64, V0, V1), 0);
  } else {
    SDValue V2 = N->getOperand(Vec0Idx + 2);
    SDValue V3 = (NumVecs == 3)
      ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
      : N->getOperand(Vec0Idx + 3);
    if (is64BitVector)
      SuperReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
    else
      SuperReg = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
  }
  Ops.push_back(SuperReg);
  Ops.push_back(getI32Imm(Lane, dl));
  Ops.push_back(Pred);
  Ops.push_back(Reg0);
  Ops.push_back(Chain);

  unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
                  QOpcodes[OpcodeIndex]);
  SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(VLdLn), {MemOp});
  if (!IsLoad) {
    ReplaceNode(N, VLdLn);
    return;
  }

  // Extract the subregisters.
  SuperReg = SDValue(VLdLn, 0);
  static_assert(ARM::dsub_7 == ARM::dsub_0 + 7 &&
                ARM::qsub_3 == ARM::qsub_0 + 3,
                "Unexpected subreg numbering");
  unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
  // Chain result, then (for updating forms) the post-incremented pointer.
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
  CurDAG->RemoveDeadNode(N);
}
2526
2527template <typename SDValueVector>
2528void ARMDAGToDAGISel::AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
2529 SDValue PredicateMask) {
2530 Ops.push_back(CurDAG->getTargetConstant(ARMVCC::Then, Loc, MVT::i32));
2531 Ops.push_back(PredicateMask);
2532}
2533
2534template <typename SDValueVector>
2535void ARMDAGToDAGISel::AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
2536 SDValue PredicateMask,
2537 SDValue Inactive) {
2538 Ops.push_back(CurDAG->getTargetConstant(ARMVCC::Then, Loc, MVT::i32));
2539 Ops.push_back(PredicateMask);
2540 Ops.push_back(Inactive);
2541}
2542
2543template <typename SDValueVector>
2544void ARMDAGToDAGISel::AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc) {
2545 Ops.push_back(CurDAG->getTargetConstant(ARMVCC::None, Loc, MVT::i32));
2546 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
2547}
2548
2549template <typename SDValueVector>
2550void ARMDAGToDAGISel::AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
2551 EVT InactiveTy) {
2552 Ops.push_back(CurDAG->getTargetConstant(ARMVCC::None, Loc, MVT::i32));
2553 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
2554 Ops.push_back(SDValue(
2555 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, Loc, InactiveTy), 0));
2556}
2557
// Select an MVE memory intrinsic with writeback. Opcodes holds two entries,
// chosen by the element size (32 or 64 bits) of the node's second result.
// NOTE(review): the machine node's results come back in a different order
// than the intrinsic's (value/writeback swapped), hence the cross-wired
// ReplaceUses calls at the bottom.
void ARMDAGToDAGISel::SelectMVE_WB(SDNode *N, const uint16_t *Opcodes,
                                   bool Predicated) {
  SDLoc Loc(N);
  SmallVector<SDValue, 8> Ops;

  uint16_t Opcode;
  switch (N->getValueType(1).getVectorElementType().getSizeInBits()) {
  case 32:
    Opcode = Opcodes[0];
    break;
  case 64:
    Opcode = Opcodes[1];
    break;
  default:
    llvm_unreachable("bad vector element size in SelectMVE_WB");
  }

  Ops.push_back(N->getOperand(2)); // vector of base addresses

  int32_t ImmValue = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
  Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate offset

  if (Predicated)
    AddMVEPredicateToOps(Ops, Loc, N->getOperand(4));
  else
    AddEmptyMVEPredicateToOps(Ops, Loc);

  Ops.push_back(N->getOperand(0)); // chain

  // Machine-node result order: (writeback, value, chain) vs. the intrinsic's
  // (value, writeback, chain) -- see the swapped ReplaceUses below.
  SmallVector<EVT, 8> VTs;
  VTs.push_back(N->getValueType(1));
  VTs.push_back(N->getValueType(0));
  VTs.push_back(N->getValueType(2));

  SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), VTs, Ops);
  ReplaceUses(SDValue(N, 0), SDValue(New, 1));
  ReplaceUses(SDValue(N, 1), SDValue(New, 0));
  ReplaceUses(SDValue(N, 2), SDValue(New, 2));
  transferMemOperands(N, New);
  CurDAG->RemoveDeadNode(N);
}
2599
// Select an MVE long (64-bit, two-GPR) scalar shift. The shift count is
// either an immediate (Immediate == true) or a register operand, and some
// variants carry an extra saturation-width immediate that is encoded as a
// single bit (0 for 64-bit saturation, 1 otherwise).
void ARMDAGToDAGISel::SelectMVE_LongShift(SDNode *N, uint16_t Opcode,
                                          bool Immediate,
                                          bool HasSaturationOperand) {
  SDLoc Loc(N);
  SmallVector<SDValue, 8> Ops;

  // Two 32-bit halves of the value to be shifted
  Ops.push_back(N->getOperand(1));
  Ops.push_back(N->getOperand(2));

  // The shift count
  if (Immediate) {
    int32_t ImmValue = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
    Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate shift count
  } else {
    Ops.push_back(N->getOperand(3));
  }

  // The immediate saturation operand, if any
  if (HasSaturationOperand) {
    int32_t SatOp = cast<ConstantSDNode>(N->getOperand(4))->getZExtValue();
    int SatBit = (SatOp == 64 ? 0 : 1);
    Ops.push_back(getI32Imm(SatBit, Loc));
  }

  // MVE scalar shifts are IT-predicable, so include the standard
  // predicate arguments.
  Ops.push_back(getAL(CurDAG, Loc));
  Ops.push_back(CurDAG->getRegister(0, MVT::i32));

  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
}
2632
// Select an MVE VADC/VSBC (add/subtract with carry). When the incoming carry
// is a constant whose carry bit (FPSCR bit 29) already matches what the
// carry-less instruction assumes (clear for add, set for subtract), the
// carry-input operand is dropped and the no-carry opcode is used instead.
void ARMDAGToDAGISel::SelectMVE_VADCSBC(SDNode *N, uint16_t OpcodeWithCarry,
                                        uint16_t OpcodeWithNoCarry,
                                        bool Add, bool Predicated) {
  SDLoc Loc(N);
  SmallVector<SDValue, 8> Ops;
  uint16_t Opcode;

  // Predicated intrinsics carry the inactive-lane value at operand 1, so the
  // real inputs start one slot later.
  unsigned FirstInputOp = Predicated ? 2 : 1;

  // Two input vectors and the input carry flag
  Ops.push_back(N->getOperand(FirstInputOp));
  Ops.push_back(N->getOperand(FirstInputOp + 1));
  SDValue CarryIn = N->getOperand(FirstInputOp + 2);
  ConstantSDNode *CarryInConstant = dyn_cast<ConstantSDNode>(CarryIn);
  uint32_t CarryMask = 1 << 29; // carry lives in bit 29 of the FPSCR value
  uint32_t CarryExpected = Add ? 0 : CarryMask;
  if (CarryInConstant &&
      (CarryInConstant->getZExtValue() & CarryMask) == CarryExpected) {
    Opcode = OpcodeWithNoCarry;
  } else {
    Ops.push_back(CarryIn);
    Opcode = OpcodeWithCarry;
  }

  if (Predicated)
    AddMVEPredicateToOps(Ops, Loc,
                         N->getOperand(FirstInputOp + 3), // predicate
                         N->getOperand(FirstInputOp - 1)); // inactive
  else
    AddEmptyMVEPredicateToOps(Ops, Loc, N->getValueType(0));

  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
}
2666
2667void ARMDAGToDAGISel::SelectMVE_VSHLC(SDNode *N, bool Predicated) {
2668 SDLoc Loc(N);
2669 SmallVector<SDValue, 8> Ops;
2670
2671 // One vector input, followed by a 32-bit word of bits to shift in
2672 // and then an immediate shift count
2673 Ops.push_back(N->getOperand(1));
2674 Ops.push_back(N->getOperand(2));
2675 int32_t ImmValue = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
2676 Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate shift count
2677
2678 if (Predicated)
2679 AddMVEPredicateToOps(Ops, Loc, N->getOperand(4));
2680 else
2681 AddEmptyMVEPredicateToOps(Ops, Loc);
2682
2683 CurDAG->SelectNodeTo(N, ARM::MVE_VSHLC, N->getVTList(), makeArrayRef(Ops));
2684}
2685
2686static bool SDValueToConstBool(SDValue SDVal) {
2687 assert(isa<ConstantSDNode>(SDVal) && "expected a compile-time constant")(static_cast <bool> (isa<ConstantSDNode>(SDVal) &&
"expected a compile-time constant") ? void (0) : __assert_fail
("isa<ConstantSDNode>(SDVal) && \"expected a compile-time constant\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 2687, __extension__ __PRETTY_FUNCTION__))
;
2688 ConstantSDNode *SDValConstant = dyn_cast<ConstantSDNode>(SDVal);
2689 uint64_t Value = SDValConstant->getZExtValue();
2690 assert((Value == 0 || Value == 1) && "expected value 0 or 1")(static_cast <bool> ((Value == 0 || Value == 1) &&
"expected value 0 or 1") ? void (0) : __assert_fail ("(Value == 0 || Value == 1) && \"expected value 0 or 1\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 2690, __extension__ __PRETTY_FUNCTION__))
;
2691 return Value;
2692}
2693
// Common selection for the MVE VMLLDAV/VRMLLDAVH families. OpcodesS/OpcodesU
// are flat tables laid out as [sub][exchange][accumulate][type]: Stride is
// the number of element-size variants per entry and TySize indexes within
// one entry, so the pointer bumps below (4*Stride, 2*Stride, Stride) walk the
// sub/exchange/accumulate axes respectively.
void ARMDAGToDAGISel::SelectBaseMVE_VMLLDAV(SDNode *N, bool Predicated,
                                            const uint16_t *OpcodesS,
                                            const uint16_t *OpcodesU,
                                            size_t Stride, size_t TySize) {
  assert(TySize < Stride && "Invalid TySize");
  // Operands 1..3 are compile-time flags selecting the variant.
  bool IsUnsigned = SDValueToConstBool(N->getOperand(1));
  bool IsSub = SDValueToConstBool(N->getOperand(2));
  bool IsExchange = SDValueToConstBool(N->getOperand(3));
  if (IsUnsigned) {
    assert(!IsSub &&
           "Unsigned versions of vmlsldav[a]/vrmlsldavh[a] do not exist");
    assert(!IsExchange &&
           "Unsigned versions of vmlaldav[a]x/vrmlaldavh[a]x do not exist");
  }

  // True iff operand OpNo is the constant zero.
  auto OpIsZero = [N](size_t OpNo) {
    if (ConstantSDNode *OpConst = dyn_cast<ConstantSDNode>(N->getOperand(OpNo)))
      if (OpConst->getZExtValue() == 0)
        return true;
    return false;
  };

  // If the input accumulator value is not zero, select an instruction with
  // accumulator, otherwise select an instruction without accumulator
  bool IsAccum = !(OpIsZero(4) && OpIsZero(5));

  const uint16_t *Opcodes = IsUnsigned ? OpcodesU : OpcodesS;
  if (IsSub)
    Opcodes += 4 * Stride;
  if (IsExchange)
    Opcodes += 2 * Stride;
  if (IsAccum)
    Opcodes += Stride;
  uint16_t Opcode = Opcodes[TySize];

  SDLoc Loc(N);
  SmallVector<SDValue, 8> Ops;
  // Push the accumulator operands, if they are used
  if (IsAccum) {
    Ops.push_back(N->getOperand(4));
    Ops.push_back(N->getOperand(5));
  }
  // Push the two vector operands
  Ops.push_back(N->getOperand(6));
  Ops.push_back(N->getOperand(7));

  if (Predicated)
    AddMVEPredicateToOps(Ops, Loc, N->getOperand(8));
  else
    AddEmptyMVEPredicateToOps(Ops, Loc);

  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
}
2747
// Select an MVE VMLLDAV: two element sizes (16/32 bits) are supported, so the
// opcode tables use Stride 2 with SizeIndex choosing between them.
void ARMDAGToDAGISel::SelectMVE_VMLLDAV(SDNode *N, bool Predicated,
                                        const uint16_t *OpcodesS,
                                        const uint16_t *OpcodesU) {
  EVT VecTy = N->getOperand(6).getValueType();
  size_t SizeIndex;
  switch (VecTy.getVectorElementType().getSizeInBits()) {
  case 16:
    SizeIndex = 0;
    break;
  case 32:
    SizeIndex = 1;
    break;
  default:
    llvm_unreachable("bad vector element size");
  }

  SelectBaseMVE_VMLLDAV(N, Predicated, OpcodesS, OpcodesU, 2, SizeIndex);
}
2766
// Select an MVE VRMLLDAVH: only the 32-bit element size exists, so the opcode
// tables are indexed with Stride 1 and TySize 0.
void ARMDAGToDAGISel::SelectMVE_VRMLLDAVH(SDNode *N, bool Predicated,
                                          const uint16_t *OpcodesS,
                                          const uint16_t *OpcodesU) {
  assert(
      N->getOperand(6).getValueType().getVectorElementType().getSizeInBits() ==
          32 &&
      "bad vector element size");
  SelectBaseMVE_VMLLDAV(N, Predicated, OpcodesS, OpcodesU, 1, 0);
}
2776
// Select an MVE VLD2/VLD4 interleaving load. The load is emitted as NumVecs
// chained stage instructions (one per deinterleaved vector), each feeding its
// partially-filled super-register result into the next; only the final stage
// may carry a pointer writeback. Opcodes is indexed first by element size
// (8/16/32), then by stage.
void ARMDAGToDAGISel::SelectMVE_VLD(SDNode *N, unsigned NumVecs,
                                    const uint16_t *const *Opcodes,
                                    bool HasWriteback) {
  EVT VT = N->getValueType(0);
  SDLoc Loc(N);

  const uint16_t *OurOpcodes;
  switch (VT.getVectorElementType().getSizeInBits()) {
  case 8:
    OurOpcodes = Opcodes[0];
    break;
  case 16:
    OurOpcodes = Opcodes[1];
    break;
  case 32:
    OurOpcodes = Opcodes[2];
    break;
  default:
    llvm_unreachable("bad vector element size in SelectMVE_VLD");
  }

  // One wide i64 vector covers the whole set of NumVecs Q registers.
  EVT DataTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, NumVecs * 2);
  SmallVector<EVT, 4> ResultTys = {DataTy, MVT::Other};
  unsigned PtrOperand = HasWriteback ? 1 : 2;

  // Seed the super-register chain with IMPLICIT_DEF; each stage overwrites
  // its own part.
  auto Data = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, Loc, DataTy), 0);
  SDValue Chain = N->getOperand(0);
  // Add a MVE_VLDn instruction for each Vec, except the last
  for (unsigned Stage = 0; Stage < NumVecs - 1; ++Stage) {
    SDValue Ops[] = {Data, N->getOperand(PtrOperand), Chain};
    auto LoadInst =
        CurDAG->getMachineNode(OurOpcodes[Stage], Loc, ResultTys, Ops);
    Data = SDValue(LoadInst, 0);
    Chain = SDValue(LoadInst, 1);
    transferMemOperands(N, LoadInst);
  }
  // The last may need a writeback on it
  if (HasWriteback)
    ResultTys = {DataTy, MVT::i32, MVT::Other};
  SDValue Ops[] = {Data, N->getOperand(PtrOperand), Chain};
  auto LoadInst =
      CurDAG->getMachineNode(OurOpcodes[NumVecs - 1], Loc, ResultTys, Ops);
  transferMemOperands(N, LoadInst);

  // Replace the intrinsic's results: one extracted Q register per vector,
  // then (optionally) the written-back pointer, then the chain.
  unsigned i;
  for (i = 0; i < NumVecs; i++)
    ReplaceUses(SDValue(N, i),
                CurDAG->getTargetExtractSubreg(ARM::qsub_0 + i, Loc, VT,
                                               SDValue(LoadInst, 0)));
  if (HasWriteback)
    ReplaceUses(SDValue(N, i++), SDValue(LoadInst, 1));
  ReplaceUses(SDValue(N, i), SDValue(LoadInst, HasWriteback ? 2 : 1));
  CurDAG->RemoveDeadNode(N);
}
2832
// Select an MVE VIDUP/VDDUP (and wrapping VIWDUP/VDWDUP) incrementing or
// decrementing vector-duplicate. Opcodes holds one entry per element size
// (8/16/32). Wrapping variants carry an extra limit operand; predicated
// variants carry an inactive-lane value before the base and a predicate mask
// after the step.
void ARMDAGToDAGISel::SelectMVE_VxDUP(SDNode *N, const uint16_t *Opcodes,
                                      bool Wrapping, bool Predicated) {
  EVT VT = N->getValueType(0);
  SDLoc Loc(N);

  uint16_t Opcode;
  switch (VT.getScalarSizeInBits()) {
  case 8:
    Opcode = Opcodes[0];
    break;
  case 16:
    Opcode = Opcodes[1];
    break;
  case 32:
    Opcode = Opcodes[2];
    break;
  default:
    llvm_unreachable("bad vector element size in SelectMVE_VxDUP");
  }

  SmallVector<SDValue, 8> Ops;
  // OpIdx walks the intrinsic's operands; which ones exist depends on the
  // Wrapping/Predicated flags, so it is consumed strictly in order.
  unsigned OpIdx = 1;

  SDValue Inactive;
  if (Predicated)
    Inactive = N->getOperand(OpIdx++);

  Ops.push_back(N->getOperand(OpIdx++));     // base
  if (Wrapping)
    Ops.push_back(N->getOperand(OpIdx++));   // limit

  SDValue ImmOp = N->getOperand(OpIdx++);    // step
  int ImmValue = cast<ConstantSDNode>(ImmOp)->getZExtValue();
  Ops.push_back(getI32Imm(ImmValue, Loc));

  if (Predicated)
    AddMVEPredicateToOps(Ops, Loc, N->getOperand(OpIdx), Inactive);
  else
    AddEmptyMVEPredicateToOps(Ops, Loc, N->getValueType(0));

  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
}
2875
// Select a dual-register CDE (Custom Datapath Extension) instruction
// (CX1D/CX2D/CX3D). The intrinsic produces two 32-bit results; the machine
// instruction produces one untyped GPR pair, which is split back into its
// gsub_0/gsub_1 halves below (swapped on big-endian targets).
void ARMDAGToDAGISel::SelectCDE_CXxD(SDNode *N, uint16_t Opcode,
                                     size_t NumExtraOps, bool HasAccum) {
  bool IsBigEndian = CurDAG->getDataLayout().isBigEndian();
  SDLoc Loc(N);
  SmallVector<SDValue, 8> Ops;

  unsigned OpIdx = 1;

  // Convert and append the immediate operand designating the coprocessor.
  SDValue ImmCorpoc = N->getOperand(OpIdx++);
  uint32_t ImmCoprocVal = cast<ConstantSDNode>(ImmCorpoc)->getZExtValue();
  Ops.push_back(getI32Imm(ImmCoprocVal, Loc));

  // For accumulating variants copy the low and high order parts of the
  // accumulator into a register pair and add it to the operand vector.
  if (HasAccum) {
    SDValue AccLo = N->getOperand(OpIdx++);
    SDValue AccHi = N->getOperand(OpIdx++);
    if (IsBigEndian)
      std::swap(AccLo, AccHi);
    Ops.push_back(SDValue(createGPRPairNode(MVT::Untyped, AccLo, AccHi), 0));
  }

  // Copy extra operands as-is.
  for (size_t I = 0; I < NumExtraOps; I++)
    Ops.push_back(N->getOperand(OpIdx++));

  // Convert and append the immediate operand
  SDValue Imm = N->getOperand(OpIdx);
  uint32_t ImmVal = cast<ConstantSDNode>(Imm)->getZExtValue();
  Ops.push_back(getI32Imm(ImmVal, Loc));

  // Accumulating variants are IT-predicable, add predicate operands.
  if (HasAccum) {
    SDValue Pred = getAL(CurDAG, Loc);
    SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
    Ops.push_back(Pred);
    Ops.push_back(PredReg);
  }

  // Create the CDE instruction
  SDNode *InstrNode = CurDAG->getMachineNode(Opcode, Loc, MVT::Untyped, Ops);
  SDValue ResultPair = SDValue(InstrNode, 0);

  // The original intrinsic had two outputs, and the output of the dual-register
  // CDE instruction is a register pair. We need to extract the two subregisters
  // and replace all uses of the original outputs with the extracted
  // subregisters.
  uint16_t SubRegs[2] = {ARM::gsub_0, ARM::gsub_1};
  if (IsBigEndian)
    std::swap(SubRegs[0], SubRegs[1]);

  for (size_t ResIdx = 0; ResIdx < 2; ResIdx++) {
    if (SDValue(N, ResIdx).use_empty())
      continue;
    SDValue SubReg = CurDAG->getTargetExtractSubreg(SubRegs[ResIdx], Loc,
                                                    MVT::i32, ResultPair);
    ReplaceUses(SDValue(N, ResIdx), SubReg);
  }

  CurDAG->RemoveDeadNode(N);
}
2938
// Select a NEON load-and-duplicate (vld1dup..vld4dup), optionally with
// post-increment writeback. For quad-register multi-vector forms the load is
// split into two machine instructions (QOpcodes0 for the even D registers,
// QOpcodes1 for the odd ones) chained together. Opcode tables are indexed by
// element size via OpcodeIndex.
void ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool IsIntrinsic,
                                   bool isUpdating, unsigned NumVecs,
                                   const uint16_t *DOpcodes,
                                   const uint16_t *QOpcodes0,
                                   const uint16_t *QOpcodes1) {
  assert(Subtarget->hasNEON());
  assert(NumVecs >= 1 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
  SDLoc dl(N);

  SDValue MemAddr, Align;
  unsigned AddrOpIdx = IsIntrinsic ? 2 : 1;
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return;

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);
  bool is64BitVector = VT.is64BitVector();

  // Clamp the encoded alignment to what the instruction can express; a
  // 3-vector access never takes an alignment operand (hence the != 3 guard).
  unsigned Alignment = 0;
  if (NumVecs != 3) {
    Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
    unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
    if (Alignment > NumBytes)
      Alignment = NumBytes;
    if (Alignment < 8 && Alignment < NumBytes)
      Alignment = 0;
    // Alignment must be a power of two; make sure of that.
    Alignment = (Alignment & -Alignment);
    if (Alignment == 1)
      Alignment = 0;
  }
  Align = CurDAG->getTargetConstant(Alignment, dl, MVT::i32);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld-dup type");
  case MVT::v8i8:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v4i16:
  case MVT::v8i16:
  case MVT::v4f16:
  case MVT::v8f16:
  case MVT::v4bf16:
  case MVT::v8bf16:
                  OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32:
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v1f64:
  case MVT::v1i64: OpcodeIndex = 3; break;
  }

  // One wide i64 vector result covers the whole destination super-register
  // (3-vector forms round up to a quad, with the last part undef).
  unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
  if (!is64BitVector)
    ResTyElts *= 2;
  EVT ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);

  std::vector<EVT> ResTys;
  ResTys.push_back(ResTy);
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG, dl);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(MemAddr);
  Ops.push_back(Align);
  unsigned Opc = is64BitVector ? DOpcodes[OpcodeIndex]
                 : (NumVecs == 1) ? QOpcodes0[OpcodeIndex]
                 : QOpcodes1[OpcodeIndex];
  if (isUpdating) {
    SDValue Inc = N->getOperand(2);
    bool IsImmUpdate =
        isPerfectIncrement(Inc, VT.getVectorElementType(), NumVecs);
    if (IsImmUpdate) {
      // Fixed-increment opcodes encode the increment implicitly and take no
      // Reg0 placeholder.
      if (!isVLDfixed(Opc))
        Ops.push_back(Reg0);
    } else {
      if (isVLDfixed(Opc))
        Opc = getVLDSTRegisterUpdateOpcode(Opc);
      Ops.push_back(Inc);
    }
  }
  if (is64BitVector || NumVecs == 1) {
    // Double registers and VLD1 quad registers are directly supported.
  } else if (NumVecs == 2) {
    // Quad vld2dup: first instruction loads the even D registers; its result
    // is unused here (only the chain), the second instruction reloads.
    const SDValue OpsA[] = {MemAddr, Align, Pred, Reg0, Chain};
    SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl, ResTy,
                                          MVT::Other, OpsA);
    Chain = SDValue(VLdA, 1);
  } else {
    // Quad vld3/4dup: the even-register instruction's result feeds the odd-
    // register instruction as its input super-register.
    SDValue ImplDef = SDValue(
        CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
    const SDValue OpsA[] = {MemAddr, Align, ImplDef, Pred, Reg0, Chain};
    SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl, ResTy,
                                          MVT::Other, OpsA);
    Ops.push_back(SDValue(VLdA, 0));
    Chain = SDValue(VLdA, 1);
  }

  Ops.push_back(Pred);
  Ops.push_back(Reg0);
  Ops.push_back(Chain);

  SDNode *VLdDup = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Transfer memoperands.
  MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(VLdDup), {MemOp});

  // Extract the subregisters.
  if (NumVecs == 1) {
    ReplaceUses(SDValue(N, 0), SDValue(VLdDup, 0));
  } else {
    SDValue SuperReg = SDValue(VLdDup, 0);
    static_assert(ARM::dsub_7 == ARM::dsub_0 + 7, "Unexpected subreg numbering");
    unsigned SubIdx = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
    for (unsigned Vec = 0; Vec != NumVecs; ++Vec) {
      ReplaceUses(SDValue(N, Vec),
                  CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
    }
  }
  // Chain result, then (for updating forms) the post-incremented pointer.
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
  CurDAG->RemoveDeadNode(N);
}
3069
3070bool ARMDAGToDAGISel::tryInsertVectorElt(SDNode *N) {
3071 if (!Subtarget->hasMVEIntegerOps())
3072 return false;
3073
3074 SDLoc dl(N);
3075
3076 // We are trying to use VMOV/VMOVX/VINS to more efficiently lower insert and
3077 // extracts of v8f16 and v8i16 vectors. Check that we have two adjacent
3078 // inserts of the correct type:
3079 SDValue Ins1 = SDValue(N, 0);
3080 SDValue Ins2 = N->getOperand(0);
3081 EVT VT = Ins1.getValueType();
3082 if (Ins2.getOpcode() != ISD::INSERT_VECTOR_ELT || !Ins2.hasOneUse() ||
3083 !isa<ConstantSDNode>(Ins1.getOperand(2)) ||
3084 !isa<ConstantSDNode>(Ins2.getOperand(2)) ||
3085 (VT != MVT::v8f16 && VT != MVT::v8i16) || (Ins2.getValueType() != VT))
3086 return false;
3087
3088 unsigned Lane1 = Ins1.getConstantOperandVal(2);
3089 unsigned Lane2 = Ins2.getConstantOperandVal(2);
3090 if (Lane2 % 2 != 0 || Lane1 != Lane2 + 1)
3091 return false;
3092
3093 // If the inserted values will be able to use T/B already, leave it to the
3094 // existing tablegen patterns. For example VCVTT/VCVTB.
3095 SDValue Val1 = Ins1.getOperand(1);
3096 SDValue Val2 = Ins2.getOperand(1);
3097 if (Val1.getOpcode() == ISD::FP_ROUND || Val2.getOpcode() == ISD::FP_ROUND)
3098 return false;
3099
3100 // Check if the inserted values are both extracts.
3101 if ((Val1.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
3102 Val1.getOpcode() == ARMISD::VGETLANEu) &&
3103 (Val2.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
3104 Val2.getOpcode() == ARMISD::VGETLANEu) &&
3105 isa<ConstantSDNode>(Val1.getOperand(1)) &&
3106 isa<ConstantSDNode>(Val2.getOperand(1)) &&
3107 (Val1.getOperand(0).getValueType() == MVT::v8f16 ||
3108 Val1.getOperand(0).getValueType() == MVT::v8i16) &&
3109 (Val2.getOperand(0).getValueType() == MVT::v8f16 ||
3110 Val2.getOperand(0).getValueType() == MVT::v8i16)) {
3111 unsigned ExtractLane1 = Val1.getConstantOperandVal(1);
3112 unsigned ExtractLane2 = Val2.getConstantOperandVal(1);
3113
3114 // If the two extracted lanes are from the same place and adjacent, this
3115 // simplifies into a f32 lane move.
3116 if (Val1.getOperand(0) == Val2.getOperand(0) && ExtractLane2 % 2 == 0 &&
3117 ExtractLane1 == ExtractLane2 + 1) {
3118 SDValue NewExt = CurDAG->getTargetExtractSubreg(
3119 ARM::ssub_0 + ExtractLane2 / 2, dl, MVT::f32, Val1.getOperand(0));
3120 SDValue NewIns = CurDAG->getTargetInsertSubreg(
3121 ARM::ssub_0 + Lane2 / 2, dl, VT, Ins2.getOperand(0),
3122 NewExt);
3123 ReplaceUses(Ins1, NewIns);
3124 return true;
3125 }
3126
3127 // Else v8i16 pattern of an extract and an insert, with a optional vmovx for
3128 // extracting odd lanes.
3129 if (VT == MVT::v8i16) {
3130 SDValue Inp1 = CurDAG->getTargetExtractSubreg(
3131 ARM::ssub_0 + ExtractLane1 / 2, dl, MVT::f32, Val1.getOperand(0));
3132 SDValue Inp2 = CurDAG->getTargetExtractSubreg(
3133 ARM::ssub_0 + ExtractLane2 / 2, dl, MVT::f32, Val2.getOperand(0));
3134 if (ExtractLane1 % 2 != 0)
3135 Inp1 = SDValue(CurDAG->getMachineNode(ARM::VMOVH, dl, MVT::f32, Inp1), 0);
3136 if (ExtractLane2 % 2 != 0)
3137 Inp2 = SDValue(CurDAG->getMachineNode(ARM::VMOVH, dl, MVT::f32, Inp2), 0);
3138 SDNode *VINS = CurDAG->getMachineNode(ARM::VINSH, dl, MVT::f32, Inp2, Inp1);
3139 SDValue NewIns =
3140 CurDAG->getTargetInsertSubreg(ARM::ssub_0 + Lane2 / 2, dl, MVT::v4f32,
3141 Ins2.getOperand(0), SDValue(VINS, 0));
3142 ReplaceUses(Ins1, NewIns);
3143 return true;
3144 }
3145 }
3146
3147 // The inserted values are not extracted - if they are f16 then insert them
3148 // directly using a VINS.
3149 if (VT == MVT::v8f16) {
3150 SDNode *VINS = CurDAG->getMachineNode(ARM::VINSH, dl, MVT::f32, Val2, Val1);
3151 SDValue NewIns =
3152 CurDAG->getTargetInsertSubreg(ARM::ssub_0 + Lane2 / 2, dl, MVT::v4f32,
3153 Ins2.getOperand(0), SDValue(VINS, 0));
3154 ReplaceUses(Ins1, NewIns);
3155 return true;
3156 }
3157
3158 return false;
3159}
3160
3161bool ARMDAGToDAGISel::transformFixedFloatingPointConversion(SDNode *N,
3162 SDNode *FMul,
3163 bool IsUnsigned,
3164 bool FixedToFloat) {
3165 auto Type = N->getValueType(0);
3166 unsigned ScalarBits = Type.getScalarSizeInBits();
3167 if (ScalarBits > 32)
3168 return false;
3169
3170 SDNodeFlags FMulFlags = FMul->getFlags();
3171 // The fixed-point vcvt and vcvt+vmul are not always equivalent if inf is
3172 // allowed in 16 bit unsigned floats
3173 if (ScalarBits == 16 && !FMulFlags.hasNoInfs() && IsUnsigned)
3174 return false;
3175
3176 SDValue ImmNode = FMul->getOperand(1);
3177 SDValue VecVal = FMul->getOperand(0);
3178 if (VecVal->getOpcode() == ISD::UINT_TO_FP ||
3179 VecVal->getOpcode() == ISD::SINT_TO_FP)
3180 VecVal = VecVal->getOperand(0);
3181
3182 if (VecVal.getValueType().getScalarSizeInBits() != ScalarBits)
3183 return false;
3184
3185 if (ImmNode.getOpcode() == ISD::BITCAST) {
3186 if (ImmNode.getValueType().getScalarSizeInBits() != ScalarBits)
3187 return false;
3188 ImmNode = ImmNode.getOperand(0);
3189 }
3190
3191 if (ImmNode.getValueType().getScalarSizeInBits() != ScalarBits)
3192 return false;
3193
3194 APFloat ImmAPF(0.0f);
3195 switch (ImmNode.getOpcode()) {
3196 case ARMISD::VMOVIMM:
3197 case ARMISD::VDUP: {
3198 if (!isa<ConstantSDNode>(ImmNode.getOperand(0)))
3199 return false;
3200 unsigned Imm = ImmNode.getConstantOperandVal(0);
3201 if (ImmNode.getOpcode() == ARMISD::VMOVIMM)
3202 Imm = ARM_AM::decodeVMOVModImm(Imm, ScalarBits);
3203 ImmAPF =
3204 APFloat(ScalarBits == 32 ? APFloat::IEEEsingle() : APFloat::IEEEhalf(),
3205 APInt(ScalarBits, Imm));
3206 break;
3207 }
3208 case ARMISD::VMOVFPIMM: {
3209 ImmAPF = APFloat(ARM_AM::getFPImmFloat(ImmNode.getConstantOperandVal(0)));
3210 break;
3211 }
3212 default:
3213 return false;
3214 }
3215
3216 // Where n is the number of fractional bits, multiplying by 2^n will convert
3217 // from float to fixed and multiplying by 2^-n will convert from fixed to
3218 // float. Taking log2 of the factor (after taking the inverse in the case of
3219 // float to fixed) will give n.
3220 APFloat ToConvert = ImmAPF;
3221 if (FixedToFloat) {
3222 if (!ImmAPF.getExactInverse(&ToConvert))
3223 return false;
3224 }
3225 APSInt Converted(64, 0);
3226 bool IsExact;
3227 ToConvert.convertToInteger(Converted, llvm::RoundingMode::NearestTiesToEven,
3228 &IsExact);
3229 if (!IsExact || !Converted.isPowerOf2())
3230 return false;
3231
3232 unsigned FracBits = Converted.logBase2();
3233 if (FracBits > ScalarBits)
3234 return false;
3235
3236 SmallVector<SDValue, 3> Ops{
3237 VecVal, CurDAG->getConstant(FracBits, SDLoc(N), MVT::i32)};
3238 AddEmptyMVEPredicateToOps(Ops, SDLoc(N), Type);
3239
3240 unsigned int Opcode;
3241 switch (ScalarBits) {
3242 case 16:
3243 if (FixedToFloat)
3244 Opcode = IsUnsigned ? ARM::MVE_VCVTf16u16_fix : ARM::MVE_VCVTf16s16_fix;
3245 else
3246 Opcode = IsUnsigned ? ARM::MVE_VCVTu16f16_fix : ARM::MVE_VCVTs16f16_fix;
3247 break;
3248 case 32:
3249 if (FixedToFloat)
3250 Opcode = IsUnsigned ? ARM::MVE_VCVTf32u32_fix : ARM::MVE_VCVTf32s32_fix;
3251 else
3252 Opcode = IsUnsigned ? ARM::MVE_VCVTu32f32_fix : ARM::MVE_VCVTs32f32_fix;
3253 break;
3254 default:
3255 llvm_unreachable("unexpected number of scalar bits")::llvm::llvm_unreachable_internal("unexpected number of scalar bits"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 3255)
;
3256 break;
3257 }
3258
3259 ReplaceNode(N, CurDAG->getMachineNode(Opcode, SDLoc(N), Type, Ops));
3260 return true;
3261}
3262
3263bool ARMDAGToDAGISel::tryFP_TO_INT(SDNode *N, SDLoc dl) {
3264 // Transform a floating-point to fixed-point conversion to a VCVT
3265 if (!Subtarget->hasMVEFloatOps())
1
Assuming the condition is false
2
Taking false branch
3266 return false;
3267 EVT Type = N->getValueType(0);
3268 if (!Type.isVector())
3
Calling 'EVT::isVector'
6
Returning from 'EVT::isVector'
7
Assuming the condition is false
8
Taking false branch
3269 return false;
3270 unsigned int ScalarBits = Type.getScalarSizeInBits();
3271
3272 bool IsUnsigned = N->getOpcode() == ISD::FP_TO_UINT;
9
Assuming the condition is true
3273 SDNode *Node = N->getOperand(0).getNode();
3274
3275 // floating-point to fixed-point with one fractional bit gets turned into an
3276 // FP_TO_[U|S]INT(FADD (x, x)) rather than an FP_TO_[U|S]INT(FMUL (x, y))
3277 if (Node->getOpcode() == ISD::FADD) {
10
Assuming the condition is true
11
Taking true branch
3278 if (Node->getOperand(0) != Node->getOperand(1))
12
Calling 'SDValue::operator!='
19
Returning from 'SDValue::operator!='
20
Taking false branch
3279 return false;
3280 SDNodeFlags Flags = Node->getFlags();
3281 // The fixed-point vcvt and vcvt+vmul are not always equivalent if inf is
3282 // allowed in 16 bit unsigned floats
3283 if (ScalarBits == 16 && !Flags.hasNoInfs() && IsUnsigned)
21
Assuming 'ScalarBits' is not equal to 16
3284 return false;
3285
3286 unsigned Opcode;
22
'Opcode' declared without an initial value
3287 switch (ScalarBits) {
23
'Default' branch taken. Execution continues on line 3295
3288 case 16:
3289 Opcode = IsUnsigned ? ARM::MVE_VCVTu16f16_fix : ARM::MVE_VCVTs16f16_fix;
3290 break;
3291 case 32:
3292 Opcode = IsUnsigned ? ARM::MVE_VCVTu32f32_fix : ARM::MVE_VCVTs32f32_fix;
3293 break;
3294 }
3295 SmallVector<SDValue, 3> Ops{Node->getOperand(0),
3296 CurDAG->getConstant(1, dl, MVT::i32)};
3297 AddEmptyMVEPredicateToOps(Ops, dl, Type);
3298
3299 ReplaceNode(N, CurDAG->getMachineNode(Opcode, dl, Type, Ops));
24
1st function call argument is an uninitialized value
3300 return true;
3301 }
3302
3303 if (Node->getOpcode() != ISD::FMUL)
3304 return false;
3305
3306 return transformFixedFloatingPointConversion(N, Node, IsUnsigned, false);
3307}
3308
3309bool ARMDAGToDAGISel::tryFMULFixed(SDNode *N, SDLoc dl) {
3310 // Transform a fixed-point to floating-point conversion to a VCVT
3311 if (!Subtarget->hasMVEFloatOps())
3312 return false;
3313 auto Type = N->getValueType(0);
3314 if (!Type.isVector())
3315 return false;
3316
3317 auto LHS = N->getOperand(0);
3318 if (LHS.getOpcode() != ISD::SINT_TO_FP && LHS.getOpcode() != ISD::UINT_TO_FP)
3319 return false;
3320
3321 return transformFixedFloatingPointConversion(
3322 N, N, LHS.getOpcode() == ISD::UINT_TO_FP, true);
3323}
3324
3325bool ARMDAGToDAGISel::tryV6T2BitfieldExtractOp(SDNode *N, bool isSigned) {
3326 if (!Subtarget->hasV6T2Ops())
3327 return false;
3328
3329 unsigned Opc = isSigned
3330 ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
3331 : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);
3332 SDLoc dl(N);
3333
3334 // For unsigned extracts, check for a shift right and mask
3335 unsigned And_imm = 0;
3336 if (N->getOpcode() == ISD::AND) {
3337 if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {
3338
3339 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
3340 if (And_imm & (And_imm + 1))
3341 return false;
3342
3343 unsigned Srl_imm = 0;
3344 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
3345 Srl_imm)) {
3346 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!")(static_cast <bool> (Srl_imm > 0 && Srl_imm <
32 && "bad amount in shift node!") ? void (0) : __assert_fail
("Srl_imm > 0 && Srl_imm < 32 && \"bad amount in shift node!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 3346, __extension__ __PRETTY_FUNCTION__))
;
3347
3348 // Mask off the unnecessary bits of the AND immediate; normally
3349 // DAGCombine will do this, but that might not happen if
3350 // targetShrinkDemandedConstant chooses a different immediate.
3351 And_imm &= -1U >> Srl_imm;
3352
3353 // Note: The width operand is encoded as width-1.
3354 unsigned Width = countTrailingOnes(And_imm) - 1;
3355 unsigned LSB = Srl_imm;
3356
3357 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3358
3359 if ((LSB + Width + 1) == N->getValueType(0).getSizeInBits()) {
3360 // It's cheaper to use a right shift to extract the top bits.
3361 if (Subtarget->isThumb()) {
3362 Opc = isSigned ? ARM::t2ASRri : ARM::t2LSRri;
3363 SDValue Ops[] = { N->getOperand(0).getOperand(0),
3364 CurDAG->getTargetConstant(LSB, dl, MVT::i32),
3365 getAL(CurDAG, dl), Reg0, Reg0 };
3366 CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3367 return true;
3368 }
3369
3370 // ARM models shift instructions as MOVsi with shifter operand.
3371 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(ISD::SRL);
3372 SDValue ShOpc =
3373 CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, LSB), dl,
3374 MVT::i32);
3375 SDValue Ops[] = { N->getOperand(0).getOperand(0), ShOpc,
3376 getAL(CurDAG, dl), Reg0, Reg0 };
3377 CurDAG->SelectNodeTo(N, ARM::MOVsi, MVT::i32, Ops);
3378 return true;
3379 }
3380
3381 assert(LSB + Width + 1 <= 32 && "Shouldn't create an invalid ubfx")(static_cast <bool> (LSB + Width + 1 <= 32 &&
"Shouldn't create an invalid ubfx") ? void (0) : __assert_fail
("LSB + Width + 1 <= 32 && \"Shouldn't create an invalid ubfx\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 3381, __extension__ __PRETTY_FUNCTION__))
;
3382 SDValue Ops[] = { N->getOperand(0).getOperand(0),
3383 CurDAG->getTargetConstant(LSB, dl, MVT::i32),
3384 CurDAG->getTargetConstant(Width, dl, MVT::i32),
3385 getAL(CurDAG, dl), Reg0 };
3386 CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3387 return true;
3388 }
3389 }
3390 return false;
3391 }
3392
3393 // Otherwise, we're looking for a shift of a shift
3394 unsigned Shl_imm = 0;
3395 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
3396 assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!")(static_cast <bool> (Shl_imm > 0 && Shl_imm <
32 && "bad amount in shift node!") ? void (0) : __assert_fail
("Shl_imm > 0 && Shl_imm < 32 && \"bad amount in shift node!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 3396, __extension__ __PRETTY_FUNCTION__))
;
3397 unsigned Srl_imm = 0;
3398 if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
3399 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!")(static_cast <bool> (Srl_imm > 0 && Srl_imm <
32 && "bad amount in shift node!") ? void (0) : __assert_fail
("Srl_imm > 0 && Srl_imm < 32 && \"bad amount in shift node!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 3399, __extension__ __PRETTY_FUNCTION__))
;
3400 // Note: The width operand is encoded as width-1.
3401 unsigned Width = 32 - Srl_imm - 1;
3402 int LSB = Srl_imm - Shl_imm;
3403 if (LSB < 0)
3404 return false;
3405 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3406 assert(LSB + Width + 1 <= 32 && "Shouldn't create an invalid ubfx")(static_cast <bool> (LSB + Width + 1 <= 32 &&
"Shouldn't create an invalid ubfx") ? void (0) : __assert_fail
("LSB + Width + 1 <= 32 && \"Shouldn't create an invalid ubfx\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 3406, __extension__ __PRETTY_FUNCTION__))
;
3407 SDValue Ops[] = { N->getOperand(0).getOperand(0),
3408 CurDAG->getTargetConstant(LSB, dl, MVT::i32),
3409 CurDAG->getTargetConstant(Width, dl, MVT::i32),
3410 getAL(CurDAG, dl), Reg0 };
3411 CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3412 return true;
3413 }
3414 }
3415
3416 // Or we are looking for a shift of an and, with a mask operand
3417 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_imm) &&
3418 isShiftedMask_32(And_imm)) {
3419 unsigned Srl_imm = 0;
3420 unsigned LSB = countTrailingZeros(And_imm);
3421 // Shift must be the same as the ands lsb
3422 if (isInt32Immediate(N->getOperand(1), Srl_imm) && Srl_imm == LSB) {
3423 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!")(static_cast <bool> (Srl_imm > 0 && Srl_imm <
32 && "bad amount in shift node!") ? void (0) : __assert_fail
("Srl_imm > 0 && Srl_imm < 32 && \"bad amount in shift node!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 3423, __extension__ __PRETTY_FUNCTION__))
;
3424 unsigned MSB = 31 - countLeadingZeros(And_imm);
3425 // Note: The width operand is encoded as width-1.
3426 unsigned Width = MSB - LSB;
3427 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3428 assert(Srl_imm + Width + 1 <= 32 && "Shouldn't create an invalid ubfx")(static_cast <bool> (Srl_imm + Width + 1 <= 32 &&
"Shouldn't create an invalid ubfx") ? void (0) : __assert_fail
("Srl_imm + Width + 1 <= 32 && \"Shouldn't create an invalid ubfx\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 3428, __extension__ __PRETTY_FUNCTION__))
;
3429 SDValue Ops[] = { N->getOperand(0).getOperand(0),
3430 CurDAG->getTargetConstant(Srl_imm, dl, MVT::i32),
3431 CurDAG->getTargetConstant(Width, dl, MVT::i32),
3432 getAL(CurDAG, dl), Reg0 };
3433 CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3434 return true;
3435 }
3436 }
3437
3438 if (N->getOpcode() == ISD::SIGN_EXTEND_INREG) {
3439 unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
3440 unsigned LSB = 0;
3441 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL, LSB) &&
3442 !isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRA, LSB))
3443 return false;
3444
3445 if (LSB + Width > 32)
3446 return false;
3447
3448 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3449 assert(LSB + Width <= 32 && "Shouldn't create an invalid ubfx")(static_cast <bool> (LSB + Width <= 32 && "Shouldn't create an invalid ubfx"
) ? void (0) : __assert_fail ("LSB + Width <= 32 && \"Shouldn't create an invalid ubfx\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 3449, __extension__ __PRETTY_FUNCTION__))
;
3450 SDValue Ops[] = { N->getOperand(0).getOperand(0),
3451 CurDAG->getTargetConstant(LSB, dl, MVT::i32),
3452 CurDAG->getTargetConstant(Width - 1, dl, MVT::i32),
3453 getAL(CurDAG, dl), Reg0 };
3454 CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3455 return true;
3456 }
3457
3458 return false;
3459}
3460
3461/// Target-specific DAG combining for ISD::XOR.
3462/// Target-independent combining lowers SELECT_CC nodes of the form
3463/// select_cc setg[ge] X, 0, X, -X
3464/// select_cc setgt X, -1, X, -X
3465/// select_cc setl[te] X, 0, -X, X
3466/// select_cc setlt X, 1, -X, X
3467/// which represent Integer ABS into:
3468/// Y = sra (X, size(X)-1); xor (add (X, Y), Y)
3469/// ARM instruction selection detects the latter and matches it to
3470/// ARM::ABS or ARM::t2ABS machine node.
3471bool ARMDAGToDAGISel::tryABSOp(SDNode *N){
3472 SDValue XORSrc0 = N->getOperand(0);
3473 SDValue XORSrc1 = N->getOperand(1);
3474 EVT VT = N->getValueType(0);
3475
3476 if (Subtarget->isThumb1Only())
3477 return false;
3478
3479 if (XORSrc0.getOpcode() != ISD::ADD || XORSrc1.getOpcode() != ISD::SRA)
3480 return false;
3481
3482 SDValue ADDSrc0 = XORSrc0.getOperand(0);
3483 SDValue ADDSrc1 = XORSrc0.getOperand(1);
3484 SDValue SRASrc0 = XORSrc1.getOperand(0);
3485 SDValue SRASrc1 = XORSrc1.getOperand(1);
3486 ConstantSDNode *SRAConstant = dyn_cast<ConstantSDNode>(SRASrc1);
3487 EVT XType = SRASrc0.getValueType();
3488 unsigned Size = XType.getSizeInBits() - 1;
3489
3490 if (ADDSrc1 == XORSrc1 && ADDSrc0 == SRASrc0 &&
3491 XType.isInteger() && SRAConstant != nullptr &&
3492 Size == SRAConstant->getZExtValue()) {
3493 unsigned Opcode = Subtarget->isThumb2() ? ARM::t2ABS : ARM::ABS;
3494 CurDAG->SelectNodeTo(N, Opcode, VT, ADDSrc0);
3495 return true;
3496 }
3497
3498 return false;
3499}
3500
3501/// We've got special pseudo-instructions for these
3502void ARMDAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
3503 unsigned Opcode;
3504 EVT MemTy = cast<MemSDNode>(N)->getMemoryVT();
3505 if (MemTy == MVT::i8)
3506 Opcode = Subtarget->isThumb() ? ARM::tCMP_SWAP_8 : ARM::CMP_SWAP_8;
3507 else if (MemTy == MVT::i16)
3508 Opcode = Subtarget->isThumb() ? ARM::tCMP_SWAP_16 : ARM::CMP_SWAP_16;
3509 else if (MemTy == MVT::i32)
3510 Opcode = ARM::CMP_SWAP_32;
3511 else
3512 llvm_unreachable("Unknown AtomicCmpSwap type")::llvm::llvm_unreachable_internal("Unknown AtomicCmpSwap type"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 3512)
;
3513
3514 SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3),
3515 N->getOperand(0)};
3516 SDNode *CmpSwap = CurDAG->getMachineNode(
3517 Opcode, SDLoc(N),
3518 CurDAG->getVTList(MVT::i32, MVT::i32, MVT::Other), Ops);
3519
3520 MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
3521 CurDAG->setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});
3522
3523 ReplaceUses(SDValue(N, 0), SDValue(CmpSwap, 0));
3524 ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 2));
3525 CurDAG->RemoveDeadNode(N);
3526}
3527
3528static Optional<std::pair<unsigned, unsigned>>
3529getContiguousRangeOfSetBits(const APInt &A) {
3530 unsigned FirstOne = A.getBitWidth() - A.countLeadingZeros() - 1;
3531 unsigned LastOne = A.countTrailingZeros();
3532 if (A.countPopulation() != (FirstOne - LastOne + 1))
3533 return Optional<std::pair<unsigned,unsigned>>();
3534 return std::make_pair(FirstOne, LastOne);
3535}
3536
3537void ARMDAGToDAGISel::SelectCMPZ(SDNode *N, bool &SwitchEQNEToPLMI) {
3538 assert(N->getOpcode() == ARMISD::CMPZ)(static_cast <bool> (N->getOpcode() == ARMISD::CMPZ)
? void (0) : __assert_fail ("N->getOpcode() == ARMISD::CMPZ"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 3538, __extension__ __PRETTY_FUNCTION__))
;
3539 SwitchEQNEToPLMI = false;
3540
3541 if (!Subtarget->isThumb())
3542 // FIXME: Work out whether it is profitable to do this in A32 mode - LSL and
3543 // LSR don't exist as standalone instructions - they need the barrel shifter.
3544 return;
3545
3546 // select (cmpz (and X, C), #0) -> (LSLS X) or (LSRS X) or (LSRS (LSLS X))
3547 SDValue And = N->getOperand(0);
3548 if (!And->hasOneUse())
3549 return;
3550
3551 SDValue Zero = N->getOperand(1);
3552 if (!isa<ConstantSDNode>(Zero) || !cast<ConstantSDNode>(Zero)->isNullValue() ||
3553 And->getOpcode() != ISD::AND)
3554 return;
3555 SDValue X = And.getOperand(0);
3556 auto C = dyn_cast<ConstantSDNode>(And.getOperand(1));
3557
3558 if (!C)
3559 return;
3560 auto Range = getContiguousRangeOfSetBits(C->getAPIntValue());
3561 if (!Range)
3562 return;
3563
3564 // There are several ways to lower this:
3565 SDNode *NewN;
3566 SDLoc dl(N);
3567
3568 auto EmitShift = [&](unsigned Opc, SDValue Src, unsigned Imm) -> SDNode* {
3569 if (Subtarget->isThumb2()) {
3570 Opc = (Opc == ARM::tLSLri) ? ARM::t2LSLri : ARM::t2LSRri;
3571 SDValue Ops[] = { Src, CurDAG->getTargetConstant(Imm, dl, MVT::i32),
3572 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
3573 CurDAG->getRegister(0, MVT::i32) };
3574 return CurDAG->getMachineNode(Opc, dl, MVT::i32, Ops);
3575 } else {
3576 SDValue Ops[] = {CurDAG->getRegister(ARM::CPSR, MVT::i32), Src,
3577 CurDAG->getTargetConstant(Imm, dl, MVT::i32),
3578 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32)};
3579 return CurDAG->getMachineNode(Opc, dl, MVT::i32, Ops);
3580 }
3581 };
3582
3583 if (Range->second == 0) {
3584 // 1. Mask includes the LSB -> Simply shift the top N bits off
3585 NewN = EmitShift(ARM::tLSLri, X, 31 - Range->first);
3586 ReplaceNode(And.getNode(), NewN);
3587 } else if (Range->first == 31) {
3588 // 2. Mask includes the MSB -> Simply shift the bottom N bits off
3589 NewN = EmitShift(ARM::tLSRri, X, Range->second);
3590 ReplaceNode(And.getNode(), NewN);
3591 } else if (Range->first == Range->second) {
3592 // 3. Only one bit is set. We can shift this into the sign bit and use a
3593 // PL/MI comparison.
3594 NewN = EmitShift(ARM::tLSLri, X, 31 - Range->first);
3595 ReplaceNode(And.getNode(), NewN);
3596
3597 SwitchEQNEToPLMI = true;
3598 } else if (!Subtarget->hasV6T2Ops()) {
3599 // 4. Do a double shift to clear bottom and top bits, but only in
3600 // thumb-1 mode as in thumb-2 we can use UBFX.
3601 NewN = EmitShift(ARM::tLSLri, X, 31 - Range->first);
3602 NewN = EmitShift(ARM::tLSRri, SDValue(NewN, 0),
3603 Range->second + (31 - Range->first));
3604 ReplaceNode(And.getNode(), NewN);
3605 }
3606
3607}
3608
3609void ARMDAGToDAGISel::Select(SDNode *N) {
3610 SDLoc dl(N);
3611
3612 if (N->isMachineOpcode()) {
3613 N->setNodeId(-1);
3614 return; // Already selected.
3615 }
3616
3617 switch (N->getOpcode()) {
3618 default: break;
3619 case ISD::STORE: {
3620 // For Thumb1, match an sp-relative store in C++. This is a little
3621 // unfortunate, but I don't think I can make the chain check work
3622 // otherwise. (The chain of the store has to be the same as the chain
3623 // of the CopyFromReg, or else we can't replace the CopyFromReg with
3624 // a direct reference to "SP".)
3625 //
3626 // This is only necessary on Thumb1 because Thumb1 sp-relative stores use
3627 // a different addressing mode from other four-byte stores.
3628 //
3629 // This pattern usually comes up with call arguments.
3630 StoreSDNode *ST = cast<StoreSDNode>(N);
3631 SDValue Ptr = ST->getBasePtr();
3632 if (Subtarget->isThumb1Only() && ST->isUnindexed()) {
3633 int RHSC = 0;
3634 if (Ptr.getOpcode() == ISD::ADD &&
3635 isScaledConstantInRange(Ptr.getOperand(1), /*Scale=*/4, 0, 256, RHSC))
3636 Ptr = Ptr.getOperand(0);
3637
3638 if (Ptr.getOpcode() == ISD::CopyFromReg &&
3639 cast<RegisterSDNode>(Ptr.getOperand(1))->getReg() == ARM::SP &&
3640 Ptr.getOperand(0) == ST->getChain()) {
3641 SDValue Ops[] = {ST->getValue(),
3642 CurDAG->getRegister(ARM::SP, MVT::i32),
3643 CurDAG->getTargetConstant(RHSC, dl, MVT::i32),
3644 getAL(CurDAG, dl),
3645 CurDAG->getRegister(0, MVT::i32),
3646 ST->getChain()};
3647 MachineSDNode *ResNode =
3648 CurDAG->getMachineNode(ARM::tSTRspi, dl, MVT::Other, Ops);
3649 MachineMemOperand *MemOp = ST->getMemOperand();
3650 CurDAG->setNodeMemRefs(cast<MachineSDNode>(ResNode), {MemOp});
3651 ReplaceNode(N, ResNode);
3652 return;
3653 }
3654 }
3655 break;
3656 }
3657 case ISD::WRITE_REGISTER:
3658 if (tryWriteRegister(N))
3659 return;
3660 break;
3661 case ISD::READ_REGISTER:
3662 if (tryReadRegister(N))
3663 return;
3664 break;
3665 case ISD::INLINEASM:
3666 case ISD::INLINEASM_BR:
3667 if (tryInlineAsm(N))
3668 return;
3669 break;
3670 case ISD::XOR:
3671 // Select special operations if XOR node forms integer ABS pattern
3672 if (tryABSOp(N))
3673 return;
3674 // Other cases are autogenerated.
3675 break;
3676 case ISD::Constant: {
3677 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
3678 // If we can't materialize the constant we need to use a literal pool
3679 if (ConstantMaterializationCost(Val, Subtarget) > 2) {
3680 SDValue CPIdx = CurDAG->getTargetConstantPool(
3681 ConstantInt::get(Type::getInt32Ty(*CurDAG->getContext()), Val),
3682 TLI->getPointerTy(CurDAG->getDataLayout()));
3683
3684 SDNode *ResNode;
3685 if (Subtarget->isThumb()) {
3686 SDValue Ops[] = {
3687 CPIdx,
3688 getAL(CurDAG, dl),
3689 CurDAG->getRegister(0, MVT::i32),
3690 CurDAG->getEntryNode()
3691 };
3692 ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
3693 Ops);
3694 } else {
3695 SDValue Ops[] = {
3696 CPIdx,
3697 CurDAG->getTargetConstant(0, dl, MVT::i32),
3698 getAL(CurDAG, dl),
3699 CurDAG->getRegister(0, MVT::i32),
3700 CurDAG->getEntryNode()
3701 };
3702 ResNode = CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
3703 Ops);
3704 }
3705 // Annotate the Node with memory operand information so that MachineInstr
3706 // queries work properly. This e.g. gives the register allocation the
3707 // required information for rematerialization.
3708 MachineFunction& MF = CurDAG->getMachineFunction();
3709 MachineMemOperand *MemOp =
3710 MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF),
3711 MachineMemOperand::MOLoad, 4, Align(4));
3712
3713 CurDAG->setNodeMemRefs(cast<MachineSDNode>(ResNode), {MemOp});
3714
3715 ReplaceNode(N, ResNode);
3716 return;
3717 }
3718
3719 // Other cases are autogenerated.
3720 break;
3721 }
3722 case ISD::FrameIndex: {
3723 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
3724 int FI = cast<FrameIndexSDNode>(N)->getIndex();
3725 SDValue TFI = CurDAG->getTargetFrameIndex(
3726 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
3727 if (Subtarget->isThumb1Only()) {
3728 // Set the alignment of the frame object to 4, to avoid having to generate
3729 // more than one ADD
3730 MachineFrameInfo &MFI = MF->getFrameInfo();
3731 if (MFI.getObjectAlign(FI) < Align(4))
3732 MFI.setObjectAlignment(FI, Align(4));
3733 CurDAG->SelectNodeTo(N, ARM::tADDframe, MVT::i32, TFI,
3734 CurDAG->getTargetConstant(0, dl, MVT::i32));
3735 return;
3736 } else {
3737 unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
3738 ARM::t2ADDri : ARM::ADDri);
3739 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, dl, MVT::i32),
3740 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
3741 CurDAG->getRegister(0, MVT::i32) };
3742 CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3743 return;
3744 }
3745 }
3746 case ISD::INSERT_VECTOR_ELT: {
3747 if (tryInsertVectorElt(N))
3748 return;
3749 break;
3750 }
3751 case ISD::SRL:
3752 if (tryV6T2BitfieldExtractOp(N, false))
3753 return;
3754 break;
3755 case ISD::SIGN_EXTEND_INREG:
3756 case ISD::SRA:
3757 if (tryV6T2BitfieldExtractOp(N, true))
3758 return;
3759 break;
3760 case ISD::FP_TO_UINT:
3761 case ISD::FP_TO_SINT:
3762 if (tryFP_TO_INT(N, dl))
3763 return;
3764 break;
3765 case ISD::FMUL:
3766 if (tryFMULFixed(N, dl))
3767 return;
3768 break;
3769 case ISD::MUL:
3770 if (Subtarget->isThumb1Only())
3771 break;
3772 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
3773 unsigned RHSV = C->getZExtValue();
3774 if (!RHSV) break;
3775 if (isPowerOf2_32(RHSV-1)) { // 2^n+1?
3776 unsigned ShImm = Log2_32(RHSV-1);
3777 if (ShImm >= 32)
3778 break;
3779 SDValue V = N->getOperand(0);
3780 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
3781 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, dl, MVT::i32);
3782 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3783 if (Subtarget->isThumb()) {
3784 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG, dl), Reg0, Reg0 };
3785 CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops);
3786 return;
3787 } else {
3788 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG, dl), Reg0,
3789 Reg0 };
3790 CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops);
3791 return;
3792 }
3793 }
3794 if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
3795 unsigned ShImm = Log2_32(RHSV+1);
3796 if (ShImm >= 32)
3797 break;
3798 SDValue V = N->getOperand(0);
3799 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
3800 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, dl, MVT::i32);
3801 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3802 if (Subtarget->isThumb()) {
3803 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG, dl), Reg0, Reg0 };
3804 CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops);
3805 return;
3806 } else {
3807 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG, dl), Reg0,
3808 Reg0 };
3809 CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops);
3810 return;
3811 }
3812 }
3813 }
3814 break;
3815 case ISD::AND: {
3816 // Check for unsigned bitfield extract
3817 if (tryV6T2BitfieldExtractOp(N, false))
3818 return;
3819
3820 // If an immediate is used in an AND node, it is possible that the immediate
3821 // can be more optimally materialized when negated. If this is the case we
3822 // can negate the immediate and use a BIC instead.
3823 auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
3824 if (N1C && N1C->hasOneUse() && Subtarget->isThumb()) {
3825 uint32_t Imm = (uint32_t) N1C->getZExtValue();
3826
3827 // In Thumb2 mode, an AND can take a 12-bit immediate. If this
3828 // immediate can be negated and fit in the immediate operand of
3829 // a t2BIC, don't do any manual transform here as this can be
3830 // handled by the generic ISel machinery.
3831 bool PreferImmediateEncoding =
3832 Subtarget->hasThumb2() && (is_t2_so_imm(Imm) || is_t2_so_imm_not(Imm));
3833 if (!PreferImmediateEncoding &&
3834 ConstantMaterializationCost(Imm, Subtarget) >
3835 ConstantMaterializationCost(~Imm, Subtarget)) {
3836 // The current immediate costs more to materialize than a negated
3837 // immediate, so negate the immediate and use a BIC.
3838 SDValue NewImm =
3839 CurDAG->getConstant(~N1C->getZExtValue(), dl, MVT::i32);
3840 // If the new constant didn't exist before, reposition it in the topological
3841 // ordering so it is just before N. Otherwise, don't touch its location.
3842 if (NewImm->getNodeId() == -1)
3843 CurDAG->RepositionNode(N->getIterator(), NewImm.getNode());
3844
3845 if (!Subtarget->hasThumb2()) {
3846 SDValue Ops[] = {CurDAG->getRegister(ARM::CPSR, MVT::i32),
3847 N->getOperand(0), NewImm, getAL(CurDAG, dl),
3848 CurDAG->getRegister(0, MVT::i32)};
3849 ReplaceNode(N, CurDAG->getMachineNode(ARM::tBIC, dl, MVT::i32, Ops));
3850 return;
3851 } else {
3852 SDValue Ops[] = {N->getOperand(0), NewImm, getAL(CurDAG, dl),
3853 CurDAG->getRegister(0, MVT::i32),
3854 CurDAG->getRegister(0, MVT::i32)};
3855 ReplaceNode(N,
3856 CurDAG->getMachineNode(ARM::t2BICrr, dl, MVT::i32, Ops));
3857 return;
3858 }
3859 }
3860 }
3861
3862 // (and (or x, c2), c1) and top 16-bits of c1 and c2 match, lower 16-bits
3863 // of c1 are 0xffff, and lower 16-bit of c2 are 0. That is, the top 16-bits
3864 // are entirely contributed by c2 and lower 16-bits are entirely contributed
3865 // by x. That's equal to (or (and x, 0xffff), (and c1, 0xffff0000)).
3866 // Select it to: "movt x, ((c1 & 0xffff) >> 16)
3867 EVT VT = N->getValueType(0);
3868 if (VT != MVT::i32)
3869 break;
3870 unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
3871 ? ARM::t2MOVTi16
3872 : (Subtarget->hasV6T2Ops() ? ARM::MOVTi16 : 0);
3873 if (!Opc)
3874 break;
3875 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
3876 N1C = dyn_cast<ConstantSDNode>(N1);
3877 if (!N1C)
3878 break;
3879 if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
3880 SDValue N2 = N0.getOperand(1);
3881 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
3882 if (!N2C)
3883 break;
3884 unsigned N1CVal = N1C->getZExtValue();
3885 unsigned N2CVal = N2C->getZExtValue();
3886 if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) &&
3887 (N1CVal & 0xffffU) == 0xffffU &&
3888 (N2CVal & 0xffffU) == 0x0U) {
3889 SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
3890 dl, MVT::i32);
3891 SDValue Ops[] = { N0.getOperand(0), Imm16,
3892 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32) };
3893 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
3894 return;
3895 }
3896 }
3897
3898 break;
3899 }
3900 case ARMISD::UMAAL: {
3901 unsigned Opc = Subtarget->isThumb() ? ARM::t2UMAAL : ARM::UMAAL;
3902 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
3903 N->getOperand(2), N->getOperand(3),
3904 getAL(CurDAG, dl),
3905 CurDAG->getRegister(0, MVT::i32) };
3906 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, MVT::i32, MVT::i32, Ops));
3907 return;
3908 }
3909 case ARMISD::UMLAL:{
3910 if (Subtarget->isThumb()) {
3911 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
3912 N->getOperand(3), getAL(CurDAG, dl),
3913 CurDAG->getRegister(0, MVT::i32)};
3914 ReplaceNode(
3915 N, CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops));
3916 return;
3917 }else{
3918 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
3919 N->getOperand(3), getAL(CurDAG, dl),
3920 CurDAG->getRegister(0, MVT::i32),
3921 CurDAG->getRegister(0, MVT::i32) };
3922 ReplaceNode(N, CurDAG->getMachineNode(
3923 Subtarget->hasV6Ops() ? ARM::UMLAL : ARM::UMLALv5, dl,
3924 MVT::i32, MVT::i32, Ops));
3925 return;
3926 }
3927 }
3928 case ARMISD::SMLAL:{
3929 if (Subtarget->isThumb()) {
3930 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
3931 N->getOperand(3), getAL(CurDAG, dl),
3932 CurDAG->getRegister(0, MVT::i32)};
3933 ReplaceNode(
3934 N, CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops));
3935 return;
3936 }else{
3937 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
3938 N->getOperand(3), getAL(CurDAG, dl),
3939 CurDAG->getRegister(0, MVT::i32),
3940 CurDAG->getRegister(0, MVT::i32) };
3941 ReplaceNode(N, CurDAG->getMachineNode(
3942 Subtarget->hasV6Ops() ? ARM::SMLAL : ARM::SMLALv5, dl,
3943 MVT::i32, MVT::i32, Ops));
3944 return;
3945 }
3946 }
3947 case ARMISD::SUBE: {
3948 if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
3949 break;
3950 // Look for a pattern to match SMMLS
3951 // (sube a, (smul_loHi a, b), (subc 0, (smul_LOhi(a, b))))
3952 if (N->getOperand(1).getOpcode() != ISD::SMUL_LOHI ||
3953 N->getOperand(2).getOpcode() != ARMISD::SUBC ||
3954 !SDValue(N, 1).use_empty())
3955 break;
3956
3957 if (Subtarget->isThumb())
3958 assert(Subtarget->hasThumb2() &&(static_cast <bool> (Subtarget->hasThumb2() &&
"This pattern should not be generated for Thumb") ? void (0)
: __assert_fail ("Subtarget->hasThumb2() && \"This pattern should not be generated for Thumb\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 3959, __extension__ __PRETTY_FUNCTION__))
3959 "This pattern should not be generated for Thumb")(static_cast <bool> (Subtarget->hasThumb2() &&
"This pattern should not be generated for Thumb") ? void (0)
: __assert_fail ("Subtarget->hasThumb2() && \"This pattern should not be generated for Thumb\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 3959, __extension__ __PRETTY_FUNCTION__))
;
3960
3961 SDValue SmulLoHi = N->getOperand(1);
3962 SDValue Subc = N->getOperand(2);
3963 auto *Zero = dyn_cast<ConstantSDNode>(Subc.getOperand(0));
3964
3965 if (!Zero || Zero->getZExtValue() != 0 ||
3966 Subc.getOperand(1) != SmulLoHi.getValue(0) ||
3967 N->getOperand(1) != SmulLoHi.getValue(1) ||
3968 N->getOperand(2) != Subc.getValue(1))
3969 break;
3970
3971 unsigned Opc = Subtarget->isThumb2() ? ARM::t2SMMLS : ARM::SMMLS;
3972 SDValue Ops[] = { SmulLoHi.getOperand(0), SmulLoHi.getOperand(1),
3973 N->getOperand(0), getAL(CurDAG, dl),
3974 CurDAG->getRegister(0, MVT::i32) };
3975 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, MVT::i32, Ops));
3976 return;
3977 }
3978 case ISD::LOAD: {
3979 if (Subtarget->hasMVEIntegerOps() && tryMVEIndexedLoad(N))
3980 return;
3981 if (Subtarget->isThumb() && Subtarget->hasThumb2()) {
3982 if (tryT2IndexedLoad(N))
3983 return;
3984 } else if (Subtarget->isThumb()) {
3985 if (tryT1IndexedLoad(N))
3986 return;
3987 } else if (tryARMIndexedLoad(N))
3988 return;
3989 // Other cases are autogenerated.
3990 break;
3991 }
3992 case ISD::MLOAD:
3993 if (Subtarget->hasMVEIntegerOps() && tryMVEIndexedLoad(N))
3994 return;
3995 // Other cases are autogenerated.
3996 break;
3997 case ARMISD::WLSSETUP: {
3998 SDNode *New = CurDAG->getMachineNode(ARM::t2WhileLoopSetup, dl, MVT::i32,
3999 N->getOperand(0));
4000 ReplaceUses(N, New);
4001 CurDAG->RemoveDeadNode(N);
4002 return;
4003 }
4004 case ARMISD::WLS: {
4005 SDNode *New = CurDAG->getMachineNode(ARM::t2WhileLoopStart, dl, MVT::Other,
4006 N->getOperand(1), N->getOperand(2),
4007 N->getOperand(0));
4008 ReplaceUses(N, New);
4009 CurDAG->RemoveDeadNode(N);
4010 return;
4011 }
4012 case ARMISD::LE: {
4013 SDValue Ops[] = { N->getOperand(1),
4014 N->getOperand(2),
4015 N->getOperand(0) };
4016 unsigned Opc = ARM::t2LoopEnd;
4017 SDNode *New = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
4018 ReplaceUses(N, New);
4019 CurDAG->RemoveDeadNode(N);
4020 return;
4021 }
4022 case ARMISD::LDRD: {
4023 if (Subtarget->isThumb2())
4024 break; // TableGen handles isel in this case.
4025 SDValue Base, RegOffset, ImmOffset;
4026 const SDValue &Chain = N->getOperand(0);
4027 const SDValue &Addr = N->getOperand(1);
4028 SelectAddrMode3(Addr, Base, RegOffset, ImmOffset);
4029 if (RegOffset != CurDAG->getRegister(0, MVT::i32)) {
4030 // The register-offset variant of LDRD mandates that the register
4031 // allocated to RegOffset is not reused in any of the remaining operands.
4032 // This restriction is currently not enforced. Therefore emitting this
4033 // variant is explicitly avoided.
4034 Base = Addr;
4035 RegOffset = CurDAG->getRegister(0, MVT::i32);
4036 }
4037 SDValue Ops[] = {Base, RegOffset, ImmOffset, Chain};
4038 SDNode *New = CurDAG->getMachineNode(ARM::LOADDUAL, dl,
4039 {MVT::Untyped, MVT::Other}, Ops);
4040 SDValue Lo = CurDAG->getTargetExtractSubreg(ARM::gsub_0, dl, MVT::i32,
4041 SDValue(New, 0));
4042 SDValue Hi = CurDAG->getTargetExtractSubreg(ARM::gsub_1, dl, MVT::i32,
4043 SDValue(New, 0));
4044 transferMemOperands(N, New);
4045 ReplaceUses(SDValue(N, 0), Lo);
4046 ReplaceUses(SDValue(N, 1), Hi);
4047 ReplaceUses(SDValue(N, 2), SDValue(New, 1));
4048 CurDAG->RemoveDeadNode(N);
4049 return;
4050 }
4051 case ARMISD::STRD: {
4052 if (Subtarget->isThumb2())
4053 break; // TableGen handles isel in this case.
4054 SDValue Base, RegOffset, ImmOffset;
4055 const SDValue &Chain = N->getOperand(0);
4056 const SDValue &Addr = N->getOperand(3);
4057 SelectAddrMode3(Addr, Base, RegOffset, ImmOffset);
4058 if (RegOffset != CurDAG->getRegister(0, MVT::i32)) {
4059 // The register-offset variant of STRD mandates that the register
4060 // allocated to RegOffset is not reused in any of the remaining operands.
4061 // This restriction is currently not enforced. Therefore emitting this
4062 // variant is explicitly avoided.
4063 Base = Addr;
4064 RegOffset = CurDAG->getRegister(0, MVT::i32);
4065 }
4066 SDNode *RegPair =
4067 createGPRPairNode(MVT::Untyped, N->getOperand(1), N->getOperand(2));
4068 SDValue Ops[] = {SDValue(RegPair, 0), Base, RegOffset, ImmOffset, Chain};
4069 SDNode *New = CurDAG->getMachineNode(ARM::STOREDUAL, dl, MVT::Other, Ops);
4070 transferMemOperands(N, New);
4071 ReplaceUses(SDValue(N, 0), SDValue(New, 0));
4072 CurDAG->RemoveDeadNode(N);
4073 return;
4074 }
4075 case ARMISD::LOOP_DEC: {
4076 SDValue Ops[] = { N->getOperand(1),
4077 N->getOperand(2),
4078 N->getOperand(0) };
4079 SDNode *Dec =
4080 CurDAG->getMachineNode(ARM::t2LoopDec, dl,
4081 CurDAG->getVTList(MVT::i32, MVT::Other), Ops);
4082 ReplaceUses(N, Dec);
4083 CurDAG->RemoveDeadNode(N);
4084 return;
4085 }
4086 case ARMISD::BRCOND: {
4087 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
4088 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
4089 // Pattern complexity = 6 cost = 1 size = 0
4090
4091 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
4092 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
4093 // Pattern complexity = 6 cost = 1 size = 0
4094
4095 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
4096 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
4097 // Pattern complexity = 6 cost = 1 size = 0
4098
4099 unsigned Opc = Subtarget->isThumb() ?
4100 ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
4101 SDValue Chain = N->getOperand(0);
4102 SDValue N1 = N->getOperand(1);
4103 SDValue N2 = N->getOperand(2);
4104 SDValue N3 = N->getOperand(3);
4105 SDValue InFlag = N->getOperand(4);
4106 assert(N1.getOpcode() == ISD::BasicBlock)(static_cast <bool> (N1.getOpcode() == ISD::BasicBlock)
? void (0) : __assert_fail ("N1.getOpcode() == ISD::BasicBlock"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 4106, __extension__ __PRETTY_FUNCTION__))
;
4107 assert(N2.getOpcode() == ISD::Constant)(static_cast <bool> (N2.getOpcode() == ISD::Constant) ?
void (0) : __assert_fail ("N2.getOpcode() == ISD::Constant",
"/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 4107, __extension__ __PRETTY_FUNCTION__))
;
4108 assert(N3.getOpcode() == ISD::Register)(static_cast <bool> (N3.getOpcode() == ISD::Register) ?
void (0) : __assert_fail ("N3.getOpcode() == ISD::Register",
"/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 4108, __extension__ __PRETTY_FUNCTION__))
;
4109
4110 unsigned CC = (unsigned) cast<ConstantSDNode>(N2)->getZExtValue();
4111
4112 if (InFlag.getOpcode() == ARMISD::CMPZ) {
4113 if (InFlag.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN) {
4114 SDValue Int = InFlag.getOperand(0);
4115 uint64_t ID = cast<ConstantSDNode>(Int->getOperand(1))->getZExtValue();
4116
4117 // Handle low-overhead loops.
4118 if (ID == Intrinsic::loop_decrement_reg) {
4119 SDValue Elements = Int.getOperand(2);
4120 SDValue Size = CurDAG->getTargetConstant(
4121 cast<ConstantSDNode>(Int.getOperand(3))->getZExtValue(), dl,
4122 MVT::i32);
4123
4124 SDValue Args[] = { Elements, Size, Int.getOperand(0) };
4125 SDNode *LoopDec =
4126 CurDAG->getMachineNode(ARM::t2LoopDec, dl,
4127 CurDAG->getVTList(MVT::i32, MVT::Other),
4128 Args);
4129 ReplaceUses(Int.getNode(), LoopDec);
4130
4131 SDValue EndArgs[] = { SDValue(LoopDec, 0), N1, Chain };
4132 SDNode *LoopEnd =
4133 CurDAG->getMachineNode(ARM::t2LoopEnd, dl, MVT::Other, EndArgs);
4134
4135 ReplaceUses(N, LoopEnd);
4136 CurDAG->RemoveDeadNode(N);
4137 CurDAG->RemoveDeadNode(InFlag.getNode());
4138 CurDAG->RemoveDeadNode(Int.getNode());
4139 return;
4140 }
4141 }
4142
4143 bool SwitchEQNEToPLMI;
4144 SelectCMPZ(InFlag.getNode(), SwitchEQNEToPLMI);
4145 InFlag = N->getOperand(4);
4146
4147 if (SwitchEQNEToPLMI) {
4148 switch ((ARMCC::CondCodes)CC) {
4149 default: llvm_unreachable("CMPZ must be either NE or EQ!")::llvm::llvm_unreachable_internal("CMPZ must be either NE or EQ!"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 4149)
;
4150 case ARMCC::NE:
4151 CC = (unsigned)ARMCC::MI;
4152 break;
4153 case ARMCC::EQ:
4154 CC = (unsigned)ARMCC::PL;
4155 break;
4156 }
4157 }
4158 }
4159
4160 SDValue Tmp2 = CurDAG->getTargetConstant(CC, dl, MVT::i32);
4161 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
4162 SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
4163 MVT::Glue, Ops);
4164 Chain = SDValue(ResNode, 0);
4165 if (N->getNumValues() == 2) {
4166 InFlag = SDValue(ResNode, 1);
4167 ReplaceUses(SDValue(N, 1), InFlag);
4168 }
4169 ReplaceUses(SDValue(N, 0),
4170 SDValue(Chain.getNode(), Chain.getResNo()));
4171 CurDAG->RemoveDeadNode(N);
4172 return;
4173 }
4174
4175 case ARMISD::CMPZ: {
4176 // select (CMPZ X, #-C) -> (CMPZ (ADDS X, #C), #0)
4177 // This allows us to avoid materializing the expensive negative constant.
4178 // The CMPZ #0 is useless and will be peepholed away but we need to keep it
4179 // for its glue output.
4180 SDValue X = N->getOperand(0);
4181 auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1).getNode());
4182 if (C && C->getSExtValue() < 0 && Subtarget->isThumb()) {
4183 int64_t Addend = -C->getSExtValue();
4184
4185 SDNode *Add = nullptr;
4186 // ADDS can be better than CMN if the immediate fits in a
4187 // 16-bit ADDS, which means either [0,256) for tADDi8 or [0,8) for tADDi3.
4188 // Outside that range we can just use a CMN which is 32-bit but has a
4189 // 12-bit immediate range.
4190 if (Addend < 1<<8) {
4191 if (Subtarget->isThumb2()) {
4192 SDValue Ops[] = { X, CurDAG->getTargetConstant(Addend, dl, MVT::i32),
4193 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
4194 CurDAG->getRegister(0, MVT::i32) };
4195 Add = CurDAG->getMachineNode(ARM::t2ADDri, dl, MVT::i32, Ops);
4196 } else {
4197 unsigned Opc = (Addend < 1<<3) ? ARM::tADDi3 : ARM::tADDi8;
4198 SDValue Ops[] = {CurDAG->getRegister(ARM::CPSR, MVT::i32), X,
4199 CurDAG->getTargetConstant(Addend, dl, MVT::i32),
4200 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32)};
4201 Add = CurDAG->getMachineNode(Opc, dl, MVT::i32, Ops);
4202 }
4203 }
4204 if (Add) {
4205 SDValue Ops2[] = {SDValue(Add, 0), CurDAG->getConstant(0, dl, MVT::i32)};
4206 CurDAG->MorphNodeTo(N, ARMISD::CMPZ, CurDAG->getVTList(MVT::Glue), Ops2);
4207 }
4208 }
4209 // Other cases are autogenerated.
4210 break;
4211 }
4212
4213 case ARMISD::CMOV: {
4214 SDValue InFlag = N->getOperand(4);
4215
4216 if (InFlag.getOpcode() == ARMISD::CMPZ) {
4217 bool SwitchEQNEToPLMI;
4218 SelectCMPZ(InFlag.getNode(), SwitchEQNEToPLMI);
4219
4220 if (SwitchEQNEToPLMI) {
4221 SDValue ARMcc = N->getOperand(2);
4222 ARMCC::CondCodes CC =
4223 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
4224
4225 switch (CC) {
4226 default: llvm_unreachable("CMPZ must be either NE or EQ!")::llvm::llvm_unreachable_internal("CMPZ must be either NE or EQ!"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 4226)
;
4227 case ARMCC::NE:
4228 CC = ARMCC::MI;
4229 break;
4230 case ARMCC::EQ:
4231 CC = ARMCC::PL;
4232 break;
4233 }
4234 SDValue NewARMcc = CurDAG->getConstant((unsigned)CC, dl, MVT::i32);
4235 SDValue Ops[] = {N->getOperand(0), N->getOperand(1), NewARMcc,
4236 N->getOperand(3), N->getOperand(4)};
4237 CurDAG->MorphNodeTo(N, ARMISD::CMOV, N->getVTList(), Ops);
4238 }
4239
4240 }
4241 // Other cases are autogenerated.
4242 break;
4243 }
4244
4245 case ARMISD::VZIP: {
4246 unsigned Opc = 0;
4247 EVT VT = N->getValueType(0);
4248 switch (VT.getSimpleVT().SimpleTy) {
4249 default: return;
4250 case MVT::v8i8: Opc = ARM::VZIPd8; break;
4251 case MVT::v4f16:
4252 case MVT::v4i16: Opc = ARM::VZIPd16; break;
4253 case MVT::v2f32:
4254 // vzip.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
4255 case MVT::v2i32: Opc = ARM::VTRNd32; break;
4256 case MVT::v16i8: Opc = ARM::VZIPq8; break;
4257 case MVT::v8f16:
4258 case MVT::v8i16: Opc = ARM::VZIPq16; break;
4259 case MVT::v4f32:
4260 case MVT::v4i32: Opc = ARM::VZIPq32; break;
4261 }
4262 SDValue Pred = getAL(CurDAG, dl);
4263 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
4264 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
4265 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, VT, Ops));
4266 return;
4267 }
4268 case ARMISD::VUZP: {
4269 unsigned Opc = 0;
4270 EVT VT = N->getValueType(0);
4271 switch (VT.getSimpleVT().SimpleTy) {
4272 default: return;
4273 case MVT::v8i8: Opc = ARM::VUZPd8; break;
4274 case MVT::v4f16:
4275 case MVT::v4i16: Opc = ARM::VUZPd16; break;
4276 case MVT::v2f32:
4277 // vuzp.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
4278 case MVT::v2i32: Opc = ARM::VTRNd32; break;
4279 case MVT::v16i8: Opc = ARM::VUZPq8; break;
4280 case MVT::v8f16:
4281 case MVT::v8i16: Opc = ARM::VUZPq16; break;
4282 case MVT::v4f32:
4283 case MVT::v4i32: Opc = ARM::VUZPq32; break;
4284 }
4285 SDValue Pred = getAL(CurDAG, dl);
4286 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
4287 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
4288 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, VT, Ops));
4289 return;
4290 }
4291 case ARMISD::VTRN: {
4292 unsigned Opc = 0;
4293 EVT VT = N->getValueType(0);
4294 switch (VT.getSimpleVT().SimpleTy) {
4295 default: return;
4296 case MVT::v8i8: Opc = ARM::VTRNd8; break;
4297 case MVT::v4f16:
4298 case MVT::v4i16: Opc = ARM::VTRNd16; break;
4299 case MVT::v2f32:
4300 case MVT::v2i32: Opc = ARM::VTRNd32; break;
4301 case MVT::v16i8: Opc = ARM::VTRNq8; break;
4302 case MVT::v8f16:
4303 case MVT::v8i16: Opc = ARM::VTRNq16; break;
4304 case MVT::v4f32:
4305 case MVT::v4i32: Opc = ARM::VTRNq32; break;
4306 }
4307 SDValue Pred = getAL(CurDAG, dl);
4308 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
4309 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
4310 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, VT, Ops));
4311 return;
4312 }
4313 case ARMISD::BUILD_VECTOR: {
4314 EVT VecVT = N->getValueType(0);
4315 EVT EltVT = VecVT.getVectorElementType();
4316 unsigned NumElts = VecVT.getVectorNumElements();
4317 if (EltVT == MVT::f64) {
4318 assert(NumElts == 2 && "unexpected type for BUILD_VECTOR")(static_cast <bool> (NumElts == 2 && "unexpected type for BUILD_VECTOR"
) ? void (0) : __assert_fail ("NumElts == 2 && \"unexpected type for BUILD_VECTOR\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 4318, __extension__ __PRETTY_FUNCTION__))
;
4319 ReplaceNode(
4320 N, createDRegPairNode(VecVT, N->getOperand(0), N->getOperand(1)));
4321 return;
4322 }
4323 assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR")(static_cast <bool> (EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR"
) ? void (0) : __assert_fail ("EltVT == MVT::f32 && \"unexpected type for BUILD_VECTOR\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 4323, __extension__ __PRETTY_FUNCTION__))
;
4324 if (NumElts == 2) {
4325 ReplaceNode(
4326 N, createSRegPairNode(VecVT, N->getOperand(0), N->getOperand(1)));
4327 return;
4328 }
4329 assert(NumElts == 4 && "unexpected type for BUILD_VECTOR")(static_cast <bool> (NumElts == 4 && "unexpected type for BUILD_VECTOR"
) ? void (0) : __assert_fail ("NumElts == 4 && \"unexpected type for BUILD_VECTOR\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 4329, __extension__ __PRETTY_FUNCTION__))
;
4330 ReplaceNode(N,
4331 createQuadSRegsNode(VecVT, N->getOperand(0), N->getOperand(1),
4332 N->getOperand(2), N->getOperand(3)));
4333 return;
4334 }
4335
4336 case ARMISD::VLD1DUP: {
4337 static const uint16_t DOpcodes[] = { ARM::VLD1DUPd8, ARM::VLD1DUPd16,
4338 ARM::VLD1DUPd32 };
4339 static const uint16_t QOpcodes[] = { ARM::VLD1DUPq8, ARM::VLD1DUPq16,
4340 ARM::VLD1DUPq32 };
4341 SelectVLDDup(N, /* IsIntrinsic= */ false, false, 1, DOpcodes, QOpcodes);
4342 return;
4343 }
4344
4345 case ARMISD::VLD2DUP: {
4346 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
4347 ARM::VLD2DUPd32 };
4348 SelectVLDDup(N, /* IsIntrinsic= */ false, false, 2, Opcodes);
4349 return;
4350 }
4351
4352 case ARMISD::VLD3DUP: {
4353 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo,
4354 ARM::VLD3DUPd16Pseudo,
4355 ARM::VLD3DUPd32Pseudo };
4356 SelectVLDDup(N, /* IsIntrinsic= */ false, false, 3, Opcodes);
4357 return;
4358 }
4359
4360 case ARMISD::VLD4DUP: {
4361 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo,
4362 ARM::VLD4DUPd16Pseudo,
4363 ARM::VLD4DUPd32Pseudo };
4364 SelectVLDDup(N, /* IsIntrinsic= */ false, false, 4, Opcodes);
4365 return;
4366 }
4367
4368 case ARMISD::VLD1DUP_UPD: {
4369 static const uint16_t DOpcodes[] = { ARM::VLD1DUPd8wb_fixed,
4370 ARM::VLD1DUPd16wb_fixed,
4371 ARM::VLD1DUPd32wb_fixed };
4372 static const uint16_t QOpcodes[] = { ARM::VLD1DUPq8wb_fixed,
4373 ARM::VLD1DUPq16wb_fixed,
4374 ARM::VLD1DUPq32wb_fixed };
4375 SelectVLDDup(N, /* IsIntrinsic= */ false, true, 1, DOpcodes, QOpcodes);
4376 return;
4377 }
4378
4379 case ARMISD::VLD2DUP_UPD: {
4380 static const uint16_t DOpcodes[] = { ARM::VLD2DUPd8wb_fixed,
4381 ARM::VLD2DUPd16wb_fixed,
4382 ARM::VLD2DUPd32wb_fixed,
4383 ARM::VLD1q64wb_fixed };
4384 static const uint16_t QOpcodes0[] = { ARM::VLD2DUPq8EvenPseudo,
4385 ARM::VLD2DUPq16EvenPseudo,
4386 ARM::VLD2DUPq32EvenPseudo };
4387 static const uint16_t QOpcodes1[] = { ARM::VLD2DUPq8OddPseudoWB_fixed,
4388 ARM::VLD2DUPq16OddPseudoWB_fixed,
4389 ARM::VLD2DUPq32OddPseudoWB_fixed };
4390 SelectVLDDup(N, /* IsIntrinsic= */ false, true, 2, DOpcodes, QOpcodes0, QOpcodes1);
4391 return;
4392 }
4393
4394 case ARMISD::VLD3DUP_UPD: {
4395 static const uint16_t DOpcodes[] = { ARM::VLD3DUPd8Pseudo_UPD,
4396 ARM::VLD3DUPd16Pseudo_UPD,
4397 ARM::VLD3DUPd32Pseudo_UPD,
4398 ARM::VLD1d64TPseudoWB_fixed };
4399 static const uint16_t QOpcodes0[] = { ARM::VLD3DUPq8EvenPseudo,
4400 ARM::VLD3DUPq16EvenPseudo,
4401 ARM::VLD3DUPq32EvenPseudo };
4402 static const uint16_t QOpcodes1[] = { ARM::VLD3DUPq8OddPseudo_UPD,
4403 ARM::VLD3DUPq16OddPseudo_UPD,
4404 ARM::VLD3DUPq32OddPseudo_UPD };
4405 SelectVLDDup(N, /* IsIntrinsic= */ false, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
4406 return;
4407 }
4408
4409 case ARMISD::VLD4DUP_UPD: {
4410 static const uint16_t DOpcodes[] = { ARM::VLD4DUPd8Pseudo_UPD,
4411 ARM::VLD4DUPd16Pseudo_UPD,
4412 ARM::VLD4DUPd32Pseudo_UPD,
4413 ARM::VLD1d64QPseudoWB_fixed };
4414 static const uint16_t QOpcodes0[] = { ARM::VLD4DUPq8EvenPseudo,
4415 ARM::VLD4DUPq16EvenPseudo,
4416 ARM::VLD4DUPq32EvenPseudo };
4417 static const uint16_t QOpcodes1[] = { ARM::VLD4DUPq8OddPseudo_UPD,
4418 ARM::VLD4DUPq16OddPseudo_UPD,
4419 ARM::VLD4DUPq32OddPseudo_UPD };
4420 SelectVLDDup(N, /* IsIntrinsic= */ false, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
4421 return;
4422 }
4423
4424 case ARMISD::VLD1_UPD: {
4425 static const uint16_t DOpcodes[] = { ARM::VLD1d8wb_fixed,
4426 ARM::VLD1d16wb_fixed,
4427 ARM::VLD1d32wb_fixed,
4428 ARM::VLD1d64wb_fixed };
4429 static const uint16_t QOpcodes[] = { ARM::VLD1q8wb_fixed,
4430 ARM::VLD1q16wb_fixed,
4431 ARM::VLD1q32wb_fixed,
4432 ARM::VLD1q64wb_fixed };
4433 SelectVLD(N, true, 1, DOpcodes, QOpcodes, nullptr);
4434 return;
4435 }
4436
4437 case ARMISD::VLD2_UPD: {
4438 if (Subtarget->hasNEON()) {
4439 static const uint16_t DOpcodes[] = {
4440 ARM::VLD2d8wb_fixed, ARM::VLD2d16wb_fixed, ARM::VLD2d32wb_fixed,
4441 ARM::VLD1q64wb_fixed};
4442 static const uint16_t QOpcodes[] = {ARM::VLD2q8PseudoWB_fixed,
4443 ARM::VLD2q16PseudoWB_fixed,
4444 ARM::VLD2q32PseudoWB_fixed};
4445 SelectVLD(N, true, 2, DOpcodes, QOpcodes, nullptr);
4446 } else {
4447 static const uint16_t Opcodes8[] = {ARM::MVE_VLD20_8,
4448 ARM::MVE_VLD21_8_wb};
4449 static const uint16_t Opcodes16[] = {ARM::MVE_VLD20_16,
4450 ARM::MVE_VLD21_16_wb};
4451 static const uint16_t Opcodes32[] = {ARM::MVE_VLD20_32,
4452 ARM::MVE_VLD21_32_wb};
4453 static const uint16_t *const Opcodes[] = {Opcodes8, Opcodes16, Opcodes32};
4454 SelectMVE_VLD(N, 2, Opcodes, true);
4455 }
4456 return;
4457 }
4458
4459 case ARMISD::VLD3_UPD: {
4460 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo_UPD,
4461 ARM::VLD3d16Pseudo_UPD,
4462 ARM::VLD3d32Pseudo_UPD,
4463 ARM::VLD1d64TPseudoWB_fixed};
4464 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
4465 ARM::VLD3q16Pseudo_UPD,
4466 ARM::VLD3q32Pseudo_UPD };
4467 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
4468 ARM::VLD3q16oddPseudo_UPD,
4469 ARM::VLD3q32oddPseudo_UPD };
4470 SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
4471 return;
4472 }
4473
4474 case ARMISD::VLD4_UPD: {
4475 if (Subtarget->hasNEON()) {
4476 static const uint16_t DOpcodes[] = {
4477 ARM::VLD4d8Pseudo_UPD, ARM::VLD4d16Pseudo_UPD, ARM::VLD4d32Pseudo_UPD,
4478 ARM::VLD1d64QPseudoWB_fixed};
4479 static const uint16_t QOpcodes0[] = {ARM::VLD4q8Pseudo_UPD,
4480 ARM::VLD4q16Pseudo_UPD,
4481 ARM::VLD4q32Pseudo_UPD};
4482 static const uint16_t QOpcodes1[] = {ARM::VLD4q8oddPseudo_UPD,
4483 ARM::VLD4q16oddPseudo_UPD,
4484 ARM::VLD4q32oddPseudo_UPD};
4485 SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
4486 } else {
4487 static const uint16_t Opcodes8[] = {ARM::MVE_VLD40_8, ARM::MVE_VLD41_8,
4488 ARM::MVE_VLD42_8,
4489 ARM::MVE_VLD43_8_wb};
4490 static const uint16_t Opcodes16[] = {ARM::MVE_VLD40_16, ARM::MVE_VLD41_16,
4491 ARM::MVE_VLD42_16,
4492 ARM::MVE_VLD43_16_wb};
4493 static const uint16_t Opcodes32[] = {ARM::MVE_VLD40_32, ARM::MVE_VLD41_32,
4494 ARM::MVE_VLD42_32,
4495 ARM::MVE_VLD43_32_wb};
4496 static const uint16_t *const Opcodes[] = {Opcodes8, Opcodes16, Opcodes32};
4497 SelectMVE_VLD(N, 4, Opcodes, true);
4498 }
4499 return;
4500 }
4501
4502 case ARMISD::VLD1x2_UPD: {
4503 if (Subtarget->hasNEON()) {
4504 static const uint16_t DOpcodes[] = {
4505 ARM::VLD1q8wb_fixed, ARM::VLD1q16wb_fixed, ARM::VLD1q32wb_fixed,
4506 ARM::VLD1q64wb_fixed};
4507 static const uint16_t QOpcodes[] = {
4508 ARM::VLD1d8QPseudoWB_fixed, ARM::VLD1d16QPseudoWB_fixed,
4509 ARM::VLD1d32QPseudoWB_fixed, ARM::VLD1d64QPseudoWB_fixed};
4510 SelectVLD(N, true, 2, DOpcodes, QOpcodes, nullptr);
4511 return;
4512 }
4513 break;
4514 }
4515
4516 case ARMISD::VLD1x3_UPD: {
4517 if (Subtarget->hasNEON()) {
4518 static const uint16_t DOpcodes[] = {
4519 ARM::VLD1d8TPseudoWB_fixed, ARM::VLD1d16TPseudoWB_fixed,
4520 ARM::VLD1d32TPseudoWB_fixed, ARM::VLD1d64TPseudoWB_fixed};
4521 static const uint16_t QOpcodes0[] = {
4522 ARM::VLD1q8LowTPseudo_UPD, ARM::VLD1q16LowTPseudo_UPD,
4523 ARM::VLD1q32LowTPseudo_UPD, ARM::VLD1q64LowTPseudo_UPD};
4524 static const uint16_t QOpcodes1[] = {
4525 ARM::VLD1q8HighTPseudo_UPD, ARM::VLD1q16HighTPseudo_UPD,
4526 ARM::VLD1q32HighTPseudo_UPD, ARM::VLD1q64HighTPseudo_UPD};
4527 SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
4528 return;
4529 }
4530 break;
4531 }
4532
4533 case ARMISD::VLD1x4_UPD: {
4534 if (Subtarget->hasNEON()) {
4535 static const uint16_t DOpcodes[] = {
4536 ARM::VLD1d8QPseudoWB_fixed, ARM::VLD1d16QPseudoWB_fixed,
4537 ARM::VLD1d32QPseudoWB_fixed, ARM::VLD1d64QPseudoWB_fixed};
4538 static const uint16_t QOpcodes0[] = {
4539 ARM::VLD1q8LowQPseudo_UPD, ARM::VLD1q16LowQPseudo_UPD,
4540 ARM::VLD1q32LowQPseudo_UPD, ARM::VLD1q64LowQPseudo_UPD};
4541 static const uint16_t QOpcodes1[] = {
4542 ARM::VLD1q8HighQPseudo_UPD, ARM::VLD1q16HighQPseudo_UPD,
4543 ARM::VLD1q32HighQPseudo_UPD, ARM::VLD1q64HighQPseudo_UPD};
4544 SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
4545 return;
4546 }
4547 break;
4548 }
4549
4550 case ARMISD::VLD2LN_UPD: {
4551 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD,
4552 ARM::VLD2LNd16Pseudo_UPD,
4553 ARM::VLD2LNd32Pseudo_UPD };
4554 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
4555 ARM::VLD2LNq32Pseudo_UPD };
4556 SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
4557 return;
4558 }
4559
4560 case ARMISD::VLD3LN_UPD: {
4561 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD,
4562 ARM::VLD3LNd16Pseudo_UPD,
4563 ARM::VLD3LNd32Pseudo_UPD };
4564 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
4565 ARM::VLD3LNq32Pseudo_UPD };
4566 SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
4567 return;
4568 }
4569
4570 case ARMISD::VLD4LN_UPD: {
4571 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD,
4572 ARM::VLD4LNd16Pseudo_UPD,
4573 ARM::VLD4LNd32Pseudo_UPD };
4574 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
4575 ARM::VLD4LNq32Pseudo_UPD };
4576 SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
4577 return;
4578 }
4579
4580 case ARMISD::VST1_UPD: {
4581 static const uint16_t DOpcodes[] = { ARM::VST1d8wb_fixed,
4582 ARM::VST1d16wb_fixed,
4583 ARM::VST1d32wb_fixed,
4584 ARM::VST1d64wb_fixed };
4585 static const uint16_t QOpcodes[] = { ARM::VST1q8wb_fixed,
4586 ARM::VST1q16wb_fixed,
4587 ARM::VST1q32wb_fixed,
4588 ARM::VST1q64wb_fixed };
4589 SelectVST(N, true, 1, DOpcodes, QOpcodes, nullptr);
4590 return;
4591 }
4592
4593 case ARMISD::VST2_UPD: {
4594 if (Subtarget->hasNEON()) {
4595 static const uint16_t DOpcodes[] = {
4596 ARM::VST2d8wb_fixed, ARM::VST2d16wb_fixed, ARM::VST2d32wb_fixed,
4597 ARM::VST1q64wb_fixed};
4598 static const uint16_t QOpcodes[] = {ARM::VST2q8PseudoWB_fixed,
4599 ARM::VST2q16PseudoWB_fixed,
4600 ARM::VST2q32PseudoWB_fixed};
4601 SelectVST(N, true, 2, DOpcodes, QOpcodes, nullptr);
4602 return;
4603 }
4604 break;
4605 }
4606
4607 case ARMISD::VST3_UPD: {
4608 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo_UPD,
4609 ARM::VST3d16Pseudo_UPD,
4610 ARM::VST3d32Pseudo_UPD,
4611 ARM::VST1d64TPseudoWB_fixed};
4612 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
4613 ARM::VST3q16Pseudo_UPD,
4614 ARM::VST3q32Pseudo_UPD };
4615 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
4616 ARM::VST3q16oddPseudo_UPD,
4617 ARM::VST3q32oddPseudo_UPD };
4618 SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
4619 return;
4620 }
4621
4622 case ARMISD::VST4_UPD: {
4623 if (Subtarget->hasNEON()) {
4624 static const uint16_t DOpcodes[] = {
4625 ARM::VST4d8Pseudo_UPD, ARM::VST4d16Pseudo_UPD, ARM::VST4d32Pseudo_UPD,
4626 ARM::VST1d64QPseudoWB_fixed};
4627 static const uint16_t QOpcodes0[] = {ARM::VST4q8Pseudo_UPD,
4628 ARM::VST4q16Pseudo_UPD,
4629 ARM::VST4q32Pseudo_UPD};
4630 static const uint16_t QOpcodes1[] = {ARM::VST4q8oddPseudo_UPD,
4631 ARM::VST4q16oddPseudo_UPD,
4632 ARM::VST4q32oddPseudo_UPD};
4633 SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
4634 return;
4635 }
4636 break;
4637 }
4638
4639 case ARMISD::VST1x2_UPD: {
4640 if (Subtarget->hasNEON()) {
4641 static const uint16_t DOpcodes[] = { ARM::VST1q8wb_fixed,
4642 ARM::VST1q16wb_fixed,
4643 ARM::VST1q32wb_fixed,
4644 ARM::VST1q64wb_fixed};
4645 static const uint16_t QOpcodes[] = { ARM::VST1d8QPseudoWB_fixed,
4646 ARM::VST1d16QPseudoWB_fixed,
4647 ARM::VST1d32QPseudoWB_fixed,
4648 ARM::VST1d64QPseudoWB_fixed };
4649 SelectVST(N, true, 2, DOpcodes, QOpcodes, nullptr);
4650 return;
4651 }
4652 break;
4653 }
4654
4655 case ARMISD::VST1x3_UPD: {
4656 if (Subtarget->hasNEON()) {
4657 static const uint16_t DOpcodes[] = { ARM::VST1d8TPseudoWB_fixed,
4658 ARM::VST1d16TPseudoWB_fixed,
4659 ARM::VST1d32TPseudoWB_fixed,
4660 ARM::VST1d64TPseudoWB_fixed };
4661 static const uint16_t QOpcodes0[] = { ARM::VST1q8LowTPseudo_UPD,
4662 ARM::VST1q16LowTPseudo_UPD,
4663 ARM::VST1q32LowTPseudo_UPD,
4664 ARM::VST1q64LowTPseudo_UPD };
4665 static const uint16_t QOpcodes1[] = { ARM::VST1q8HighTPseudo_UPD,
4666 ARM::VST1q16HighTPseudo_UPD,
4667 ARM::VST1q32HighTPseudo_UPD,
4668 ARM::VST1q64HighTPseudo_UPD };
4669 SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
4670 return;
4671 }
4672 break;
4673 }
4674
4675 case ARMISD::VST1x4_UPD: {
4676 if (Subtarget->hasNEON()) {
4677 static const uint16_t DOpcodes[] = { ARM::VST1d8QPseudoWB_fixed,
4678 ARM::VST1d16QPseudoWB_fixed,
4679 ARM::VST1d32QPseudoWB_fixed,
4680 ARM::VST1d64QPseudoWB_fixed };
4681 static const uint16_t QOpcodes0[] = { ARM::VST1q8LowQPseudo_UPD,
4682 ARM::VST1q16LowQPseudo_UPD,
4683 ARM::VST1q32LowQPseudo_UPD,
4684 ARM::VST1q64LowQPseudo_UPD };
4685 static const uint16_t QOpcodes1[] = { ARM::VST1q8HighQPseudo_UPD,
4686 ARM::VST1q16HighQPseudo_UPD,
4687 ARM::VST1q32HighQPseudo_UPD,
4688 ARM::VST1q64HighQPseudo_UPD };
4689 SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
4690 return;
4691 }
4692 break;
4693 }
4694 case ARMISD::VST2LN_UPD: {
4695 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD,
4696 ARM::VST2LNd16Pseudo_UPD,
4697 ARM::VST2LNd32Pseudo_UPD };
4698 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
4699 ARM::VST2LNq32Pseudo_UPD };
4700 SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
4701 return;
4702 }
4703
4704 case ARMISD::VST3LN_UPD: {
4705 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD,
4706 ARM::VST3LNd16Pseudo_UPD,
4707 ARM::VST3LNd32Pseudo_UPD };
4708 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
4709 ARM::VST3LNq32Pseudo_UPD };
4710 SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
4711 return;
4712 }
4713
4714 case ARMISD::VST4LN_UPD: {
4715 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD,
4716 ARM::VST4LNd16Pseudo_UPD,
4717 ARM::VST4LNd32Pseudo_UPD };
4718 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
4719 ARM::VST4LNq32Pseudo_UPD };
4720 SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
4721 return;
4722 }
4723
4724 case ISD::INTRINSIC_VOID:
4725 case ISD::INTRINSIC_W_CHAIN: {
4726 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
4727 switch (IntNo) {
4728 default:
4729 break;
4730
4731 case Intrinsic::arm_mrrc:
4732 case Intrinsic::arm_mrrc2: {
4733 SDLoc dl(N);
4734 SDValue Chain = N->getOperand(0);
4735 unsigned Opc;
4736
4737 if (Subtarget->isThumb())
4738 Opc = (IntNo == Intrinsic::arm_mrrc ? ARM::t2MRRC : ARM::t2MRRC2);
4739 else
4740 Opc = (IntNo == Intrinsic::arm_mrrc ? ARM::MRRC : ARM::MRRC2);
4741
4742 SmallVector<SDValue, 5> Ops;
4743 Ops.push_back(getI32Imm(cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(), dl)); /* coproc */
4744 Ops.push_back(getI32Imm(cast<ConstantSDNode>(N->getOperand(3))->getZExtValue(), dl)); /* opc */
4745 Ops.push_back(getI32Imm(cast<ConstantSDNode>(N->getOperand(4))->getZExtValue(), dl)); /* CRm */
4746
4747 // The mrrc2 instruction in ARM doesn't allow predicates, the top 4 bits of the encoded
4748 // instruction will always be '1111' but it is possible in assembly language to specify
4749 // AL as a predicate to mrrc2 but it doesn't make any difference to the encoded instruction.
4750 if (Opc != ARM::MRRC2) {
4751 Ops.push_back(getAL(CurDAG, dl));
4752 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
4753 }
4754
4755 Ops.push_back(Chain);
4756
4757 // Writes to two registers.
4758 const EVT RetType[] = {MVT::i32, MVT::i32, MVT::Other};
4759
4760 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, RetType, Ops));
4761 return;
4762 }
4763 case Intrinsic::arm_ldaexd:
4764 case Intrinsic::arm_ldrexd: {
4765 SDLoc dl(N);
4766 SDValue Chain = N->getOperand(0);
4767 SDValue MemAddr = N->getOperand(2);
4768 bool isThumb = Subtarget->isThumb() && Subtarget->hasV8MBaselineOps();
4769
4770 bool IsAcquire = IntNo == Intrinsic::arm_ldaexd;
4771 unsigned NewOpc = isThumb ? (IsAcquire ? ARM::t2LDAEXD : ARM::t2LDREXD)
4772 : (IsAcquire ? ARM::LDAEXD : ARM::LDREXD);
4773
4774 // arm_ldrexd returns a i64 value in {i32, i32}
4775 std::vector<EVT> ResTys;
4776 if (isThumb) {
4777 ResTys.push_back(MVT::i32);
4778 ResTys.push_back(MVT::i32);
4779 } else
4780 ResTys.push_back(MVT::Untyped);
4781 ResTys.push_back(MVT::Other);
4782
4783 // Place arguments in the right order.
4784 SDValue Ops[] = {MemAddr, getAL(CurDAG, dl),
4785 CurDAG->getRegister(0, MVT::i32), Chain};
4786 SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
4787 // Transfer memoperands.
4788 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
4789 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Ld), {MemOp});
4790
4791 // Remap uses.
4792 SDValue OutChain = isThumb ? SDValue(Ld, 2) : SDValue(Ld, 1);
4793 if (!SDValue(N, 0).use_empty()) {
4794 SDValue Result;
4795 if (isThumb)
4796 Result = SDValue(Ld, 0);
4797 else {
4798 SDValue SubRegIdx =
4799 CurDAG->getTargetConstant(ARM::gsub_0, dl, MVT::i32);
4800 SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
4801 dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
4802 Result = SDValue(ResNode,0);
4803 }
4804 ReplaceUses(SDValue(N, 0), Result);
4805 }
4806 if (!SDValue(N, 1).use_empty()) {
4807 SDValue Result;
4808 if (isThumb)
4809 Result = SDValue(Ld, 1);
4810 else {
4811 SDValue SubRegIdx =
4812 CurDAG->getTargetConstant(ARM::gsub_1, dl, MVT::i32);
4813 SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
4814 dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
4815 Result = SDValue(ResNode,0);
4816 }
4817 ReplaceUses(SDValue(N, 1), Result);
4818 }
4819 ReplaceUses(SDValue(N, 2), OutChain);
4820 CurDAG->RemoveDeadNode(N);
4821 return;
4822 }
4823 case Intrinsic::arm_stlexd:
4824 case Intrinsic::arm_strexd: {
4825 SDLoc dl(N);
4826 SDValue Chain = N->getOperand(0);
4827 SDValue Val0 = N->getOperand(2);
4828 SDValue Val1 = N->getOperand(3);
4829 SDValue MemAddr = N->getOperand(4);
4830
4831 // Store exclusive double return a i32 value which is the return status
4832 // of the issued store.
4833 const EVT ResTys[] = {MVT::i32, MVT::Other};
4834
4835 bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
4836 // Place arguments in the right order.
4837 SmallVector<SDValue, 7> Ops;
4838 if (isThumb) {
4839 Ops.push_back(Val0);
4840 Ops.push_back(Val1);
4841 } else
4842 // arm_strexd uses GPRPair.
4843 Ops.push_back(SDValue(createGPRPairNode(MVT::Untyped, Val0, Val1), 0));
4844 Ops.push_back(MemAddr);
4845 Ops.push_back(getAL(CurDAG, dl));
4846 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
4847 Ops.push_back(Chain);
4848
4849 bool IsRelease = IntNo == Intrinsic::arm_stlexd;
4850 unsigned NewOpc = isThumb ? (IsRelease ? ARM::t2STLEXD : ARM::t2STREXD)
4851 : (IsRelease ? ARM::STLEXD : ARM::STREXD);
4852
4853 SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
4854 // Transfer memoperands.
4855 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
4856 CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});
4857
4858 ReplaceNode(N, St);
4859 return;
4860 }
4861
4862 case Intrinsic::arm_neon_vld1: {
4863 static const uint16_t DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
4864 ARM::VLD1d32, ARM::VLD1d64 };
4865 static const uint16_t QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
4866 ARM::VLD1q32, ARM::VLD1q64};
4867 SelectVLD(N, false, 1, DOpcodes, QOpcodes, nullptr);
4868 return;
4869 }
4870
4871 case Intrinsic::arm_neon_vld1x2: {
4872 static const uint16_t DOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
4873 ARM::VLD1q32, ARM::VLD1q64 };
4874 static const uint16_t QOpcodes[] = { ARM::VLD1d8QPseudo,
4875 ARM::VLD1d16QPseudo,
4876 ARM::VLD1d32QPseudo,
4877 ARM::VLD1d64QPseudo };
4878 SelectVLD(N, false, 2, DOpcodes, QOpcodes, nullptr);
4879 return;
4880 }
4881
4882 case Intrinsic::arm_neon_vld1x3: {
4883 static const uint16_t DOpcodes[] = { ARM::VLD1d8TPseudo,
4884 ARM::VLD1d16TPseudo,
4885 ARM::VLD1d32TPseudo,
4886 ARM::VLD1d64TPseudo };
4887 static const uint16_t QOpcodes0[] = { ARM::VLD1q8LowTPseudo_UPD,
4888 ARM::VLD1q16LowTPseudo_UPD,
4889 ARM::VLD1q32LowTPseudo_UPD,
4890 ARM::VLD1q64LowTPseudo_UPD };
4891 static const uint16_t QOpcodes1[] = { ARM::VLD1q8HighTPseudo,
4892 ARM::VLD1q16HighTPseudo,
4893 ARM::VLD1q32HighTPseudo,
4894 ARM::VLD1q64HighTPseudo };
4895 SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
4896 return;
4897 }
4898
4899 case Intrinsic::arm_neon_vld1x4: {
4900 static const uint16_t DOpcodes[] = { ARM::VLD1d8QPseudo,
4901 ARM::VLD1d16QPseudo,
4902 ARM::VLD1d32QPseudo,
4903 ARM::VLD1d64QPseudo };
4904 static const uint16_t QOpcodes0[] = { ARM::VLD1q8LowQPseudo_UPD,
4905 ARM::VLD1q16LowQPseudo_UPD,
4906 ARM::VLD1q32LowQPseudo_UPD,
4907 ARM::VLD1q64LowQPseudo_UPD };
4908 static const uint16_t QOpcodes1[] = { ARM::VLD1q8HighQPseudo,
4909 ARM::VLD1q16HighQPseudo,
4910 ARM::VLD1q32HighQPseudo,
4911 ARM::VLD1q64HighQPseudo };
4912 SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
4913 return;
4914 }
4915
4916 case Intrinsic::arm_neon_vld2: {
4917 static const uint16_t DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
4918 ARM::VLD2d32, ARM::VLD1q64 };
4919 static const uint16_t QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
4920 ARM::VLD2q32Pseudo };
4921 SelectVLD(N, false, 2, DOpcodes, QOpcodes, nullptr);
4922 return;
4923 }
4924
4925 case Intrinsic::arm_neon_vld3: {
4926 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo,
4927 ARM::VLD3d16Pseudo,
4928 ARM::VLD3d32Pseudo,
4929 ARM::VLD1d64TPseudo };
4930 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
4931 ARM::VLD3q16Pseudo_UPD,
4932 ARM::VLD3q32Pseudo_UPD };
4933 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo,
4934 ARM::VLD3q16oddPseudo,
4935 ARM::VLD3q32oddPseudo };
4936 SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
4937 return;
4938 }
4939
4940 case Intrinsic::arm_neon_vld4: {
4941 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo,
4942 ARM::VLD4d16Pseudo,
4943 ARM::VLD4d32Pseudo,
4944 ARM::VLD1d64QPseudo };
4945 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
4946 ARM::VLD4q16Pseudo_UPD,
4947 ARM::VLD4q32Pseudo_UPD };
4948 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo,
4949 ARM::VLD4q16oddPseudo,
4950 ARM::VLD4q32oddPseudo };
4951 SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
4952 return;
4953 }
4954
4955 case Intrinsic::arm_neon_vld2dup: {
4956 static const uint16_t DOpcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
4957 ARM::VLD2DUPd32, ARM::VLD1q64 };
4958 static const uint16_t QOpcodes0[] = { ARM::VLD2DUPq8EvenPseudo,
4959 ARM::VLD2DUPq16EvenPseudo,
4960 ARM::VLD2DUPq32EvenPseudo };
4961 static const uint16_t QOpcodes1[] = { ARM::VLD2DUPq8OddPseudo,
4962 ARM::VLD2DUPq16OddPseudo,
4963 ARM::VLD2DUPq32OddPseudo };
4964 SelectVLDDup(N, /* IsIntrinsic= */ true, false, 2,
4965 DOpcodes, QOpcodes0, QOpcodes1);
4966 return;
4967 }
4968
4969 case Intrinsic::arm_neon_vld3dup: {
4970 static const uint16_t DOpcodes[] = { ARM::VLD3DUPd8Pseudo,
4971 ARM::VLD3DUPd16Pseudo,
4972 ARM::VLD3DUPd32Pseudo,
4973 ARM::VLD1d64TPseudo };
4974 static const uint16_t QOpcodes0[] = { ARM::VLD3DUPq8EvenPseudo,
4975 ARM::VLD3DUPq16EvenPseudo,
4976 ARM::VLD3DUPq32EvenPseudo };
4977 static const uint16_t QOpcodes1[] = { ARM::VLD3DUPq8OddPseudo,
4978 ARM::VLD3DUPq16OddPseudo,
4979 ARM::VLD3DUPq32OddPseudo };
4980 SelectVLDDup(N, /* IsIntrinsic= */ true, false, 3,
4981 DOpcodes, QOpcodes0, QOpcodes1);
4982 return;
4983 }
4984
4985 case Intrinsic::arm_neon_vld4dup: {
4986 static const uint16_t DOpcodes[] = { ARM::VLD4DUPd8Pseudo,
4987 ARM::VLD4DUPd16Pseudo,
4988 ARM::VLD4DUPd32Pseudo,
4989 ARM::VLD1d64QPseudo };
4990 static const uint16_t QOpcodes0[] = { ARM::VLD4DUPq8EvenPseudo,
4991 ARM::VLD4DUPq16EvenPseudo,
4992 ARM::VLD4DUPq32EvenPseudo };
4993 static const uint16_t QOpcodes1[] = { ARM::VLD4DUPq8OddPseudo,
4994 ARM::VLD4DUPq16OddPseudo,
4995 ARM::VLD4DUPq32OddPseudo };
4996 SelectVLDDup(N, /* IsIntrinsic= */ true, false, 4,
4997 DOpcodes, QOpcodes0, QOpcodes1);
4998 return;
4999 }
5000
5001 case Intrinsic::arm_neon_vld2lane: {
5002 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo,
5003 ARM::VLD2LNd16Pseudo,
5004 ARM::VLD2LNd32Pseudo };
5005 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo,
5006 ARM::VLD2LNq32Pseudo };
5007 SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
5008 return;
5009 }
5010
5011 case Intrinsic::arm_neon_vld3lane: {
5012 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo,
5013 ARM::VLD3LNd16Pseudo,
5014 ARM::VLD3LNd32Pseudo };
5015 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo,
5016 ARM::VLD3LNq32Pseudo };
5017 SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
5018 return;
5019 }
5020
5021 case Intrinsic::arm_neon_vld4lane: {
5022 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo,
5023 ARM::VLD4LNd16Pseudo,
5024 ARM::VLD4LNd32Pseudo };
5025 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo,
5026 ARM::VLD4LNq32Pseudo };
5027 SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
5028 return;
5029 }
5030
5031 case Intrinsic::arm_neon_vst1: {
5032 static const uint16_t DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
5033 ARM::VST1d32, ARM::VST1d64 };
5034 static const uint16_t QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
5035 ARM::VST1q32, ARM::VST1q64 };
5036 SelectVST(N, false, 1, DOpcodes, QOpcodes, nullptr);
5037 return;
5038 }
5039
5040 case Intrinsic::arm_neon_vst1x2: {
5041 static const uint16_t DOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
5042 ARM::VST1q32, ARM::VST1q64 };
5043 static const uint16_t QOpcodes[] = { ARM::VST1d8QPseudo,
5044 ARM::VST1d16QPseudo,
5045 ARM::VST1d32QPseudo,
5046 ARM::VST1d64QPseudo };
5047 SelectVST(N, false, 2, DOpcodes, QOpcodes, nullptr);
5048 return;
5049 }
5050
5051 case Intrinsic::arm_neon_vst1x3: {
5052 static const uint16_t DOpcodes[] = { ARM::VST1d8TPseudo,
5053 ARM::VST1d16TPseudo,
5054 ARM::VST1d32TPseudo,
5055 ARM::VST1d64TPseudo };
5056 static const uint16_t QOpcodes0[] = { ARM::VST1q8LowTPseudo_UPD,
5057 ARM::VST1q16LowTPseudo_UPD,
5058 ARM::VST1q32LowTPseudo_UPD,
5059 ARM::VST1q64LowTPseudo_UPD };
5060 static const uint16_t QOpcodes1[] = { ARM::VST1q8HighTPseudo,
5061 ARM::VST1q16HighTPseudo,
5062 ARM::VST1q32HighTPseudo,
5063 ARM::VST1q64HighTPseudo };
5064 SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
5065 return;
5066 }
5067
5068 case Intrinsic::arm_neon_vst1x4: {
5069 static const uint16_t DOpcodes[] = { ARM::VST1d8QPseudo,
5070 ARM::VST1d16QPseudo,
5071 ARM::VST1d32QPseudo,
5072 ARM::VST1d64QPseudo };
5073 static const uint16_t QOpcodes0[] = { ARM::VST1q8LowQPseudo_UPD,
5074 ARM::VST1q16LowQPseudo_UPD,
5075 ARM::VST1q32LowQPseudo_UPD,
5076 ARM::VST1q64LowQPseudo_UPD };
5077 static const uint16_t QOpcodes1[] = { ARM::VST1q8HighQPseudo,
5078 ARM::VST1q16HighQPseudo,
5079 ARM::VST1q32HighQPseudo,
5080 ARM::VST1q64HighQPseudo };
5081 SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
5082 return;
5083 }
5084
5085 case Intrinsic::arm_neon_vst2: {
5086 static const uint16_t DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
5087 ARM::VST2d32, ARM::VST1q64 };
5088 static const uint16_t QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
5089 ARM::VST2q32Pseudo };
5090 SelectVST(N, false, 2, DOpcodes, QOpcodes, nullptr);
5091 return;
5092 }
5093
5094 case Intrinsic::arm_neon_vst3: {
5095 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo,
5096 ARM::VST3d16Pseudo,
5097 ARM::VST3d32Pseudo,
5098 ARM::VST1d64TPseudo };
5099 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
5100 ARM::VST3q16Pseudo_UPD,
5101 ARM::VST3q32Pseudo_UPD };
5102 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo,
5103 ARM::VST3q16oddPseudo,
5104 ARM::VST3q32oddPseudo };
5105 SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
5106 return;
5107 }
5108
5109 case Intrinsic::arm_neon_vst4: {
5110 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo,
5111 ARM::VST4d16Pseudo,
5112 ARM::VST4d32Pseudo,
5113 ARM::VST1d64QPseudo };
5114 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
5115 ARM::VST4q16Pseudo_UPD,
5116 ARM::VST4q32Pseudo_UPD };
5117 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo,
5118 ARM::VST4q16oddPseudo,
5119 ARM::VST4q32oddPseudo };
5120 SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
5121 return;
5122 }
5123
5124 case Intrinsic::arm_neon_vst2lane: {
5125 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo,
5126 ARM::VST2LNd16Pseudo,
5127 ARM::VST2LNd32Pseudo };
5128 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo,
5129 ARM::VST2LNq32Pseudo };
5130 SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
5131 return;
5132 }
5133
5134 case Intrinsic::arm_neon_vst3lane: {
5135 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo,
5136 ARM::VST3LNd16Pseudo,
5137 ARM::VST3LNd32Pseudo };
5138 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo,
5139 ARM::VST3LNq32Pseudo };
5140 SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
5141 return;
5142 }
5143
5144 case Intrinsic::arm_neon_vst4lane: {
5145 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo,
5146 ARM::VST4LNd16Pseudo,
5147 ARM::VST4LNd32Pseudo };
5148 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo,
5149 ARM::VST4LNq32Pseudo };
5150 SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
5151 return;
5152 }
5153
5154 case Intrinsic::arm_mve_vldr_gather_base_wb:
5155 case Intrinsic::arm_mve_vldr_gather_base_wb_predicated: {
5156 static const uint16_t Opcodes[] = {ARM::MVE_VLDRWU32_qi_pre,
5157 ARM::MVE_VLDRDU64_qi_pre};
5158 SelectMVE_WB(N, Opcodes,
5159 IntNo == Intrinsic::arm_mve_vldr_gather_base_wb_predicated);
5160 return;
5161 }
5162
5163 case Intrinsic::arm_mve_vld2q: {
5164 static const uint16_t Opcodes8[] = {ARM::MVE_VLD20_8, ARM::MVE_VLD21_8};
5165 static const uint16_t Opcodes16[] = {ARM::MVE_VLD20_16,
5166 ARM::MVE_VLD21_16};
5167 static const uint16_t Opcodes32[] = {ARM::MVE_VLD20_32,
5168 ARM::MVE_VLD21_32};
5169 static const uint16_t *const Opcodes[] = {Opcodes8, Opcodes16, Opcodes32};
5170 SelectMVE_VLD(N, 2, Opcodes, false);
5171 return;
5172 }
5173
5174 case Intrinsic::arm_mve_vld4q: {
5175 static const uint16_t Opcodes8[] = {ARM::MVE_VLD40_8, ARM::MVE_VLD41_8,
5176 ARM::MVE_VLD42_8, ARM::MVE_VLD43_8};
5177 static const uint16_t Opcodes16[] = {ARM::MVE_VLD40_16, ARM::MVE_VLD41_16,
5178 ARM::MVE_VLD42_16,
5179 ARM::MVE_VLD43_16};
5180 static const uint16_t Opcodes32[] = {ARM::MVE_VLD40_32, ARM::MVE_VLD41_32,
5181 ARM::MVE_VLD42_32,
5182 ARM::MVE_VLD43_32};
5183 static const uint16_t *const Opcodes[] = {Opcodes8, Opcodes16, Opcodes32};
5184 SelectMVE_VLD(N, 4, Opcodes, false);
5185 return;
5186 }
5187 }
5188 break;
5189 }
5190
5191 case ISD::INTRINSIC_WO_CHAIN: {
5192 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
5193 switch (IntNo) {
5194 default:
5195 break;
5196
5197 // Scalar f32 -> bf16
5198 case Intrinsic::arm_neon_vcvtbfp2bf: {
5199 SDLoc dl(N);
5200 const SDValue &Src = N->getOperand(1);
5201 llvm::EVT DestTy = N->getValueType(0);
5202 SDValue Pred = getAL(CurDAG, dl);
5203 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
5204 SDValue Ops[] = { Src, Src, Pred, Reg0 };
5205 CurDAG->SelectNodeTo(N, ARM::BF16_VCVTB, DestTy, Ops);
5206 return;
5207 }
5208
5209 // Vector v4f32 -> v4bf16
5210 case Intrinsic::arm_neon_vcvtfp2bf: {
5211 SDLoc dl(N);
5212 const SDValue &Src = N->getOperand(1);
5213 SDValue Pred = getAL(CurDAG, dl);
5214 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
5215 SDValue Ops[] = { Src, Pred, Reg0 };
5216 CurDAG->SelectNodeTo(N, ARM::BF16_VCVT, MVT::v4bf16, Ops);
5217 return;
5218 }
5219
5220 case Intrinsic::arm_mve_urshrl:
5221 SelectMVE_LongShift(N, ARM::MVE_URSHRL, true, false);
5222 return;
5223 case Intrinsic::arm_mve_uqshll:
5224 SelectMVE_LongShift(N, ARM::MVE_UQSHLL, true, false);
5225 return;
5226 case Intrinsic::arm_mve_srshrl:
5227 SelectMVE_LongShift(N, ARM::MVE_SRSHRL, true, false);
5228 return;
5229 case Intrinsic::arm_mve_sqshll:
5230 SelectMVE_LongShift(N, ARM::MVE_SQSHLL, true, false);
5231 return;
5232 case Intrinsic::arm_mve_uqrshll:
5233 SelectMVE_LongShift(N, ARM::MVE_UQRSHLL, false, true);
5234 return;
5235 case Intrinsic::arm_mve_sqrshrl:
5236 SelectMVE_LongShift(N, ARM::MVE_SQRSHRL, false, true);
5237 return;
5238
5239 case Intrinsic::arm_mve_vadc:
5240 case Intrinsic::arm_mve_vadc_predicated:
5241 SelectMVE_VADCSBC(N, ARM::MVE_VADC, ARM::MVE_VADCI, true,
5242 IntNo == Intrinsic::arm_mve_vadc_predicated);
5243 return;
5244 case Intrinsic::arm_mve_vsbc:
5245 case Intrinsic::arm_mve_vsbc_predicated:
5246 SelectMVE_VADCSBC(N, ARM::MVE_VSBC, ARM::MVE_VSBCI, true,
5247 IntNo == Intrinsic::arm_mve_vsbc_predicated);
5248 return;
5249 case Intrinsic::arm_mve_vshlc:
5250 case Intrinsic::arm_mve_vshlc_predicated:
5251 SelectMVE_VSHLC(N, IntNo == Intrinsic::arm_mve_vshlc_predicated);
5252 return;
5253
5254 case Intrinsic::arm_mve_vmlldava:
5255 case Intrinsic::arm_mve_vmlldava_predicated: {
5256 static const uint16_t OpcodesU[] = {
5257 ARM::MVE_VMLALDAVu16, ARM::MVE_VMLALDAVu32,
5258 ARM::MVE_VMLALDAVau16, ARM::MVE_VMLALDAVau32,
5259 };
5260 static const uint16_t OpcodesS[] = {
5261 ARM::MVE_VMLALDAVs16, ARM::MVE_VMLALDAVs32,
5262 ARM::MVE_VMLALDAVas16, ARM::MVE_VMLALDAVas32,
5263 ARM::MVE_VMLALDAVxs16, ARM::MVE_VMLALDAVxs32,
5264 ARM::MVE_VMLALDAVaxs16, ARM::MVE_VMLALDAVaxs32,
5265 ARM::MVE_VMLSLDAVs16, ARM::MVE_VMLSLDAVs32,
5266 ARM::MVE_VMLSLDAVas16, ARM::MVE_VMLSLDAVas32,
5267 ARM::MVE_VMLSLDAVxs16, ARM::MVE_VMLSLDAVxs32,
5268 ARM::MVE_VMLSLDAVaxs16, ARM::MVE_VMLSLDAVaxs32,
5269 };
5270 SelectMVE_VMLLDAV(N, IntNo == Intrinsic::arm_mve_vmlldava_predicated,
5271 OpcodesS, OpcodesU);
5272 return;
5273 }
5274
5275 case Intrinsic::arm_mve_vrmlldavha:
5276 case Intrinsic::arm_mve_vrmlldavha_predicated: {
5277 static const uint16_t OpcodesU[] = {
5278 ARM::MVE_VRMLALDAVHu32, ARM::MVE_VRMLALDAVHau32,
5279 };
5280 static const uint16_t OpcodesS[] = {
5281 ARM::MVE_VRMLALDAVHs32, ARM::MVE_VRMLALDAVHas32,
5282 ARM::MVE_VRMLALDAVHxs32, ARM::MVE_VRMLALDAVHaxs32,
5283 ARM::MVE_VRMLSLDAVHs32, ARM::MVE_VRMLSLDAVHas32,
5284 ARM::MVE_VRMLSLDAVHxs32, ARM::MVE_VRMLSLDAVHaxs32,
5285 };
5286 SelectMVE_VRMLLDAVH(N, IntNo == Intrinsic::arm_mve_vrmlldavha_predicated,
5287 OpcodesS, OpcodesU);
5288 return;
5289 }
5290
5291 case Intrinsic::arm_mve_vidup:
5292 case Intrinsic::arm_mve_vidup_predicated: {
5293 static const uint16_t Opcodes[] = {
5294 ARM::MVE_VIDUPu8, ARM::MVE_VIDUPu16, ARM::MVE_VIDUPu32,
5295 };
5296 SelectMVE_VxDUP(N, Opcodes, false,
5297 IntNo == Intrinsic::arm_mve_vidup_predicated);
5298 return;
5299 }
5300
5301 case Intrinsic::arm_mve_vddup:
5302 case Intrinsic::arm_mve_vddup_predicated: {
5303 static const uint16_t Opcodes[] = {
5304 ARM::MVE_VDDUPu8, ARM::MVE_VDDUPu16, ARM::MVE_VDDUPu32,
5305 };
5306 SelectMVE_VxDUP(N, Opcodes, false,
5307 IntNo == Intrinsic::arm_mve_vddup_predicated);
5308 return;
5309 }
5310
5311 case Intrinsic::arm_mve_viwdup:
5312 case Intrinsic::arm_mve_viwdup_predicated: {
5313 static const uint16_t Opcodes[] = {
5314 ARM::MVE_VIWDUPu8, ARM::MVE_VIWDUPu16, ARM::MVE_VIWDUPu32,
5315 };
5316 SelectMVE_VxDUP(N, Opcodes, true,
5317 IntNo == Intrinsic::arm_mve_viwdup_predicated);
5318 return;
5319 }
5320
5321 case Intrinsic::arm_mve_vdwdup:
5322 case Intrinsic::arm_mve_vdwdup_predicated: {
5323 static const uint16_t Opcodes[] = {
5324 ARM::MVE_VDWDUPu8, ARM::MVE_VDWDUPu16, ARM::MVE_VDWDUPu32,
5325 };
5326 SelectMVE_VxDUP(N, Opcodes, true,
5327 IntNo == Intrinsic::arm_mve_vdwdup_predicated);
5328 return;
5329 }
5330
5331 case Intrinsic::arm_cde_cx1d:
5332 case Intrinsic::arm_cde_cx1da:
5333 case Intrinsic::arm_cde_cx2d:
5334 case Intrinsic::arm_cde_cx2da:
5335 case Intrinsic::arm_cde_cx3d:
5336 case Intrinsic::arm_cde_cx3da: {
5337 bool HasAccum = IntNo == Intrinsic::arm_cde_cx1da ||
5338 IntNo == Intrinsic::arm_cde_cx2da ||
5339 IntNo == Intrinsic::arm_cde_cx3da;
5340 size_t NumExtraOps;
5341 uint16_t Opcode;
5342 switch (IntNo) {
5343 case Intrinsic::arm_cde_cx1d:
5344 case Intrinsic::arm_cde_cx1da:
5345 NumExtraOps = 0;
5346 Opcode = HasAccum ? ARM::CDE_CX1DA : ARM::CDE_CX1D;
5347 break;
5348 case Intrinsic::arm_cde_cx2d:
5349 case Intrinsic::arm_cde_cx2da:
5350 NumExtraOps = 1;
5351 Opcode = HasAccum ? ARM::CDE_CX2DA : ARM::CDE_CX2D;
5352 break;
5353 case Intrinsic::arm_cde_cx3d:
5354 case Intrinsic::arm_cde_cx3da:
5355 NumExtraOps = 2;
5356 Opcode = HasAccum ? ARM::CDE_CX3DA : ARM::CDE_CX3D;
5357 break;
5358 default:
5359 llvm_unreachable("Unexpected opcode")::llvm::llvm_unreachable_internal("Unexpected opcode", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 5359)
;
5360 }
5361 SelectCDE_CXxD(N, Opcode, NumExtraOps, HasAccum);
5362 return;
5363 }
5364 }
5365 break;
5366 }
5367
5368 case ISD::ATOMIC_CMP_SWAP:
5369 SelectCMP_SWAP(N);
5370 return;
5371 }
5372
5373 SelectCode(N);
5374}
5375
// Inspect a register string of the form
// cp<coprocessor>:<opc1>:c<CRn>:c<CRm>:<opc2> (32bit) or
// cp<coprocessor>:<opc1>:c<CRm> (64bit), extract the integer operands from
// its fields, and append those operands to the provided vector.
5381static void getIntOperandsFromRegisterString(StringRef RegString,
5382 SelectionDAG *CurDAG,
5383 const SDLoc &DL,
5384 std::vector<SDValue> &Ops) {
5385 SmallVector<StringRef, 5> Fields;
5386 RegString.split(Fields, ':');
5387
5388 if (Fields.size() > 1) {
5389 bool AllIntFields = true;
5390
5391 for (StringRef Field : Fields) {
5392 // Need to trim out leading 'cp' characters and get the integer field.
5393 unsigned IntField;
5394 AllIntFields &= !Field.trim("CPcp").getAsInteger(10, IntField);
5395 Ops.push_back(CurDAG->getTargetConstant(IntField, DL, MVT::i32));
5396 }
5397
5398 assert(AllIntFields &&(static_cast <bool> (AllIntFields && "Unexpected non-integer value in special register string."
) ? void (0) : __assert_fail ("AllIntFields && \"Unexpected non-integer value in special register string.\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 5399, __extension__ __PRETTY_FUNCTION__))
5399 "Unexpected non-integer value in special register string.")(static_cast <bool> (AllIntFields && "Unexpected non-integer value in special register string."
) ? void (0) : __assert_fail ("AllIntFields && \"Unexpected non-integer value in special register string.\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 5399, __extension__ __PRETTY_FUNCTION__))
;
5400 (void)AllIntFields;
5401 }
5402}
5403
5404// Maps a Banked Register string to its mask value. The mask value returned is
5405// for use in the MRSbanked / MSRbanked instruction nodes as the Banked Register
5406// mask operand, which expresses which register is to be used, e.g. r8, and in
5407// which mode it is to be used, e.g. usr. Returns -1 to signify that the string
5408// was invalid.
5409static inline int getBankedRegisterMask(StringRef RegString) {
5410 auto TheReg = ARMBankedReg::lookupBankedRegByName(RegString.lower());
5411 if (!TheReg)
5412 return -1;
5413 return TheReg->Encoding;
5414}
5415
5416// The flags here are common to those allowed for apsr in the A class cores and
5417// those allowed for the special registers in the M class cores. Returns a
5418// value representing which flags were present, -1 if invalid.
5419static inline int getMClassFlagsMask(StringRef Flags) {
5420 return StringSwitch<int>(Flags)
5421 .Case("", 0x2) // no flags means nzcvq for psr registers, and 0x2 is
5422 // correct when flags are not permitted
5423 .Case("g", 0x1)
5424 .Case("nzcvq", 0x2)
5425 .Case("nzcvqg", 0x3)
5426 .Default(-1);
5427}
5428
5429// Maps MClass special registers string to its value for use in the
5430// t2MRS_M/t2MSR_M instruction nodes as the SYSm value operand.
5431// Returns -1 to signify that the string was invalid.
5432static int getMClassRegisterMask(StringRef Reg, const ARMSubtarget *Subtarget) {
5433 auto TheReg = ARMSysReg::lookupMClassSysRegByName(Reg);
5434 const FeatureBitset &FeatureBits = Subtarget->getFeatureBits();
5435 if (!TheReg || !TheReg->hasRequiredFeatures(FeatureBits))
5436 return -1;
5437 return (int)(TheReg->Encoding & 0xFFF); // SYSm value
5438}
5439
5440static int getARClassRegisterMask(StringRef Reg, StringRef Flags) {
5441 // The mask operand contains the special register (R Bit) in bit 4, whether
5442 // the register is spsr (R bit is 1) or one of cpsr/apsr (R bit is 0), and
5443 // bits 3-0 contains the fields to be accessed in the special register, set by
5444 // the flags provided with the register.
5445 int Mask = 0;
5446 if (Reg == "apsr") {
5447 // The flags permitted for apsr are the same flags that are allowed in
5448 // M class registers. We get the flag value and then shift the flags into
5449 // the correct place to combine with the mask.
5450 Mask = getMClassFlagsMask(Flags);
5451 if (Mask == -1)
5452 return -1;
5453 return Mask << 2;
5454 }
5455
5456 if (Reg != "cpsr" && Reg != "spsr") {
5457 return -1;
5458 }
5459
5460 // This is the same as if the flags were "fc"
5461 if (Flags.empty() || Flags == "all")
5462 return Mask | 0x9;
5463
5464 // Inspect the supplied flags string and set the bits in the mask for
5465 // the relevant and valid flags allowed for cpsr and spsr.
5466 for (char Flag : Flags) {
5467 int FlagVal;
5468 switch (Flag) {
5469 case 'c':
5470 FlagVal = 0x1;
5471 break;
5472 case 'x':
5473 FlagVal = 0x2;
5474 break;
5475 case 's':
5476 FlagVal = 0x4;
5477 break;
5478 case 'f':
5479 FlagVal = 0x8;
5480 break;
5481 default:
5482 FlagVal = 0;
5483 }
5484
5485 // This avoids allowing strings where the same flag bit appears twice.
5486 if (!FlagVal || (Mask & FlagVal))
5487 return -1;
5488 Mask |= FlagVal;
5489 }
5490
5491 // If the register is spsr then we need to set the R bit.
5492 if (Reg == "spsr")
5493 Mask |= 0x10;
5494
5495 return Mask;
5496}
5497
5498// Lower the read_register intrinsic to ARM specific DAG nodes
5499// using the supplied metadata string to select the instruction node to use
5500// and the registers/masks to construct as operands for the node.
5501bool ARMDAGToDAGISel::tryReadRegister(SDNode *N){
5502 const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
5503 const auto *RegString = cast<MDString>(MD->getMD()->getOperand(0));
5504 bool IsThumb2 = Subtarget->isThumb2();
5505 SDLoc DL(N);
5506
5507 std::vector<SDValue> Ops;
5508 getIntOperandsFromRegisterString(RegString->getString(), CurDAG, DL, Ops);
5509
5510 if (!Ops.empty()) {
5511 // If the special register string was constructed of fields (as defined
5512 // in the ACLE) then need to lower to MRC node (32 bit) or
5513 // MRRC node(64 bit), we can make the distinction based on the number of
5514 // operands we have.
5515 unsigned Opcode;
5516 SmallVector<EVT, 3> ResTypes;
5517 if (Ops.size() == 5){
5518 Opcode = IsThumb2 ? ARM::t2MRC : ARM::MRC;
5519 ResTypes.append({ MVT::i32, MVT::Other });
5520 } else {
5521 assert(Ops.size() == 3 &&(static_cast <bool> (Ops.size() == 3 && "Invalid number of fields in special register string."
) ? void (0) : __assert_fail ("Ops.size() == 3 && \"Invalid number of fields in special register string.\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 5522, __extension__ __PRETTY_FUNCTION__))
5522 "Invalid number of fields in special register string.")(static_cast <bool> (Ops.size() == 3 && "Invalid number of fields in special register string."
) ? void (0) : __assert_fail ("Ops.size() == 3 && \"Invalid number of fields in special register string.\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 5522, __extension__ __PRETTY_FUNCTION__))
;
5523 Opcode = IsThumb2 ? ARM::t2MRRC : ARM::MRRC;
5524 ResTypes.append({ MVT::i32, MVT::i32, MVT::Other });
5525 }
5526
5527 Ops.push_back(getAL(CurDAG, DL));
5528 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
5529 Ops.push_back(N->getOperand(0));
5530 ReplaceNode(N, CurDAG->getMachineNode(Opcode, DL, ResTypes, Ops));
5531 return true;
5532 }
5533
5534 std::string SpecialReg = RegString->getString().lower();
5535
5536 int BankedReg = getBankedRegisterMask(SpecialReg);
5537 if (BankedReg != -1) {
5538 Ops = { CurDAG->getTargetConstant(BankedReg, DL, MVT::i32),
5539 getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
5540 N->getOperand(0) };
5541 ReplaceNode(
5542 N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MRSbanked : ARM::MRSbanked,
5543 DL, MVT::i32, MVT::Other, Ops));
5544 return true;
5545 }
5546
5547 // The VFP registers are read by creating SelectionDAG nodes with opcodes
5548 // corresponding to the register that is being read from. So we switch on the
5549 // string to find which opcode we need to use.
5550 unsigned Opcode = StringSwitch<unsigned>(SpecialReg)
5551 .Case("fpscr", ARM::VMRS)
5552 .Case("fpexc", ARM::VMRS_FPEXC)
5553 .Case("fpsid", ARM::VMRS_FPSID)
5554 .Case("mvfr0", ARM::VMRS_MVFR0)
5555 .Case("mvfr1", ARM::VMRS_MVFR1)
5556 .Case("mvfr2", ARM::VMRS_MVFR2)
5557 .Case("fpinst", ARM::VMRS_FPINST)
5558 .Case("fpinst2", ARM::VMRS_FPINST2)
5559 .Default(0);
5560
5561 // If an opcode was found then we can lower the read to a VFP instruction.
5562 if (Opcode) {
5563 if (!Subtarget->hasVFP2Base())
5564 return false;
5565 if (Opcode == ARM::VMRS_MVFR2 && !Subtarget->hasFPARMv8Base())
5566 return false;
5567
5568 Ops = { getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
5569 N->getOperand(0) };
5570 ReplaceNode(N,
5571 CurDAG->getMachineNode(Opcode, DL, MVT::i32, MVT::Other, Ops));
5572 return true;
5573 }
5574
5575 // If the target is M Class then need to validate that the register string
5576 // is an acceptable value, so check that a mask can be constructed from the
5577 // string.
5578 if (Subtarget->isMClass()) {
5579 int SYSmValue = getMClassRegisterMask(SpecialReg, Subtarget);
5580 if (SYSmValue == -1)
5581 return false;
5582
5583 SDValue Ops[] = { CurDAG->getTargetConstant(SYSmValue, DL, MVT::i32),
5584 getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
5585 N->getOperand(0) };
5586 ReplaceNode(
5587 N, CurDAG->getMachineNode(ARM::t2MRS_M, DL, MVT::i32, MVT::Other, Ops));
5588 return true;
5589 }
5590
5591 // Here we know the target is not M Class so we need to check if it is one
5592 // of the remaining possible values which are apsr, cpsr or spsr.
5593 if (SpecialReg == "apsr" || SpecialReg == "cpsr") {
5594 Ops = { getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
5595 N->getOperand(0) };
5596 ReplaceNode(N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MRS_AR : ARM::MRS,
5597 DL, MVT::i32, MVT::Other, Ops));
5598 return true;
5599 }
5600
5601 if (SpecialReg == "spsr") {
5602 Ops = { getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
5603 N->getOperand(0) };
5604 ReplaceNode(
5605 N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MRSsys_AR : ARM::MRSsys, DL,
5606 MVT::i32, MVT::Other, Ops));
5607 return true;
5608 }
5609
5610 return false;
5611}
5612
5613// Lower the write_register intrinsic to ARM specific DAG nodes
5614// using the supplied metadata string to select the instruction node to use
5615// and the registers/masks to use in the nodes
5616bool ARMDAGToDAGISel::tryWriteRegister(SDNode *N){
5617 const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
5618 const auto *RegString = cast<MDString>(MD->getMD()->getOperand(0));
5619 bool IsThumb2 = Subtarget->isThumb2();
5620 SDLoc DL(N);
5621
5622 std::vector<SDValue> Ops;
5623 getIntOperandsFromRegisterString(RegString->getString(), CurDAG, DL, Ops);
5624
5625 if (!Ops.empty()) {
5626 // If the special register string was constructed of fields (as defined
5627 // in the ACLE) then need to lower to MCR node (32 bit) or
5628 // MCRR node(64 bit), we can make the distinction based on the number of
5629 // operands we have.
5630 unsigned Opcode;
5631 if (Ops.size() == 5) {
5632 Opcode = IsThumb2 ? ARM::t2MCR : ARM::MCR;
5633 Ops.insert(Ops.begin()+2, N->getOperand(2));
5634 } else {
5635 assert(Ops.size() == 3 &&(static_cast <bool> (Ops.size() == 3 && "Invalid number of fields in special register string."
) ? void (0) : __assert_fail ("Ops.size() == 3 && \"Invalid number of fields in special register string.\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 5636, __extension__ __PRETTY_FUNCTION__))
5636 "Invalid number of fields in special register string.")(static_cast <bool> (Ops.size() == 3 && "Invalid number of fields in special register string."
) ? void (0) : __assert_fail ("Ops.size() == 3 && \"Invalid number of fields in special register string.\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 5636, __extension__ __PRETTY_FUNCTION__))
;
5637 Opcode = IsThumb2 ? ARM::t2MCRR : ARM::MCRR;
5638 SDValue WriteValue[] = { N->getOperand(2), N->getOperand(3) };
5639 Ops.insert(Ops.begin()+2, WriteValue, WriteValue+2);
5640 }
5641
5642 Ops.push_back(getAL(CurDAG, DL));
5643 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
5644 Ops.push_back(N->getOperand(0));
5645
5646 ReplaceNode(N, CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops));
5647 return true;
5648 }
5649
5650 std::string SpecialReg = RegString->getString().lower();
5651 int BankedReg = getBankedRegisterMask(SpecialReg);
5652 if (BankedReg != -1) {
5653 Ops = { CurDAG->getTargetConstant(BankedReg, DL, MVT::i32), N->getOperand(2),
5654 getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
5655 N->getOperand(0) };
5656 ReplaceNode(
5657 N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MSRbanked : ARM::MSRbanked,
5658 DL, MVT::Other, Ops));
5659 return true;
5660 }
5661
5662 // The VFP registers are written to by creating SelectionDAG nodes with
5663 // opcodes corresponding to the register that is being written. So we switch
5664 // on the string to find which opcode we need to use.
5665 unsigned Opcode = StringSwitch<unsigned>(SpecialReg)
5666 .Case("fpscr", ARM::VMSR)
5667 .Case("fpexc", ARM::VMSR_FPEXC)
5668 .Case("fpsid", ARM::VMSR_FPSID)
5669 .Case("fpinst", ARM::VMSR_FPINST)
5670 .Case("fpinst2", ARM::VMSR_FPINST2)
5671 .Default(0);
5672
5673 if (Opcode) {
5674 if (!Subtarget->hasVFP2Base())
5675 return false;
5676 Ops = { N->getOperand(2), getAL(CurDAG, DL),
5677 CurDAG->getRegister(0, MVT::i32), N->getOperand(0) };
5678 ReplaceNode(N, CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops));
5679 return true;
5680 }
5681
5682 std::pair<StringRef, StringRef> Fields;
5683 Fields = StringRef(SpecialReg).rsplit('_');
5684 std::string Reg = Fields.first.str();
5685 StringRef Flags = Fields.second;
5686
5687 // If the target was M Class then need to validate the special register value
5688 // and retrieve the mask for use in the instruction node.
5689 if (Subtarget->isMClass()) {
5690 int SYSmValue = getMClassRegisterMask(SpecialReg, Subtarget);
5691 if (SYSmValue == -1)
5692 return false;
5693
5694 SDValue Ops[] = { CurDAG->getTargetConstant(SYSmValue, DL, MVT::i32),
5695 N->getOperand(2), getAL(CurDAG, DL),
5696 CurDAG->getRegister(0, MVT::i32), N->getOperand(0) };
5697 ReplaceNode(N, CurDAG->getMachineNode(ARM::t2MSR_M, DL, MVT::Other, Ops));
5698 return true;
5699 }
5700
5701 // We then check to see if a valid mask can be constructed for one of the
5702 // register string values permitted for the A and R class cores. These values
5703 // are apsr, spsr and cpsr; these are also valid on older cores.
5704 int Mask = getARClassRegisterMask(Reg, Flags);
5705 if (Mask != -1) {
5706 Ops = { CurDAG->getTargetConstant(Mask, DL, MVT::i32), N->getOperand(2),
5707 getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
5708 N->getOperand(0) };
5709 ReplaceNode(N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MSR_AR : ARM::MSR,
5710 DL, MVT::Other, Ops));
5711 return true;
5712 }
5713
5714 return false;
5715}
5716
5717bool ARMDAGToDAGISel::tryInlineAsm(SDNode *N){
5718 std::vector<SDValue> AsmNodeOperands;
5719 unsigned Flag, Kind;
5720 bool Changed = false;
5721 unsigned NumOps = N->getNumOperands();
5722
5723 // Normally, i64 data is bounded to two arbitrary GRPs for "%r" constraint.
5724 // However, some instrstions (e.g. ldrexd/strexd in ARM mode) require
5725 // (even/even+1) GPRs and use %n and %Hn to refer to the individual regs
5726 // respectively. Since there is no constraint to explicitly specify a
5727 // reg pair, we use GPRPair reg class for "%r" for 64-bit data. For Thumb,
5728 // the 64-bit data may be referred by H, Q, R modifiers, so we still pack
5729 // them into a GPRPair.
5730
5731 SDLoc dl(N);
5732 SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps-1)
5733 : SDValue(nullptr,0);
5734
5735 SmallVector<bool, 8> OpChanged;
5736 // Glue node will be appended late.
5737 for(unsigned i = 0, e = N->getGluedNode() ? NumOps - 1 : NumOps; i < e; ++i) {
5738 SDValue op = N->getOperand(i);
5739 AsmNodeOperands.push_back(op);
5740
5741 if (i < InlineAsm::Op_FirstOperand)
5742 continue;
5743
5744 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(i))) {
5745 Flag = C->getZExtValue();
5746 Kind = InlineAsm::getKind(Flag);
5747 }
5748 else
5749 continue;
5750
5751 // Immediate operands to inline asm in the SelectionDAG are modeled with
5752 // two operands. The first is a constant of value InlineAsm::Kind_Imm, and
5753 // the second is a constant with the value of the immediate. If we get here
5754 // and we have a Kind_Imm, skip the next operand, and continue.
5755 if (Kind == InlineAsm::Kind_Imm) {
5756 SDValue op = N->getOperand(++i);
5757 AsmNodeOperands.push_back(op);
5758 continue;
5759 }
5760
5761 unsigned NumRegs = InlineAsm::getNumOperandRegisters(Flag);
5762 if (NumRegs)
5763 OpChanged.push_back(false);
5764
5765 unsigned DefIdx = 0;
5766 bool IsTiedToChangedOp = false;
5767 // If it's a use that is tied with a previous def, it has no
5768 // reg class constraint.
5769 if (Changed && InlineAsm::isUseOperandTiedToDef(Flag, DefIdx))
5770 IsTiedToChangedOp = OpChanged[DefIdx];
5771
5772 // Memory operands to inline asm in the SelectionDAG are modeled with two
5773 // operands: a constant of value InlineAsm::Kind_Mem followed by the input
5774 // operand. If we get here and we have a Kind_Mem, skip the next operand (so
5775 // it doesn't get misinterpreted), and continue. We do this here because
5776 // it's important to update the OpChanged array correctly before moving on.
5777 if (Kind == InlineAsm::Kind_Mem) {
5778 SDValue op = N->getOperand(++i);
5779 AsmNodeOperands.push_back(op);
5780 continue;
5781 }
5782
5783 if (Kind != InlineAsm::Kind_RegUse && Kind != InlineAsm::Kind_RegDef
5784 && Kind != InlineAsm::Kind_RegDefEarlyClobber)
5785 continue;
5786
5787 unsigned RC;
5788 bool HasRC = InlineAsm::hasRegClassConstraint(Flag, RC);
5789 if ((!IsTiedToChangedOp && (!HasRC || RC != ARM::GPRRegClassID))
5790 || NumRegs != 2)
5791 continue;
5792
5793 assert((i+2 < NumOps) && "Invalid number of operands in inline asm")(static_cast <bool> ((i+2 < NumOps) && "Invalid number of operands in inline asm"
) ? void (0) : __assert_fail ("(i+2 < NumOps) && \"Invalid number of operands in inline asm\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 5793, __extension__ __PRETTY_FUNCTION__))
;
5794 SDValue V0 = N->getOperand(i+1);
5795 SDValue V1 = N->getOperand(i+2);
5796 unsigned Reg0 = cast<RegisterSDNode>(V0)->getReg();
5797 unsigned Reg1 = cast<RegisterSDNode>(V1)->getReg();
5798 SDValue PairedReg;
5799 MachineRegisterInfo &MRI = MF->getRegInfo();
5800
5801 if (Kind == InlineAsm::Kind_RegDef ||
5802 Kind == InlineAsm::Kind_RegDefEarlyClobber) {
5803 // Replace the two GPRs with 1 GPRPair and copy values from GPRPair to
5804 // the original GPRs.
5805
5806 Register GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
5807 PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
5808 SDValue Chain = SDValue(N,0);
5809
5810 SDNode *GU = N->getGluedUser();
5811 SDValue RegCopy = CurDAG->getCopyFromReg(Chain, dl, GPVR, MVT::Untyped,
5812 Chain.getValue(1));
5813
5814 // Extract values from a GPRPair reg and copy to the original GPR reg.
5815 SDValue Sub0 = CurDAG->getTargetExtractSubreg(ARM::gsub_0, dl, MVT::i32,
5816 RegCopy);
5817 SDValue Sub1 = CurDAG->getTargetExtractSubreg(ARM::gsub_1, dl, MVT::i32,
5818 RegCopy);
5819 SDValue T0 = CurDAG->getCopyToReg(Sub0, dl, Reg0, Sub0,
5820 RegCopy.getValue(1));
5821 SDValue T1 = CurDAG->getCopyToReg(Sub1, dl, Reg1, Sub1, T0.getValue(1));
5822
5823 // Update the original glue user.
5824 std::vector<SDValue> Ops(GU->op_begin(), GU->op_end()-1);
5825 Ops.push_back(T1.getValue(1));
5826 CurDAG->UpdateNodeOperands(GU, Ops);
5827 }
5828 else {
5829 // For Kind == InlineAsm::Kind_RegUse, we first copy two GPRs into a
5830 // GPRPair and then pass the GPRPair to the inline asm.
5831 SDValue Chain = AsmNodeOperands[InlineAsm::Op_InputChain];
5832
5833 // As REG_SEQ doesn't take RegisterSDNode, we copy them first.
5834 SDValue T0 = CurDAG->getCopyFromReg(Chain, dl, Reg0, MVT::i32,
5835 Chain.getValue(1));
5836 SDValue T1 = CurDAG->getCopyFromReg(Chain, dl, Reg1, MVT::i32,
5837 T0.getValue(1));
5838 SDValue Pair = SDValue(createGPRPairNode(MVT::Untyped, T0, T1), 0);
5839
5840 // Copy REG_SEQ into a GPRPair-typed VR and replace the original two
5841 // i32 VRs of inline asm with it.
5842 Register GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
5843 PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
5844 Chain = CurDAG->getCopyToReg(T1, dl, GPVR, Pair, T1.getValue(1));
5845
5846 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
5847 Glue = Chain.getValue(1);
5848 }
5849
5850 Changed = true;
5851
5852 if(PairedReg.getNode()) {
5853 OpChanged[OpChanged.size() -1 ] = true;
5854 Flag = InlineAsm::getFlagWord(Kind, 1 /* RegNum*/);
5855 if (IsTiedToChangedOp)
5856 Flag = InlineAsm::getFlagWordForMatchingOp(Flag, DefIdx);
5857 else
5858 Flag = InlineAsm::getFlagWordForRegClass(Flag, ARM::GPRPairRegClassID);
5859 // Replace the current flag.
5860 AsmNodeOperands[AsmNodeOperands.size() -1] = CurDAG->getTargetConstant(
5861 Flag, dl, MVT::i32);
5862 // Add the new register node and skip the original two GPRs.
5863 AsmNodeOperands.push_back(PairedReg);
5864 // Skip the next two GPRs.
5865 i += 2;
5866 }
5867 }
5868
5869 if (Glue.getNode())
5870 AsmNodeOperands.push_back(Glue);
5871 if (!Changed)
5872 return false;
5873
5874 SDValue New = CurDAG->getNode(N->getOpcode(), SDLoc(N),
5875 CurDAG->getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
5876 New->setNodeId(-1);
5877 ReplaceNode(N, New.getNode());
5878 return true;
5879}
5880
5881
5882bool ARMDAGToDAGISel::
5883SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
5884 std::vector<SDValue> &OutOps) {
5885 switch(ConstraintID) {
5886 default:
5887 llvm_unreachable("Unexpected asm memory constraint")::llvm::llvm_unreachable_internal("Unexpected asm memory constraint"
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp"
, 5887)
;
5888 case InlineAsm::Constraint_m:
5889 case InlineAsm::Constraint_o:
5890 case InlineAsm::Constraint_Q:
5891 case InlineAsm::Constraint_Um:
5892 case InlineAsm::Constraint_Un:
5893 case InlineAsm::Constraint_Uq:
5894 case InlineAsm::Constraint_Us:
5895 case InlineAsm::Constraint_Ut:
5896 case InlineAsm::Constraint_Uv:
5897 case InlineAsm::Constraint_Uy:
5898 // Require the address to be in a register. That is safe for all ARM
5899 // variants and it is hard to do anything much smarter without knowing
5900 // how the operand is used.
5901 OutOps.push_back(Op);
5902 return false;
5903 }
5904 return true;
5905}
5906
5907/// createARMISelDag - This pass converts a legalized DAG into a
5908/// ARM-specific DAG, ready for instruction scheduling.
5909///
5910FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
5911 CodeGenOpt::Level OptLevel) {
5912 return new ARMDAGToDAGISel(TM, OptLevel);
5913}

/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/ValueTypes.h

1//===- CodeGen/ValueTypes.h - Low-Level Target independ. types --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the set of low-level target independent types which various
10// values in the code generator are. This allows the target specific behavior
11// of instructions to be described to target independent passes.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_CODEGEN_VALUETYPES_H
16#define LLVM_CODEGEN_VALUETYPES_H
17
18#include "llvm/Support/Compiler.h"
19#include "llvm/Support/MachineValueType.h"
20#include "llvm/Support/MathExtras.h"
21#include "llvm/Support/TypeSize.h"
22#include "llvm/Support/WithColor.h"
23#include <cassert>
24#include <cstdint>
25#include <string>
26
27namespace llvm {
28
29 class LLVMContext;
30 class Type;
31
32 /// Extended Value Type. Capable of holding value types which are not native
33 /// for any processor (such as the i12345 type), as well as the types an MVT
34 /// can represent.
35 struct EVT {
36 private:
37 MVT V = MVT::INVALID_SIMPLE_VALUE_TYPE;
38 Type *LLVMTy = nullptr;
39
40 public:
41 constexpr EVT() = default;
42 constexpr EVT(MVT::SimpleValueType SVT) : V(SVT) {}
43 constexpr EVT(MVT S) : V(S) {}
44
45 bool operator==(EVT VT) const {
46 return !(*this != VT);
47 }
48 bool operator!=(EVT VT) const {
49 if (V.SimpleTy != VT.V.SimpleTy)
50 return true;
51 if (V.SimpleTy == MVT::INVALID_SIMPLE_VALUE_TYPE)
52 return LLVMTy != VT.LLVMTy;
53 return false;
54 }
55
56 /// Returns the EVT that represents a floating-point type with the given
57 /// number of bits. There are two floating-point types with 128 bits - this
58 /// returns f128 rather than ppcf128.
59 static EVT getFloatingPointVT(unsigned BitWidth) {
60 return MVT::getFloatingPointVT(BitWidth);
61 }
62
63 /// Returns the EVT that represents an integer with the given number of
64 /// bits.
65 static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth) {
66 MVT M = MVT::getIntegerVT(BitWidth);
67 if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
68 return M;
69 return getExtendedIntegerVT(Context, BitWidth);
70 }
71
72 /// Returns the EVT that represents a vector NumElements in length, where
73 /// each element is of type VT.
74 static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements,
75 bool IsScalable = false) {
76 MVT M = MVT::getVectorVT(VT.V, NumElements, IsScalable);
77 if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
78 return M;
79 return getExtendedVectorVT(Context, VT, NumElements, IsScalable);
80 }
81
82 /// Returns the EVT that represents a vector EC.Min elements in length,
83 /// where each element is of type VT.
84 static EVT getVectorVT(LLVMContext &Context, EVT VT, ElementCount EC) {
85 MVT M = MVT::getVectorVT(VT.V, EC);
86 if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
87 return M;
88 return getExtendedVectorVT(Context, VT, EC);
89 }
90
91 /// Return a vector with the same number of elements as this vector, but
92 /// with the element type converted to an integer type with the same
93 /// bitwidth.
94 EVT changeVectorElementTypeToInteger() const {
95 if (isSimple())
96 return getSimpleVT().changeVectorElementTypeToInteger();
97 return changeExtendedVectorElementTypeToInteger();
98 }
99
100 /// Return a VT for a vector type whose attributes match ourselves
101 /// with the exception of the element type that is chosen by the caller.
102 EVT changeVectorElementType(EVT EltVT) const {
103 if (isSimple()) {
104 assert(EltVT.isSimple() &&(static_cast <bool> (EltVT.isSimple() && "Can't change simple vector VT to have extended element VT"
) ? void (0) : __assert_fail ("EltVT.isSimple() && \"Can't change simple vector VT to have extended element VT\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/ValueTypes.h"
, 105, __extension__ __PRETTY_FUNCTION__))
105 "Can't change simple vector VT to have extended element VT")(static_cast <bool> (EltVT.isSimple() && "Can't change simple vector VT to have extended element VT"
) ? void (0) : __assert_fail ("EltVT.isSimple() && \"Can't change simple vector VT to have extended element VT\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/ValueTypes.h"
, 105, __extension__ __PRETTY_FUNCTION__))
;
106 return getSimpleVT().changeVectorElementType(EltVT.getSimpleVT());
107 }
108 return changeExtendedVectorElementType(EltVT);
109 }
110
111 /// Return the type converted to an equivalently sized integer or vector
112 /// with integer element type. Similar to changeVectorElementTypeToInteger,
113 /// but also handles scalars.
114 EVT changeTypeToInteger() {
115 if (isVector())
116 return changeVectorElementTypeToInteger();
117
118 if (isSimple())
119 return getSimpleVT().changeTypeToInteger();
120 return changeExtendedTypeToInteger();
121 }
122
123 /// Test if the given EVT has zero size, this will fail if called on a
124 /// scalable type
125 bool isZeroSized() const {
126 return !isScalableVector() && getSizeInBits() == 0;
127 }
128
129 /// Test if the given EVT is simple (as opposed to being extended).
130 bool isSimple() const {
131 return V.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE;
132 }
133
134 /// Test if the given EVT is extended (as opposed to being simple).
135 bool isExtended() const {
136 return !isSimple();
137 }
138
139 /// Return true if this is a FP or a vector FP type.
140 bool isFloatingPoint() const {
141 return isSimple() ? V.isFloatingPoint() : isExtendedFloatingPoint();
142 }
143
144 /// Return true if this is an integer or a vector integer type.
145 bool isInteger() const {
146 return isSimple() ? V.isInteger() : isExtendedInteger();
147 }
148
149 /// Return true if this is an integer, but not a vector.
150 bool isScalarInteger() const {
151 return isSimple() ? V.isScalarInteger() : isExtendedScalarInteger();
152 }
153
154 /// Return true if this is a vector value type.
155 bool isVector() const {
156 return isSimple() ? V.isVector() : isExtendedVector();
4
'?' condition is false
5
Returning value, which participates in a condition later
157 }
158
159 /// Return true if this is a vector type where the runtime
160 /// length is machine dependent
161 bool isScalableVector() const {
162 return isSimple() ? V.isScalableVector() : isExtendedScalableVector();
163 }
164
165 bool isFixedLengthVector() const {
166 return isSimple() ? V.isFixedLengthVector()
167 : isExtendedFixedLengthVector();
168 }
169
170 /// Return true if this is a 16-bit vector type.
171 bool is16BitVector() const {
172 return isSimple() ? V.is16BitVector() : isExtended16BitVector();
173 }
174
175 /// Return true if this is a 32-bit vector type.
176 bool is32BitVector() const {
177 return isSimple() ? V.is32BitVector() : isExtended32BitVector();
178 }
179
180 /// Return true if this is a 64-bit vector type.
181 bool is64BitVector() const {
182 return isSimple() ? V.is64BitVector() : isExtended64BitVector();
183 }
184
185 /// Return true if this is a 128-bit vector type.
186 bool is128BitVector() const {
187 return isSimple() ? V.is128BitVector() : isExtended128BitVector();
188 }
189
190 /// Return true if this is a 256-bit vector type.
191 bool is256BitVector() const {
192 return isSimple() ? V.is256BitVector() : isExtended256BitVector();
193 }
194
195 /// Return true if this is a 512-bit vector type.
196 bool is512BitVector() const {
197 return isSimple() ? V.is512BitVector() : isExtended512BitVector();
198 }
199
200 /// Return true if this is a 1024-bit vector type.
201 bool is1024BitVector() const {
202 return isSimple() ? V.is1024BitVector() : isExtended1024BitVector();
203 }
204
205 /// Return true if this is a 2048-bit vector type.
206 bool is2048BitVector() const {
207 return isSimple() ? V.is2048BitVector() : isExtended2048BitVector();
208 }
209
210 /// Return true if this is an overloaded type for TableGen.
211 bool isOverloaded() const {
212 return (V==MVT::iAny || V==MVT::fAny || V==MVT::vAny || V==MVT::iPTRAny);
213 }
214
215 /// Return true if the bit size is a multiple of 8.
216 bool isByteSized() const {
217 return !isZeroSized() && getSizeInBits().isKnownMultipleOf(8);
218 }
219
220 /// Return true if the size is a power-of-two number of bytes.
221 bool isRound() const {
222 if (isScalableVector())
223 return false;
224 unsigned BitSize = getSizeInBits();
225 return BitSize >= 8 && !(BitSize & (BitSize - 1));
226 }
227
228 /// Return true if this has the same number of bits as VT.
229 bool bitsEq(EVT VT) const {
230 if (EVT::operator==(VT)) return true;
231 return getSizeInBits() == VT.getSizeInBits();
232 }
233
234 /// Return true if we know at compile time this has more bits than VT.
235 bool knownBitsGT(EVT VT) const {
236 return TypeSize::isKnownGT(getSizeInBits(), VT.getSizeInBits());
237 }
238
239 /// Return true if we know at compile time this has more than or the same
240 /// bits as VT.
241 bool knownBitsGE(EVT VT) const {
242 return TypeSize::isKnownGE(getSizeInBits(), VT.getSizeInBits());
243 }
244
245 /// Return true if we know at compile time this has fewer bits than VT.
246 bool knownBitsLT(EVT VT) const {
247 return TypeSize::isKnownLT(getSizeInBits(), VT.getSizeInBits());
248 }
249
250 /// Return true if we know at compile time this has fewer than or the same
251 /// bits as VT.
252 bool knownBitsLE(EVT VT) const {
253 return TypeSize::isKnownLE(getSizeInBits(), VT.getSizeInBits());
254 }
255
256 /// Return true if this has more bits than VT.
257 bool bitsGT(EVT VT) const {
258 if (EVT::operator==(VT)) return false;
259 assert(isScalableVector() == VT.isScalableVector() &&(static_cast <bool> (isScalableVector() == VT.isScalableVector
() && "Comparison between scalable and fixed types") ?
void (0) : __assert_fail ("isScalableVector() == VT.isScalableVector() && \"Comparison between scalable and fixed types\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/ValueTypes.h"
, 260, __extension__ __PRETTY_FUNCTION__))
260 "Comparison between scalable and fixed types")(static_cast <bool> (isScalableVector() == VT.isScalableVector
() && "Comparison between scalable and fixed types") ?
void (0) : __assert_fail ("isScalableVector() == VT.isScalableVector() && \"Comparison between scalable and fixed types\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/ValueTypes.h"
, 260, __extension__ __PRETTY_FUNCTION__))
;
261 return knownBitsGT(VT);
262 }
263
264 /// Return true if this has no less bits than VT.
265 bool bitsGE(EVT VT) const {
266 if (EVT::operator==(VT)) return true;
267 assert(isScalableVector() == VT.isScalableVector() &&(static_cast <bool> (isScalableVector() == VT.isScalableVector
() && "Comparison between scalable and fixed types") ?
void (0) : __assert_fail ("isScalableVector() == VT.isScalableVector() && \"Comparison between scalable and fixed types\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/ValueTypes.h"
, 268, __extension__ __PRETTY_FUNCTION__))
268 "Comparison between scalable and fixed types")(static_cast <bool> (isScalableVector() == VT.isScalableVector
() && "Comparison between scalable and fixed types") ?
void (0) : __assert_fail ("isScalableVector() == VT.isScalableVector() && \"Comparison between scalable and fixed types\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/ValueTypes.h"
, 268, __extension__ __PRETTY_FUNCTION__))
;
269 return knownBitsGE(VT);
270 }
271
272 /// Return true if this has less bits than VT.
273 bool bitsLT(EVT VT) const {
274 if (EVT::operator==(VT)) return false;
275 assert(isScalableVector() == VT.isScalableVector() &&(static_cast <bool> (isScalableVector() == VT.isScalableVector
() && "Comparison between scalable and fixed types") ?
void (0) : __assert_fail ("isScalableVector() == VT.isScalableVector() && \"Comparison between scalable and fixed types\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/ValueTypes.h"
, 276, __extension__ __PRETTY_FUNCTION__))
276 "Comparison between scalable and fixed types")(static_cast <bool> (isScalableVector() == VT.isScalableVector
() && "Comparison between scalable and fixed types") ?
void (0) : __assert_fail ("isScalableVector() == VT.isScalableVector() && \"Comparison between scalable and fixed types\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/ValueTypes.h"
, 276, __extension__ __PRETTY_FUNCTION__))
;
277 return knownBitsLT(VT);
278 }
279
280 /// Return true if this has no more bits than VT.
281 bool bitsLE(EVT VT) const {
282 if (EVT::operator==(VT)) return true;
283 assert(isScalableVector() == VT.isScalableVector() &&(static_cast <bool> (isScalableVector() == VT.isScalableVector
() && "Comparison between scalable and fixed types") ?
void (0) : __assert_fail ("isScalableVector() == VT.isScalableVector() && \"Comparison between scalable and fixed types\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/ValueTypes.h"
, 284, __extension__ __PRETTY_FUNCTION__))
284 "Comparison between scalable and fixed types")(static_cast <bool> (isScalableVector() == VT.isScalableVector
() && "Comparison between scalable and fixed types") ?
void (0) : __assert_fail ("isScalableVector() == VT.isScalableVector() && \"Comparison between scalable and fixed types\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/ValueTypes.h"
, 284, __extension__ __PRETTY_FUNCTION__))
;
285 return knownBitsLE(VT);
286 }
287
288 /// Return the SimpleValueType held in the specified simple EVT.
289 MVT getSimpleVT() const {
290 assert(isSimple() && "Expected a SimpleValueType!")(static_cast <bool> (isSimple() && "Expected a SimpleValueType!"
) ? void (0) : __assert_fail ("isSimple() && \"Expected a SimpleValueType!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/ValueTypes.h"
, 290, __extension__ __PRETTY_FUNCTION__))
;
291 return V;
292 }
293
294 /// If this is a vector type, return the element type, otherwise return
295 /// this.
296 EVT getScalarType() const {
297 return isVector() ? getVectorElementType() : *this;
298 }
299
300 /// Given a vector type, return the type of each element.
301 EVT getVectorElementType() const {
302 assert(isVector() && "Invalid vector type!")(static_cast <bool> (isVector() && "Invalid vector type!"
) ? void (0) : __assert_fail ("isVector() && \"Invalid vector type!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/ValueTypes.h"
, 302, __extension__ __PRETTY_FUNCTION__))
;
303 if (isSimple())
304 return V.getVectorElementType();
305 return getExtendedVectorElementType();
306 }
307
308 /// Given a vector type, return the number of elements it contains.
309 unsigned getVectorNumElements() const {
310 assert(isVector() && "Invalid vector type!")(static_cast <bool> (isVector() && "Invalid vector type!"
) ? void (0) : __assert_fail ("isVector() && \"Invalid vector type!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/ValueTypes.h"
, 310, __extension__ __PRETTY_FUNCTION__))
;
311
312 if (isScalableVector())
313 llvm::reportInvalidSizeRequest(
314 "Possible incorrect use of EVT::getVectorNumElements() for "
315 "scalable vector. Scalable flag may be dropped, use "
316 "EVT::getVectorElementCount() instead");
317
318 return isSimple() ? V.getVectorNumElements()
319 : getExtendedVectorNumElements();
320 }
321
322 // Given a (possibly scalable) vector type, return the ElementCount
323 ElementCount getVectorElementCount() const {
324 assert((isVector()) && "Invalid vector type!")(static_cast <bool> ((isVector()) && "Invalid vector type!"
) ? void (0) : __assert_fail ("(isVector()) && \"Invalid vector type!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/ValueTypes.h"
, 324, __extension__ __PRETTY_FUNCTION__))
;
325 if (isSimple())
326 return V.getVectorElementCount();
327
328 return getExtendedVectorElementCount();
329 }
330
331 /// Given a vector type, return the minimum number of elements it contains.
332 unsigned getVectorMinNumElements() const {
333 return getVectorElementCount().getKnownMinValue();
334 }
335
336 /// Return the size of the specified value type in bits.
337 ///
338 /// If the value type is a scalable vector type, the scalable property will
339 /// be set and the runtime size will be a positive integer multiple of the
340 /// base size.
341 TypeSize getSizeInBits() const {
342 if (isSimple())
343 return V.getSizeInBits();
344 return getExtendedSizeInBits();
345 }
346
347 /// Return the size of the specified fixed width value type in bits. The
348 /// function will assert if the type is scalable.
349 uint64_t getFixedSizeInBits() const {
350 return getSizeInBits().getFixedSize();
351 }
352
353 uint64_t getScalarSizeInBits() const {
354 return getScalarType().getSizeInBits().getFixedSize();
355 }
356
357 /// Return the number of bytes overwritten by a store of the specified value
358 /// type.
359 ///
360 /// If the value type is a scalable vector type, the scalable property will
361 /// be set and the runtime size will be a positive integer multiple of the
362 /// base size.
363 TypeSize getStoreSize() const {
364 TypeSize BaseSize = getSizeInBits();
365 return {(BaseSize.getKnownMinSize() + 7) / 8, BaseSize.isScalable()};
366 }
367
368 /// Return the number of bits overwritten by a store of the specified value
369 /// type.
370 ///
371 /// If the value type is a scalable vector type, the scalable property will
372 /// be set and the runtime size will be a positive integer multiple of the
373 /// base size.
374 TypeSize getStoreSizeInBits() const {
375 return getStoreSize() * 8;
376 }
377
378 /// Rounds the bit-width of the given integer EVT up to the nearest power of
379 /// two (and at least to eight), and returns the integer EVT with that
380 /// number of bits.
381 EVT getRoundIntegerType(LLVMContext &Context) const {
382 assert(isInteger() && !isVector() && "Invalid integer type!")(static_cast <bool> (isInteger() && !isVector()
&& "Invalid integer type!") ? void (0) : __assert_fail
("isInteger() && !isVector() && \"Invalid integer type!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/ValueTypes.h"
, 382, __extension__ __PRETTY_FUNCTION__))
;
383 unsigned BitWidth = getSizeInBits();
384 if (BitWidth <= 8)
385 return EVT(MVT::i8);
386 return getIntegerVT(Context, 1 << Log2_32_Ceil(BitWidth));
387 }
388
389 /// Finds the smallest simple value type that is greater than or equal to
390 /// half the width of this EVT. If no simple value type can be found, an
391 /// extended integer value type of half the size (rounded up) is returned.
392 EVT getHalfSizedIntegerVT(LLVMContext &Context) const {
393 assert(isInteger() && !isVector() && "Invalid integer type!")(static_cast <bool> (isInteger() && !isVector()
&& "Invalid integer type!") ? void (0) : __assert_fail
("isInteger() && !isVector() && \"Invalid integer type!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/ValueTypes.h"
, 393, __extension__ __PRETTY_FUNCTION__))
;
394 unsigned EVTSize = getSizeInBits();
395 for (unsigned IntVT = MVT::FIRST_INTEGER_VALUETYPE;
396 IntVT <= MVT::LAST_INTEGER_VALUETYPE; ++IntVT) {
397 EVT HalfVT = EVT((MVT::SimpleValueType)IntVT);
398 if (HalfVT.getSizeInBits() * 2 >= EVTSize)
399 return HalfVT;
400 }
401 return getIntegerVT(Context, (EVTSize + 1) / 2);
402 }
403
404 /// Return a VT for an integer vector type with the size of the
405 /// elements doubled. The typed returned may be an extended type.
406 EVT widenIntegerVectorElementType(LLVMContext &Context) const {
407 EVT EltVT = getVectorElementType();
408 EltVT = EVT::getIntegerVT(Context, 2 * EltVT.getSizeInBits());
409 return EVT::getVectorVT(Context, EltVT, getVectorElementCount());
410 }
411
412 // Return a VT for a vector type with the same element type but
413 // half the number of elements. The type returned may be an
414 // extended type.
415 EVT getHalfNumVectorElementsVT(LLVMContext &Context) const {
416 EVT EltVT = getVectorElementType();
417 auto EltCnt = getVectorElementCount();
418 assert(EltCnt.isKnownEven() && "Splitting vector, but not in half!")(static_cast <bool> (EltCnt.isKnownEven() && "Splitting vector, but not in half!"
) ? void (0) : __assert_fail ("EltCnt.isKnownEven() && \"Splitting vector, but not in half!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/ValueTypes.h"
, 418, __extension__ __PRETTY_FUNCTION__))
;
419 return EVT::getVectorVT(Context, EltVT, EltCnt.divideCoefficientBy(2));
420 }
421
422 // Return a VT for a vector type with the same element type but
423 // double the number of elements. The type returned may be an
424 // extended type.
425 EVT getDoubleNumVectorElementsVT(LLVMContext &Context) const {
426 EVT EltVT = getVectorElementType();
427 auto EltCnt = getVectorElementCount();
428 return EVT::getVectorVT(Context, EltVT, EltCnt * 2);
429 }
430
431 /// Returns true if the given vector is a power of 2.
432 bool isPow2VectorType() const {
433 unsigned NElts = getVectorMinNumElements();
434 return !(NElts & (NElts - 1));
435 }
436
437 /// Widens the length of the given vector EVT up to the nearest power of 2
438 /// and returns that type.
439 EVT getPow2VectorType(LLVMContext &Context) const {
440 if (!isPow2VectorType()) {
441 ElementCount NElts = getVectorElementCount();
442 unsigned NewMinCount = 1 << Log2_32_Ceil(NElts.getKnownMinValue());
443 NElts = ElementCount::get(NewMinCount, NElts.isScalable());
444 return EVT::getVectorVT(Context, getVectorElementType(), NElts);
445 }
446 else {
447 return *this;
448 }
449 }
450
451 /// This function returns value type as a string, e.g. "i32".
452 std::string getEVTString() const;
453
454 /// This method returns an LLVM type corresponding to the specified EVT.
455 /// For integer types, this returns an unsigned type. Note that this will
456 /// abort for types that cannot be represented.
457 Type *getTypeForEVT(LLVMContext &Context) const;
458
459 /// Return the value type corresponding to the specified type.
460 /// This returns all pointers as iPTR. If HandleUnknown is true, unknown
461 /// types are returned as Other, otherwise they are invalid.
462 static EVT getEVT(Type *Ty, bool HandleUnknown = false);
463
464 intptr_t getRawBits() const {
465 if (isSimple())
466 return V.SimpleTy;
467 else
468 return (intptr_t)(LLVMTy);
469 }
470
471 /// A meaningless but well-behaved order, useful for constructing
472 /// containers.
473 struct compareRawBits {
474 bool operator()(EVT L, EVT R) const {
475 if (L.V.SimpleTy == R.V.SimpleTy)
476 return L.LLVMTy < R.LLVMTy;
477 else
478 return L.V.SimpleTy < R.V.SimpleTy;
479 }
480 };
481
482 private:
483 // Methods for handling the Extended-type case in functions above.
484 // These are all out-of-line to prevent users of this header file
485 // from having a dependency on Type.h.
486 EVT changeExtendedTypeToInteger() const;
487 EVT changeExtendedVectorElementType(EVT EltVT) const;
488 EVT changeExtendedVectorElementTypeToInteger() const;
489 static EVT getExtendedIntegerVT(LLVMContext &C, unsigned BitWidth);
490 static EVT getExtendedVectorVT(LLVMContext &C, EVT VT, unsigned NumElements,
491 bool IsScalable);
492 static EVT getExtendedVectorVT(LLVMContext &Context, EVT VT,
493 ElementCount EC);
494 bool isExtendedFloatingPoint() const LLVM_READONLY__attribute__((__pure__));
495 bool isExtendedInteger() const LLVM_READONLY__attribute__((__pure__));
496 bool isExtendedScalarInteger() const LLVM_READONLY__attribute__((__pure__));
497 bool isExtendedVector() const LLVM_READONLY__attribute__((__pure__));
498 bool isExtended16BitVector() const LLVM_READONLY__attribute__((__pure__));
499 bool isExtended32BitVector() const LLVM_READONLY__attribute__((__pure__));
500 bool isExtended64BitVector() const LLVM_READONLY__attribute__((__pure__));
501 bool isExtended128BitVector() const LLVM_READONLY__attribute__((__pure__));
502 bool isExtended256BitVector() const LLVM_READONLY__attribute__((__pure__));
503 bool isExtended512BitVector() const LLVM_READONLY__attribute__((__pure__));
504 bool isExtended1024BitVector() const LLVM_READONLY__attribute__((__pure__));
505 bool isExtended2048BitVector() const LLVM_READONLY__attribute__((__pure__));
506 bool isExtendedFixedLengthVector() const LLVM_READONLY__attribute__((__pure__));
507 bool isExtendedScalableVector() const LLVM_READONLY__attribute__((__pure__));
508 EVT getExtendedVectorElementType() const;
509 unsigned getExtendedVectorNumElements() const LLVM_READONLY__attribute__((__pure__));
510 ElementCount getExtendedVectorElementCount() const LLVM_READONLY__attribute__((__pure__));
511 TypeSize getExtendedSizeInBits() const LLVM_READONLY__attribute__((__pure__));
512 };
513
514} // end namespace llvm
515
516#endif // LLVM_CODEGEN_VALUETYPES_H

/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h

1//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SDNode class and derived classes, which are used to
10// represent the nodes and operations present in a SelectionDAG. These nodes
11// and operations are machine code level operations, with some similarities to
12// the GCC RTL representation.
13//
14// Clients should include the SelectionDAG.h file instead of this file directly.
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19#define LLVM_CODEGEN_SELECTIONDAGNODES_H
20
21#include "llvm/ADT/APFloat.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/FoldingSet.h"
25#include "llvm/ADT/GraphTraits.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/ilist_node.h"
29#include "llvm/ADT/iterator.h"
30#include "llvm/ADT/iterator_range.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/Register.h"
34#include "llvm/CodeGen/ValueTypes.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DebugLoc.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/Metadata.h"
40#include "llvm/IR/Operator.h"
41#include "llvm/Support/AlignOf.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/MachineValueType.h"
46#include "llvm/Support/TypeSize.h"
47#include <algorithm>
48#include <cassert>
49#include <climits>
50#include <cstddef>
51#include <cstdint>
52#include <cstring>
53#include <iterator>
54#include <string>
55#include <tuple>
56
57namespace llvm {
58
59class APInt;
60class Constant;
61template <typename T> struct DenseMapInfo;
62class GlobalValue;
63class MachineBasicBlock;
64class MachineConstantPoolValue;
65class MCSymbol;
66class raw_ostream;
67class SDNode;
68class SelectionDAG;
69class Type;
70class Value;
71
72void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
73 bool force = false);
74
75/// This represents a list of ValueType's that has been intern'd by
76/// a SelectionDAG. Instances of this simple value class are returned by
77/// SelectionDAG::getVTList(...).
78///
79struct SDVTList {
80 const EVT *VTs;
81 unsigned int NumVTs;
82};
83
84namespace ISD {
85
86 /// Node predicates
87
88/// If N is a BUILD_VECTOR or SPLAT_VECTOR node whose elements are all the
89/// same constant or undefined, return true and return the constant value in
90/// \p SplatValue.
91bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
92
93/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
94/// all of the elements are ~0 or undef. If \p BuildVectorOnly is set to
95/// true, it only checks BUILD_VECTOR.
96bool isConstantSplatVectorAllOnes(const SDNode *N,
97 bool BuildVectorOnly = false);
98
99/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
100/// all of the elements are 0 or undef. If \p BuildVectorOnly is set to true, it
101/// only checks BUILD_VECTOR.
102bool isConstantSplatVectorAllZeros(const SDNode *N,
103 bool BuildVectorOnly = false);
104
105/// Return true if the specified node is a BUILD_VECTOR where all of the
106/// elements are ~0 or undef.
107bool isBuildVectorAllOnes(const SDNode *N);
108
109/// Return true if the specified node is a BUILD_VECTOR where all of the
110/// elements are 0 or undef.
111bool isBuildVectorAllZeros(const SDNode *N);
112
113/// Return true if the specified node is a BUILD_VECTOR node of all
114/// ConstantSDNode or undef.
115bool isBuildVectorOfConstantSDNodes(const SDNode *N);
116
117/// Return true if the specified node is a BUILD_VECTOR node of all
118/// ConstantFPSDNode or undef.
119bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
120
121/// Return true if the node has at least one operand and all operands of the
122/// specified node are ISD::UNDEF.
123bool allOperandsUndef(const SDNode *N);
124
125} // end namespace ISD
126
127//===----------------------------------------------------------------------===//
128/// Unlike LLVM values, Selection DAG nodes may return multiple
129/// values as the result of a computation. Many nodes return multiple values,
130/// from loads (which define a token and a return value) to ADDC (which returns
131/// a result and a carry value), to calls (which may return an arbitrary number
132/// of values).
133///
134/// As such, each use of a SelectionDAG computation must indicate the node that
135/// computes it as well as which return value to use from that node. This pair
136/// of information is represented with the SDValue value type.
137///
138class SDValue {
139 friend struct DenseMapInfo<SDValue>;
140
141 SDNode *Node = nullptr; // The node defining the value we are using.
142 unsigned ResNo = 0; // Which return value of the node we are using.
143
144public:
145 SDValue() = default;
146 SDValue(SDNode *node, unsigned resno);
147
148 /// get the index which selects a specific result in the SDNode
149 unsigned getResNo() const { return ResNo; }
150
151 /// get the SDNode which holds the desired result
152 SDNode *getNode() const { return Node; }
153
154 /// set the SDNode
155 void setNode(SDNode *N) { Node = N; }
156
157 inline SDNode *operator->() const { return Node; }
158
159 bool operator==(const SDValue &O) const {
160 return Node == O.Node && ResNo == O.ResNo;
14
Assuming 'Node' is equal to 'O.Node'
15
Assuming 'ResNo' is equal to 'O.ResNo'
16
Returning the value 1, which participates in a condition later
161 }
162 bool operator!=(const SDValue &O) const {
163 return !operator==(O);
13
Calling 'SDValue::operator=='
17
Returning from 'SDValue::operator=='
18
Returning zero, which participates in a condition later
164 }
165 bool operator<(const SDValue &O) const {
166 return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
167 }
168 explicit operator bool() const {
169 return Node != nullptr;
170 }
171
172 SDValue getValue(unsigned R) const {
173 return SDValue(Node, R);
174 }
175
176 /// Return true if this node is an operand of N.
177 bool isOperandOf(const SDNode *N) const;
178
179 /// Return the ValueType of the referenced return value.
180 inline EVT getValueType() const;
181
182 /// Return the simple ValueType of the referenced return value.
183 MVT getSimpleValueType() const {
184 return getValueType().getSimpleVT();
185 }
186
187 /// Returns the size of the value in bits.
188 ///
189 /// If the value type is a scalable vector type, the scalable property will
190 /// be set and the runtime size will be a positive integer multiple of the
191 /// base size.
192 TypeSize getValueSizeInBits() const {
193 return getValueType().getSizeInBits();
194 }
195
196 uint64_t getScalarValueSizeInBits() const {
197 return getValueType().getScalarType().getFixedSizeInBits();
198 }
199
200 // Forwarding methods - These forward to the corresponding methods in SDNode.
201 inline unsigned getOpcode() const;
202 inline unsigned getNumOperands() const;
203 inline const SDValue &getOperand(unsigned i) const;
204 inline uint64_t getConstantOperandVal(unsigned i) const;
205 inline const APInt &getConstantOperandAPInt(unsigned i) const;
206 inline bool isTargetMemoryOpcode() const;
207 inline bool isTargetOpcode() const;
208 inline bool isMachineOpcode() const;
209 inline bool isUndef() const;
210 inline unsigned getMachineOpcode() const;
211 inline const DebugLoc &getDebugLoc() const;
212 inline void dump() const;
213 inline void dump(const SelectionDAG *G) const;
214 inline void dumpr() const;
215 inline void dumpr(const SelectionDAG *G) const;
216
217 /// Return true if this operand (which must be a chain) reaches the
218 /// specified operand without crossing any side-effecting instructions.
219 /// In practice, this looks through token factors and non-volatile loads.
220 /// In order to remain efficient, this only
221 /// looks a couple of nodes in, it does not do an exhaustive search.
222 bool reachesChainWithoutSideEffects(SDValue Dest,
223 unsigned Depth = 2) const;
224
225 /// Return true if there are no nodes using value ResNo of Node.
226 inline bool use_empty() const;
227
228 /// Return true if there is exactly one node using value ResNo of Node.
229 inline bool hasOneUse() const;
230};
231
232template<> struct DenseMapInfo<SDValue> {
233 static inline SDValue getEmptyKey() {
234 SDValue V;
235 V.ResNo = -1U;
236 return V;
237 }
238
239 static inline SDValue getTombstoneKey() {
240 SDValue V;
241 V.ResNo = -2U;
242 return V;
243 }
244
245 static unsigned getHashValue(const SDValue &Val) {
246 return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
247 (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
248 }
249
250 static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
251 return LHS == RHS;
252 }
253};
254
255/// Allow casting operators to work directly on
256/// SDValues as if they were SDNode*'s.
257template<> struct simplify_type<SDValue> {
258 using SimpleType = SDNode *;
259
260 static SimpleType getSimplifiedValue(SDValue &Val) {
261 return Val.getNode();
262 }
263};
264template<> struct simplify_type<const SDValue> {
265 using SimpleType = /*const*/ SDNode *;
266
267 static SimpleType getSimplifiedValue(const SDValue &Val) {
268 return Val.getNode();
269 }
270};
271
272/// Represents a use of a SDNode. This class holds an SDValue,
273/// which records the SDNode being used and the result number, a
274/// pointer to the SDNode using the value, and Next and Prev pointers,
275/// which link together all the uses of an SDNode.
276///
277class SDUse {
278 /// Val - The value being used.
279 SDValue Val;
280 /// User - The user of this value.
281 SDNode *User = nullptr;
282 /// Prev, Next - Pointers to the uses list of the SDNode referred by
283 /// this operand.
284 SDUse **Prev = nullptr;
285 SDUse *Next = nullptr;
286
287public:
288 SDUse() = default;
289 SDUse(const SDUse &U) = delete;
290 SDUse &operator=(const SDUse &) = delete;
291
292 /// Normally SDUse will just implicitly convert to an SDValue that it holds.
293 operator const SDValue&() const { return Val; }
294
295 /// If implicit conversion to SDValue doesn't work, the get() method returns
296 /// the SDValue.
297 const SDValue &get() const { return Val; }
298
299 /// This returns the SDNode that contains this Use.
300 SDNode *getUser() { return User; }
301
302 /// Get the next SDUse in the use list.
303 SDUse *getNext() const { return Next; }
304
305 /// Convenience function for get().getNode().
306 SDNode *getNode() const { return Val.getNode(); }
307 /// Convenience function for get().getResNo().
308 unsigned getResNo() const { return Val.getResNo(); }
309 /// Convenience function for get().getValueType().
310 EVT getValueType() const { return Val.getValueType(); }
311
312 /// Convenience function for get().operator==
313 bool operator==(const SDValue &V) const {
314 return Val == V;
315 }
316
317 /// Convenience function for get().operator!=
318 bool operator!=(const SDValue &V) const {
319 return Val != V;
320 }
321
322 /// Convenience function for get().operator<
323 bool operator<(const SDValue &V) const {
324 return Val < V;
325 }
326
327private:
328 friend class SelectionDAG;
329 friend class SDNode;
330 // TODO: unfriend HandleSDNode once we fix its operand handling.
331 friend class HandleSDNode;
332
333 void setUser(SDNode *p) { User = p; }
334
335 /// Remove this use from its existing use list, assign it the
336 /// given value, and add it to the new value's node's use list.
337 inline void set(const SDValue &V);
338 /// Like set, but only supports initializing a newly-allocated
339 /// SDUse with a non-null value.
340 inline void setInitial(const SDValue &V);
341 /// Like set, but only sets the Node portion of the value,
342 /// leaving the ResNo portion unmodified.
343 inline void setNode(SDNode *N);
344
345 void addToList(SDUse **List) {
346 Next = *List;
347 if (Next) Next->Prev = &Next;
348 Prev = List;
349 *List = this;
350 }
351
352 void removeFromList() {
353 *Prev = Next;
354 if (Next) Next->Prev = Prev;
355 }
356};
357
358/// simplify_type specializations - Allow casting operators to work directly on
359/// SDValues as if they were SDNode*'s.
360template<> struct simplify_type<SDUse> {
361 using SimpleType = SDNode *;
362
363 static SimpleType getSimplifiedValue(SDUse &Val) {
364 return Val.getNode();
365 }
366};
367
368/// These are IR-level optimization flags that may be propagated to SDNodes.
369/// TODO: This data structure should be shared by the IR optimizer and the
370/// the backend.
371struct SDNodeFlags {
372private:
373 bool NoUnsignedWrap : 1;
374 bool NoSignedWrap : 1;
375 bool Exact : 1;
376 bool NoNaNs : 1;
377 bool NoInfs : 1;
378 bool NoSignedZeros : 1;
379 bool AllowReciprocal : 1;
380 bool AllowContract : 1;
381 bool ApproximateFuncs : 1;
382 bool AllowReassociation : 1;
383
384 // We assume instructions do not raise floating-point exceptions by default,
385 // and only those marked explicitly may do so. We could choose to represent
386 // this via a positive "FPExcept" flags like on the MI level, but having a
387 // negative "NoFPExcept" flag here (that defaults to true) makes the flag
388 // intersection logic more straightforward.
389 bool NoFPExcept : 1;
390
391public:
392 /// Default constructor turns off all optimization flags.
393 SDNodeFlags()
394 : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false),
395 NoInfs(false), NoSignedZeros(false), AllowReciprocal(false),
396 AllowContract(false), ApproximateFuncs(false),
397 AllowReassociation(false), NoFPExcept(false) {}
398
399 /// Propagate the fast-math-flags from an IR FPMathOperator.
400 void copyFMF(const FPMathOperator &FPMO) {
401 setNoNaNs(FPMO.hasNoNaNs());
402 setNoInfs(FPMO.hasNoInfs());
403 setNoSignedZeros(FPMO.hasNoSignedZeros());
404 setAllowReciprocal(FPMO.hasAllowReciprocal());
405 setAllowContract(FPMO.hasAllowContract());
406 setApproximateFuncs(FPMO.hasApproxFunc());
407 setAllowReassociation(FPMO.hasAllowReassoc());
408 }
409
410 // These are mutators for each flag.
411 void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
412 void setNoSignedWrap(bool b) { NoSignedWrap = b; }
413 void setExact(bool b) { Exact = b; }
414 void setNoNaNs(bool b) { NoNaNs = b; }
415 void setNoInfs(bool b) { NoInfs = b; }
416 void setNoSignedZeros(bool b) { NoSignedZeros = b; }
417 void setAllowReciprocal(bool b) { AllowReciprocal = b; }
418 void setAllowContract(bool b) { AllowContract = b; }
419 void setApproximateFuncs(bool b) { ApproximateFuncs = b; }
420 void setAllowReassociation(bool b) { AllowReassociation = b; }
421 void setNoFPExcept(bool b) { NoFPExcept = b; }
422
423 // These are accessors for each flag.
424 bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
425 bool hasNoSignedWrap() const { return NoSignedWrap; }
426 bool hasExact() const { return Exact; }
427 bool hasNoNaNs() const { return NoNaNs; }
428 bool hasNoInfs() const { return NoInfs; }
429 bool hasNoSignedZeros() const { return NoSignedZeros; }
430 bool hasAllowReciprocal() const { return AllowReciprocal; }
431 bool hasAllowContract() const { return AllowContract; }
432 bool hasApproximateFuncs() const { return ApproximateFuncs; }
433 bool hasAllowReassociation() const { return AllowReassociation; }
434 bool hasNoFPExcept() const { return NoFPExcept; }
435
436 /// Clear any flags in this flag set that aren't also set in Flags. All
437 /// flags will be cleared if Flags are undefined.
438 void intersectWith(const SDNodeFlags Flags) {
439 NoUnsignedWrap &= Flags.NoUnsignedWrap;
440 NoSignedWrap &= Flags.NoSignedWrap;
441 Exact &= Flags.Exact;
442 NoNaNs &= Flags.NoNaNs;
443 NoInfs &= Flags.NoInfs;
444 NoSignedZeros &= Flags.NoSignedZeros;
445 AllowReciprocal &= Flags.AllowReciprocal;
446 AllowContract &= Flags.AllowContract;
447 ApproximateFuncs &= Flags.ApproximateFuncs;
448 AllowReassociation &= Flags.AllowReassociation;
449 NoFPExcept &= Flags.NoFPExcept;
450 }
451};
452
453/// Represents one node in the SelectionDAG.
454///
455class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
456private:
457 /// The operation that this node performs.
458 int16_t NodeType;
459
460protected:
461 // We define a set of mini-helper classes to help us interpret the bits in our
462 // SubclassData. These are designed to fit within a uint16_t so they pack
463 // with NodeType.
464
465#if defined(_AIX) && (!defined(__GNUC__4) || defined(__clang__1))
466// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
467// and give the `pack` pragma push semantics.
468#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")pack(2)
469#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")pack(pop)
470#else
471#define BEGIN_TWO_BYTE_PACK()
472#define END_TWO_BYTE_PACK()
473#endif
474
475BEGIN_TWO_BYTE_PACK()
476 class SDNodeBitfields {
477 friend class SDNode;
478 friend class MemIntrinsicSDNode;
479 friend class MemSDNode;
480 friend class SelectionDAG;
481
482 uint16_t HasDebugValue : 1;
483 uint16_t IsMemIntrinsic : 1;
484 uint16_t IsDivergent : 1;
485 };
486 enum { NumSDNodeBits = 3 };
487
488 class ConstantSDNodeBitfields {
489 friend class ConstantSDNode;
490
491 uint16_t : NumSDNodeBits;
492
493 uint16_t IsOpaque : 1;
494 };
495
496 class MemSDNodeBitfields {
497 friend class MemSDNode;
498 friend class MemIntrinsicSDNode;
499 friend class AtomicSDNode;
500
501 uint16_t : NumSDNodeBits;
502
503 uint16_t IsVolatile : 1;
504 uint16_t IsNonTemporal : 1;
505 uint16_t IsDereferenceable : 1;
506 uint16_t IsInvariant : 1;
507 };
508 enum { NumMemSDNodeBits = NumSDNodeBits + 4 };
509
510 class LSBaseSDNodeBitfields {
511 friend class LSBaseSDNode;
512 friend class MaskedLoadStoreSDNode;
513 friend class MaskedGatherScatterSDNode;
514
515 uint16_t : NumMemSDNodeBits;
516
517 // This storage is shared between disparate class hierarchies to hold an
518 // enumeration specific to the class hierarchy in use.
519 // LSBaseSDNode => enum ISD::MemIndexedMode
520 // MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode
521 // MaskedGatherScatterSDNode => enum ISD::MemIndexType
522 uint16_t AddressingMode : 3;
523 };
  // Number of subclass-data bits consumed once LSBaseSDNodeBitfields'
  // 3-bit AddressingMode has been appended after the MemSDNode bits.
524 enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };
525
  // Bits specific to load-like nodes; layered on top of the LSBase bits
  // (the anonymous bitfield below skips over them).
526 class LoadSDNodeBitfields {
527 friend class LoadSDNode;
528 friend class MaskedLoadSDNode;
529 friend class MaskedGatherSDNode;
530
531 uint16_t : NumLSBaseSDNodeBits;
532
533 uint16_t ExtTy : 2; // enum ISD::LoadExtType
534 uint16_t IsExpanding : 1;
535 };
536
  // Bits specific to store-like nodes; same layering scheme as above.
537 class StoreSDNodeBitfields {
538 friend class StoreSDNode;
539 friend class MaskedStoreSDNode;
540 friend class MaskedScatterSDNode;
541
542 uint16_t : NumLSBaseSDNodeBits;
543
544 uint16_t IsTruncating : 1;
545 uint16_t IsCompressing : 1;
546 };
547
  // All subclass bitfield views share the same 16 bits of storage;
  // RawSDNodeBits gives byte-wise access for hashing/copying.
548 union {
549 char RawSDNodeBits[sizeof(uint16_t)];
550 SDNodeBitfields SDNodeBits;
551 ConstantSDNodeBitfields ConstantSDNodeBits;
552 MemSDNodeBitfields MemSDNodeBits;
553 LSBaseSDNodeBitfields LSBaseSDNodeBits;
554 LoadSDNodeBitfields LoadSDNodeBits;
555 StoreSDNodeBitfields StoreSDNodeBits;
556 };
557END_TWO_BYTE_PACK()
558#undef BEGIN_TWO_BYTE_PACK
559#undef END_TWO_BYTE_PACK
560
561 // RawSDNodeBits must cover the entirety of the union. This means that all of
562 // the union's members must have size <= RawSDNodeBits. We write the RHS as
563 // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
564 static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
565 static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
566 static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
567 static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
568 static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
569 static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
570
571private:
572 friend class SelectionDAG;
573 // TODO: unfriend HandleSDNode once we fix its operand handling.
574 friend class HandleSDNode;
575
576 /// Unique id per SDNode in the DAG.
577 int NodeId = -1;
578
579 /// The values that are used by this operation.
580 SDUse *OperandList = nullptr;
581
582 /// The types of the values this node defines. SDNode's may
583 /// define multiple values simultaneously.
  // NOTE: ValueList/NumValues/IROrder carry no default initializer here;
  // they are set unconditionally by the protected SDNode constructor below.
584 const EVT *ValueList;
585
586 /// List of uses for this SDNode.
587 SDUse *UseList = nullptr;
588
589 /// The number of entries in the Operand/Value list.
590 unsigned short NumOperands = 0;
591 unsigned short NumValues;
592
593 // The ordering of the SDNodes. It roughly corresponds to the ordering of the
594 // original LLVM instructions.
595 // This is used for turning off scheduling, because we'll forgo
596 // the normal scheduling algorithms and output the instructions according to
597 // this ordering.
598 unsigned IROrder;
599
600 /// Source line information.
601 DebugLoc debugLoc;
602
603 /// Return a pointer to the specified value type.
604 static const EVT *getValueTypeList(EVT VT);
605
606 SDNodeFlags Flags;
607
608public:
609 /// Unique and persistent id per SDNode in the DAG.
610 /// Used for debug printing.
611 uint16_t PersistentId;
612
613 //===--------------------------------------------------------------------===//
614 // Accessors
615 //
616
617 /// Return the SelectionDAG opcode value for this node. For
618 /// pre-isel nodes (those for which isMachineOpcode returns false), these
619 /// are the opcode values in the ISD and <target>ISD namespaces. For
620 /// post-isel opcodes, see getMachineOpcode.
  // The unsigned short cast strips the sign: machine opcodes are stored
  // bitwise-complemented (negative) in NodeType (see getMachineOpcode).
621 unsigned getOpcode() const { return (unsigned short)NodeType; }
622
623 /// Test if this node has a target-specific opcode (in the
624 /// \<target\>ISD namespace).
625 bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
626
627 /// Test if this node has a target-specific opcode that may raise
628 /// FP exceptions (in the \<target\>ISD namespace and greater than
629 /// FIRST_TARGET_STRICTFP_OPCODE). Note that all target memory
630 /// opcode are currently automatically considered to possibly raise
631 /// FP exceptions as well.
632 bool isTargetStrictFPOpcode() const {
633 return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE;
634 }
635
636 /// Test if this node has a target-specific
637 /// memory-referencing opcode (in the \<target\>ISD namespace and
638 /// greater than FIRST_TARGET_MEMORY_OPCODE).
639 bool isTargetMemoryOpcode() const {
640 return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
641 }
642
643 /// Return true if the type of the node type undefined.
644 bool isUndef() const { return NodeType == ISD::UNDEF; }
645
646 /// Test if this node is a memory intrinsic (with valid pointer information).
647 /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
648 /// non-memory intrinsics (with chains) that are not really instances of
649 /// MemSDNode. For such nodes, we need some extra state to determine the
650 /// proper classof relationship.
651 bool isMemIntrinsic() const {
652 return (NodeType == ISD::INTRINSIC_W_CHAIN ||
653 NodeType == ISD::INTRINSIC_VOID) &&
654 SDNodeBits.IsMemIntrinsic;
655 }
656
657 /// Test if this node is a strict floating point pseudo-op.
  // The case labels for every constrained FP op are generated by the
  // DAG_INSTRUCTION X-macro expansion from ConstrainedOps.def.
658 bool isStrictFPOpcode() {
659 switch (NodeType) {
660 default:
661 return false;
662 case ISD::STRICT_FP16_TO_FP:
663 case ISD::STRICT_FP_TO_FP16:
664#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
665 case ISD::STRICT_##DAGN:
666#include "llvm/IR/ConstrainedOps.def"
667 return true;
668 }
669 }
670
671 /// Test if this node has a post-isel opcode, directly
672 /// corresponding to a MachineInstr opcode.
673 bool isMachineOpcode() const { return NodeType < 0; }
674
675 /// This may only be called if isMachineOpcode returns
676 /// true. It returns the MachineInstr opcode value that the node's opcode
677 /// corresponds to.
678 unsigned getMachineOpcode() const {
679 assert(isMachineOpcode() && "Not a MachineInstr opcode!")(static_cast <bool> (isMachineOpcode() && "Not a MachineInstr opcode!"
) ? void (0) : __assert_fail ("isMachineOpcode() && \"Not a MachineInstr opcode!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 679, __extension__ __PRETTY_FUNCTION__))
;
  // Machine opcodes are stored as the bitwise complement (always negative),
  // which is how isMachineOpcode() distinguishes them.
680 return ~NodeType;
681 }
682
683 bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
684 void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }
685
686 bool isDivergent() const { return SDNodeBits.IsDivergent; }
687
688 /// Return true if there are no uses of this node.
689 bool use_empty() const { return UseList == nullptr; }
690
691 /// Return true if there is exactly one use of this node.
692 bool hasOneUse() const { return hasSingleElement(uses()); }
693
694 /// Return the number of uses of this node. This method takes
695 /// time proportional to the number of uses.
696 size_t use_size() const { return std::distance(use_begin(), use_end()); }
697
698 /// Return the unique node id.
699 int getNodeId() const { return NodeId; }
700
701 /// Set unique node id.
702 void setNodeId(int Id) { NodeId = Id; }
703
704 /// Return the node ordering.
705 unsigned getIROrder() const { return IROrder; }
706
707 /// Set the node ordering.
708 void setIROrder(unsigned Order) { IROrder = Order; }
709
710 /// Return the source location info.
711 const DebugLoc &getDebugLoc() const { return debugLoc; }
712
713 /// Set source location info. Try to avoid this, putting
714 /// it in the constructor is preferable.
715 void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
716
717 /// This class provides iterator support for SDUse
718 /// operands that use a specific SDNode.
  // Forward iterator over the node's singly-linked SDUse list; a user that
  // references this node through several operands is visited once per use.
719 class use_iterator {
720 friend class SDNode;
721
722 SDUse *Op = nullptr;
723
724 explicit use_iterator(SDUse *op) : Op(op) {}
725
726 public:
727 using iterator_category = std::forward_iterator_tag;
728 using value_type = SDUse;
729 using difference_type = std::ptrdiff_t;
730 using pointer = value_type *;
731 using reference = value_type &;
732
733 use_iterator() = default;
734 use_iterator(const use_iterator &I) : Op(I.Op) {}
735
736 bool operator==(const use_iterator &x) const {
737 return Op == x.Op;
738 }
739 bool operator!=(const use_iterator &x) const {
740 return !operator==(x);
741 }
742
743 /// Return true if this iterator is at the end of uses list.
744 bool atEnd() const { return Op == nullptr; }
745
746 // Iterator traversal: forward iteration only.
747 use_iterator &operator++() { // Preincrement
748 assert(Op && "Cannot increment end iterator!")(static_cast <bool> (Op && "Cannot increment end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot increment end iterator!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 748, __extension__ __PRETTY_FUNCTION__))
;
749 Op = Op->getNext();
750 return *this;
751 }
752
753 use_iterator operator++(int) { // Postincrement
754 use_iterator tmp = *this; ++*this; return tmp;
755 }
756
757 /// Retrieve a pointer to the current user node.
  // Note: dereferencing yields the *user* SDNode, not the SDUse; use
  // getUse() to access the SDUse itself.
758 SDNode *operator*() const {
759 assert(Op && "Cannot dereference end iterator!")(static_cast <bool> (Op && "Cannot dereference end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 759, __extension__ __PRETTY_FUNCTION__))
;
760 return Op->getUser();
761 }
762
763 SDNode *operator->() const { return operator*(); }
764
765 SDUse &getUse() const { return *Op; }
766
767 /// Retrieve the operand # of this use in its user.
  // Computed by pointer arithmetic: SDUse objects live contiguously in the
  // user's OperandList array.
768 unsigned getOperandNo() const {
769 assert(Op && "Cannot dereference end iterator!")(static_cast <bool> (Op && "Cannot dereference end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 769, __extension__ __PRETTY_FUNCTION__))
;
770 return (unsigned)(Op - Op->getUser()->OperandList);
771 }
772 };
773
774 /// Provide iteration support to walk over all uses of an SDNode.
775 use_iterator use_begin() const {
776 return use_iterator(UseList);
777 }
778
779 static use_iterator use_end() { return use_iterator(nullptr); }
780
781 inline iterator_range<use_iterator> uses() {
782 return make_range(use_begin(), use_end());
783 }
784 inline iterator_range<use_iterator> uses() const {
785 return make_range(use_begin(), use_end());
786 }
787
788 /// Return true if there are exactly NUSES uses of the indicated value.
789 /// This method ignores uses of other values defined by this operation.
790 bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;
791
792 /// Return true if there are any use of the indicated value.
793 /// This method ignores uses of other values defined by this operation.
794 bool hasAnyUseOfValue(unsigned Value) const;
795
796 /// Return true if this node is the only use of N.
797 bool isOnlyUserOf(const SDNode *N) const;
798
799 /// Return true if this node is an operand of N.
800 bool isOperandOf(const SDNode *N) const;
801
802 /// Return true if this node is a predecessor of N.
803 /// NOTE: Implemented on top of hasPredecessor and every bit as
804 /// expensive. Use carefully.
  // Simply the reversed query: "this precedes N" == "N has this as a
  // predecessor".
805 bool isPredecessorOf(const SDNode *N) const {
806 return N->hasPredecessor(this);
807 }
808
809 /// Return true if N is a predecessor of this node.
810 /// N is either an operand of this node, or can be reached by recursively
811 /// traversing up the operands.
812 /// NOTE: This is an expensive method. Use it carefully.
813 bool hasPredecessor(const SDNode *N) const;
814
815 /// Returns true if N is a predecessor of any node in Worklist. This
816 /// helper keeps Visited and Worklist sets externally to allow unions
817 /// searches to be performed in parallel, caching of results across
818 /// queries and incremental addition to Worklist. Stops early if N is
819 /// found but will resume. Remember to clear Visited and Worklists
820 /// if DAG changes. MaxSteps gives a maximum number of nodes to visit before
821 /// giving up. The TopologicalPrune flag signals that positive NodeIds are
822 /// topologically ordered (Operands have strictly smaller node id) and search
823 /// can be pruned leveraging this.
824 static bool hasPredecessorHelper(const SDNode *N,
825 SmallPtrSetImpl<const SDNode *> &Visited,
826 SmallVectorImpl<const SDNode *> &Worklist,
827 unsigned int MaxSteps = 0,
828 bool TopologicalPrune = false) {
829 SmallVector<const SDNode *, 8> DeferredNodes;
830 if (Visited.count(N))
831 return true;
832
833 // Node Id's are assigned in three places: As a topological
834 // ordering (> 0), during legalization (results in values set to
835 // 0), new nodes (set to -1). If N has a topolgical id then we
836 // know that all nodes with ids smaller than it cannot be
837 // successors and we need not check them. Filter out all node
838 // that can't be matches. We add them to the worklist before exit
839 // in case of multiple calls. Note that during selection the topological id
840 // may be violated if a node's predecessor is selected before it. We mark
841 // this at selection negating the id of unselected successors and
842 // restricting topological pruning to positive ids.
843
844 int NId = N->getNodeId();
845 // If we Invalidated the Id, reconstruct original NId.
846 if (NId < -1)
847 NId = -(NId + 1);
848
849 bool Found = false;
850 while (!Worklist.empty()) {
851 const SDNode *M = Worklist.pop_back_val();
852 int MId = M->getNodeId();
853 if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
854 (MId > 0) && (MId < NId)) {
855 DeferredNodes.push_back(M);
856 continue;
857 }
858 for (const SDValue &OpV : M->op_values()) {
859 SDNode *Op = OpV.getNode();
860 if (Visited.insert(Op).second)
861 Worklist.push_back(Op);
862 if (Op == N)
863 Found = true;
864 }
865 if (Found)
866 break;
867 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
868 break;
869 }
870 // Push deferred nodes back on worklist.
871 Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
872 // If we bailed early, conservatively return found.
873 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
874 return true;
875 return Found;
876 }
877
878 /// Return true if all the users of N are contained in Nodes.
879 /// NOTE: Requires at least one match, but doesn't require them all.
880 static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);
881
882 /// Return the number of values used by this operation.
883 unsigned getNumOperands() const { return NumOperands; }
884
885 /// Return the maximum number of operands that a SDNode can hold.
  // Bounded by the width of the NumOperands member (unsigned short).
886 static constexpr size_t getMaxNumOperands() {
887 return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
888 }
889
890 /// Helper method returns the integer value of a ConstantSDNode operand.
891 inline uint64_t getConstantOperandVal(unsigned Num) const;
892
893 /// Helper method returns the APInt of a ConstantSDNode operand.
894 inline const APInt &getConstantOperandAPInt(unsigned Num) const;
895
  /// Return the Num'th operand; Num must be in range.
896 const SDValue &getOperand(unsigned Num) const {
897 assert(Num < NumOperands && "Invalid child # of SDNode!")(static_cast <bool> (Num < NumOperands && "Invalid child # of SDNode!"
) ? void (0) : __assert_fail ("Num < NumOperands && \"Invalid child # of SDNode!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 897, __extension__ __PRETTY_FUNCTION__))
;
898 return OperandList[Num];
899 }
900
901 using op_iterator = SDUse *;
902
903 op_iterator op_begin() const { return OperandList; }
904 op_iterator op_end() const { return OperandList+NumOperands; }
905 ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
906
907 /// Iterator for directly iterating over the operand SDValue's.
908 struct value_op_iterator
909 : iterator_adaptor_base<value_op_iterator, op_iterator,
910 std::random_access_iterator_tag, SDValue,
911 ptrdiff_t, value_op_iterator *,
912 value_op_iterator *> {
913 explicit value_op_iterator(SDUse *U = nullptr)
914 : iterator_adaptor_base(U) {}
915
  // Adapt the underlying SDUse iterator to yield the SDValue it holds.
916 const SDValue &operator*() const { return I->get(); }
917 };
918
919 iterator_range<value_op_iterator> op_values() const {
920 return make_range(value_op_iterator(op_begin()),
921 value_op_iterator(op_end()));
922 }
923
924 SDVTList getVTList() const {
925 SDVTList X = { ValueList, NumValues };
926 return X;
927 }
928
929 /// If this node has a glue operand, return the node
930 /// to which the glue operand points. Otherwise return NULL.
931 SDNode *getGluedNode() const {
932 if (getNumOperands() != 0 &&
933 getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
934 return getOperand(getNumOperands()-1).getNode();
935 return nullptr;
936 }
937
938 /// If this node has a glue value with a user, return
939 /// the user (there is at most one). Otherwise return NULL.
940 SDNode *getGluedUser() const {
941 for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
942 if (UI.getUse().get().getValueType() == MVT::Glue)
943 return *UI;
944 return nullptr;
945 }
946
947 SDNodeFlags getFlags() const { return Flags; }
948 void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }
949
950 /// Clear any flags in this node that aren't also set in Flags.
951 /// If Flags is not in a defined state then this has no effect.
952 void intersectFlagsWith(const SDNodeFlags Flags);
953
954 /// Return the number of values defined/returned by this operator.
955 unsigned getNumValues() const { return NumValues; }
956
957 /// Return the type of a specified result.
958 EVT getValueType(unsigned ResNo) const {
959 assert(ResNo < NumValues && "Illegal result number!")(static_cast <bool> (ResNo < NumValues && "Illegal result number!"
) ? void (0) : __assert_fail ("ResNo < NumValues && \"Illegal result number!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 959, __extension__ __PRETTY_FUNCTION__))
;
960 return ValueList[ResNo];
961 }
962
963 /// Return the type of a specified result as a simple type.
964 MVT getSimpleValueType(unsigned ResNo) const {
965 return getValueType(ResNo).getSimpleVT();
966 }
967
968 /// Returns MVT::getSizeInBits(getValueType(ResNo)).
969 ///
970 /// If the value type is a scalable vector type, the scalable property will
971 /// be set and the runtime size will be a positive integer multiple of the
972 /// base size.
973 TypeSize getValueSizeInBits(unsigned ResNo) const {
974 return getValueType(ResNo).getSizeInBits();
975 }
976
977 using value_iterator = const EVT *;
978
  /// Iterate over the EVTs of all results this node defines.
979 value_iterator value_begin() const { return ValueList; }
980 value_iterator value_end() const { return ValueList+NumValues; }
981 iterator_range<value_iterator> values() const {
982 return llvm::make_range(value_begin(), value_end());
983 }
984
985 /// Return the opcode of this operation for printing.
986 std::string getOperationName(const SelectionDAG *G = nullptr) const;
987 static const char* getIndexedModeName(ISD::MemIndexedMode AM);
988 void print_types(raw_ostream &OS, const SelectionDAG *G) const;
989 void print_details(raw_ostream &OS, const SelectionDAG *G) const;
990 void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
991 void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
992
993 /// Print a SelectionDAG node and all children down to
994 /// the leaves. The given SelectionDAG allows target-specific nodes
995 /// to be printed in human-readable form. Unlike printr, this will
996 /// print the whole DAG, including children that appear multiple
997 /// times.
998 ///
999 void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
1000
1001 /// Print a SelectionDAG node and children up to
1002 /// depth "depth." The given SelectionDAG allows target-specific
1003 /// nodes to be printed in human-readable form. Unlike printr, this
1004 /// will print children that appear multiple times wherever they are
1005 /// used.
1006 ///
1007 void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
1008 unsigned depth = 100) const;
1009
1010 /// Dump this node, for debugging.
1011 void dump() const;
1012
1013 /// Dump (recursively) this node and its use-def subgraph.
1014 void dumpr() const;
1015
1016 /// Dump this node, for debugging.
1017 /// The given SelectionDAG allows target-specific nodes to be printed
1018 /// in human-readable form.
1019 void dump(const SelectionDAG *G) const;
1020
1021 /// Dump (recursively) this node and its use-def subgraph.
1022 /// The given SelectionDAG allows target-specific nodes to be printed
1023 /// in human-readable form.
1024 void dumpr(const SelectionDAG *G) const;
1025
1026 /// printrFull to dbgs(). The given SelectionDAG allows
1027 /// target-specific nodes to be printed in human-readable form.
1028 /// Unlike dumpr, this will print the whole DAG, including children
1029 /// that appear multiple times.
1030 void dumprFull(const SelectionDAG *G = nullptr) const;
1031
1032 /// printrWithDepth to dbgs(). The given
1033 /// SelectionDAG allows target-specific nodes to be printed in
1034 /// human-readable form. Unlike dumpr, this will print children
1035 /// that appear multiple times wherever they are used.
1036 ///
1037 void dumprWithDepth(const SelectionDAG *G = nullptr,
1038 unsigned depth = 100) const;
1039
1040 /// Gather unique data for the node.
1041 void Profile(FoldingSetNodeID &ID) const;
1042
1043 /// This method should only be used by the SDUse class.
  // Prepends U to this node's use list.
1044 void addUse(SDUse &U) { U.addToList(&UseList); }
1045
1046protected:
  /// Build a single-result SDVTList for VT, backed by uniqued storage.
1047 static SDVTList getSDVTList(EVT VT) {
1048 SDVTList Ret = { getValueTypeList(VT), 1 };
1049 return Ret;
1050 }
1051
1052 /// Create an SDNode.
1053 ///
1054 /// SDNodes are created without any operands, and never own the operand
1055 /// storage. To add operands, see SelectionDAG::createOperands.
1056 SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
1057 : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
1058 IROrder(Order), debugLoc(std::move(dl)) {
  // Zero the shared subclass-bits union so every bitfield view starts clean.
1059 memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
1060 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor")(static_cast <bool> (debugLoc.hasTrivialDestructor() &&
"Expected trivial destructor") ? void (0) : __assert_fail ("debugLoc.hasTrivialDestructor() && \"Expected trivial destructor\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1060, __extension__ __PRETTY_FUNCTION__))
;
1061 assert(NumValues == VTs.NumVTs &&(static_cast <bool> (NumValues == VTs.NumVTs &&
"NumValues wasn't wide enough for its operands!") ? void (0)
: __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1062, __extension__ __PRETTY_FUNCTION__))
1062 "NumValues wasn't wide enough for its operands!")(static_cast <bool> (NumValues == VTs.NumVTs &&
"NumValues wasn't wide enough for its operands!") ? void (0)
: __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1062, __extension__ __PRETTY_FUNCTION__))
;
1063 }
1064
1065 /// Release the operands and set this node to have zero operands.
1066 void DropOperands();
1067};
1068
1069/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
1070/// into SDNode creation functions.
1071/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
1072/// from the original Instruction, and IROrder is the ordinal position of
1073/// the instruction.
1074/// When an SDNode is created after the DAG is being built, both DebugLoc and
1075/// the IROrder are propagated from the original SDNode.
1076/// So SDLoc class provides two constructors besides the default one, one to
1077/// be used by the DAGBuilder, the other to be used by others.
1078class SDLoc {
1079private:
1080 DebugLoc DL;
1081 int IROrder = 0;
1082
1083public:
1084 SDLoc() = default;
  /// Propagate location info from an existing node (or a value thereof).
1085 SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
1086 SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
  /// DAGBuilder form: take the DebugLoc from the IR instruction (which may
  /// be null) and the caller-supplied ordinal position.
1087 SDLoc(const Instruction *I, int Order) : IROrder(Order) {
1088 assert(Order >= 0 && "bad IROrder")(static_cast <bool> (Order >= 0 && "bad IROrder"
) ? void (0) : __assert_fail ("Order >= 0 && \"bad IROrder\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1088, __extension__ __PRETTY_FUNCTION__))
;
1089 if (I)
1090 DL = I->getDebugLoc();
1091 }
1092
1093 unsigned getIROrder() const { return IROrder; }
1094 const DebugLoc &getDebugLoc() const { return DL; }
1095};
1096
1097// Define inline functions from the SDValue class.
1098
  // Construct an SDValue referring to result #resno of node. ResNo 0 is
  // deliberately exempt from validation (see the comment below).
1099inline SDValue::SDValue(SDNode *node, unsigned resno)
1100 : Node(node), ResNo(resno) {
1101 // Explicitly check for !ResNo to avoid use-after-free, because there are
1102 // callers that use SDValue(N, 0) with a deleted N to indicate successful
1103 // combines.
1104 assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&(static_cast <bool> ((!Node || !ResNo || ResNo < Node
->getNumValues()) && "Invalid result number for the given node!"
) ? void (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1105, __extension__ __PRETTY_FUNCTION__))
1105 "Invalid result number for the given node!")(static_cast <bool> ((!Node || !ResNo || ResNo < Node
->getNumValues()) && "Invalid result number for the given node!"
) ? void (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1105, __extension__ __PRETTY_FUNCTION__))
;
1106 assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.")(static_cast <bool> (ResNo < -2U && "Cannot use result numbers reserved for DenseMaps."
) ? void (0) : __assert_fail ("ResNo < -2U && \"Cannot use result numbers reserved for DenseMaps.\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1106, __extension__ __PRETTY_FUNCTION__))
;
1107}
1108
  // The remaining SDValue members simply forward to the underlying SDNode,
  // passing ResNo where the query is per-result.
1109inline unsigned SDValue::getOpcode() const {
1110 return Node->getOpcode();
1111}
1112
1113inline EVT SDValue::getValueType() const {
1114 return Node->getValueType(ResNo);
1115}
1116
1117inline unsigned SDValue::getNumOperands() const {
1118 return Node->getNumOperands();
1119}
1120
1121inline const SDValue &SDValue::getOperand(unsigned i) const {
1122 return Node->getOperand(i);
1123}
1124
1125inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
1126 return Node->getConstantOperandVal(i);
1127}
1128
1129inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
1130 return Node->getConstantOperandAPInt(i);
1131}
1132
1133inline bool SDValue::isTargetOpcode() const {
1134 return Node->isTargetOpcode();
1135}
1136
1137inline bool SDValue::isTargetMemoryOpcode() const {
1138 return Node->isTargetMemoryOpcode();
1139}
1140
1141inline bool SDValue::isMachineOpcode() const {
1142 return Node->isMachineOpcode();
1143}
1144
1145inline unsigned SDValue::getMachineOpcode() const {
1146 return Node->getMachineOpcode();
1147}
1148
1149inline bool SDValue::isUndef() const {
1150 return Node->isUndef();
1151}
1152
  // Note: these two are per-result queries, unlike SDNode::use_empty /
  // hasOneUse which consider uses of any result.
1153inline bool SDValue::use_empty() const {
1154 return !Node->hasAnyUseOfValue(ResNo);
1155}
1156
1157inline bool SDValue::hasOneUse() const {
1158 return Node->hasNUsesOfValue(1, ResNo);
1159}
1160
1161inline const DebugLoc &SDValue::getDebugLoc() const {
1162 return Node->getDebugLoc();
1163}
1164
1165inline void SDValue::dump() const {
1166 return Node->dump();
1167}
1168
1169inline void SDValue::dump(const SelectionDAG *G) const {
1170 return Node->dump(G);
1171}
1172
1173inline void SDValue::dumpr() const {
1174 return Node->dumpr();
1175}
1176
1177inline void SDValue::dumpr(const SelectionDAG *G) const {
1178 return Node->dumpr(G);
1179}
1180
1181// Define inline functions from the SDUse class.
1182
1183inline void SDUse::set(const SDValue &V) {
1184 if (Val.getNode()) removeFromList();
1185 Val = V;
1186 if (V.getNode()) V.getNode()->addUse(*this);
1187}
1188
1189inline void SDUse::setInitial(const SDValue &V) {
1190 Val = V;
1191 V.getNode()->addUse(*this);
1192}
1193
1194inline void SDUse::setNode(SDNode *N) {
1195 if (Val.getNode()) removeFromList();
1196 Val.setNode(N);
1197 if (N) N->addUse(*this);
1198}
1199
1200/// This class is used to form a handle around another node that
1201/// is persistent and is updated across invocations of replaceAllUsesWith on its
1202/// operand. This node should be directly created by end-users and not added to
1203/// the AllNodes list.
1204class HandleSDNode : public SDNode {
1205 SDUse Op;
1206
1207public:
1208 explicit HandleSDNode(SDValue X)
1209 : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
1210 // HandleSDNodes are never inserted into the DAG, so they won't be
1211 // auto-numbered. Use ID 65535 as a sentinel.
1212 PersistentId = 0xffff;
1213
1214 // Manually set up the operand list. This node type is special in that it's
1215 // always stack allocated and SelectionDAG does not manage its operands.
1216 // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
1217 // be so special.
  // Order matters: the user must be set before setInitial links the use
  // onto X's use list.
1218 Op.setUser(this);
1219 Op.setInitial(X);
1220 NumOperands = 1;
1221 OperandList = &Op;
1222 }
1223 ~HandleSDNode();
1224
  /// Current value of the handle; updated by RAUW on the wrapped node.
1225 const SDValue &getValue() const { return Op; }
1226};
1227
  /// SDNode for ISD::ADDRSPACECAST, carrying the source and destination
  /// address spaces of the pointer cast.
1228class AddrSpaceCastSDNode : public SDNode {
1229private:
1230 unsigned SrcAddrSpace;
1231 unsigned DestAddrSpace;
1232
1233public:
1234 AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
1235 unsigned SrcAS, unsigned DestAS);
1236
1237 unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
1238 unsigned getDestAddressSpace() const { return DestAddrSpace; }
1239
  // Methods to support isa and dyn_cast.
1240 static bool classof(const SDNode *N) {
1241 return N->getOpcode() == ISD::ADDRSPACECAST;
1242 }
1243};
1244
1245/// This is an abstract virtual class for memory operations.
1246class MemSDNode : public SDNode {
1247private:
1248 // VT of in-memory value.
1249 EVT MemoryVT;
1250
1251protected:
1252 /// Memory reference information.
1253 MachineMemOperand *MMO;
1254
1255public:
1256 MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
1257 EVT memvt, MachineMemOperand *MMO);
1258
1259 bool readMem() const { return MMO->isLoad(); }
1260 bool writeMem() const { return MMO->isStore(); }
1261
1262 /// Returns alignment and volatility of the memory access
1263 Align getOriginalAlign() const { return MMO->getBaseAlign(); }
1264 Align getAlign() const { return MMO->getAlign(); }
1265 // FIXME: Remove once transition to getAlign is over.
1266 unsigned getAlignment() const { return MMO->getAlign().value(); }
1267
1268 /// Return the SubclassData value, without HasDebugValue. This contains an
1269 /// encoding of the volatile flag, as well as bits used by subclasses. This
1270 /// function should only be used to compute a FoldingSetNodeID value.
1271 /// The HasDebugValue bit is masked out because CSE map needs to match
1272 /// nodes with debug info with nodes without debug info. Same is about
1273 /// isDivergent bit.
1274 unsigned getRawSubclassData() const {
1275 uint16_t Data;
  // A local copy of the bits union is used so the debug/divergence bits can
  // be cleared without touching this node; memcpy avoids strict-aliasing
  // issues when reading the bitfields back out as an integer.
1276 union {
1277 char RawSDNodeBits[sizeof(uint16_t)];
1278 SDNodeBitfields SDNodeBits;
1279 };
1280 memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
1281 SDNodeBits.HasDebugValue = 0;
1282 SDNodeBits.IsDivergent = false;
1283 memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
1284 return Data;
1285 }
1286
  // Flag accessors backed by the shared subclass-bits union.
1287 bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
1288 bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
1289 bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
1290 bool isInvariant() const { return MemSDNodeBits.IsInvariant; }
1291
1292 // Returns the offset from the location of the access.
1293 int64_t getSrcValueOffset() const { return MMO->getOffset(); }
1294
1295 /// Returns the AA info that describes the dereference.
1296 AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }
1297
1298 /// Returns the Ranges that describes the dereference.
1299 const MDNode *getRanges() const { return MMO->getRanges(); }
1300
1301 /// Returns the synchronization scope ID for this memory operation.
1302 SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }
1303
1304 /// Return the atomic ordering requirements for this memory operation. For
1305 /// cmpxchg atomic operations, return the atomic ordering requirements when
1306 /// store occurs.
1307 AtomicOrdering getSuccessOrdering() const {
1308 return MMO->getSuccessOrdering();
1309 }
1310
1311 /// Return a single atomic ordering that is at least as strong as both the
1312 /// success and failure orderings for an atomic operation. (For operations
1313 /// other than cmpxchg, this is equivalent to getSuccessOrdering().)
1314 AtomicOrdering getMergedOrdering() const { return MMO->getMergedOrdering(); }
1315
1316 /// Return true if the memory operation ordering is Unordered or higher.
1317 bool isAtomic() const { return MMO->isAtomic(); }
1318
1319 /// Returns true if the memory operation doesn't imply any ordering
1320 /// constraints on surrounding memory operations beyond the normal memory
1321 /// aliasing rules.
1322 bool isUnordered() const { return MMO->isUnordered(); }
1323
1324 /// Returns true if the memory operation is neither atomic or volatile.
1325 bool isSimple() const { return !isAtomic() && !isVolatile(); }
1326
1327 /// Return the type of the in-memory value.
1328 EVT getMemoryVT() const { return MemoryVT; }
1329
1330 /// Return a MachineMemOperand object describing the memory
1331 /// reference performed by operation.
1332 MachineMemOperand *getMemOperand() const { return MMO; }
1333
1334 const MachinePointerInfo &getPointerInfo() const {
1335 return MMO->getPointerInfo();
1336 }
1337
1338 /// Return the address space for the associated pointer
1339 unsigned getAddressSpace() const {
1340 return getPointerInfo().getAddrSpace();
1341 }
1342
1343 /// Update this MemSDNode's MachineMemOperand information
1344 /// to reflect the alignment of NewMMO, if it has a greater alignment.
1345 /// This must only be used when the new alignment applies to all users of
1346 /// this MachineMemOperand.
1347 void refineAlignment(const MachineMemOperand *NewMMO) {
1348 MMO->refineAlignment(NewMMO);
1349 }
1350
1351 const SDValue &getChain() const { return getOperand(0); }
1352
1353 const SDValue &getBasePtr() const {
1354 switch (getOpcode()) {
1355 case ISD::STORE:
1356 case ISD::MSTORE:
1357 return getOperand(2);
1358 case ISD::MGATHER:
1359 case ISD::MSCATTER:
1360 return getOperand(3);
1361 default:
1362 return getOperand(1);
1363 }
1364 }
1365
1366 // Methods to support isa and dyn_cast
1367 static bool classof(const SDNode *N) {
1368 // For some targets, we lower some target intrinsics to a MemIntrinsicNode
1369 // with either an intrinsic or a target opcode.
1370 switch (N->getOpcode()) {
1371 case ISD::LOAD:
1372 case ISD::STORE:
1373 case ISD::PREFETCH:
1374 case ISD::ATOMIC_CMP_SWAP:
1375 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
1376 case ISD::ATOMIC_SWAP:
1377 case ISD::ATOMIC_LOAD_ADD:
1378 case ISD::ATOMIC_LOAD_SUB:
1379 case ISD::ATOMIC_LOAD_AND:
1380 case ISD::ATOMIC_LOAD_CLR:
1381 case ISD::ATOMIC_LOAD_OR:
1382 case ISD::ATOMIC_LOAD_XOR:
1383 case ISD::ATOMIC_LOAD_NAND:
1384 case ISD::ATOMIC_LOAD_MIN:
1385 case ISD::ATOMIC_LOAD_MAX:
1386 case ISD::ATOMIC_LOAD_UMIN:
1387 case ISD::ATOMIC_LOAD_UMAX:
1388 case ISD::ATOMIC_LOAD_FADD:
1389 case ISD::ATOMIC_LOAD_FSUB:
1390 case ISD::ATOMIC_LOAD:
1391 case ISD::ATOMIC_STORE:
1392 case ISD::MLOAD:
1393 case ISD::MSTORE:
1394 case ISD::MGATHER:
1395 case ISD::MSCATTER:
1396 return true;
1397 default:
1398 return N->isMemIntrinsic() || N->isTargetMemoryOpcode();
1399 }
1400 }
1401};
1402
1403/// This is an SDNode representing atomic operations.
1404class AtomicSDNode : public MemSDNode {
1405public:
1406 AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
1407 EVT MemVT, MachineMemOperand *MMO)
1408 : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
1409 assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) ||(static_cast <bool> (((Opc != ISD::ATOMIC_LOAD &&
Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && "then why are we using an AtomicSDNode?"
) ? void (0) : __assert_fail ("((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && \"then why are we using an AtomicSDNode?\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1410, __extension__ __PRETTY_FUNCTION__))
1410 MMO->isAtomic()) && "then why are we using an AtomicSDNode?")(static_cast <bool> (((Opc != ISD::ATOMIC_LOAD &&
Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && "then why are we using an AtomicSDNode?"
) ? void (0) : __assert_fail ("((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && \"then why are we using an AtomicSDNode?\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1410, __extension__ __PRETTY_FUNCTION__))
;
1411 }
1412
1413 const SDValue &getBasePtr() const { return getOperand(1); }
1414 const SDValue &getVal() const { return getOperand(2); }
1415
1416 /// Returns true if this SDNode represents cmpxchg atomic operation, false
1417 /// otherwise.
1418 bool isCompareAndSwap() const {
1419 unsigned Op = getOpcode();
1420 return Op == ISD::ATOMIC_CMP_SWAP ||
1421 Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
1422 }
1423
1424 /// For cmpxchg atomic operations, return the atomic ordering requirements
1425 /// when store does not occur.
1426 AtomicOrdering getFailureOrdering() const {
1427 assert(isCompareAndSwap() && "Must be cmpxchg operation")(static_cast <bool> (isCompareAndSwap() && "Must be cmpxchg operation"
) ? void (0) : __assert_fail ("isCompareAndSwap() && \"Must be cmpxchg operation\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1427, __extension__ __PRETTY_FUNCTION__))
;
1428 return MMO->getFailureOrdering();
1429 }
1430
1431 // Methods to support isa and dyn_cast
1432 static bool classof(const SDNode *N) {
1433 return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1434 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1435 N->getOpcode() == ISD::ATOMIC_SWAP ||
1436 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1437 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1438 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1439 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1440 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1441 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1442 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1443 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1444 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1445 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1446 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1447 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1448 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1449 N->getOpcode() == ISD::ATOMIC_LOAD ||
1450 N->getOpcode() == ISD::ATOMIC_STORE;
1451 }
1452};
1453
1454/// This SDNode is used for target intrinsics that touch
1455/// memory and need an associated MachineMemOperand. Its opcode may be
1456/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
1457/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
1458class MemIntrinsicSDNode : public MemSDNode {
1459public:
1460 MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
1461 SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
1462 : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
1463 SDNodeBits.IsMemIntrinsic = true;
1464 }
1465
1466 // Methods to support isa and dyn_cast
1467 static bool classof(const SDNode *N) {
1468 // We lower some target intrinsics to their target opcode
1469 // early a node with a target opcode can be of this class
1470 return N->isMemIntrinsic() ||
1471 N->getOpcode() == ISD::PREFETCH ||
1472 N->isTargetMemoryOpcode();
1473 }
1474};
1475
1476/// This SDNode is used to implement the code generator
1477/// support for the llvm IR shufflevector instruction. It combines elements
1478/// from two input vectors into a new input vector, with the selection and
1479/// ordering of elements determined by an array of integers, referred to as
1480/// the shuffle mask. For input vectors of width N, mask indices of 0..N-1
1481/// refer to elements from the LHS input, and indices from N to 2N-1 the RHS.
1482/// An index of -1 is treated as undef, such that the code generator may put
1483/// any value in the corresponding element of the result.
1484class ShuffleVectorSDNode : public SDNode {
1485 // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
1486 // is freed when the SelectionDAG object is destroyed.
1487 const int *Mask;
1488
1489protected:
1490 friend class SelectionDAG;
1491
1492 ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
1493 : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}
1494
1495public:
1496 ArrayRef<int> getMask() const {
1497 EVT VT = getValueType(0);
1498 return makeArrayRef(Mask, VT.getVectorNumElements());
1499 }
1500
1501 int getMaskElt(unsigned Idx) const {
1502 assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!")(static_cast <bool> (Idx < getValueType(0).getVectorNumElements
() && "Idx out of range!") ? void (0) : __assert_fail
("Idx < getValueType(0).getVectorNumElements() && \"Idx out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1502, __extension__ __PRETTY_FUNCTION__))
;
1503 return Mask[Idx];
1504 }
1505
1506 bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
1507
1508 int getSplatIndex() const {
1509 assert(isSplat() && "Cannot get splat index for non-splat!")(static_cast <bool> (isSplat() && "Cannot get splat index for non-splat!"
) ? void (0) : __assert_fail ("isSplat() && \"Cannot get splat index for non-splat!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1509, __extension__ __PRETTY_FUNCTION__))
;
1510 EVT VT = getValueType(0);
1511 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1512 if (Mask[i] >= 0)
1513 return Mask[i];
1514
1515 // We can choose any index value here and be correct because all elements
1516 // are undefined. Return 0 for better potential for callers to simplify.
1517 return 0;
1518 }
1519
1520 static bool isSplatMask(const int *Mask, EVT VT);
1521
1522 /// Change values in a shuffle permute mask assuming
1523 /// the two vector operands have swapped position.
1524 static void commuteMask(MutableArrayRef<int> Mask) {
1525 unsigned NumElems = Mask.size();
1526 for (unsigned i = 0; i != NumElems; ++i) {
1527 int idx = Mask[i];
1528 if (idx < 0)
1529 continue;
1530 else if (idx < (int)NumElems)
1531 Mask[i] = idx + NumElems;
1532 else
1533 Mask[i] = idx - NumElems;
1534 }
1535 }
1536
1537 static bool classof(const SDNode *N) {
1538 return N->getOpcode() == ISD::VECTOR_SHUFFLE;
1539 }
1540};
1541
1542class ConstantSDNode : public SDNode {
1543 friend class SelectionDAG;
1544
1545 const ConstantInt *Value;
1546
1547 ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT)
1548 : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(),
1549 getSDVTList(VT)),
1550 Value(val) {
1551 ConstantSDNodeBits.IsOpaque = isOpaque;
1552 }
1553
1554public:
1555 const ConstantInt *getConstantIntValue() const { return Value; }
1556 const APInt &getAPIntValue() const { return Value->getValue(); }
1557 uint64_t getZExtValue() const { return Value->getZExtValue(); }
1558 int64_t getSExtValue() const { return Value->getSExtValue(); }
1559 uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX(18446744073709551615UL)) {
1560 return Value->getLimitedValue(Limit);
1561 }
1562 MaybeAlign getMaybeAlignValue() const { return Value->getMaybeAlignValue(); }
1563 Align getAlignValue() const { return Value->getAlignValue(); }
1564
1565 bool isOne() const { return Value->isOne(); }
1566 bool isNullValue() const { return Value->isZero(); }
1567 bool isAllOnesValue() const { return Value->isMinusOne(); }
1568 bool isMaxSignedValue() const { return Value->isMaxValue(true); }
1569 bool isMinSignedValue() const { return Value->isMinValue(true); }
1570
1571 bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }
1572
1573 static bool classof(const SDNode *N) {
1574 return N->getOpcode() == ISD::Constant ||
1575 N->getOpcode() == ISD::TargetConstant;
1576 }
1577};
1578
1579uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
1580 return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
1581}
1582
1583const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const {
1584 return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
1585}
1586
1587class ConstantFPSDNode : public SDNode {
1588 friend class SelectionDAG;
1589
1590 const ConstantFP *Value;
1591
1592 ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT)
1593 : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0,
1594 DebugLoc(), getSDVTList(VT)),
1595 Value(val) {}
1596
1597public:
1598 const APFloat& getValueAPF() const { return Value->getValueAPF(); }
1599 const ConstantFP *getConstantFPValue() const { return Value; }
1600
1601 /// Return true if the value is positive or negative zero.
1602 bool isZero() const { return Value->isZero(); }
1603
1604 /// Return true if the value is a NaN.
1605 bool isNaN() const { return Value->isNaN(); }
1606
1607 /// Return true if the value is an infinity
1608 bool isInfinity() const { return Value->isInfinity(); }
1609
1610 /// Return true if the value is negative.
1611 bool isNegative() const { return Value->isNegative(); }
1612
1613 /// We don't rely on operator== working on double values, as
1614 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
1615 /// As such, this method can be used to do an exact bit-for-bit comparison of
1616 /// two floating point values.
1617
1618 /// We leave the version with the double argument here because it's just so
1619 /// convenient to write "2.0" and the like. Without this function we'd
1620 /// have to duplicate its logic everywhere it's called.
1621 bool isExactlyValue(double V) const {
1622 return Value->getValueAPF().isExactlyValue(V);
1623 }
1624 bool isExactlyValue(const APFloat& V) const;
1625
1626 static bool isValueValidForType(EVT VT, const APFloat& Val);
1627
1628 static bool classof(const SDNode *N) {
1629 return N->getOpcode() == ISD::ConstantFP ||
1630 N->getOpcode() == ISD::TargetConstantFP;
1631 }
1632};
1633
1634/// Returns true if \p V is a constant integer zero.
1635bool isNullConstant(SDValue V);
1636
1637/// Returns true if \p V is an FP constant with a value of positive zero.
1638bool isNullFPConstant(SDValue V);
1639
1640/// Returns true if \p V is an integer constant with all bits set.
1641bool isAllOnesConstant(SDValue V);
1642
1643/// Returns true if \p V is a constant integer one.
1644bool isOneConstant(SDValue V);
1645
1646/// Return the non-bitcasted source operand of \p V if it exists.
1647/// If \p V is not a bitcasted value, it is returned as-is.
1648SDValue peekThroughBitcasts(SDValue V);
1649
1650/// Return the non-bitcasted and one-use source operand of \p V if it exists.
1651/// If \p V is not a bitcasted one-use value, it is returned as-is.
1652SDValue peekThroughOneUseBitcasts(SDValue V);
1653
1654/// Return the non-extracted vector source operand of \p V if it exists.
1655/// If \p V is not an extracted subvector, it is returned as-is.
1656SDValue peekThroughExtractSubvectors(SDValue V);
1657
1658/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
1659/// constant is canonicalized to be operand 1.
1660bool isBitwiseNot(SDValue V, bool AllowUndefs = false);
1661
1662/// Returns the SDNode if it is a constant splat BuildVector or constant int.
1663ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false,
1664 bool AllowTruncation = false);
1665
1666/// Returns the SDNode if it is a demanded constant splat BuildVector or
1667/// constant int.
1668ConstantSDNode *isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
1669 bool AllowUndefs = false,
1670 bool AllowTruncation = false);
1671
1672/// Returns the SDNode if it is a constant splat BuildVector or constant float.
1673ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false);
1674
1675/// Returns the SDNode if it is a demanded constant splat BuildVector or
1676/// constant float.
1677ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, const APInt &DemandedElts,
1678 bool AllowUndefs = false);
1679
1680/// Return true if the value is a constant 0 integer or a splatted vector of
1681/// a constant 0 integer (with no undefs by default).
1682/// Build vector implicit truncation is not an issue for null values.
1683bool isNullOrNullSplat(SDValue V, bool AllowUndefs = false);
1684
1685/// Return true if the value is a constant 1 integer or a splatted vector of a
1686/// constant 1 integer (with no undefs).
1687/// Does not permit build vector implicit truncation.
1688bool isOneOrOneSplat(SDValue V, bool AllowUndefs = false);
1689
1690/// Return true if the value is a constant -1 integer or a splatted vector of a
1691/// constant -1 integer (with no undefs).
1692/// Does not permit build vector implicit truncation.
1693bool isAllOnesOrAllOnesSplat(SDValue V, bool AllowUndefs = false);
1694
1695/// Return true if \p V is either a integer or FP constant.
1696inline bool isIntOrFPConstant(SDValue V) {
1697 return isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V);
1698}
1699
1700class GlobalAddressSDNode : public SDNode {
1701 friend class SelectionDAG;
1702
1703 const GlobalValue *TheGlobal;
1704 int64_t Offset;
1705 unsigned TargetFlags;
1706
1707 GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
1708 const GlobalValue *GA, EVT VT, int64_t o,
1709 unsigned TF);
1710
1711public:
1712 const GlobalValue *getGlobal() const { return TheGlobal; }
1713 int64_t getOffset() const { return Offset; }
1714 unsigned getTargetFlags() const { return TargetFlags; }
1715 // Return the address space this GlobalAddress belongs to.
1716 unsigned getAddressSpace() const;
1717
1718 static bool classof(const SDNode *N) {
1719 return N->getOpcode() == ISD::GlobalAddress ||
1720 N->getOpcode() == ISD::TargetGlobalAddress ||
1721 N->getOpcode() == ISD::GlobalTLSAddress ||
1722 N->getOpcode() == ISD::TargetGlobalTLSAddress;
1723 }
1724};
1725
1726class FrameIndexSDNode : public SDNode {
1727 friend class SelectionDAG;
1728
1729 int FI;
1730
1731 FrameIndexSDNode(int fi, EVT VT, bool isTarg)
1732 : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
1733 0, DebugLoc(), getSDVTList(VT)), FI(fi) {
1734 }
1735
1736public:
1737 int getIndex() const { return FI; }
1738
1739 static bool classof(const SDNode *N) {
1740 return N->getOpcode() == ISD::FrameIndex ||
1741 N->getOpcode() == ISD::TargetFrameIndex;
1742 }
1743};
1744
1745/// This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate
1746/// the offet and size that are started/ended in the underlying FrameIndex.
1747class LifetimeSDNode : public SDNode {
1748 friend class SelectionDAG;
1749 int64_t Size;
1750 int64_t Offset; // -1 if offset is unknown.
1751
1752 LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
1753 SDVTList VTs, int64_t Size, int64_t Offset)
1754 : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {}
1755public:
1756 int64_t getFrameIndex() const {
1757 return cast<FrameIndexSDNode>(getOperand(1))->getIndex();
1758 }
1759
1760 bool hasOffset() const { return Offset >= 0; }
1761 int64_t getOffset() const {
1762 assert(hasOffset() && "offset is unknown")(static_cast <bool> (hasOffset() && "offset is unknown"
) ? void (0) : __assert_fail ("hasOffset() && \"offset is unknown\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1762, __extension__ __PRETTY_FUNCTION__))
;
1763 return Offset;
1764 }
1765 int64_t getSize() const {
1766 assert(hasOffset() && "offset is unknown")(static_cast <bool> (hasOffset() && "offset is unknown"
) ? void (0) : __assert_fail ("hasOffset() && \"offset is unknown\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1766, __extension__ __PRETTY_FUNCTION__))
;
1767 return Size;
1768 }
1769
1770 // Methods to support isa and dyn_cast
1771 static bool classof(const SDNode *N) {
1772 return N->getOpcode() == ISD::LIFETIME_START ||
1773 N->getOpcode() == ISD::LIFETIME_END;
1774 }
1775};
1776
1777/// This SDNode is used for PSEUDO_PROBE values, which are the function guid and
1778/// the index of the basic block being probed. A pseudo probe serves as a place
1779/// holder and will be removed at the end of compilation. It does not have any
1780/// operand because we do not want the instruction selection to deal with any.
1781class PseudoProbeSDNode : public SDNode {
1782 friend class SelectionDAG;
1783 uint64_t Guid;
1784 uint64_t Index;
1785 uint32_t Attributes;
1786
1787 PseudoProbeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &Dl,
1788 SDVTList VTs, uint64_t Guid, uint64_t Index, uint32_t Attr)
1789 : SDNode(Opcode, Order, Dl, VTs), Guid(Guid), Index(Index),
1790 Attributes(Attr) {}
1791
1792public:
1793 uint64_t getGuid() const { return Guid; }
1794 uint64_t getIndex() const { return Index; }
1795 uint32_t getAttributes() const { return Attributes; }
1796
1797 // Methods to support isa and dyn_cast
1798 static bool classof(const SDNode *N) {
1799 return N->getOpcode() == ISD::PSEUDO_PROBE;
1800 }
1801};
1802
1803class JumpTableSDNode : public SDNode {
1804 friend class SelectionDAG;
1805
1806 int JTI;
1807 unsigned TargetFlags;
1808
1809 JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned TF)
1810 : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
1811 0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
1812 }
1813
1814public:
1815 int getIndex() const { return JTI; }
1816 unsigned getTargetFlags() const { return TargetFlags; }
1817
1818 static bool classof(const SDNode *N) {
1819 return N->getOpcode() == ISD::JumpTable ||
1820 N->getOpcode() == ISD::TargetJumpTable;
1821 }
1822};
1823
1824class ConstantPoolSDNode : public SDNode {
1825 friend class SelectionDAG;
1826
1827 union {
1828 const Constant *ConstVal;
1829 MachineConstantPoolValue *MachineCPVal;
1830 } Val;
1831 int Offset; // It's a MachineConstantPoolValue if top bit is set.
1832 Align Alignment; // Minimum alignment requirement of CP.
1833 unsigned TargetFlags;
1834
1835 ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
1836 Align Alignment, unsigned TF)
1837 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1838 DebugLoc(), getSDVTList(VT)),
1839 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1840 assert(Offset >= 0 && "Offset is too large")(static_cast <bool> (Offset >= 0 && "Offset is too large"
) ? void (0) : __assert_fail ("Offset >= 0 && \"Offset is too large\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1840, __extension__ __PRETTY_FUNCTION__))
;
1841 Val.ConstVal = c;
1842 }
1843
1844 ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v, EVT VT, int o,
1845 Align Alignment, unsigned TF)
1846 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1847 DebugLoc(), getSDVTList(VT)),
1848 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1849 assert(Offset >= 0 && "Offset is too large")(static_cast <bool> (Offset >= 0 && "Offset is too large"
) ? void (0) : __assert_fail ("Offset >= 0 && \"Offset is too large\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1849, __extension__ __PRETTY_FUNCTION__))
;
1850 Val.MachineCPVal = v;
1851 Offset |= 1 << (sizeof(unsigned)*CHAR_BIT8-1);
1852 }
1853
1854public:
1855 bool isMachineConstantPoolEntry() const {
1856 return Offset < 0;
1857 }
1858
1859 const Constant *getConstVal() const {
1860 assert(!isMachineConstantPoolEntry() && "Wrong constantpool type")(static_cast <bool> (!isMachineConstantPoolEntry() &&
"Wrong constantpool type") ? void (0) : __assert_fail ("!isMachineConstantPoolEntry() && \"Wrong constantpool type\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1860, __extension__ __PRETTY_FUNCTION__))
;
1861 return Val.ConstVal;
1862 }
1863
1864 MachineConstantPoolValue *getMachineCPVal() const {
1865 assert(isMachineConstantPoolEntry() && "Wrong constantpool type")(static_cast <bool> (isMachineConstantPoolEntry() &&
"Wrong constantpool type") ? void (0) : __assert_fail ("isMachineConstantPoolEntry() && \"Wrong constantpool type\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1865, __extension__ __PRETTY_FUNCTION__))
;
1866 return Val.MachineCPVal;
1867 }
1868
1869 int getOffset() const {
1870 return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT8-1));
1871 }
1872
1873 // Return the alignment of this constant pool object, which is either 0 (for
1874 // default alignment) or the desired value.
1875 Align getAlign() const { return Alignment; }
1876 unsigned getTargetFlags() const { return TargetFlags; }
1877
1878 Type *getType() const;
1879
1880 static bool classof(const SDNode *N) {
1881 return N->getOpcode() == ISD::ConstantPool ||
1882 N->getOpcode() == ISD::TargetConstantPool;
1883 }
1884};
1885
1886/// Completely target-dependent object reference.
1887class TargetIndexSDNode : public SDNode {
1888 friend class SelectionDAG;
1889
1890 unsigned TargetFlags;
1891 int Index;
1892 int64_t Offset;
1893
1894public:
1895 TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned TF)
1896 : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
1897 TargetFlags(TF), Index(Idx), Offset(Ofs) {}
1898
1899 unsigned getTargetFlags() const { return TargetFlags; }
1900 int getIndex() const { return Index; }
1901 int64_t getOffset() const { return Offset; }
1902
1903 static bool classof(const SDNode *N) {
1904 return N->getOpcode() == ISD::TargetIndex;
1905 }
1906};
1907
1908class BasicBlockSDNode : public SDNode {
1909 friend class SelectionDAG;
1910
1911 MachineBasicBlock *MBB;
1912
1913 /// Debug info is meaningful and potentially useful here, but we create
1914 /// blocks out of order when they're jumped to, which makes it a bit
1915 /// harder. Let's see if we need it first.
1916 explicit BasicBlockSDNode(MachineBasicBlock *mbb)
1917 : SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb)
1918 {}
1919
1920public:
1921 MachineBasicBlock *getBasicBlock() const { return MBB; }
1922
1923 static bool classof(const SDNode *N) {
1924 return N->getOpcode() == ISD::BasicBlock;
1925 }
1926};
1927
1928/// A "pseudo-class" with methods for operating on BUILD_VECTORs.
1929class BuildVectorSDNode : public SDNode {
1930public:
1931 // These are constructed as SDNodes and then cast to BuildVectorSDNodes.
1932 explicit BuildVectorSDNode() = delete;
1933
1934 /// Check if this is a constant splat, and if so, find the
1935 /// smallest element size that splats the vector. If MinSplatBits is
1936 /// nonzero, the element size must be at least that large. Note that the
1937 /// splat element may be the entire vector (i.e., a one element vector).
1938 /// Returns the splat element value in SplatValue. Any undefined bits in
1939 /// that value are zero, and the corresponding bits in the SplatUndef mask
1940 /// are set. The SplatBitSize value is set to the splat element size in
1941 /// bits. HasAnyUndefs is set to true if any bits in the vector are
1942 /// undefined. isBigEndian describes the endianness of the target.
1943 bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
1944 unsigned &SplatBitSize, bool &HasAnyUndefs,
1945 unsigned MinSplatBits = 0,
1946 bool isBigEndian = false) const;
1947
1948 /// Returns the demanded splatted value or a null value if this is not a
1949 /// splat.
1950 ///
1951 /// The DemandedElts mask indicates the elements that must be in the splat.
1952 /// If passed a non-null UndefElements bitvector, it will resize it to match
1953 /// the vector width and set the bits where elements are undef.
1954 SDValue getSplatValue(const APInt &DemandedElts,
1955 BitVector *UndefElements = nullptr) const;
1956
1957 /// Returns the splatted value or a null value if this is not a splat.
1958 ///
1959 /// If passed a non-null UndefElements bitvector, it will resize it to match
1960 /// the vector width and set the bits where elements are undef.
1961 SDValue getSplatValue(BitVector *UndefElements = nullptr) const;
1962
1963 /// Find the shortest repeating sequence of values in the build vector.
1964 ///
1965 /// e.g. { u, X, u, X, u, u, X, u } -> { X }
1966 /// { X, Y, u, Y, u, u, X, u } -> { X, Y }
1967 ///
1968 /// Currently this must be a power-of-2 build vector.
1969 /// The DemandedElts mask indicates the elements that must be present,
1970 /// undemanded elements in Sequence may be null (SDValue()). If passed a
1971 /// non-null UndefElements bitvector, it will resize it to match the original
1972 /// vector width and set the bits where elements are undef. If result is
1973 /// false, Sequence will be empty.
1974 bool getRepeatedSequence(const APInt &DemandedElts,
1975 SmallVectorImpl<SDValue> &Sequence,
1976 BitVector *UndefElements = nullptr) const;
1977
1978 /// Find the shortest repeating sequence of values in the build vector.
1979 ///
1980 /// e.g. { u, X, u, X, u, u, X, u } -> { X }
1981 /// { X, Y, u, Y, u, u, X, u } -> { X, Y }
1982 ///
1983 /// Currently this must be a power-of-2 build vector.
1984 /// If passed a non-null UndefElements bitvector, it will resize it to match
1985 /// the original vector width and set the bits where elements are undef.
1986 /// If result is false, Sequence will be empty.
1987 bool getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
1988 BitVector *UndefElements = nullptr) const;
1989
1990 /// Returns the demanded splatted constant or null if this is not a constant
1991 /// splat.
1992 ///
1993 /// The DemandedElts mask indicates the elements that must be in the splat.
1994 /// If passed a non-null UndefElements bitvector, it will resize it to match
1995 /// the vector width and set the bits where elements are undef.
1996 ConstantSDNode *
1997 getConstantSplatNode(const APInt &DemandedElts,
1998 BitVector *UndefElements = nullptr) const;
1999
2000 /// Returns the splatted constant or null if this is not a constant
2001 /// splat.
2002 ///
2003 /// If passed a non-null UndefElements bitvector, it will resize it to match
2004 /// the vector width and set the bits where elements are undef.
2005 ConstantSDNode *
2006 getConstantSplatNode(BitVector *UndefElements = nullptr) const;
2007
2008 /// Returns the demanded splatted constant FP or null if this is not a
2009 /// constant FP splat.
2010 ///
2011 /// The DemandedElts mask indicates the elements that must be in the splat.
2012 /// If passed a non-null UndefElements bitvector, it will resize it to match
2013 /// the vector width and set the bits where elements are undef.
2014 ConstantFPSDNode *
2015 getConstantFPSplatNode(const APInt &DemandedElts,
2016 BitVector *UndefElements = nullptr) const;
2017
2018 /// Returns the splatted constant FP or null if this is not a constant
2019 /// FP splat.
2020 ///
2021 /// If passed a non-null UndefElements bitvector, it will resize it to match
2022 /// the vector width and set the bits where elements are undef.
2023 ConstantFPSDNode *
2024 getConstantFPSplatNode(BitVector *UndefElements = nullptr) const;
2025
2026 /// If this is a constant FP splat and the splatted constant FP is an
2027 /// exact power or 2, return the log base 2 integer value. Otherwise,
2028 /// return -1.
2029 ///
2030 /// The BitWidth specifies the necessary bit precision.
2031 int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
2032 uint32_t BitWidth) const;
2033
2034 bool isConstant() const;
2035
2036 static bool classof(const SDNode *N) {
2037 return N->getOpcode() == ISD::BUILD_VECTOR;
2038 }
2039};
2040
2041/// An SDNode that holds an arbitrary LLVM IR Value. This is
2042/// used when the SelectionDAG needs to make a simple reference to something
2043/// in the LLVM IR representation.
2044///
2045class SrcValueSDNode : public SDNode {
2046 friend class SelectionDAG;
2047
2048 const Value *V;
2049
2050 /// Create a SrcValue for a general value.
2051 explicit SrcValueSDNode(const Value *v)
2052 : SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}
2053
2054public:
2055 /// Return the contained Value.
2056 const Value *getValue() const { return V; }
2057
2058 static bool classof(const SDNode *N) {
2059 return N->getOpcode() == ISD::SRCVALUE;
2060 }
2061};
2062
2063class MDNodeSDNode : public SDNode {
2064 friend class SelectionDAG;
2065
2066 const MDNode *MD;
2067
2068 explicit MDNodeSDNode(const MDNode *md)
2069 : SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md)
2070 {}
2071
2072public:
2073 const MDNode *getMD() const { return MD; }
2074
2075 static bool classof(const SDNode *N) {
2076 return N->getOpcode() == ISD::MDNODE_SDNODE;
2077 }
2078};
2079
2080class RegisterSDNode : public SDNode {
2081 friend class SelectionDAG;
2082
2083 Register Reg;
2084
2085 RegisterSDNode(Register reg, EVT VT)
2086 : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {}
2087
2088public:
2089 Register getReg() const { return Reg; }
2090
2091 static bool classof(const SDNode *N) {
2092 return N->getOpcode() == ISD::Register;
2093 }
2094};
2095
2096class RegisterMaskSDNode : public SDNode {
2097 friend class SelectionDAG;
2098
2099 // The memory for RegMask is not owned by the node.
2100 const uint32_t *RegMask;
2101
2102 RegisterMaskSDNode(const uint32_t *mask)
2103 : SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)),
2104 RegMask(mask) {}
2105
2106public:
2107 const uint32_t *getRegMask() const { return RegMask; }
2108
2109 static bool classof(const SDNode *N) {
2110 return N->getOpcode() == ISD::RegisterMask;
2111 }
2112};
2113
2114class BlockAddressSDNode : public SDNode {
2115 friend class SelectionDAG;
2116
2117 const BlockAddress *BA;
2118 int64_t Offset;
2119 unsigned TargetFlags;
2120
2121 BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
2122 int64_t o, unsigned Flags)
2123 : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
2124 BA(ba), Offset(o), TargetFlags(Flags) {}
2125
2126public:
2127 const BlockAddress *getBlockAddress() const { return BA; }
2128 int64_t getOffset() const { return Offset; }
2129 unsigned getTargetFlags() const { return TargetFlags; }
2130
2131 static bool classof(const SDNode *N) {
2132 return N->getOpcode() == ISD::BlockAddress ||
2133 N->getOpcode() == ISD::TargetBlockAddress;
2134 }
2135};
2136
2137class LabelSDNode : public SDNode {
2138 friend class SelectionDAG;
2139
2140 MCSymbol *Label;
2141
2142 LabelSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, MCSymbol *L)
2143 : SDNode(Opcode, Order, dl, getSDVTList(MVT::Other)), Label(L) {
2144 assert(LabelSDNode::classof(this) && "not a label opcode")(static_cast <bool> (LabelSDNode::classof(this) &&
"not a label opcode") ? void (0) : __assert_fail ("LabelSDNode::classof(this) && \"not a label opcode\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2144, __extension__ __PRETTY_FUNCTION__))
;
2145 }
2146
2147public:
2148 MCSymbol *getLabel() const { return Label; }
2149
2150 static bool classof(const SDNode *N) {
2151 return N->getOpcode() == ISD::EH_LABEL ||
2152 N->getOpcode() == ISD::ANNOTATION_LABEL;
2153 }
2154};
2155
2156class ExternalSymbolSDNode : public SDNode {
2157 friend class SelectionDAG;
2158
2159 const char *Symbol;
2160 unsigned TargetFlags;
2161
2162 ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned TF, EVT VT)
2163 : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol, 0,
2164 DebugLoc(), getSDVTList(VT)),
2165 Symbol(Sym), TargetFlags(TF) {}
2166
2167public:
2168 const char *getSymbol() const { return Symbol; }
2169 unsigned getTargetFlags() const { return TargetFlags; }
2170
2171 static bool classof(const SDNode *N) {
2172 return N->getOpcode() == ISD::ExternalSymbol ||
2173 N->getOpcode() == ISD::TargetExternalSymbol;
2174 }
2175};
2176
2177class MCSymbolSDNode : public SDNode {
2178 friend class SelectionDAG;
2179
2180 MCSymbol *Symbol;
2181
2182 MCSymbolSDNode(MCSymbol *Symbol, EVT VT)
2183 : SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {}
2184
2185public:
2186 MCSymbol *getMCSymbol() const { return Symbol; }
2187
2188 static bool classof(const SDNode *N) {
2189 return N->getOpcode() == ISD::MCSymbol;
2190 }
2191};
2192
2193class CondCodeSDNode : public SDNode {
2194 friend class SelectionDAG;
2195
2196 ISD::CondCode Condition;
2197
2198 explicit CondCodeSDNode(ISD::CondCode Cond)
2199 : SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2200 Condition(Cond) {}
2201
2202public:
2203 ISD::CondCode get() const { return Condition; }
2204
2205 static bool classof(const SDNode *N) {
2206 return N->getOpcode() == ISD::CONDCODE;
2207 }
2208};
2209
2210/// This class is used to represent EVT's, which are used
2211/// to parameterize some operations.
2212class VTSDNode : public SDNode {
2213 friend class SelectionDAG;
2214
2215 EVT ValueType;
2216
2217 explicit VTSDNode(EVT VT)
2218 : SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2219 ValueType(VT) {}
2220
2221public:
2222 EVT getVT() const { return ValueType; }
2223
2224 static bool classof(const SDNode *N) {
2225 return N->getOpcode() == ISD::VALUETYPE;
2226 }
2227};
2228
2229/// Base class for LoadSDNode and StoreSDNode
2230class LSBaseSDNode : public MemSDNode {
2231public:
2232 LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
2233 SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
2234 MachineMemOperand *MMO)
2235 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2236 LSBaseSDNodeBits.AddressingMode = AM;
2237 assert(getAddressingMode() == AM && "Value truncated")(static_cast <bool> (getAddressingMode() == AM &&
"Value truncated") ? void (0) : __assert_fail ("getAddressingMode() == AM && \"Value truncated\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2237, __extension__ __PRETTY_FUNCTION__))
;
2238 }
2239
2240 const SDValue &getOffset() const {
2241 return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
2242 }
2243
2244 /// Return the addressing mode for this load or store:
2245 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2246 ISD::MemIndexedMode getAddressingMode() const {
2247 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2248 }
2249
2250 /// Return true if this is a pre/post inc/dec load/store.
2251 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2252
2253 /// Return true if this is NOT a pre/post inc/dec load/store.
2254 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2255
2256 static bool classof(const SDNode *N) {
2257 return N->getOpcode() == ISD::LOAD ||
2258 N->getOpcode() == ISD::STORE;
2259 }
2260};
2261
2262/// This class is used to represent ISD::LOAD nodes.
2263class LoadSDNode : public LSBaseSDNode {
2264 friend class SelectionDAG;
2265
2266 LoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2267 ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
2268 MachineMemOperand *MMO)
2269 : LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) {
2270 LoadSDNodeBits.ExtTy = ETy;
2271 assert(readMem() && "Load MachineMemOperand is not a load!")(static_cast <bool> (readMem() && "Load MachineMemOperand is not a load!"
) ? void (0) : __assert_fail ("readMem() && \"Load MachineMemOperand is not a load!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2271, __extension__ __PRETTY_FUNCTION__))
;
2272 assert(!writeMem() && "Load MachineMemOperand is a store!")(static_cast <bool> (!writeMem() && "Load MachineMemOperand is a store!"
) ? void (0) : __assert_fail ("!writeMem() && \"Load MachineMemOperand is a store!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2272, __extension__ __PRETTY_FUNCTION__))
;
2273 }
2274
2275public:
2276 /// Return whether this is a plain node,
2277 /// or one of the varieties of value-extending loads.
2278 ISD::LoadExtType getExtensionType() const {
2279 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2280 }
2281
2282 const SDValue &getBasePtr() const { return getOperand(1); }
2283 const SDValue &getOffset() const { return getOperand(2); }
2284
2285 static bool classof(const SDNode *N) {
2286 return N->getOpcode() == ISD::LOAD;
2287 }
2288};
2289
2290/// This class is used to represent ISD::STORE nodes.
2291class StoreSDNode : public LSBaseSDNode {
2292 friend class SelectionDAG;
2293
2294 StoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2295 ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
2296 MachineMemOperand *MMO)
2297 : LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) {
2298 StoreSDNodeBits.IsTruncating = isTrunc;
2299 assert(!readMem() && "Store MachineMemOperand is a load!")(static_cast <bool> (!readMem() && "Store MachineMemOperand is a load!"
) ? void (0) : __assert_fail ("!readMem() && \"Store MachineMemOperand is a load!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2299, __extension__ __PRETTY_FUNCTION__))
;
2300 assert(writeMem() && "Store MachineMemOperand is not a store!")(static_cast <bool> (writeMem() && "Store MachineMemOperand is not a store!"
) ? void (0) : __assert_fail ("writeMem() && \"Store MachineMemOperand is not a store!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2300, __extension__ __PRETTY_FUNCTION__))
;
2301 }
2302
2303public:
2304 /// Return true if the op does a truncation before store.
2305 /// For integers this is the same as doing a TRUNCATE and storing the result.
2306 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2307 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2308 void setTruncatingStore(bool Truncating) {
2309 StoreSDNodeBits.IsTruncating = Truncating;
2310 }
2311
2312 const SDValue &getValue() const { return getOperand(1); }
2313 const SDValue &getBasePtr() const { return getOperand(2); }
2314 const SDValue &getOffset() const { return getOperand(3); }
2315
2316 static bool classof(const SDNode *N) {
2317 return N->getOpcode() == ISD::STORE;
2318 }
2319};
2320
2321/// This base class is used to represent MLOAD and MSTORE nodes
2322class MaskedLoadStoreSDNode : public MemSDNode {
2323public:
2324 friend class SelectionDAG;
2325
2326 MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order,
2327 const DebugLoc &dl, SDVTList VTs,
2328 ISD::MemIndexedMode AM, EVT MemVT,
2329 MachineMemOperand *MMO)
2330 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2331 LSBaseSDNodeBits.AddressingMode = AM;
2332 assert(getAddressingMode() == AM && "Value truncated")(static_cast <bool> (getAddressingMode() == AM &&
"Value truncated") ? void (0) : __assert_fail ("getAddressingMode() == AM && \"Value truncated\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2332, __extension__ __PRETTY_FUNCTION__))
;
2333 }
2334
2335 // MaskedLoadSDNode (Chain, ptr, offset, mask, passthru)
2336 // MaskedStoreSDNode (Chain, data, ptr, offset, mask)
2337 // Mask is a vector of i1 elements
2338 const SDValue &getOffset() const {
2339 return getOperand(getOpcode() == ISD::MLOAD ? 2 : 3);
2340 }
2341 const SDValue &getMask() const {
2342 return getOperand(getOpcode() == ISD::MLOAD ? 3 : 4);
2343 }
2344
2345 /// Return the addressing mode for this load or store:
2346 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2347 ISD::MemIndexedMode getAddressingMode() const {
2348 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2349 }
2350
2351 /// Return true if this is a pre/post inc/dec load/store.
2352 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2353
2354 /// Return true if this is NOT a pre/post inc/dec load/store.
2355 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2356
2357 static bool classof(const SDNode *N) {
2358 return N->getOpcode() == ISD::MLOAD ||
2359 N->getOpcode() == ISD::MSTORE;
2360 }
2361};
2362
2363/// This class is used to represent an MLOAD node
2364class MaskedLoadSDNode : public MaskedLoadStoreSDNode {
2365public:
2366 friend class SelectionDAG;
2367
2368 MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2369 ISD::MemIndexedMode AM, ISD::LoadExtType ETy,
2370 bool IsExpanding, EVT MemVT, MachineMemOperand *MMO)
2371 : MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, VTs, AM, MemVT, MMO) {
2372 LoadSDNodeBits.ExtTy = ETy;
2373 LoadSDNodeBits.IsExpanding = IsExpanding;
2374 }
2375
2376 ISD::LoadExtType getExtensionType() const {
2377 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2378 }
2379
2380 const SDValue &getBasePtr() const { return getOperand(1); }
2381 const SDValue &getOffset() const { return getOperand(2); }
2382 const SDValue &getMask() const { return getOperand(3); }
2383 const SDValue &getPassThru() const { return getOperand(4); }
2384
2385 static bool classof(const SDNode *N) {
2386 return N->getOpcode() == ISD::MLOAD;
2387 }
2388
2389 bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
2390};
2391
2392/// This class is used to represent an MSTORE node
2393class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
2394public:
2395 friend class SelectionDAG;
2396
2397 MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2398 ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing,
2399 EVT MemVT, MachineMemOperand *MMO)
2400 : MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, AM, MemVT, MMO) {
2401 StoreSDNodeBits.IsTruncating = isTrunc;
2402 StoreSDNodeBits.IsCompressing = isCompressing;
2403 }
2404
2405 /// Return true if the op does a truncation before store.
2406 /// For integers this is the same as doing a TRUNCATE and storing the result.
2407 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2408 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2409
2410 /// Returns true if the op does a compression to the vector before storing.
2411 /// The node contiguously stores the active elements (integers or floats)
2412 /// in src (those with their respective bit set in writemask k) to unaligned
2413 /// memory at base_addr.
2414 bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
2415
2416 const SDValue &getValue() const { return getOperand(1); }
2417 const SDValue &getBasePtr() const { return getOperand(2); }
2418 const SDValue &getOffset() const { return getOperand(3); }
2419 const SDValue &getMask() const { return getOperand(4); }
2420
2421 static bool classof(const SDNode *N) {
2422 return N->getOpcode() == ISD::MSTORE;
2423 }
2424};
2425
2426/// This is a base class used to represent
2427/// MGATHER and MSCATTER nodes
2428///
2429class MaskedGatherScatterSDNode : public MemSDNode {
2430public:
2431 friend class SelectionDAG;
2432
2433 MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
2434 const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2435 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2436 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2437 LSBaseSDNodeBits.AddressingMode = IndexType;
2438 assert(getIndexType() == IndexType && "Value truncated")(static_cast <bool> (getIndexType() == IndexType &&
"Value truncated") ? void (0) : __assert_fail ("getIndexType() == IndexType && \"Value truncated\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2438, __extension__ __PRETTY_FUNCTION__))
;
2439 }
2440
2441 /// How is Index applied to BasePtr when computing addresses.
2442 ISD::MemIndexType getIndexType() const {
2443 return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
2444 }
2445 void setIndexType(ISD::MemIndexType IndexType) {
2446 LSBaseSDNodeBits.AddressingMode = IndexType;
2447 }
2448 bool isIndexScaled() const {
2449 return (getIndexType() == ISD::SIGNED_SCALED) ||
2450 (getIndexType() == ISD::UNSIGNED_SCALED);
2451 }
2452 bool isIndexSigned() const {
2453 return (getIndexType() == ISD::SIGNED_SCALED) ||
2454 (getIndexType() == ISD::SIGNED_UNSCALED);
2455 }
2456
2457 // In the both nodes address is Op1, mask is Op2:
2458 // MaskedGatherSDNode (Chain, passthru, mask, base, index, scale)
2459 // MaskedScatterSDNode (Chain, value, mask, base, index, scale)
2460 // Mask is a vector of i1 elements
2461 const SDValue &getBasePtr() const { return getOperand(3); }
2462 const SDValue &getIndex() const { return getOperand(4); }
2463 const SDValue &getMask() const { return getOperand(2); }
2464 const SDValue &getScale() const { return getOperand(5); }
2465
2466 static bool classof(const SDNode *N) {
2467 return N->getOpcode() == ISD::MGATHER ||
2468 N->getOpcode() == ISD::MSCATTER;
2469 }
2470};
2471
2472/// This class is used to represent an MGATHER node
2473///
2474class MaskedGatherSDNode : public MaskedGatherScatterSDNode {
2475public:
2476 friend class SelectionDAG;
2477
2478 MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2479 EVT MemVT, MachineMemOperand *MMO,
2480 ISD::MemIndexType IndexType, ISD::LoadExtType ETy)
2481 : MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO,
2482 IndexType) {
2483 LoadSDNodeBits.ExtTy = ETy;
2484 }
2485
2486 const SDValue &getPassThru() const { return getOperand(1); }
2487
2488 ISD::LoadExtType getExtensionType() const {
2489 return ISD::LoadExtType(LoadSDNodeBits.ExtTy);
2490 }
2491
2492 static bool classof(const SDNode *N) {
2493 return N->getOpcode() == ISD::MGATHER;
2494 }
2495};
2496
2497/// This class is used to represent an MSCATTER node
2498///
2499class MaskedScatterSDNode : public MaskedGatherScatterSDNode {
2500public:
2501 friend class SelectionDAG;
2502
2503 MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2504 EVT MemVT, MachineMemOperand *MMO,
2505 ISD::MemIndexType IndexType, bool IsTrunc)
2506 : MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO,
2507 IndexType) {
2508 StoreSDNodeBits.IsTruncating = IsTrunc;
2509 }
2510
2511 /// Return true if the op does a truncation before store.
2512 /// For integers this is the same as doing a TRUNCATE and storing the result.
2513 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2514 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2515
2516 const SDValue &getValue() const { return getOperand(1); }
2517
2518 static bool classof(const SDNode *N) {
2519 return N->getOpcode() == ISD::MSCATTER;
2520 }
2521};
2522
2523/// An SDNode that represents everything that will be needed
2524/// to construct a MachineInstr. These nodes are created during the
2525/// instruction selection proper phase.
2526///
2527/// Note that the only supported way to set the `memoperands` is by calling the
2528/// `SelectionDAG::setNodeMemRefs` function as the memory management happens
2529/// inside the DAG rather than in the node.
2530class MachineSDNode : public SDNode {
2531private:
2532 friend class SelectionDAG;
2533
2534 MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs)
2535 : SDNode(Opc, Order, DL, VTs) {}
2536
2537 // We use a pointer union between a single `MachineMemOperand` pointer and
2538 // a pointer to an array of `MachineMemOperand` pointers. This is null when
2539 // the number of these is zero, the single pointer variant used when the
2540 // number is one, and the array is used for larger numbers.
2541 //
2542 // The array is allocated via the `SelectionDAG`'s allocator and so will
2543 // always live until the DAG is cleaned up and doesn't require ownership here.
2544 //
2545 // We can't use something simpler like `TinyPtrVector` here because `SDNode`
2546 // subclasses aren't managed in a conforming C++ manner. See the comments on
2547 // `SelectionDAG::MorphNodeTo` which details what all goes on, but the
2548 // constraint here is that these don't manage memory with their constructor or
2549 // destructor and can be initialized to a good state even if they start off
2550 // uninitialized.
2551 PointerUnion<MachineMemOperand *, MachineMemOperand **> MemRefs = {};
2552
2553 // Note that this could be folded into the above `MemRefs` member if doing so
2554 // is advantageous at some point. We don't need to store this in most cases.
2555 // However, at the moment this doesn't appear to make the allocation any
2556 // smaller and makes the code somewhat simpler to read.
2557 int NumMemRefs = 0;
2558
2559public:
2560 using mmo_iterator = ArrayRef<MachineMemOperand *>::const_iterator;
2561
2562 ArrayRef<MachineMemOperand *> memoperands() const {
2563 // Special case the common cases.
2564 if (NumMemRefs == 0)
2565 return {};
2566 if (NumMemRefs == 1)
2567 return makeArrayRef(MemRefs.getAddrOfPtr1(), 1);
2568
2569 // Otherwise we have an actual array.
2570 return makeArrayRef(MemRefs.get<MachineMemOperand **>(), NumMemRefs);
2571 }
2572 mmo_iterator memoperands_begin() const { return memoperands().begin(); }
2573 mmo_iterator memoperands_end() const { return memoperands().end(); }
2574 bool memoperands_empty() const { return memoperands().empty(); }
2575
2576 /// Clear out the memory reference descriptor list.
2577 void clearMemRefs() {
2578 MemRefs = nullptr;
2579 NumMemRefs = 0;
2580 }
2581
2582 static bool classof(const SDNode *N) {
2583 return N->isMachineOpcode();
2584 }
2585};
2586
2587/// An SDNode that records if a register contains a value that is guaranteed to
2588/// be aligned accordingly.
2589class AssertAlignSDNode : public SDNode {
2590 Align Alignment;
2591
2592public:
2593 AssertAlignSDNode(unsigned Order, const DebugLoc &DL, EVT VT, Align A)
2594 : SDNode(ISD::AssertAlign, Order, DL, getSDVTList(VT)), Alignment(A) {}
2595
2596 Align getAlign() const { return Alignment; }
2597
2598 static bool classof(const SDNode *N) {
2599 return N->getOpcode() == ISD::AssertAlign;
2600 }
2601};
2602
2603class SDNodeIterator {
2604 const SDNode *Node;
2605 unsigned Operand;
2606
2607 SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
2608
2609public:
2610 using iterator_category = std::forward_iterator_tag;
2611 using value_type = SDNode;
2612 using difference_type = std::ptrdiff_t;
2613 using pointer = value_type *;
2614 using reference = value_type &;
2615
2616 bool operator==(const SDNodeIterator& x) const {
2617 return Operand == x.Operand;
2618 }
2619 bool operator!=(const SDNodeIterator& x) const { return !operator==(x); }
2620
2621 pointer operator*() const {
2622 return Node->getOperand(Operand).getNode();
2623 }
2624 pointer operator->() const { return operator*(); }
2625
2626 SDNodeIterator& operator++() { // Preincrement
2627 ++Operand;
2628 return *this;
2629 }
2630 SDNodeIterator operator++(int) { // Postincrement
2631 SDNodeIterator tmp = *this; ++*this; return tmp;
2632 }
2633 size_t operator-(SDNodeIterator Other) const {
2634 assert(Node == Other.Node &&(static_cast <bool> (Node == Other.Node && "Cannot compare iterators of two different nodes!"
) ? void (0) : __assert_fail ("Node == Other.Node && \"Cannot compare iterators of two different nodes!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2635, __extension__ __PRETTY_FUNCTION__))
2635 "Cannot compare iterators of two different nodes!")(static_cast <bool> (Node == Other.Node && "Cannot compare iterators of two different nodes!"
) ? void (0) : __assert_fail ("Node == Other.Node && \"Cannot compare iterators of two different nodes!\""
, "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2635, __extension__ __PRETTY_FUNCTION__))
;
2636 return Operand - Other.Operand;
2637 }
2638
2639 static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); }
2640 static SDNodeIterator end (const SDNode *N) {
2641 return SDNodeIterator(N, N->getNumOperands());
2642 }
2643
2644 unsigned getOperand() const { return Operand; }
2645 const SDNode *getNode() const { return Node; }
2646};
2647
2648template <> struct GraphTraits<SDNode*> {
2649 using NodeRef = SDNode *;
2650 using ChildIteratorType = SDNodeIterator;
2651
2652 static NodeRef getEntryNode(SDNode *N) { return N; }
2653
2654 static ChildIteratorType child_begin(NodeRef N) {
2655 return SDNodeIterator::begin(N);
2656 }
2657
2658 static ChildIteratorType child_end(NodeRef N) {
2659 return SDNodeIterator::end(N);
2660 }
2661};
2662
2663/// A representation of the largest SDNode, for use in sizeof().
2664///
2665/// This needs to be a union because the largest node differs on 32 bit systems
2666/// with 4 and 8 byte pointer alignment, respectively.
2667using LargestSDNode = AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
2668 BlockAddressSDNode,
2669 GlobalAddressSDNode,
2670 PseudoProbeSDNode>;
2671
2672/// The SDNode class with the greatest alignment requirement.
2673using MostAlignedSDNode = GlobalAddressSDNode;
2674
2675namespace ISD {
2676
2677 /// Returns true if the specified node is a non-extending and unindexed load.
2678 inline bool isNormalLoad(const SDNode *N) {
2679 const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N);
2680 return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD &&
2681 Ld->getAddressingMode() == ISD::UNINDEXED;
2682 }
2683
2684 /// Returns true if the specified node is a non-extending load.
2685 inline bool isNON_EXTLoad(const SDNode *N) {
2686 return isa<LoadSDNode>(N) &&
2687 cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
2688 }
2689
2690 /// Returns true if the specified node is a EXTLOAD.
2691 inline bool isEXTLoad(const SDNode *N) {
2692 return isa<LoadSDNode>(N) &&
2693 cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
2694 }
2695
2696 /// Returns true if the specified node is a SEXTLOAD.
2697 inline bool isSEXTLoad(const SDNode *N) {
2698 return isa<LoadSDNode>(N) &&
2699 cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
2700 }
2701
2702 /// Returns true if the specified node is a ZEXTLOAD.
2703 inline bool isZEXTLoad(const SDNode *N) {
2704 return isa<LoadSDNode>(N) &&
2705 cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
2706 }
2707
2708 /// Returns true if the specified node is an unindexed load.
2709 inline bool isUNINDEXEDLoad(const SDNode *N) {
2710 return isa<LoadSDNode>(N) &&
2711 cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
2712 }
2713
2714 /// Returns true if the specified node is a non-truncating
2715 /// and unindexed store.
2716 inline bool isNormalStore(const SDNode *N) {
2717 const StoreSDNode *St = dyn_cast<StoreSDNode>(N);
2718 return St && !St->isTruncatingStore() &&
2719 St->getAddressingMode() == ISD::UNINDEXED;
2720 }
2721
2722 /// Returns true if the specified node is an unindexed store.
2723 inline bool isUNINDEXEDStore(const SDNode *N) {
2724 return isa<StoreSDNode>(N) &&
2725 cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
2726 }
2727
2728 /// Attempt to match a unary predicate against a scalar/splat constant or
2729 /// every element of a constant BUILD_VECTOR.
2730 /// If AllowUndef is true, then UNDEF elements will pass nullptr to Match.
2731 bool matchUnaryPredicate(SDValue Op,
2732 std::function<bool(ConstantSDNode *)> Match,
2733 bool AllowUndefs = false);
2734
2735 /// Attempt to match a binary predicate against a pair of scalar/splat
2736 /// constants or every element of a pair of constant BUILD_VECTORs.
2737 /// If AllowUndef is true, then UNDEF elements will pass nullptr to Match.
2738 /// If AllowTypeMismatch is true then RetType + ArgTypes don't need to match.
2739 bool matchBinaryPredicate(
2740 SDValue LHS, SDValue RHS,
2741 std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
2742 bool AllowUndefs = false, bool AllowTypeMismatch = false);
2743
2744 /// Returns true if the specified value is the overflow result from one
2745 /// of the overflow intrinsic nodes.
2746 inline bool isOverflowIntrOpRes(SDValue Op) {
2747 unsigned Opc = Op.getOpcode();
2748 return (Op.getResNo() == 1 &&
2749 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
2750 Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO));
2751 }
2752
2753} // end namespace ISD
2754
2755} // end namespace llvm
2756
2757#endif // LLVM_CODEGEN_SELECTIONDAGNODES_H