Bug Summary

File: lib/Target/ARM/ARMISelDAGToDAG.cpp
Location: line 1385, column 7
Description: Value stored to 'ShOpcVal' is never read
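
Note (editor's addition): the flagged statement is the 'ShOpcVal = ARM_AM::no_shift;' in the else branch of ARMDAGToDAGISel::SelectT2AddrModeSoReg, annotated in the listing below. After that branch only 'ShAmt' is read (it feeds the ShImm operand), so the store has no effect. A minimal standalone C++ sketch of the pattern, with hypothetical names:

    enum ShiftKind { NoShift, Lsl };

    // Sketch only: a status flag is reset on the failure path, but nothing
    // reads it again before the function returns, so the store is dead.
    unsigned selectShifted(bool RhsIsConst, unsigned RhsVal) {
      ShiftKind Kind = Lsl;
      unsigned ShAmt = 0;
      if (RhsIsConst)
        ShAmt = RhsVal & 31;  // ShAmt is consumed below.
      else
        Kind = NoShift;       // Dead store: 'Kind' is never read again.
      return ShAmt;           // Only 'ShAmt' flows into the result.
    }

Assuming no later use of 'ShOpcVal' is intended, deleting the dead assignment would silence the warning without changing behavior.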

Annotated Source Code

1//===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines an instruction selector for the ARM target.
11//
12//===----------------------------------------------------------------------===//
13
14#include "ARM.h"
15#include "ARMBaseInstrInfo.h"
16#include "ARMTargetMachine.h"
17#include "MCTargetDesc/ARMAddressingModes.h"
18#include "llvm/CodeGen/MachineFrameInfo.h"
19#include "llvm/CodeGen/MachineFunction.h"
20#include "llvm/CodeGen/MachineInstrBuilder.h"
21#include "llvm/CodeGen/MachineRegisterInfo.h"
22#include "llvm/CodeGen/SelectionDAG.h"
23#include "llvm/CodeGen/SelectionDAGISel.h"
24#include "llvm/IR/CallingConv.h"
25#include "llvm/IR/Constants.h"
26#include "llvm/IR/DerivedTypes.h"
27#include "llvm/IR/Function.h"
28#include "llvm/IR/Intrinsics.h"
29#include "llvm/IR/LLVMContext.h"
30#include "llvm/Support/CommandLine.h"
31#include "llvm/Support/Compiler.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/Support/ErrorHandling.h"
34#include "llvm/Target/TargetLowering.h"
35#include "llvm/Target/TargetOptions.h"
36
37using namespace llvm;
38
39#define DEBUG_TYPE "arm-isel"
40
41static cl::opt<bool>
42DisableShifterOp("disable-shifter-op", cl::Hidden,
43 cl::desc("Disable isel of shifter-op"),
44 cl::init(false));
45
46static cl::opt<bool>
47CheckVMLxHazard("check-vmlx-hazard", cl::Hidden,
48 cl::desc("Check fp vmla / vmls hazard at isel time"),
49 cl::init(true));
50
51//===--------------------------------------------------------------------===//
52/// ARMDAGToDAGISel - ARM specific code to select ARM machine
53/// instructions for SelectionDAG operations.
54///
55namespace {
56
57enum AddrMode2Type {
58 AM2_BASE, // Simple AM2 (+-imm12)
59 AM2_SHOP // Shifter-op AM2
60};
61
62class ARMDAGToDAGISel : public SelectionDAGISel {
63 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
64 /// make the right decision when generating code for different targets.
65 const ARMSubtarget *Subtarget;
66
67public:
68 explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm, CodeGenOpt::Level OptLevel)
69 : SelectionDAGISel(tm, OptLevel) {}
70
71 bool runOnMachineFunction(MachineFunction &MF) override {
72 // Reset the subtarget each time through.
73 Subtarget = &MF.getTarget().getSubtarget<ARMSubtarget>();
74 SelectionDAGISel::runOnMachineFunction(MF);
75 return true;
76 }
77
78 const char *getPassName() const override {
79 return "ARM Instruction Selection";
80 }
81
82 void PreprocessISelDAG() override;
83
84 /// getI32Imm - Return a target constant of type i32 with the specified
85 /// value.
86 inline SDValue getI32Imm(unsigned Imm) {
87 return CurDAG->getTargetConstant(Imm, MVT::i32);
88 }
89
90 SDNode *Select(SDNode *N) override;
91
92
93 bool hasNoVMLxHazardUse(SDNode *N) const;
94 bool isShifterOpProfitable(const SDValue &Shift,
95 ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
96 bool SelectRegShifterOperand(SDValue N, SDValue &A,
97 SDValue &B, SDValue &C,
98 bool CheckProfitability = true);
99 bool SelectImmShifterOperand(SDValue N, SDValue &A,
100 SDValue &B, bool CheckProfitability = true);
101 bool SelectShiftRegShifterOperand(SDValue N, SDValue &A,
102 SDValue &B, SDValue &C) {
103 // Don't apply the profitability check
104 return SelectRegShifterOperand(N, A, B, C, false);
105 }
106 bool SelectShiftImmShifterOperand(SDValue N, SDValue &A,
107 SDValue &B) {
108 // Don't apply the profitability check
109 return SelectImmShifterOperand(N, A, B, false);
110 }
111
112 bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
113 bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);
114
115 AddrMode2Type SelectAddrMode2Worker(SDValue N, SDValue &Base,
116 SDValue &Offset, SDValue &Opc);
117 bool SelectAddrMode2Base(SDValue N, SDValue &Base, SDValue &Offset,
118 SDValue &Opc) {
119 return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_BASE;
120 }
121
122 bool SelectAddrMode2ShOp(SDValue N, SDValue &Base, SDValue &Offset,
123 SDValue &Opc) {
124 return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_SHOP;
125 }
126
127 bool SelectAddrMode2(SDValue N, SDValue &Base, SDValue &Offset,
128 SDValue &Opc) {
129 SelectAddrMode2Worker(N, Base, Offset, Opc);
130// return SelectAddrMode2ShOp(N, Base, Offset, Opc);
131 // This always matches one way or another.
132 return true;
133 }
134
135 bool SelectCMOVPred(SDValue N, SDValue &Pred, SDValue &Reg) {
136 const ConstantSDNode *CN = cast<ConstantSDNode>(N);
137 Pred = CurDAG->getTargetConstant(CN->getZExtValue(), MVT::i32);
138 Reg = CurDAG->getRegister(ARM::CPSR, MVT::i32);
139 return true;
140 }
141
142 bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
143 SDValue &Offset, SDValue &Opc);
144 bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
145 SDValue &Offset, SDValue &Opc);
146 bool SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
147 SDValue &Offset, SDValue &Opc);
148 bool SelectAddrOffsetNone(SDValue N, SDValue &Base);
149 bool SelectAddrMode3(SDValue N, SDValue &Base,
150 SDValue &Offset, SDValue &Opc);
151 bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
152 SDValue &Offset, SDValue &Opc);
153 bool SelectAddrMode5(SDValue N, SDValue &Base,
154 SDValue &Offset);
155 bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,SDValue &Align);
156 bool SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset);
157
158 bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);
159
160 // Thumb Addressing Modes:
161 bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
162 bool SelectThumbAddrModeRI(SDValue N, SDValue &Base, SDValue &Offset,
163 unsigned Scale);
164 bool SelectThumbAddrModeRI5S1(SDValue N, SDValue &Base, SDValue &Offset);
165 bool SelectThumbAddrModeRI5S2(SDValue N, SDValue &Base, SDValue &Offset);
166 bool SelectThumbAddrModeRI5S4(SDValue N, SDValue &Base, SDValue &Offset);
167 bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
168 SDValue &OffImm);
169 bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
170 SDValue &OffImm);
171 bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
172 SDValue &OffImm);
173 bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
174 SDValue &OffImm);
175 bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);
176
177 // Thumb 2 Addressing Modes:
178 bool SelectT2ShifterOperandReg(SDValue N,
179 SDValue &BaseReg, SDValue &Opc);
180 bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
181 bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
182 SDValue &OffImm);
183 bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
184 SDValue &OffImm);
185 bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
186 SDValue &OffReg, SDValue &ShImm);
187 bool SelectT2AddrModeExclusive(SDValue N, SDValue &Base, SDValue &OffImm);
188
189 inline bool is_so_imm(unsigned Imm) const {
190 return ARM_AM::getSOImmVal(Imm) != -1;
191 }
192
193 inline bool is_so_imm_not(unsigned Imm) const {
194 return ARM_AM::getSOImmVal(~Imm) != -1;
195 }
196
197 inline bool is_t2_so_imm(unsigned Imm) const {
198 return ARM_AM::getT2SOImmVal(Imm) != -1;
199 }
200
201 inline bool is_t2_so_imm_not(unsigned Imm) const {
202 return ARM_AM::getT2SOImmVal(~Imm) != -1;
203 }
204
205 // Include the pieces autogenerated from the target description.
206#include "ARMGenDAGISel.inc"
207
208private:
209 /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
210 /// ARM.
211 SDNode *SelectARMIndexedLoad(SDNode *N);
212 SDNode *SelectT2IndexedLoad(SDNode *N);
213
214 /// SelectVLD - Select NEON load intrinsics. NumVecs should be
215 /// 1, 2, 3 or 4. The opcode arrays specify the instructions used for
216 /// loads of D registers and even subregs and odd subregs of Q registers.
217 /// For NumVecs <= 2, QOpcodes1 is not used.
218 SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
219 const uint16_t *DOpcodes,
220 const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);
221
222 /// SelectVST - Select NEON store intrinsics. NumVecs should
223 /// be 1, 2, 3 or 4. The opcode arrays specify the instructions used for
224 /// stores of D registers and even subregs and odd subregs of Q registers.
225 /// For NumVecs <= 2, QOpcodes1 is not used.
226 SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
227 const uint16_t *DOpcodes,
228 const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);
229
230 /// SelectVLDSTLane - Select NEON load/store lane intrinsics. NumVecs should
231 /// be 2, 3 or 4. The opcode arrays specify the instructions used for
232 /// load/store of D registers and Q registers.
233 SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad,
234 bool isUpdating, unsigned NumVecs,
235 const uint16_t *DOpcodes, const uint16_t *QOpcodes);
236
237 /// SelectVLDDup - Select NEON load-duplicate intrinsics. NumVecs
238 /// should be 2, 3 or 4. The opcode array specifies the instructions used
239 /// for loading D registers. (Q registers are not supported.)
240 SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
241 const uint16_t *Opcodes);
242
243 /// SelectVTBL - Select NEON VTBL and VTBX intrinsics. NumVecs should be 2,
244 /// 3 or 4. These are custom-selected so that a REG_SEQUENCE can be
245 /// generated to force the table registers to be consecutive.
246 SDNode *SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);
247
248 /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
249 SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);
250
251 // Select special operations if node forms integer ABS pattern
252 SDNode *SelectABSOp(SDNode *N);
253
254 SDNode *SelectInlineAsm(SDNode *N);
255
256 SDNode *SelectConcatVector(SDNode *N);
257
258 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
259 /// inline asm expressions.
260 bool SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
261 std::vector<SDValue> &OutOps) override;
262
263 // Form pairs of consecutive R, S, D, or Q registers.
264 SDNode *createGPRPairNode(EVT VT, SDValue V0, SDValue V1);
265 SDNode *createSRegPairNode(EVT VT, SDValue V0, SDValue V1);
266 SDNode *createDRegPairNode(EVT VT, SDValue V0, SDValue V1);
267 SDNode *createQRegPairNode(EVT VT, SDValue V0, SDValue V1);
268
269 // Form sequences of 4 consecutive S, D, or Q registers.
270 SDNode *createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
271 SDNode *createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
272 SDNode *createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
273
274 // Get the alignment operand for a NEON VLD or VST instruction.
275 SDValue GetVLDSTAlign(SDValue Align, unsigned NumVecs, bool is64BitVector);
276};
277}
278
279/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
280/// operand. If so Imm will receive the 32-bit value.
281static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
282 if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
283 Imm = cast<ConstantSDNode>(N)->getZExtValue();
284 return true;
285 }
286 return false;
287}
288
289// isInt32Immediate - This method tests to see if the node is a constant
290// operand. If so Imm will receive the 32-bit value.
291static bool isInt32Immediate(SDValue N, unsigned &Imm) {
292 return isInt32Immediate(N.getNode(), Imm);
293}
294
295// isOpcWithIntImmediate - This method tests to see if the node is a specific
296// opcode and that it has an immediate integer right operand.
297// If so Imm will receive the 32-bit value.
298static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
299 return N->getOpcode() == Opc &&
300 isInt32Immediate(N->getOperand(1).getNode(), Imm);
301}
302
303/// \brief Check whether a particular node is a constant value representable as
304/// (N * Scale) where N is in [\p RangeMin, \p RangeMax).
305///
306/// \param ScaledConstant [out] - On success, the pre-scaled constant value.
307static bool isScaledConstantInRange(SDValue Node, int Scale,
308 int RangeMin, int RangeMax,
309 int &ScaledConstant) {
310 assert(Scale > 0 && "Invalid scale!");
311
312 // Check that this is a constant.
313 const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
314 if (!C)
315 return false;
316
317 ScaledConstant = (int) C->getZExtValue();
318 if ((ScaledConstant % Scale) != 0)
319 return false;
320
321 ScaledConstant /= Scale;
322 return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
323}
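
// Editor's note (not part of the original source): worked examples of the
// helper above, using the AddrMode5 call later in this file (Scale=4,
// RangeMin=-255, RangeMax=256):
//   Node == 1020  -> ScaledConstant = 255, in [-255, 256) -> match
//   Node == 1022  -> 1022 % 4 != 0 -> no match
//   Node == -1024 -> ScaledConstant = -256, below RangeMin -> no match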
324
325void ARMDAGToDAGISel::PreprocessISelDAG() {
326 if (!Subtarget->hasV6T2Ops())
327 return;
328
329 bool isThumb2 = Subtarget->isThumb();
330 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
331 E = CurDAG->allnodes_end(); I != E; ) {
332 SDNode *N = I++; // Advance the iterator first to avoid invalidation issues.
333
334 if (N->getOpcode() != ISD::ADD)
335 continue;
336
337 // Look for (add X1, (and (srl X2, c1), c2)) where c2 is a constant with
338 // leading zeros, followed by consecutive set bits, followed by 1 or 2
339 // trailing zeros, e.g. 1020.
340 // Transform the expression to
341 // (add X1, (shl (and (srl X2, c1+tz), (c2>>tz)), tz)) where tz is the
342 // number of trailing zeros of c2. The left shift would be folded as a
343 // shifter operand of 'add' and the 'and' and 'srl' would become a
344 // bit-field extraction node (UBFX).
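 // Editor's note (not part of the original source): a worked instance,
 // assuming c1 = 14 and c2 = 1020 = 0b1111111100 (tz = 2, c2>>tz = 255):
 //   (add X1, (and (srl X2, 14), 1020))
 //   -> (add X1, (shl (and (srl X2, 16), 255), 2))
 // The srl-plus-255-mask pair later matches UBFX, and the shl folds into
 // the add's shifter operand.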
345
346 SDValue N0 = N->getOperand(0);
347 SDValue N1 = N->getOperand(1);
348 unsigned And_imm = 0;
349 if (!isOpcWithIntImmediate(N1.getNode(), ISD::AND, And_imm)) {
350 if (isOpcWithIntImmediate(N0.getNode(), ISD::AND, And_imm))
351 std::swap(N0, N1);
352 }
353 if (!And_imm)
354 continue;
355
356 // Check if the AND mask is an immediate of the form: 000.....1111111100
357 unsigned TZ = countTrailingZeros(And_imm);
358 if (TZ != 1 && TZ != 2)
359 // Be conservative here. Shifter operands aren't always free. e.g. On
360 // Swift, left shifter operand of 1 / 2 is free but others are not.
361 // e.g.
362 // ubfx r3, r1, #16, #8
363 // ldr.w r3, [r0, r3, lsl #2]
364 // vs.
365 // mov.w r9, #1020
366 // and.w r2, r9, r1, lsr #14
367 // ldr r2, [r0, r2]
368 continue;
369 And_imm >>= TZ;
370 if (And_imm & (And_imm + 1))
371 continue;
372
373 // Look for (and (srl X, c1), c2).
374 SDValue Srl = N1.getOperand(0);
375 unsigned Srl_imm = 0;
376 if (!isOpcWithIntImmediate(Srl.getNode(), ISD::SRL, Srl_imm) ||
377 (Srl_imm <= 2))
378 continue;
379
380 // Make sure first operand is not a shifter operand which would prevent
381 // folding of the left shift.
382 SDValue CPTmp0;
383 SDValue CPTmp1;
384 SDValue CPTmp2;
385 if (isThumb2) {
386 if (SelectT2ShifterOperandReg(N0, CPTmp0, CPTmp1))
387 continue;
388 } else {
389 if (SelectImmShifterOperand(N0, CPTmp0, CPTmp1) ||
390 SelectRegShifterOperand(N0, CPTmp0, CPTmp1, CPTmp2))
391 continue;
392 }
393
394 // Now make the transformation.
395 Srl = CurDAG->getNode(ISD::SRL, SDLoc(Srl), MVT::i32,
396 Srl.getOperand(0),
397 CurDAG->getConstant(Srl_imm+TZ, MVT::i32));
398 N1 = CurDAG->getNode(ISD::AND, SDLoc(N1), MVT::i32,
399 Srl, CurDAG->getConstant(And_imm, MVT::i32));
400 N1 = CurDAG->getNode(ISD::SHL, SDLoc(N1), MVT::i32,
401 N1, CurDAG->getConstant(TZ, MVT::i32));
402 CurDAG->UpdateNodeOperands(N, N0, N1);
403 }
404}
405
406/// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
407/// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
408 // least on current ARM implementations) which should be avoided.
409bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
410 if (OptLevel == CodeGenOpt::None)
411 return true;
412
413 if (!CheckVMLxHazard)
414 return true;
415
416 if (!Subtarget->isCortexA7() && !Subtarget->isCortexA8() &&
417 !Subtarget->isCortexA9() && !Subtarget->isSwift())
418 return true;
419
420 if (!N->hasOneUse())
421 return false;
422
423 SDNode *Use = *N->use_begin();
424 if (Use->getOpcode() == ISD::CopyToReg)
425 return true;
426 if (Use->isMachineOpcode()) {
427 const ARMBaseInstrInfo *TII = static_cast<const ARMBaseInstrInfo *>(
428 CurDAG->getSubtarget().getInstrInfo());
429
430 const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
431 if (MCID.mayStore())
432 return true;
433 unsigned Opcode = MCID.getOpcode();
434 if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
435 return true;
436 // vmlx feeding into another vmlx. We actually want to unfold
437 // the use later in the MLxExpansion pass. e.g.
438 // vmla
439 // vmla (stall 8 cycles)
440 //
441 // vmul (5 cycles)
442 // vadd (5 cycles)
443 // vmla
444 // This adds up to about 18 - 19 cycles.
445 //
446 // vmla
447 // vmul (stall 4 cycles)
448 // vadd adds up to about 14 cycles.
449 return TII->isFpMLxInstruction(Opcode);
450 }
451
452 return false;
453}
454
455bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
456 ARM_AM::ShiftOpc ShOpcVal,
457 unsigned ShAmt) {
458 if (!Subtarget->isLikeA9() && !Subtarget->isSwift())
459 return true;
460 if (Shift.hasOneUse())
461 return true;
462 // R << 2 is free.
463 return ShOpcVal == ARM_AM::lsl &&
464 (ShAmt == 2 || (Subtarget->isSwift() && ShAmt == 1));
465}
466
467bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
468 SDValue &BaseReg,
469 SDValue &Opc,
470 bool CheckProfitability) {
471 if (DisableShifterOp)
472 return false;
473
474 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
475
476 // Don't match base register only case. That is matched to a separate
477 // lower complexity pattern with explicit register operand.
478 if (ShOpcVal == ARM_AM::no_shift) return false;
479
480 BaseReg = N.getOperand(0);
481 unsigned ShImmVal = 0;
482 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
483 if (!RHS) return false;
484 ShImmVal = RHS->getZExtValue() & 31;
485 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
486 MVT::i32);
487 return true;
488}
489
490bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
491 SDValue &BaseReg,
492 SDValue &ShReg,
493 SDValue &Opc,
494 bool CheckProfitability) {
495 if (DisableShifterOp)
496 return false;
497
498 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
499
500 // Don't match base register only case. That is matched to a separate
501 // lower complexity pattern with explicit register operand.
502 if (ShOpcVal == ARM_AM::no_shift) return false;
503
504 BaseReg = N.getOperand(0);
505 unsigned ShImmVal = 0;
506 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
507 if (RHS) return false;
508
509 ShReg = N.getOperand(1);
510 if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
511 return false;
512 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
513 MVT::i32);
514 return true;
515}
516
517
518bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
519 SDValue &Base,
520 SDValue &OffImm) {
521 // Match simple R + imm12 operands.
522
523 // Base only.
524 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
525 !CurDAG->isBaseWithConstantOffset(N)) {
526 if (N.getOpcode() == ISD::FrameIndex) {
527 // Match frame index.
528 int FI = cast<FrameIndexSDNode>(N)->getIndex();
529 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
530 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
531 return true;
532 }
533
534 if (N.getOpcode() == ARMISD::Wrapper &&
535 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
536 Base = N.getOperand(0);
537 } else
538 Base = N;
539 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
540 return true;
541 }
542
543 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
544 int RHSC = (int)RHS->getSExtValue();
545 if (N.getOpcode() == ISD::SUB)
546 RHSC = -RHSC;
547
548 if (RHSC > -0x1000 && RHSC < 0x1000) { // 12 bits
549 Base = N.getOperand(0);
550 if (Base.getOpcode() == ISD::FrameIndex) {
551 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
552 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
553 }
554 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
555 return true;
556 }
557 }
558
559 // Base only.
560 Base = N;
561 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
562 return true;
563}
564
565
566
567bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
568 SDValue &Opc) {
569 if (N.getOpcode() == ISD::MUL &&
570 ((!Subtarget->isLikeA9() && !Subtarget->isSwift()) || N.hasOneUse())) {
571 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
572 // X * [3,5,9] -> X + X * [2,4,8] etc.
573 int RHSC = (int)RHS->getZExtValue();
574 if (RHSC & 1) {
575 RHSC = RHSC & ~1;
576 ARM_AM::AddrOpc AddSub = ARM_AM::add;
577 if (RHSC < 0) {
578 AddSub = ARM_AM::sub;
579 RHSC = - RHSC;
580 }
581 if (isPowerOf2_32(RHSC)) {
582 unsigned ShAmt = Log2_32(RHSC);
583 Base = Offset = N.getOperand(0);
584 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
585 ARM_AM::lsl),
586 MVT::i32);
587 return true;
588 }
589 }
590 }
591 }
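 // Editor's note (not part of the original source): worked instance of the
 // multiply folding above: RHSC = 9 -> RHSC & ~1 = 8, ShAmt = 3, so the
 // address computes Base + (Offset << 3) = X + 8*X = 9*X; RHSC = -7 flips
 // AddSub to sub and yields X - 8*X = -7*X.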
592
593 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
594 // ISD::OR that is equivalent to an ISD::ADD.
595 !CurDAG->isBaseWithConstantOffset(N))
596 return false;
597
598 // Leave simple R +/- imm12 operands for LDRi12
599 if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
600 int RHSC;
601 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
602 -0x1000+1, 0x1000, RHSC)) // 12 bits.
603 return false;
604 }
605
606 // Otherwise this is R +/- [possibly shifted] R.
607 ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub:ARM_AM::add;
608 ARM_AM::ShiftOpc ShOpcVal =
609 ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
610 unsigned ShAmt = 0;
611
612 Base = N.getOperand(0);
613 Offset = N.getOperand(1);
614
615 if (ShOpcVal != ARM_AM::no_shift) {
616 // Check to see if the RHS of the shift is a constant, if not, we can't fold
617 // it.
618 if (ConstantSDNode *Sh =
619 dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
620 ShAmt = Sh->getZExtValue();
621 if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
622 Offset = N.getOperand(1).getOperand(0);
623 else {
624 ShAmt = 0;
625 ShOpcVal = ARM_AM::no_shift;
626 }
627 } else {
628 ShOpcVal = ARM_AM::no_shift;
629 }
630 }
631
632 // Try matching (R shl C) + (R).
633 if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
634 !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
635 N.getOperand(0).hasOneUse())) {
636 ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
637 if (ShOpcVal != ARM_AM::no_shift) {
638 // Check to see if the RHS of the shift is a constant, if not, we can't
639 // fold it.
640 if (ConstantSDNode *Sh =
641 dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
642 ShAmt = Sh->getZExtValue();
643 if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
644 Offset = N.getOperand(0).getOperand(0);
645 Base = N.getOperand(1);
646 } else {
647 ShAmt = 0;
648 ShOpcVal = ARM_AM::no_shift;
649 }
650 } else {
651 ShOpcVal = ARM_AM::no_shift;
652 }
653 }
654 }
655
656 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
657 MVT::i32);
658 return true;
659}
660
661
662//-----
663
664AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
665 SDValue &Base,
666 SDValue &Offset,
667 SDValue &Opc) {
668 if (N.getOpcode() == ISD::MUL &&
669 (!(Subtarget->isLikeA9() || Subtarget->isSwift()) || N.hasOneUse())) {
670 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
671 // X * [3,5,9] -> X + X * [2,4,8] etc.
672 int RHSC = (int)RHS->getZExtValue();
673 if (RHSC & 1) {
674 RHSC = RHSC & ~1;
675 ARM_AM::AddrOpc AddSub = ARM_AM::add;
676 if (RHSC < 0) {
677 AddSub = ARM_AM::sub;
678 RHSC = - RHSC;
679 }
680 if (isPowerOf2_32(RHSC)) {
681 unsigned ShAmt = Log2_32(RHSC);
682 Base = Offset = N.getOperand(0);
683 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
684 ARM_AM::lsl),
685 MVT::i32);
686 return AM2_SHOP;
687 }
688 }
689 }
690 }
691
692 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
693 // ISD::OR that is equivalent to an ADD.
694 !CurDAG->isBaseWithConstantOffset(N)) {
695 Base = N;
696 if (N.getOpcode() == ISD::FrameIndex) {
697 int FI = cast<FrameIndexSDNode>(N)->getIndex();
698 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
699 } else if (N.getOpcode() == ARMISD::Wrapper &&
700 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
701 Base = N.getOperand(0);
702 }
703 Offset = CurDAG->getRegister(0, MVT::i32);
704 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
705 ARM_AM::no_shift),
706 MVT::i32);
707 return AM2_BASE;
708 }
709
710 // Match simple R +/- imm12 operands.
711 if (N.getOpcode() != ISD::SUB) {
712 int RHSC;
713 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
714 -0x1000+1, 0x1000, RHSC)) { // 12 bits.
715 Base = N.getOperand(0);
716 if (Base.getOpcode() == ISD::FrameIndex) {
717 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
718 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
719 }
720 Offset = CurDAG->getRegister(0, MVT::i32);
721
722 ARM_AM::AddrOpc AddSub = ARM_AM::add;
723 if (RHSC < 0) {
724 AddSub = ARM_AM::sub;
725 RHSC = - RHSC;
726 }
727 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
728 ARM_AM::no_shift),
729 MVT::i32);
730 return AM2_BASE;
731 }
732 }
733
734 if ((Subtarget->isLikeA9() || Subtarget->isSwift()) && !N.hasOneUse()) {
735 // Compute R +/- (R << N) and reuse it.
736 Base = N;
737 Offset = CurDAG->getRegister(0, MVT::i32);
738 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
739 ARM_AM::no_shift),
740 MVT::i32);
741 return AM2_BASE;
742 }
743
744 // Otherwise this is R +/- [possibly shifted] R.
745 ARM_AM::AddrOpc AddSub = N.getOpcode() != ISD::SUB ? ARM_AM::add:ARM_AM::sub;
746 ARM_AM::ShiftOpc ShOpcVal =
747 ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
748 unsigned ShAmt = 0;
749
750 Base = N.getOperand(0);
751 Offset = N.getOperand(1);
752
753 if (ShOpcVal != ARM_AM::no_shift) {
754 // Check to see if the RHS of the shift is a constant, if not, we can't fold
755 // it.
756 if (ConstantSDNode *Sh =
757 dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
758 ShAmt = Sh->getZExtValue();
759 if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
760 Offset = N.getOperand(1).getOperand(0);
761 else {
762 ShAmt = 0;
763 ShOpcVal = ARM_AM::no_shift;
764 }
765 } else {
766 ShOpcVal = ARM_AM::no_shift;
767 }
768 }
769
770 // Try matching (R shl C) + (R).
771 if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
772 !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
773 N.getOperand(0).hasOneUse())) {
774 ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
775 if (ShOpcVal != ARM_AM::no_shift) {
776 // Check to see if the RHS of the shift is a constant, if not, we can't
777 // fold it.
778 if (ConstantSDNode *Sh =
779 dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
780 ShAmt = Sh->getZExtValue();
781 if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
782 Offset = N.getOperand(0).getOperand(0);
783 Base = N.getOperand(1);
784 } else {
785 ShAmt = 0;
786 ShOpcVal = ARM_AM::no_shift;
787 }
788 } else {
789 ShOpcVal = ARM_AM::no_shift;
790 }
791 }
792 }
793
794 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
795 MVT::i32);
796 return AM2_SHOP;
797}
798
799bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
800 SDValue &Offset, SDValue &Opc) {
801 unsigned Opcode = Op->getOpcode();
802 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
803 ? cast<LoadSDNode>(Op)->getAddressingMode()
804 : cast<StoreSDNode>(Op)->getAddressingMode();
805 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
806 ? ARM_AM::add : ARM_AM::sub;
807 int Val;
808 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val))
809 return false;
810
811 Offset = N;
812 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
813 unsigned ShAmt = 0;
814 if (ShOpcVal != ARM_AM::no_shift) {
815 // Check to see if the RHS of the shift is a constant, if not, we can't fold
816 // it.
817 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
818 ShAmt = Sh->getZExtValue();
819 if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
820 Offset = N.getOperand(0);
821 else {
822 ShAmt = 0;
823 ShOpcVal = ARM_AM::no_shift;
824 }
825 } else {
826 ShOpcVal = ARM_AM::no_shift;
827 }
828 }
829
830 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
831 MVT::i32);
832 return true;
833}
834
835bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
836 SDValue &Offset, SDValue &Opc) {
837 unsigned Opcode = Op->getOpcode();
838 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
839 ? cast<LoadSDNode>(Op)->getAddressingMode()
840 : cast<StoreSDNode>(Op)->getAddressingMode();
841 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
842 ? ARM_AM::add : ARM_AM::sub;
843 int Val;
844 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
845 if (AddSub == ARM_AM::sub) Val *= -1;
846 Offset = CurDAG->getRegister(0, MVT::i32);
847 Opc = CurDAG->getTargetConstant(Val, MVT::i32);
848 return true;
849 }
850
851 return false;
852}
853
854
855bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
856 SDValue &Offset, SDValue &Opc) {
857 unsigned Opcode = Op->getOpcode();
858 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
859 ? cast<LoadSDNode>(Op)->getAddressingMode()
860 : cast<StoreSDNode>(Op)->getAddressingMode();
861 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
862 ? ARM_AM::add : ARM_AM::sub;
863 int Val;
864 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
865 Offset = CurDAG->getRegister(0, MVT::i32);
866 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
867 ARM_AM::no_shift),
868 MVT::i32);
869 return true;
870 }
871
872 return false;
873}
874
875bool ARMDAGToDAGISel::SelectAddrOffsetNone(SDValue N, SDValue &Base) {
876 Base = N;
877 return true;
878}
879
880bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
881 SDValue &Base, SDValue &Offset,
882 SDValue &Opc) {
883 if (N.getOpcode() == ISD::SUB) {
884 // X - C is canonicalized to X + -C, no need to handle it here.
885 Base = N.getOperand(0);
886 Offset = N.getOperand(1);
887 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
888 return true;
889 }
890
891 if (!CurDAG->isBaseWithConstantOffset(N)) {
892 Base = N;
893 if (N.getOpcode() == ISD::FrameIndex) {
894 int FI = cast<FrameIndexSDNode>(N)->getIndex();
895 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
896 }
897 Offset = CurDAG->getRegister(0, MVT::i32);
898 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
899 return true;
900 }
901
902 // If the RHS is +/- imm8, fold into addr mode.
903 int RHSC;
904 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
905 -256 + 1, 256, RHSC)) { // 8 bits.
906 Base = N.getOperand(0);
907 if (Base.getOpcode() == ISD::FrameIndex) {
908 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
909 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
910 }
911 Offset = CurDAG->getRegister(0, MVT::i32);
912
913 ARM_AM::AddrOpc AddSub = ARM_AM::add;
914 if (RHSC < 0) {
915 AddSub = ARM_AM::sub;
916 RHSC = -RHSC;
917 }
918 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
919 return true;
920 }
921
922 Base = N.getOperand(0);
923 Offset = N.getOperand(1);
924 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
925 return true;
926}
927
928bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
929 SDValue &Offset, SDValue &Opc) {
930 unsigned Opcode = Op->getOpcode();
931 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
932 ? cast<LoadSDNode>(Op)->getAddressingMode()
933 : cast<StoreSDNode>(Op)->getAddressingMode();
934 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
935 ? ARM_AM::add : ARM_AM::sub;
936 int Val;
937 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 8 bits.
938 Offset = CurDAG->getRegister(0, MVT::i32);
939 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
940 return true;
941 }
942
943 Offset = N;
944 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
945 return true;
946}
947
948bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
949 SDValue &Base, SDValue &Offset) {
950 if (!CurDAG->isBaseWithConstantOffset(N)) {
951 Base = N;
952 if (N.getOpcode() == ISD::FrameIndex) {
953 int FI = cast<FrameIndexSDNode>(N)->getIndex();
954 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
955 } else if (N.getOpcode() == ARMISD::Wrapper &&
956 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
957 Base = N.getOperand(0);
958 }
959 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
960 MVT::i32);
961 return true;
962 }
963
964 // If the RHS is +/- imm8, fold into addr mode.
965 int RHSC;
966 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4,
967 -256 + 1, 256, RHSC)) {
968 Base = N.getOperand(0);
969 if (Base.getOpcode() == ISD::FrameIndex) {
970 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
971 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
972 }
973
974 ARM_AM::AddrOpc AddSub = ARM_AM::add;
975 if (RHSC < 0) {
976 AddSub = ARM_AM::sub;
977 RHSC = -RHSC;
978 }
979 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
980 MVT::i32);
981 return true;
982 }
983
984 Base = N;
985 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
986 MVT::i32);
987 return true;
988}
989
990bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
991 SDValue &Align) {
992 Addr = N;
993
994 unsigned Alignment = 0;
995 if (LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(Parent)) {
996 // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
997 // The maximum alignment is equal to the memory size being referenced.
998 unsigned LSNAlign = LSN->getAlignment();
999 unsigned MemSize = LSN->getMemoryVT().getSizeInBits() / 8;
1000 if (LSNAlign >= MemSize && MemSize > 1)
1001 Alignment = MemSize;
1002 } else {
1003 // All other uses of addrmode6 are for intrinsics. For now just record
1004 // the raw alignment value; it will be refined later based on the legal
1005 // alignment operands for the intrinsic.
1006 Alignment = cast<MemIntrinsicSDNode>(Parent)->getAlignment();
1007 }
1008
1009 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
1010 return true;
1011}
1012
1013bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N,
1014 SDValue &Offset) {
1015 LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
1016 ISD::MemIndexedMode AM = LdSt->getAddressingMode();
1017 if (AM != ISD::POST_INC)
1018 return false;
1019 Offset = N;
1020 if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N)) {
1021 if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())
1022 Offset = CurDAG->getRegister(0, MVT::i32);
1023 }
1024 return true;
1025}
1026
1027bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
1028 SDValue &Offset, SDValue &Label) {
1029 if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
1030 Offset = N.getOperand(0);
1031 SDValue N1 = N.getOperand(1);
1032 Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
1033 MVT::i32);
1034 return true;
1035 }
1036
1037 return false;
1038}
1039
1040
1041//===----------------------------------------------------------------------===//
1042// Thumb Addressing Modes
1043//===----------------------------------------------------------------------===//
1044
1045bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N,
1046 SDValue &Base, SDValue &Offset){
1047 if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
1048 ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
1049 if (!NC || !NC->isNullValue())
1050 return false;
1051
1052 Base = Offset = N;
1053 return true;
1054 }
1055
1056 Base = N.getOperand(0);
1057 Offset = N.getOperand(1);
1058 return true;
1059}
1060
1061bool
1062ARMDAGToDAGISel::SelectThumbAddrModeRI(SDValue N, SDValue &Base,
1063 SDValue &Offset, unsigned Scale) {
1064 if (Scale == 4) {
1065 SDValue TmpBase, TmpOffImm;
1066 if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
1067 return false; // We want to select tLDRspi / tSTRspi instead.
1068
1069 if (N.getOpcode() == ARMISD::Wrapper &&
1070 N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
1071 return false; // We want to select tLDRpci instead.
1072 }
1073
1074 if (!CurDAG->isBaseWithConstantOffset(N))
1075 return false;
1076
1077 // Thumb does not have [sp, r] address mode.
1078 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
1079 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
1080 if ((LHSR && LHSR->getReg() == ARM::SP) ||
1081 (RHSR && RHSR->getReg() == ARM::SP))
1082 return false;
1083
1084 // FIXME: Why do we explicitly check for a match here and then return false?
1085 // Presumably to allow something else to match, but shouldn't this be
1086 // documented?
1087 int RHSC;
1088 if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC))
1089 return false;
1090
1091 Base = N.getOperand(0);
1092 Offset = N.getOperand(1);
1093 return true;
1094}
1095
1096bool
1097ARMDAGToDAGISel::SelectThumbAddrModeRI5S1(SDValue N,
1098 SDValue &Base,
1099 SDValue &Offset) {
1100 return SelectThumbAddrModeRI(N, Base, Offset, 1);
1101}
1102
1103bool
1104ARMDAGToDAGISel::SelectThumbAddrModeRI5S2(SDValue N,
1105 SDValue &Base,
1106 SDValue &Offset) {
1107 return SelectThumbAddrModeRI(N, Base, Offset, 2);
1108}
1109
1110bool
1111ARMDAGToDAGISel::SelectThumbAddrModeRI5S4(SDValue N,
1112 SDValue &Base,
1113 SDValue &Offset) {
1114 return SelectThumbAddrModeRI(N, Base, Offset, 4);
1115}
1116
1117bool
1118ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
1119 SDValue &Base, SDValue &OffImm) {
1120 if (Scale == 4) {
1121 SDValue TmpBase, TmpOffImm;
1122 if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
1123 return false; // We want to select tLDRspi / tSTRspi instead.
1124
1125 if (N.getOpcode() == ARMISD::Wrapper &&
1126 N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
1127 return false; // We want to select tLDRpci instead.
1128 }
1129
1130 if (!CurDAG->isBaseWithConstantOffset(N)) {
1131 if (N.getOpcode() == ARMISD::Wrapper &&
1132 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
1133 Base = N.getOperand(0);
1134 } else {
1135 Base = N;
1136 }
1137
1138 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1139 return true;
1140 }
1141
1142 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
1143 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
1144 if ((LHSR && LHSR->getReg() == ARM::SP) ||
1145 (RHSR && RHSR->getReg() == ARM::SP)) {
1146 ConstantSDNode *LHS = dyn_cast<ConstantSDNode>(N.getOperand(0));
1147 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
1148 unsigned LHSC = LHS ? LHS->getZExtValue() : 0;
1149 unsigned RHSC = RHS ? RHS->getZExtValue() : 0;
1150
1151 // Thumb does not have [sp, #imm5] address mode for non-zero imm5.
1152 if (LHSC != 0 || RHSC != 0) return false;
1153
1154 Base = N;
1155 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1156 return true;
1157 }
1158
1159 // If the RHS is + imm5 * scale, fold into addr mode.
1160 int RHSC;
1161 if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
1162 Base = N.getOperand(0);
1163 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1164 return true;
1165 }
1166
1167 Base = N.getOperand(0);
1168 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1169 return true;
1170}
1171
1172bool
1173ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
1174 SDValue &OffImm) {
1175 return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
1176}
1177
1178bool
1179ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
1180 SDValue &OffImm) {
1181 return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
1182}
1183
1184bool
1185ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
1186 SDValue &OffImm) {
1187 return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
1188}
1189
1190bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
1191 SDValue &Base, SDValue &OffImm) {
1192 if (N.getOpcode() == ISD::FrameIndex) {
1193 int FI = cast<FrameIndexSDNode>(N)->getIndex();
1194 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
1195 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1196 return true;
1197 }
1198
1199 if (!CurDAG->isBaseWithConstantOffset(N))
1200 return false;
1201
1202 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
1203 if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
1204 (LHSR && LHSR->getReg() == ARM::SP)) {
1205 // If the RHS is + imm8 * scale, fold into addr mode.
1206 int RHSC;
1207 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
1208 Base = N.getOperand(0);
1209 if (Base.getOpcode() == ISD::FrameIndex) {
1210 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1211 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
1212 }
1213 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1214 return true;
1215 }
1216 }
1217
1218 return false;
1219}
1220
1221
1222//===----------------------------------------------------------------------===//
1223// Thumb 2 Addressing Modes
1224//===----------------------------------------------------------------------===//
1225
1226
1227bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue N, SDValue &BaseReg,
1228 SDValue &Opc) {
1229 if (DisableShifterOp)
1230 return false;
1231
1232 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
1233
1234 // Don't match base register only case. That is matched to a separate
1235 // lower complexity pattern with explicit register operand.
1236 if (ShOpcVal == ARM_AM::no_shift) return false;
1237
1238 BaseReg = N.getOperand(0);
1239 unsigned ShImmVal = 0;
1240 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1241 ShImmVal = RHS->getZExtValue() & 31;
1242 Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
1243 return true;
1244 }
1245
1246 return false;
1247}
1248
1249bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
1250 SDValue &Base, SDValue &OffImm) {
1251 // Match simple R + imm12 operands.
1252
1253 // Base only.
1254 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
1255 !CurDAG->isBaseWithConstantOffset(N)) {
1256 if (N.getOpcode() == ISD::FrameIndex) {
1257 // Match frame index.
1258 int FI = cast<FrameIndexSDNode>(N)->getIndex();
1259 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
1260 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1261 return true;
1262 }
1263
1264 if (N.getOpcode() == ARMISD::Wrapper &&
1265 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
1266 Base = N.getOperand(0);
1267 if (Base.getOpcode() == ISD::TargetConstantPool)
1268 return false; // We want to select t2LDRpci instead.
1269 } else
1270 Base = N;
1271 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1272 return true;
1273 }
1274
1275 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1276 if (SelectT2AddrModeImm8(N, Base, OffImm))
1277 // Let t2LDRi8 handle (R - imm8).
1278 return false;
1279
1280 int RHSC = (int)RHS->getZExtValue();
1281 if (N.getOpcode() == ISD::SUB)
1282 RHSC = -RHSC;
1283
1284 if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
1285 Base = N.getOperand(0);
1286 if (Base.getOpcode() == ISD::FrameIndex) {
1287 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1288 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
1289 }
1290 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1291 return true;
1292 }
1293 }
1294
1295 // Base only.
1296 Base = N;
1297 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1298 return true;
1299}
1300
1301bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
1302 SDValue &Base, SDValue &OffImm) {
1303 // Match simple R - imm8 operands.
1304 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
1305 !CurDAG->isBaseWithConstantOffset(N))
1306 return false;
1307
1308 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1309 int RHSC = (int)RHS->getSExtValue();
1310 if (N.getOpcode() == ISD::SUB)
1311 RHSC = -RHSC;
1312
1313 if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
1314 Base = N.getOperand(0);
1315 if (Base.getOpcode() == ISD::FrameIndex) {
1316 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1317 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
1318 }
1319 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1320 return true;
1321 }
1322 }
1323
1324 return false;
1325}
1326
1327bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
1328 SDValue &OffImm){
1329 unsigned Opcode = Op->getOpcode();
1330 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
1331 ? cast<LoadSDNode>(Op)->getAddressingMode()
1332 : cast<StoreSDNode>(Op)->getAddressingMode();
1333 int RHSC;
1334 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
1335 OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
1336 ? CurDAG->getTargetConstant(RHSC, MVT::i32)
1337 : CurDAG->getTargetConstant(-RHSC, MVT::i32);
1338 return true;
1339 }
1340
1341 return false;
1342}
1343
1344bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
1345 SDValue &Base,
1346 SDValue &OffReg, SDValue &ShImm) {
1347 // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
1348 if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))
1349 return false;
1350
1351 // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
1352 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1353 int RHSC = (int)RHS->getZExtValue();
1354 if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
1355 return false;
1356 else if (RHSC < 0 && RHSC >= -255) // 8 bits
1357 return false;
1358 }
1359
1360 // Look for (R + R) or (R + (R << [1,2,3])).
1361 unsigned ShAmt = 0;
1362 Base = N.getOperand(0);
1363 OffReg = N.getOperand(1);
1364
1365 // Swap if it is ((R << c) + R).
1366 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg.getOpcode());
1367 if (ShOpcVal != ARM_AM::lsl) {
1368 ShOpcVal = ARM_AM::getShiftOpcForNode(Base.getOpcode());
1369 if (ShOpcVal == ARM_AM::lsl)
1370 std::swap(Base, OffReg);
1371 }
1372
1373 if (ShOpcVal == ARM_AM::lsl) {
1374 // Check to see if the RHS of the shift is a constant, if not, we can't fold
1375 // it.
1376 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
1377 ShAmt = Sh->getZExtValue();
1378 if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
1379 OffReg = OffReg.getOperand(0);
1380 else {
1381 ShAmt = 0;
1382 ShOpcVal = ARM_AM::no_shift;
1383 }
1384 } else {
1385 ShOpcVal = ARM_AM::no_shift;
Value stored to 'ShOpcVal' is never read
1386 }
1387 }
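Editor's note: after this point 'ShOpcVal' is never read; only 'ShAmt' feeds the ShImm operand built at line 1389 below, which is why the store flagged above is dead. Assuming no later use is intended, deleting that assignment (and arguably the matching one at line 1382) would silence the warning without changing behavior.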
1388
1389 ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);
1390
1391 return true;
1392}
1393
1394bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
1395 SDValue &OffImm) {
1396 // This *must* succeed since it's used for the irreplaceable ldrex and strex
1397 // instructions.
1398 Base = N;
1399 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1400
1401 if (N.getOpcode() != ISD::ADD || !CurDAG->isBaseWithConstantOffset(N))
1402 return true;
1403
1404 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
1405 if (!RHS)
1406 return true;
1407
1408 uint32_t RHSC = (int)RHS->getZExtValue();
1409 if (RHSC > 1020 || RHSC % 4 != 0)
1410 return true;
1411
1412 Base = N.getOperand(0);
1413 if (Base.getOpcode() == ISD::FrameIndex) {
1414 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1415 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
1416 }
1417
1418 OffImm = CurDAG->getTargetConstant(RHSC / 4, MVT::i32);
1419 return true;
1420}
1421
1422//===--------------------------------------------------------------------===//
1423
1424/// getAL - Returns an ARMCC::AL immediate node.
1425static inline SDValue getAL(SelectionDAG *CurDAG) {
1426 return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
1427}
1428
1429SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
1430 LoadSDNode *LD = cast<LoadSDNode>(N);
1431 ISD::MemIndexedMode AM = LD->getAddressingMode();
1432 if (AM == ISD::UNINDEXED)
1433 return nullptr;
1434
1435 EVT LoadedVT = LD->getMemoryVT();
1436 SDValue Offset, AMOpc;
1437 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1438 unsigned Opcode = 0;
1439 bool Match = false;
1440 if (LoadedVT == MVT::i32 && isPre &&
1441 SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
1442 Opcode = ARM::LDR_PRE_IMM;
1443 Match = true;
1444 } else if (LoadedVT == MVT::i32 && !isPre &&
1445 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
1446 Opcode = ARM::LDR_POST_IMM;
1447 Match = true;
1448 } else if (LoadedVT == MVT::i32 &&
1449 SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
1450 Opcode = isPre ? ARM::LDR_PRE_REG : ARM::LDR_POST_REG;
1451 Match = true;
1452
1453 } else if (LoadedVT == MVT::i16 &&
1454 SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
1455 Match = true;
1456 Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
1457 ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
1458 : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
1459 } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
1460 if (LD->getExtensionType() == ISD::SEXTLOAD) {
1461 if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
1462 Match = true;
1463 Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
1464 }
1465 } else {
1466 if (isPre &&
1467 SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
1468 Match = true;
1469 Opcode = ARM::LDRB_PRE_IMM;
1470 } else if (!isPre &&
1471 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
1472 Match = true;
1473 Opcode = ARM::LDRB_POST_IMM;
1474 } else if (SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
1475 Match = true;
1476 Opcode = isPre ? ARM::LDRB_PRE_REG : ARM::LDRB_POST_REG;
1477 }
1478 }
1479 }
1480
1481 if (Match) {
1482 if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
1483 SDValue Chain = LD->getChain();
1484 SDValue Base = LD->getBasePtr();
1485 SDValue Ops[]= { Base, AMOpc, getAL(CurDAG),
1486 CurDAG->getRegister(0, MVT::i32), Chain };
1487 return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
1488 MVT::i32, MVT::Other, Ops);
1489 } else {
1490 SDValue Chain = LD->getChain();
1491 SDValue Base = LD->getBasePtr();
1492 SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
1493 CurDAG->getRegister(0, MVT::i32), Chain };
1494 return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
1495 MVT::i32, MVT::Other, Ops);
1496 }
1497 }
1498
1499 return nullptr;
1500}
1501
1502SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
1503 LoadSDNode *LD = cast<LoadSDNode>(N);
1504 ISD::MemIndexedMode AM = LD->getAddressingMode();
1505 if (AM == ISD::UNINDEXED)
1506 return nullptr;
1507
1508 EVT LoadedVT = LD->getMemoryVT();
1509 bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
1510 SDValue Offset;
1511 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1512 unsigned Opcode = 0;
1513 bool Match = false;
1514 if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
1515 switch (LoadedVT.getSimpleVT().SimpleTy) {
1516 case MVT::i32:
1517 Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
1518 break;
1519 case MVT::i16:
1520 if (isSExtLd)
1521 Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
1522 else
1523 Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
1524 break;
1525 case MVT::i8:
1526 case MVT::i1:
1527 if (isSExtLd)
1528 Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
1529 else
1530 Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
1531 break;
1532 default:
1533 return nullptr;
1534 }
1535 Match = true;
1536 }
1537
1538 if (Match) {
1539 SDValue Chain = LD->getChain();
1540 SDValue Base = LD->getBasePtr();
1541 SDValue Ops[]= { Base, Offset, getAL(CurDAG),
1542 CurDAG->getRegister(0, MVT::i32), Chain };
1543 return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
1544 MVT::Other, Ops);
1545 }
1546
1547 return nullptr;
1548}
1549
1550/// \brief Form a GPRPair pseudo register from a pair of GPR regs.
1551SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) {
1552 SDLoc dl(V0.getNode());
1553 SDValue RegClass =
1554 CurDAG->getTargetConstant(ARM::GPRPairRegClassID, MVT::i32);
1555 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32);
1556 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::gsub_1, MVT::i32);
1557 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1558 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1559}
1560
1561/// \brief Form a D register from a pair of S registers.
1562SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1563 SDLoc dl(V0.getNode());
1564 SDValue RegClass =
1565 CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, MVT::i32);
1566 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
1567 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
1568 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1569 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1570}
1571
1572/// \brief Form a quad register from a pair of D registers.
1573SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1574 SDLoc dl(V0.getNode());
1575 SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, MVT::i32);
1576 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
1577 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
1578 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1579 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1580}
1581
1582/// \brief Form 4 consecutive D registers from a pair of Q registers.
1583SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1584 SDLoc dl(V0.getNode());
1585 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
1586 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
1587 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
1588 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1589 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1590}
1591
1592/// \brief Form 4 consecutive S registers.
1593SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
1594 SDValue V2, SDValue V3) {
1595 SDLoc dl(V0.getNode());
1596 SDValue RegClass =
1597 CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, MVT::i32);
1598 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
1599 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
1600 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, MVT::i32);
1601 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, MVT::i32);
1602 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1603 V2, SubReg2, V3, SubReg3 };
1604 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1605}
1606
1607/// \brief Form 4 consecutive D registers.
1608SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
1609 SDValue V2, SDValue V3) {
1610 SDLoc dl(V0.getNode());
1611 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
1612 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
1613 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
1614 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, MVT::i32);
1615 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32);
1616 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1617 V2, SubReg2, V3, SubReg3 };
1618 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1619}
1620
1621/// \brief Form 4 consecutive Q registers.
1622SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
1623 SDValue V2, SDValue V3) {
1624 SDLoc dl(V0.getNode());
1625 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, MVT::i32);
1626 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
1627 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
1628 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, MVT::i32);
1629 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, MVT::i32);
1630 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1631 V2, SubReg2, V3, SubReg3 };
1632 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1633}
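// Editorial note (illustrative, not part of the original source): each of
// the helpers above emits a REG_SEQUENCE whose operands alternate value and
// subregister index, { RegClassID, V0, sub0, V1, sub1, ... }. For example,
// pairing two D registers into a Q register would be selected roughly as
//   %q = REG_SEQUENCE QPRRegClassID, %d_lo, dsub_0, %d_hi, dsub_1
// which forces the register allocator to place the two values in adjacent
// D registers that together form one Q register.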
1634
1635/// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
1636/// of a NEON VLD or VST instruction. The supported values depend on the
1637/// number of registers being loaded.
1638SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, unsigned NumVecs,
1639 bool is64BitVector) {
1640 unsigned NumRegs = NumVecs;
1641 if (!is64BitVector && NumVecs < 3)
1642 NumRegs *= 2;
1643
1644 unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
1645 if (Alignment >= 32 && NumRegs == 4)
1646 Alignment = 32;
1647 else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
1648 Alignment = 16;
1649 else if (Alignment >= 8)
1650 Alignment = 8;
1651 else
1652 Alignment = 0;
1653
1654 return CurDAG->getTargetConstant(Alignment, MVT::i32);
1655}
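// Worked example (illustrative, not part of the original source): for a
// VLD2 of 128-bit vectors, NumVecs == 2 and NumRegs == 4 (two Q registers
// span four D registers), so a requested alignment of 64 bytes is clamped
// to 32, a request of 16 is kept, and any request below 8 is encoded as 0,
// i.e. "no alignment guarantee".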
1656
1657static bool isVLDfixed(unsigned Opc)
1658{
1659 switch (Opc) {
1660 default: return false;
1661 case ARM::VLD1d8wb_fixed : return true;
1662 case ARM::VLD1d16wb_fixed : return true;
1663 case ARM::VLD1d64Qwb_fixed : return true;
1664 case ARM::VLD1d32wb_fixed : return true;
1665 case ARM::VLD1d64wb_fixed : return true;
1666 case ARM::VLD1d64TPseudoWB_fixed : return true;
1667 case ARM::VLD1d64QPseudoWB_fixed : return true;
1668 case ARM::VLD1q8wb_fixed : return true;
1669 case ARM::VLD1q16wb_fixed : return true;
1670 case ARM::VLD1q32wb_fixed : return true;
1671 case ARM::VLD1q64wb_fixed : return true;
1672 case ARM::VLD2d8wb_fixed : return true;
1673 case ARM::VLD2d16wb_fixed : return true;
1674 case ARM::VLD2d32wb_fixed : return true;
1675 case ARM::VLD2q8PseudoWB_fixed : return true;
1676 case ARM::VLD2q16PseudoWB_fixed : return true;
1677 case ARM::VLD2q32PseudoWB_fixed : return true;
1678 case ARM::VLD2DUPd8wb_fixed : return true;
1679 case ARM::VLD2DUPd16wb_fixed : return true;
1680 case ARM::VLD2DUPd32wb_fixed : return true;
1681 }
1682}
1683
1684static bool isVSTfixed(unsigned Opc)
1685{
1686 switch (Opc) {
1687 default: return false;
1688 case ARM::VST1d8wb_fixed : return true;
1689 case ARM::VST1d16wb_fixed : return true;
1690 case ARM::VST1d32wb_fixed : return true;
1691 case ARM::VST1d64wb_fixed : return true;
1692 case ARM::VST1q8wb_fixed : return true;
1693 case ARM::VST1q16wb_fixed : return true;
1694 case ARM::VST1q32wb_fixed : return true;
1695 case ARM::VST1q64wb_fixed : return true;
1696 case ARM::VST1d64TPseudoWB_fixed : return true;
1697 case ARM::VST1d64QPseudoWB_fixed : return true;
1698 case ARM::VST2d8wb_fixed : return true;
1699 case ARM::VST2d16wb_fixed : return true;
1700 case ARM::VST2d32wb_fixed : return true;
1701 case ARM::VST2q8PseudoWB_fixed : return true;
1702 case ARM::VST2q16PseudoWB_fixed : return true;
1703 case ARM::VST2q32PseudoWB_fixed : return true;
1704 }
1705}
1706
1707// Get the register stride update opcode of a VLD/VST instruction that
1708// is otherwise equivalent to the given fixed stride updating instruction.
1709static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
1710 assert((isVLDfixed(Opc) || isVSTfixed(Opc))
1711 && "Incorrect fixed stride updating instruction.");
1712 switch (Opc) {
1713 default: break;
1714 case ARM::VLD1d8wb_fixed: return ARM::VLD1d8wb_register;
1715 case ARM::VLD1d16wb_fixed: return ARM::VLD1d16wb_register;
1716 case ARM::VLD1d32wb_fixed: return ARM::VLD1d32wb_register;
1717 case ARM::VLD1d64wb_fixed: return ARM::VLD1d64wb_register;
1718 case ARM::VLD1q8wb_fixed: return ARM::VLD1q8wb_register;
1719 case ARM::VLD1q16wb_fixed: return ARM::VLD1q16wb_register;
1720 case ARM::VLD1q32wb_fixed: return ARM::VLD1q32wb_register;
1721 case ARM::VLD1q64wb_fixed: return ARM::VLD1q64wb_register;
1722 case ARM::VLD1d64Twb_fixed: return ARM::VLD1d64Twb_register;
1723 case ARM::VLD1d64Qwb_fixed: return ARM::VLD1d64Qwb_register;
1724 case ARM::VLD1d64TPseudoWB_fixed: return ARM::VLD1d64TPseudoWB_register;
1725 case ARM::VLD1d64QPseudoWB_fixed: return ARM::VLD1d64QPseudoWB_register;
1726
1727 case ARM::VST1d8wb_fixed: return ARM::VST1d8wb_register;
1728 case ARM::VST1d16wb_fixed: return ARM::VST1d16wb_register;
1729 case ARM::VST1d32wb_fixed: return ARM::VST1d32wb_register;
1730 case ARM::VST1d64wb_fixed: return ARM::VST1d64wb_register;
1731 case ARM::VST1q8wb_fixed: return ARM::VST1q8wb_register;
1732 case ARM::VST1q16wb_fixed: return ARM::VST1q16wb_register;
1733 case ARM::VST1q32wb_fixed: return ARM::VST1q32wb_register;
1734 case ARM::VST1q64wb_fixed: return ARM::VST1q64wb_register;
1735 case ARM::VST1d64TPseudoWB_fixed: return ARM::VST1d64TPseudoWB_register;
1736 case ARM::VST1d64QPseudoWB_fixed: return ARM::VST1d64QPseudoWB_register;
1737
1738 case ARM::VLD2d8wb_fixed: return ARM::VLD2d8wb_register;
1739 case ARM::VLD2d16wb_fixed: return ARM::VLD2d16wb_register;
1740 case ARM::VLD2d32wb_fixed: return ARM::VLD2d32wb_register;
1741 case ARM::VLD2q8PseudoWB_fixed: return ARM::VLD2q8PseudoWB_register;
1742 case ARM::VLD2q16PseudoWB_fixed: return ARM::VLD2q16PseudoWB_register;
1743 case ARM::VLD2q32PseudoWB_fixed: return ARM::VLD2q32PseudoWB_register;
1744
1745 case ARM::VST2d8wb_fixed: return ARM::VST2d8wb_register;
1746 case ARM::VST2d16wb_fixed: return ARM::VST2d16wb_register;
1747 case ARM::VST2d32wb_fixed: return ARM::VST2d32wb_register;
1748 case ARM::VST2q8PseudoWB_fixed: return ARM::VST2q8PseudoWB_register;
1749 case ARM::VST2q16PseudoWB_fixed: return ARM::VST2q16PseudoWB_register;
1750 case ARM::VST2q32PseudoWB_fixed: return ARM::VST2q32PseudoWB_register;
1751
1752 case ARM::VLD2DUPd8wb_fixed: return ARM::VLD2DUPd8wb_register;
1753 case ARM::VLD2DUPd16wb_fixed: return ARM::VLD2DUPd16wb_register;
1754 case ARM::VLD2DUPd32wb_fixed: return ARM::VLD2DUPd32wb_register;
1755 }
1756 return Opc; // If not one we handle, return it unchanged.
1757}
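// Usage sketch (illustrative, not part of the original source): when the
// post-increment amount lives in a register instead of equaling the fixed
// access size, the "_fixed" writeback opcode is swapped for its "_register"
// twin, e.g.
//   getVLDSTRegisterUpdateOpcode(ARM::VLD1d8wb_fixed) == ARM::VLD1d8wb_register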
1758
1759SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
1760 const uint16_t *DOpcodes,
1761 const uint16_t *QOpcodes0,
1762 const uint16_t *QOpcodes1) {
1763 assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
1764 SDLoc dl(N);
1765
1766 SDValue MemAddr, Align;
1767 unsigned AddrOpIdx = isUpdating ? 1 : 2;
1768 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
1769 return nullptr;
1770
1771 SDValue Chain = N->getOperand(0);
1772 EVT VT = N->getValueType(0);
1773 bool is64BitVector = VT.is64BitVector();
1774 Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);
1775
1776 unsigned OpcodeIndex;
1777 switch (VT.getSimpleVT().SimpleTy) {
1778 default: llvm_unreachable("unhandled vld type");
1779 // Double-register operations:
1780 case MVT::v8i8: OpcodeIndex = 0; break;
1781 case MVT::v4i16: OpcodeIndex = 1; break;
1782 case MVT::v2f32:
1783 case MVT::v2i32: OpcodeIndex = 2; break;
1784 case MVT::v1i64: OpcodeIndex = 3; break;
1785 // Quad-register operations:
1786 case MVT::v16i8: OpcodeIndex = 0; break;
1787 case MVT::v8i16: OpcodeIndex = 1; break;
1788 case MVT::v4f32:
1789 case MVT::v4i32: OpcodeIndex = 2; break;
1790 case MVT::v2i64: OpcodeIndex = 3;
1791 assert(NumVecs == 1 && "v2i64 type only supported for VLD1");
1792 break;
1793 }
1794
1795 EVT ResTy;
1796 if (NumVecs == 1)
1797 ResTy = VT;
1798 else {
1799 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
1800 if (!is64BitVector)
1801 ResTyElts *= 2;
1802 ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
1803 }
1804 std::vector<EVT> ResTys;
1805 ResTys.push_back(ResTy);
1806 if (isUpdating)
1807 ResTys.push_back(MVT::i32);
1808 ResTys.push_back(MVT::Other);
1809
1810 SDValue Pred = getAL(CurDAG);
1811 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1812 SDNode *VLd;
1813 SmallVector<SDValue, 7> Ops;
1814
1815 // Double registers and VLD1/VLD2 quad registers are directly supported.
1816 if (is64BitVector || NumVecs <= 2) {
1817 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1818 QOpcodes0[OpcodeIndex]);
1819 Ops.push_back(MemAddr);
1820 Ops.push_back(Align);
1821 if (isUpdating) {
1822 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1823 // FIXME: VLD1/VLD2 fixed increment doesn't need Reg0. Remove the reg0
1824 // case entirely when the rest are updated to that form, too.
1825 if ((NumVecs <= 2) && !isa<ConstantSDNode>(Inc.getNode()))
1826 Opc = getVLDSTRegisterUpdateOpcode(Opc);
1827 // FIXME: We use a VLD1 for v1i64 even if the pseudo says vld2/3/4, so
1828 // check for that explicitly too. Horribly hacky, but temporary.
1829 if ((NumVecs > 2 && !isVLDfixed(Opc)) ||
1830 !isa<ConstantSDNode>(Inc.getNode()))
1831 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
1832 }
1833 Ops.push_back(Pred);
1834 Ops.push_back(Reg0);
1835 Ops.push_back(Chain);
1836 VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1837
1838 } else {
1839 // Otherwise, quad registers are loaded with two separate instructions,
1840 // where one loads the even registers and the other loads the odd registers.
1841 EVT AddrTy = MemAddr.getValueType();
1842
1843 // Load the even subregs. This is always an updating load, so that it
1844 // provides the address to the second load for the odd subregs.
1845 SDValue ImplDef =
1846 SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
1847 const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
1848 SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
1849 ResTy, AddrTy, MVT::Other, OpsA);
1850 Chain = SDValue(VLdA, 2);
1851
1852 // Load the odd subregs.
1853 Ops.push_back(SDValue(VLdA, 1));
1854 Ops.push_back(Align);
1855 if (isUpdating) {
1856 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1857 assert(isa<ConstantSDNode>(Inc.getNode()) &&
1858 "only constant post-increment update allowed for VLD3/4");
1859 (void)Inc;
1860 Ops.push_back(Reg0);
1861 }
1862 Ops.push_back(SDValue(VLdA, 0));
1863 Ops.push_back(Pred);
1864 Ops.push_back(Reg0);
1865 Ops.push_back(Chain);
1866 VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys, Ops);
1867 }
1868
1869 // Transfer memoperands.
1870 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1871 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1872 cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);
1873
1874 if (NumVecs == 1)
1875 return VLd;
1876
1877 // Extract out the subregisters.
1878 SDValue SuperReg = SDValue(VLd, 0);
1879 assert(ARM::dsub_7 == ARM::dsub_0+7 &&
1880 ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
1881 unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
1882 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
1883 ReplaceUses(SDValue(N, Vec),
1884 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
1885 ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
1886 if (isUpdating)
1887 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
1888 return nullptr;
1889}
1890
1891SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
1892 const uint16_t *DOpcodes,
1893 const uint16_t *QOpcodes0,
1894 const uint16_t *QOpcodes1) {
1895 assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
1896 SDLoc dl(N);
1897
1898 SDValue MemAddr, Align;
1899 unsigned AddrOpIdx = isUpdating ? 1 : 2;
1900 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
1901 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
1902 return nullptr;
1903
1904 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1905 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1906
1907 SDValue Chain = N->getOperand(0);
1908 EVT VT = N->getOperand(Vec0Idx).getValueType();
1909 bool is64BitVector = VT.is64BitVector();
1910 Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);
1911
1912 unsigned OpcodeIndex;
1913 switch (VT.getSimpleVT().SimpleTy) {
1914 default: llvm_unreachable("unhandled vst type");
1915 // Double-register operations:
1916 case MVT::v8i8: OpcodeIndex = 0; break;
1917 case MVT::v4i16: OpcodeIndex = 1; break;
1918 case MVT::v2f32:
1919 case MVT::v2i32: OpcodeIndex = 2; break;
1920 case MVT::v1i64: OpcodeIndex = 3; break;
1921 // Quad-register operations:
1922 case MVT::v16i8: OpcodeIndex = 0; break;
1923 case MVT::v8i16: OpcodeIndex = 1; break;
1924 case MVT::v4f32:
1925 case MVT::v4i32: OpcodeIndex = 2; break;
1926 case MVT::v2i64: OpcodeIndex = 3;
1927 assert(NumVecs == 1 && "v2i64 type only supported for VST1");
1928 break;
1929 }
1930
1931 std::vector<EVT> ResTys;
1932 if (isUpdating)
1933 ResTys.push_back(MVT::i32);
1934 ResTys.push_back(MVT::Other);
1935
1936 SDValue Pred = getAL(CurDAG);
1937 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1938 SmallVector<SDValue, 7> Ops;
1939
1940 // Double registers and VST1/VST2 quad registers are directly supported.
1941 if (is64BitVector || NumVecs <= 2) {
1942 SDValue SrcReg;
1943 if (NumVecs == 1) {
1944 SrcReg = N->getOperand(Vec0Idx);
1945 } else if (is64BitVector) {
1946 // Form a REG_SEQUENCE to force register allocation.
1947 SDValue V0 = N->getOperand(Vec0Idx + 0);
1948 SDValue V1 = N->getOperand(Vec0Idx + 1);
1949 if (NumVecs == 2)
1950 SrcReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
1951 else {
1952 SDValue V2 = N->getOperand(Vec0Idx + 2);
1953 // If it's a vst3, form a quad D-register and leave the last part as
1954 // an undef.
1955 SDValue V3 = (NumVecs == 3)
1956 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
1957 : N->getOperand(Vec0Idx + 3);
1958 SrcReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
1959 }
1960 } else {
1961 // Form a QQ register.
1962 SDValue Q0 = N->getOperand(Vec0Idx);
1963 SDValue Q1 = N->getOperand(Vec0Idx + 1);
1964 SrcReg = SDValue(createQRegPairNode(MVT::v4i64, Q0, Q1), 0);
1965 }
1966
1967 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1968 QOpcodes0[OpcodeIndex]);
1969 Ops.push_back(MemAddr);
1970 Ops.push_back(Align);
1971 if (isUpdating) {
1972 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1973 // FIXME: VST1/VST2 fixed increment doesn't need Reg0. Remove the reg0
1974 // case entirely when the rest are updated to that form, too.
1975 if (NumVecs <= 2 && !isa<ConstantSDNode>(Inc.getNode()))
1976 Opc = getVLDSTRegisterUpdateOpcode(Opc);
1977 // FIXME: We use a VST1 for v1i64 even if the pseudo says vst2/3/4, so
1978 // check for that explicitly too. Horribly hacky, but temporary.
1979 if (!isa<ConstantSDNode>(Inc.getNode()))
1980 Ops.push_back(Inc);
1981 else if (NumVecs > 2 && !isVSTfixed(Opc))
1982 Ops.push_back(Reg0);
1983 }
1984 Ops.push_back(SrcReg);
1985 Ops.push_back(Pred);
1986 Ops.push_back(Reg0);
1987 Ops.push_back(Chain);
1988 SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1989
1990 // Transfer memoperands.
1991 cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1);
1992
1993 return VSt;
1994 }
1995
1996 // Otherwise, quad registers are stored with two separate instructions,
1997 // where one stores the even registers and the other stores the odd registers.
1998
1999 // Form the QQQQ REG_SEQUENCE.
2000 SDValue V0 = N->getOperand(Vec0Idx + 0);
2001 SDValue V1 = N->getOperand(Vec0Idx + 1);
2002 SDValue V2 = N->getOperand(Vec0Idx + 2);
2003 SDValue V3 = (NumVecs == 3)
2004 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2005 : N->getOperand(Vec0Idx + 3);
2006 SDValue RegSeq = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
2007
2008 // Store the even D registers. This is always an updating store, so that it
2009 // provides the address to the second store for the odd subregs.
2010 const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain };
2011 SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
2012 MemAddr.getValueType(),
2013 MVT::Other, OpsA);
2014 cast<MachineSDNode>(VStA)->setMemRefs(MemOp, MemOp + 1);
2015 Chain = SDValue(VStA, 1);
2016
2017 // Store the odd D registers.
2018 Ops.push_back(SDValue(VStA, 0));
2019 Ops.push_back(Align);
2020 if (isUpdating) {
2021 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2022 assert(isa<ConstantSDNode>(Inc.getNode()) &&
2023 "only constant post-increment update allowed for VST3/4");
2024 (void)Inc;
2025 Ops.push_back(Reg0);
2026 }
2027 Ops.push_back(RegSeq);
2028 Ops.push_back(Pred);
2029 Ops.push_back(Reg0);
2030 Ops.push_back(Chain);
2031 SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
2032 Ops);
2033 cast<MachineSDNode>(VStB)->setMemRefs(MemOp, MemOp + 1);
2034 return VStB;
2035}
2036
2037SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
2038 bool isUpdating, unsigned NumVecs,
2039 const uint16_t *DOpcodes,
2040 const uint16_t *QOpcodes) {
2041 assert(NumVecs >= 2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
2042 SDLoc dl(N);
2043
2044 SDValue MemAddr, Align;
2045 unsigned AddrOpIdx = isUpdating ? 1 : 2;
2046 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
2047 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
2048 return nullptr;
2049
2050 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2051 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2052
2053 SDValue Chain = N->getOperand(0);
2054 unsigned Lane =
2055 cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
2056 EVT VT = N->getOperand(Vec0Idx).getValueType();
2057 bool is64BitVector = VT.is64BitVector();
2058
2059 unsigned Alignment = 0;
2060 if (NumVecs != 3) {
2061 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2062 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
2063 if (Alignment > NumBytes)
2064 Alignment = NumBytes;
2065 if (Alignment < 8 && Alignment < NumBytes)
2066 Alignment = 0;
2067 // Alignment must be a power of two; make sure of that.
2068 Alignment = (Alignment & -Alignment);
2069 if (Alignment == 1)
2070 Alignment = 0;
2071 }
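// Worked example (illustrative, not part of the original source): for a
// 4-vector lane operation on v2i32, NumBytes == 4 * 4 == 16; a requested
// alignment of 32 is clamped to 16, a request of 12 rounds down to its
// lowest set bit, 4, and a request of 2 (below 8 and below NumBytes) is
// dropped to 0.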
2072 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
2073
2074 unsigned OpcodeIndex;
2075 switch (VT.getSimpleVT().SimpleTy) {
2076 default: llvm_unreachable("unhandled vld/vst lane type");
2077 // Double-register operations:
2078 case MVT::v8i8: OpcodeIndex = 0; break;
2079 case MVT::v4i16: OpcodeIndex = 1; break;
2080 case MVT::v2f32:
2081 case MVT::v2i32: OpcodeIndex = 2; break;
2082 // Quad-register operations:
2083 case MVT::v8i16: OpcodeIndex = 0; break;
2084 case MVT::v4f32:
2085 case MVT::v4i32: OpcodeIndex = 1; break;
2086 }
2087
2088 std::vector<EVT> ResTys;
2089 if (IsLoad) {
2090 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
2091 if (!is64BitVector)
2092 ResTyElts *= 2;
2093 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(),
2094 MVT::i64, ResTyElts));
2095 }
2096 if (isUpdating)
2097 ResTys.push_back(MVT::i32);
2098 ResTys.push_back(MVT::Other);
2099
2100 SDValue Pred = getAL(CurDAG);
2101 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2102
2103 SmallVector<SDValue, 8> Ops;
2104 Ops.push_back(MemAddr);
2105 Ops.push_back(Align);
2106 if (isUpdating) {
2107 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2108 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
2109 }
2110
2111 SDValue SuperReg;
2112 SDValue V0 = N->getOperand(Vec0Idx + 0);
2113 SDValue V1 = N->getOperand(Vec0Idx + 1);
2114 if (NumVecs == 2) {
2115 if (is64BitVector)
2116 SuperReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
2117 else
2118 SuperReg = SDValue(createQRegPairNode(MVT::v4i64, V0, V1), 0);
2119 } else {
2120 SDValue V2 = N->getOperand(Vec0Idx + 2);
2121 SDValue V3 = (NumVecs == 3)
2122 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2123 : N->getOperand(Vec0Idx + 3);
2124 if (is64BitVector)
2125 SuperReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
2126 else
2127 SuperReg = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
2128 }
2129 Ops.push_back(SuperReg);
2130 Ops.push_back(getI32Imm(Lane));
2131 Ops.push_back(Pred);
2132 Ops.push_back(Reg0);
2133 Ops.push_back(Chain);
2134
2135 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
2136 QOpcodes[OpcodeIndex]);
2137 SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2138 cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1);
2139 if (!IsLoad)
2140 return VLdLn;
2141
2142 // Extract the subregisters.
2143 SuperReg = SDValue(VLdLn, 0);
2144 assert(ARM::dsub_7 == ARM::dsub_0+7 &&
2145 ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
2146 unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
2147 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2148 ReplaceUses(SDValue(N, Vec),
2149 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
2150 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
2151 if (isUpdating)
2152 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
2153 return nullptr;
2154}
2155
2156SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
2157 unsigned NumVecs,
2158 const uint16_t *Opcodes) {
2159 assert(NumVecs >= 2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
2160 SDLoc dl(N);
2161
2162 SDValue MemAddr, Align;
2163 if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align))
2164 return nullptr;
2165
2166 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2167 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2168
2169 SDValue Chain = N->getOperand(0);
2170 EVT VT = N->getValueType(0);
2171
2172 unsigned Alignment = 0;
2173 if (NumVecs != 3) {
2174 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2175 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
2176 if (Alignment > NumBytes)
2177 Alignment = NumBytes;
2178 if (Alignment < 8 && Alignment < NumBytes)
2179 Alignment = 0;
2180 // Alignment must be a power of two; make sure of that.
2181 Alignment = (Alignment & -Alignment);
2182 if (Alignment == 1)
2183 Alignment = 0;
2184 }
2185 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
2186
2187 unsigned OpcodeIndex;
2188 switch (VT.getSimpleVT().SimpleTy) {
2189 default: llvm_unreachable("unhandled vld-dup type");
2190 case MVT::v8i8: OpcodeIndex = 0; break;
2191 case MVT::v4i16: OpcodeIndex = 1; break;
2192 case MVT::v2f32:
2193 case MVT::v2i32: OpcodeIndex = 2; break;
2194 }
2195
2196 SDValue Pred = getAL(CurDAG);
2197 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2198 SDValue SuperReg;
2199 unsigned Opc = Opcodes[OpcodeIndex];
2200 SmallVector<SDValue, 6> Ops;
2201 Ops.push_back(MemAddr);
2202 Ops.push_back(Align);
2203 if (isUpdating) {
2204 // fixed-stride update instructions don't have an explicit writeback
2205 // operand. It's implicit in the opcode itself.
2206 SDValue Inc = N->getOperand(2);
2207 if (!isa<ConstantSDNode>(Inc.getNode()))
2208 Ops.push_back(Inc);
2209 // FIXME: VLD3 and VLD4 haven't been updated to that form yet.
2210 else if (NumVecs > 2)
2211 Ops.push_back(Reg0);
2212 }
2213 Ops.push_back(Pred);
2214 Ops.push_back(Reg0);
2215 Ops.push_back(Chain);
2216
2217 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
2218 std::vector<EVT> ResTys;
2219 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,ResTyElts));
2220 if (isUpdating)
2221 ResTys.push_back(MVT::i32);
2222 ResTys.push_back(MVT::Other);
2223 SDNode *VLdDup = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2224 cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1);
2225 SuperReg = SDValue(VLdDup, 0);
2226
2227 // Extract the subregisters.
2228 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
2229 unsigned SubIdx = ARM::dsub_0;
2230 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2231 ReplaceUses(SDValue(N, Vec),
2232 CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
2233 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
2234 if (isUpdating)
2235 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
2236 return nullptr;
2237}
2238
2239SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
2240 unsigned Opc) {
2241 assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range");
2242 SDLoc dl(N);
2243 EVT VT = N->getValueType(0);
2244 unsigned FirstTblReg = IsExt ? 2 : 1;
2245
2246 // Form a REG_SEQUENCE to force register allocation.
2247 SDValue RegSeq;
2248 SDValue V0 = N->getOperand(FirstTblReg + 0);
2249 SDValue V1 = N->getOperand(FirstTblReg + 1);
2250 if (NumVecs == 2)
2251 RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0);
2252 else {
2253 SDValue V2 = N->getOperand(FirstTblReg + 2);
2254 // If it's a vtbl3, form a quad D-register and leave the last part as
2255 // an undef.
2256 SDValue V3 = (NumVecs == 3)
2257 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2258 : N->getOperand(FirstTblReg + 3);
2259 RegSeq = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
2260 }
2261
2262 SmallVector<SDValue, 6> Ops;
2263 if (IsExt)
2264 Ops.push_back(N->getOperand(1));
2265 Ops.push_back(RegSeq);
2266 Ops.push_back(N->getOperand(FirstTblReg + NumVecs));
2267 Ops.push_back(getAL(CurDAG)); // predicate
2268 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register
2269 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
2270}
2271
2272SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
2273 bool isSigned) {
2274 if (!Subtarget->hasV6T2Ops())
2275 return nullptr;
2276
2277 unsigned Opc = isSigned
2278 ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
2279 : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);
2280
2281 // For unsigned extracts, check for a shift right and mask
2282 unsigned And_imm = 0;
2283 if (N->getOpcode() == ISD::AND) {
2284 if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {
2285
2286 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
2287 if (And_imm & (And_imm + 1))
2288 return nullptr;
2289
2290 unsigned Srl_imm = 0;
2291 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
2292 Srl_imm)) {
2293 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
2294
2295 // Note: The width operand is encoded as width-1.
2296 unsigned Width = CountTrailingOnes_32(And_imm) - 1;
2297 unsigned LSB = Srl_imm;
2298
2299 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2300
2301 if ((LSB + Width + 1) == N->getValueType(0).getSizeInBits()) {
2302 // It's cheaper to use a right shift to extract the top bits.
2303 if (Subtarget->isThumb()) {
2304 Opc = isSigned ? ARM::t2ASRri : ARM::t2LSRri;
2305 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2306 CurDAG->getTargetConstant(LSB, MVT::i32),
2307 getAL(CurDAG), Reg0, Reg0 };
2308 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2309 }
2310
2311 // ARM models shift instructions as MOVsi with shifter operand.
2312 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(ISD::SRL);
2313 SDValue ShOpc =
2314 CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, LSB),
2315 MVT::i32);
2316 SDValue Ops[] = { N->getOperand(0).getOperand(0), ShOpc,
2317 getAL(CurDAG), Reg0, Reg0 };
2318 return CurDAG->SelectNodeTo(N, ARM::MOVsi, MVT::i32, Ops);
2319 }
2320
2321 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2322 CurDAG->getTargetConstant(LSB, MVT::i32),
2323 CurDAG->getTargetConstant(Width, MVT::i32),
2324 getAL(CurDAG), Reg0 };
2325 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2326 }
2327 }
2328 return nullptr;
2329 }
2330
2331 // Otherwise, we're looking for a shift of a shift
2332 unsigned Shl_imm = 0;
2333 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
2334 assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
2335 unsigned Srl_imm = 0;
2336 if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
2337 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
2338 // Note: The width operand is encoded as width-1.
2339 unsigned Width = 32 - Srl_imm - 1;
2340 int LSB = Srl_imm - Shl_imm;
2341 if (LSB < 0)
2342 return nullptr;
2343 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2344 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2345 CurDAG->getTargetConstant(LSB, MVT::i32),
2346 CurDAG->getTargetConstant(Width, MVT::i32),
2347 getAL(CurDAG), Reg0 };
2348 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2349 }
2350 }
2351
2352 if (N->getOpcode() == ISD::SIGN_EXTEND_INREG) {
2353 unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
2354 unsigned LSB = 0;
2355 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL, LSB) &&
2356 !isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRA, LSB))
2357 return nullptr;
2358
2359 if (LSB + Width > 32)
2360 return nullptr;
2361
2362 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2363 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2364 CurDAG->getTargetConstant(LSB, MVT::i32),
2365 CurDAG->getTargetConstant(Width - 1, MVT::i32),
2366 getAL(CurDAG), Reg0 };
2367 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2368 }
2369
2370 return nullptr;
2371}
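// Worked example (illustrative, not part of the original source): the
// unsigned-extract path above matches IR such as
//   %t = lshr i32 %x, 7
//   %r = and i32 %t, 255
// Here And_imm == 0xff (0xff & 0x100 == 0), LSB == 7, and the encoded
// width is CountTrailingOnes_32(0xff) - 1 == 7, so the node selects to
// "ubfx r, x, #7, #8" (UBFX/t2UBFX).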
2372
2373/// Target-specific DAG combining for ISD::XOR.
2374/// Target-independent combining lowers SELECT_CC nodes of the form
2375/// select_cc setg[ge] X, 0, X, -X
2376/// select_cc setgt X, -1, X, -X
2377/// select_cc setl[te] X, 0, -X, X
2378/// select_cc setlt X, 1, -X, X
2379/// which represent Integer ABS into:
2380/// Y = sra (X, size(X)-1); xor (add (X, Y), Y)
2381/// ARM instruction selection detects the latter and matches it to
2382/// ARM::ABS or ARM::t2ABS machine node.
2383 SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N) {
2384 SDValue XORSrc0 = N->getOperand(0);
2385 SDValue XORSrc1 = N->getOperand(1);
2386 EVT VT = N->getValueType(0);
2387
2388 if (Subtarget->isThumb1Only())
2389 return nullptr;
2390
2391 if (XORSrc0.getOpcode() != ISD::ADD || XORSrc1.getOpcode() != ISD::SRA)
2392 return nullptr;
2393
2394 SDValue ADDSrc0 = XORSrc0.getOperand(0);
2395 SDValue ADDSrc1 = XORSrc0.getOperand(1);
2396 SDValue SRASrc0 = XORSrc1.getOperand(0);
2397 SDValue SRASrc1 = XORSrc1.getOperand(1);
2398 ConstantSDNode *SRAConstant = dyn_cast<ConstantSDNode>(SRASrc1);
2399 EVT XType = SRASrc0.getValueType();
2400 unsigned Size = XType.getSizeInBits() - 1;
2401
2402 if (ADDSrc1 == XORSrc1 && ADDSrc0 == SRASrc0 &&
2403 XType.isInteger() && SRAConstant != nullptr &&
2404 Size == SRAConstant->getZExtValue()) {
2405 unsigned Opcode = Subtarget->isThumb2() ? ARM::t2ABS : ARM::ABS;
2406 return CurDAG->SelectNodeTo(N, Opcode, VT, ADDSrc0);
2407 }
2408
2409 return nullptr;
2410}
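// Worked example (illustrative, not part of the original source): for
// X == -5 as i32, Y = sra(X, 31) == -1, add(X, Y) == -6, and
// xor(-6, -1) == 5 == |X|; SelectABSOp recognizes exactly this
// xor(add(X, sra(X, 31)), sra(X, 31)) shape and folds it to a single
// ARM::ABS / ARM::t2ABS machine node.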
2411
2412SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
2413 // The only time a CONCAT_VECTORS operation can have legal types is when
2414 // two 64-bit vectors are concatenated to a 128-bit vector.
2415 EVT VT = N->getValueType(0);
2416 if (!VT.is128BitVector() || N->getNumOperands() != 2)
2417 llvm_unreachable("unexpected CONCAT_VECTORS");
2418 return createDRegPairNode(VT, N->getOperand(0), N->getOperand(1));
2419}
2420
2421SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
2422 SDLoc dl(N);
2423
2424 if (N->isMachineOpcode()) {
2425 N->setNodeId(-1);
2426 return nullptr; // Already selected.
2427 }
2428
2429 switch (N->getOpcode()) {
2430 default: break;
2431 case ISD::INLINEASM: {
2432 SDNode *ResNode = SelectInlineAsm(N);
2433 if (ResNode)
2434 return ResNode;
2435 break;
2436 }
2437 case ISD::XOR: {
2438 // Select special operations if XOR node forms integer ABS pattern
2439 SDNode *ResNode = SelectABSOp(N);
2440 if (ResNode)
2441 return ResNode;
2442 // Other cases are autogenerated.
2443 break;
2444 }
2445 case ISD::Constant: {
2446 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
2447 bool UseCP = true;
2448 if (Subtarget->useMovt(*MF))
2449 // Thumb2-aware targets have the MOVT instruction, so all immediates can
2450 // be done with MOV + MOVT, at worst.
2451 UseCP = false;
2452 else {
2453 if (Subtarget->isThumb()) {
2454 UseCP = (Val > 255 && // MOV
2455 ~Val > 255 && // MOV + MVN
2456 !ARM_AM::isThumbImmShiftedVal(Val) && // MOV + LSL
2457 !(Subtarget->hasV6T2Ops() && Val <= 0xffff)); // MOVW
2458 } else
2459 UseCP = (ARM_AM::getSOImmVal(Val) == -1 && // MOV
2460 ARM_AM::getSOImmVal(~Val) == -1 && // MVN
2461 !ARM_AM::isSOImmTwoPartVal(Val) && // two instrs.
2462 !(Subtarget->hasV6T2Ops() && Val <= 0xffff)); // MOVW
2463 }
2464
2465 if (UseCP) {
2466 SDValue CPIdx = CurDAG->getTargetConstantPool(
2467 ConstantInt::get(Type::getInt32Ty(*CurDAG->getContext()), Val),
2468 TLI->getPointerTy());
2469
2470 SDNode *ResNode;
2471 if (Subtarget->isThumb()) {
2472 SDValue Pred = getAL(CurDAG);
2473 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2474 SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
2475 ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
2476 Ops);
2477 } else {
2478 SDValue Ops[] = {
2479 CPIdx,
2480 CurDAG->getTargetConstant(0, MVT::i32),
2481 getAL(CurDAG),
2482 CurDAG->getRegister(0, MVT::i32),
2483 CurDAG->getEntryNode()
2484 };
2485 ResNode = CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
2486 Ops);
2487 }
2488 ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0));
2489 return nullptr;
2490 }
2491
2492 // Other cases are autogenerated.
2493 break;
2494 }
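// Editorial note (illustrative, not part of the original source): on a
// target with MOVW/MOVT, an arbitrary immediate such as 0x12345678 is
// materialized as "movw #0x5678" + "movt #0x1234" and UseCP stays false;
// otherwise the immediate must be a shifter-operand value (directly, via
// MVN, or as a two-part sum) to avoid the constant-pool load built above.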
2495 case ISD::FrameIndex: {
2496 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
2497 int FI = cast<FrameIndexSDNode>(N)->getIndex();
2498 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
2499 if (Subtarget->isThumb1Only()) {
2500 return CurDAG->SelectNodeTo(N, ARM::tADDframe, MVT::i32, TFI,
2501 CurDAG->getTargetConstant(0, MVT::i32));
2502 } else {
2503 unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
2504 ARM::t2ADDri : ARM::ADDri);
2505 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2506 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2507 CurDAG->getRegister(0, MVT::i32) };
2508 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2509 }
2510 }
2511 case ISD::SRL:
2512 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
2513 return I;
2514 break;
2515 case ISD::SIGN_EXTEND_INREG:
2516 case ISD::SRA:
2517 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, true))
2518 return I;
2519 break;
2520 case ISD::MUL:
2521 if (Subtarget->isThumb1Only())
2522 break;
2523 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
2524 unsigned RHSV = C->getZExtValue();
2525 if (!RHSV) break;
2526 if (isPowerOf2_32(RHSV-1)) { // 2^n+1?
2527 unsigned ShImm = Log2_32(RHSV-1);
2528 if (ShImm >= 32)
2529 break;
2530 SDValue V = N->getOperand(0);
2531 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2532 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2533 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2534 if (Subtarget->isThumb()) {
2535 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2536 return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops);
2537 } else {
2538 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2539 return CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops);
2540 }
2541 }
2542 if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
2543 unsigned ShImm = Log2_32(RHSV+1);
2544 if (ShImm >= 32)
2545 break;
2546 SDValue V = N->getOperand(0);
2547 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2548 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2549 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2550 if (Subtarget->isThumb()) {
2551 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2552 return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops);
2553 } else {
2554 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2555 return CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops);
2556 }
2557 }
2558 }
2559 break;
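// Worked example (illustrative, not part of the original source): for
// x * 9, RHSV - 1 == 8 is a power of two, so the multiply is strength-
// reduced to "add r, x, x, lsl #3"; for x * 7, RHSV + 1 == 8, giving
// "rsb r, x, x, lsl #3", i.e. (x << 3) - x.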
2560 case ISD::AND: {
2561 // Check for unsigned bitfield extract
2562 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
2563 return I;
2564
2565 // (and (or x, c2), c1): the top 16 bits of c1 and c2 match, the lower
2566 // 16 bits of c1 are 0xffff, and the lower 16 bits of c2 are 0. That is,
2567 // the top 16 bits of the result are entirely contributed by c2 and the
2568 // lower 16 bits entirely by x, which equals (or (and x, 0xffff), (and c2, 0xffff0000)).
2569 // Select it to: "movt x, ((c2 & 0xffff0000) >> 16)".
2570 EVT VT = N->getValueType(0);
2571 if (VT != MVT::i32)
2572 break;
2573 unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
2574 ? ARM::t2MOVTi16
2575 : (Subtarget->hasV6T2Ops() ? ARM::MOVTi16 : 0);
2576 if (!Opc)
2577 break;
2578 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
2579 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2580 if (!N1C)
2581 break;
2582 if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
2583 SDValue N2 = N0.getOperand(1);
2584 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
2585 if (!N2C)
2586 break;
2587 unsigned N1CVal = N1C->getZExtValue();
2588 unsigned N2CVal = N2C->getZExtValue();
2589 if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) &&
2590 (N1CVal & 0xffffU) == 0xffffU &&
2591 (N2CVal & 0xffffU) == 0x0U) {
2592 SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
2593 MVT::i32);
2594 SDValue Ops[] = { N0.getOperand(0), Imm16,
2595 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2596 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
2597 }
2598 }
2599 break;
2600 }
2601 case ARMISD::VMOVRRD:
2602 return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32,
2603 N->getOperand(0), getAL(CurDAG),
2604 CurDAG->getRegister(0, MVT::i32));
2605 case ISD::UMUL_LOHI: {
2606 if (Subtarget->isThumb1Only())
2607 break;
2608 if (Subtarget->isThumb()) {
2609 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2610 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2611 return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops);
2612 } else {
2613 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2614 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2615 CurDAG->getRegister(0, MVT::i32) };
2616 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2617 ARM::UMULL : ARM::UMULLv5,
2618 dl, MVT::i32, MVT::i32, Ops);
2619 }
2620 }
2621 case ISD::SMUL_LOHI: {
2622 if (Subtarget->isThumb1Only())
2623 break;
2624 if (Subtarget->isThumb()) {
2625 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2626 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2627 return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops);
2628 } else {
2629 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2630 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2631 CurDAG->getRegister(0, MVT::i32) };
2632 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2633 ARM::SMULL : ARM::SMULLv5,
2634 dl, MVT::i32, MVT::i32, Ops);
2635 }
2636 }
2637 case ARMISD::UMLAL: {
2638 if (Subtarget->isThumb()) {
2639 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2640 N->getOperand(3), getAL(CurDAG),
2641 CurDAG->getRegister(0, MVT::i32)};
2642 return CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops);
2643 } else {
2644 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2645 N->getOperand(3), getAL(CurDAG),
2646 CurDAG->getRegister(0, MVT::i32),
2647 CurDAG->getRegister(0, MVT::i32) };
2648 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2649 ARM::UMLAL : ARM::UMLALv5,
2650 dl, MVT::i32, MVT::i32, Ops);
2651 }
2652 }
2653 case ARMISD::SMLAL: {
2654 if (Subtarget->isThumb()) {
2655 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2656 N->getOperand(3), getAL(CurDAG),
2657 CurDAG->getRegister(0, MVT::i32)};
2658 return CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops);
2659 } else {
2660 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2661 N->getOperand(3), getAL(CurDAG),
2662 CurDAG->getRegister(0, MVT::i32),
2663 CurDAG->getRegister(0, MVT::i32) };
2664 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2665 ARM::SMLAL : ARM::SMLALv5,
2666 dl, MVT::i32, MVT::i32, Ops);
2667 }
2668 }
2669 case ISD::LOAD: {
2670 SDNode *ResNode = nullptr;
2671 if (Subtarget->isThumb() && Subtarget->hasThumb2())
2672 ResNode = SelectT2IndexedLoad(N);
2673 else
2674 ResNode = SelectARMIndexedLoad(N);
2675 if (ResNode)
2676 return ResNode;
2677 // Other cases are autogenerated.
2678 break;
2679 }
2680 case ARMISD::BRCOND: {
2681 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2682 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2683 // Pattern complexity = 6 cost = 1 size = 0
2684
2685 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2686 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
2687 // Pattern complexity = 6 cost = 1 size = 0
2688
2689 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2690 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2691 // Pattern complexity = 6 cost = 1 size = 0
2692
2693 unsigned Opc = Subtarget->isThumb() ?
2694 ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
2695 SDValue Chain = N->getOperand(0);
2696 SDValue N1 = N->getOperand(1);
2697 SDValue N2 = N->getOperand(2);
2698 SDValue N3 = N->getOperand(3);
2699 SDValue InFlag = N->getOperand(4);
2700 assert(N1.getOpcode() == ISD::BasicBlock);
2701 assert(N2.getOpcode() == ISD::Constant);
2702 assert(N3.getOpcode() == ISD::Register);
2703
2704 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
2705 cast<ConstantSDNode>(N2)->getZExtValue()),
2706 MVT::i32);
2707 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
2708 SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
2709 MVT::Glue, Ops);
2710 Chain = SDValue(ResNode, 0);
2711 if (N->getNumValues() == 2) {
2712 InFlag = SDValue(ResNode, 1);
2713 ReplaceUses(SDValue(N, 1), InFlag);
2714 }
2715 ReplaceUses(SDValue(N, 0),
2716 SDValue(Chain.getNode(), Chain.getResNo()));
2717 return nullptr;
2718 }
2719 case ARMISD::VZIP: {
2720 unsigned Opc = 0;
2721 EVT VT = N->getValueType(0);
2722 switch (VT.getSimpleVT().SimpleTy) {
2723 default: return nullptr;
2724 case MVT::v8i8: Opc = ARM::VZIPd8; break;
2725 case MVT::v4i16: Opc = ARM::VZIPd16; break;
2726 case MVT::v2f32:
2727 // vzip.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
2728 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2729 case MVT::v16i8: Opc = ARM::VZIPq8; break;
2730 case MVT::v8i16: Opc = ARM::VZIPq16; break;
2731 case MVT::v4f32:
2732 case MVT::v4i32: Opc = ARM::VZIPq32; break;
2733 }
2734 SDValue Pred = getAL(CurDAG);
2735 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2736 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2737 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
2738 }
2739 case ARMISD::VUZP: {
2740 unsigned Opc = 0;
2741 EVT VT = N->getValueType(0);
2742 switch (VT.getSimpleVT().SimpleTy) {
2743 default: return nullptr;
2744 case MVT::v8i8: Opc = ARM::VUZPd8; break;
2745 case MVT::v4i16: Opc = ARM::VUZPd16; break;
2746 case MVT::v2f32:
2747 // vuzp.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
2748 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2749 case MVT::v16i8: Opc = ARM::VUZPq8; break;
2750 case MVT::v8i16: Opc = ARM::VUZPq16; break;
2751 case MVT::v4f32:
2752 case MVT::v4i32: Opc = ARM::VUZPq32; break;
2753 }
2754 SDValue Pred = getAL(CurDAG);
2755 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2756 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2757 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
2758 }
2759 case ARMISD::VTRN: {
2760 unsigned Opc = 0;
2761 EVT VT = N->getValueType(0);
2762 switch (VT.getSimpleVT().SimpleTy) {
2763 default: return nullptr;
2764 case MVT::v8i8: Opc = ARM::VTRNd8; break;
2765 case MVT::v4i16: Opc = ARM::VTRNd16; break;
2766 case MVT::v2f32:
2767 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2768 case MVT::v16i8: Opc = ARM::VTRNq8; break;
2769 case MVT::v8i16: Opc = ARM::VTRNq16; break;
2770 case MVT::v4f32:
2771 case MVT::v4i32: Opc = ARM::VTRNq32; break;
2772 }
2773 SDValue Pred = getAL(CurDAG);
2774 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2775 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2776 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
2777 }
2778 case ARMISD::BUILD_VECTOR: {
2779 EVT VecVT = N->getValueType(0);
2780 EVT EltVT = VecVT.getVectorElementType();
2781 unsigned NumElts = VecVT.getVectorNumElements();
2782 if (EltVT == MVT::f64) {
2783 assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
2784 return createDRegPairNode(VecVT, N->getOperand(0), N->getOperand(1));
2785 }
2786 assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
2787 if (NumElts == 2)
2788 return createSRegPairNode(VecVT, N->getOperand(0), N->getOperand(1));
2789 assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
2790 return createQuadSRegsNode(VecVT, N->getOperand(0), N->getOperand(1),
2791 N->getOperand(2), N->getOperand(3));
2792 }
2793
2794 case ARMISD::VLD2DUP: {
2795 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
2796 ARM::VLD2DUPd32 };
2797 return SelectVLDDup(N, false, 2, Opcodes);
2798 }
2799
2800 case ARMISD::VLD3DUP: {
2801 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo,
2802 ARM::VLD3DUPd16Pseudo,
2803 ARM::VLD3DUPd32Pseudo };
2804 return SelectVLDDup(N, false, 3, Opcodes);
2805 }
2806
2807 case ARMISD::VLD4DUP: {
2808 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo,
2809 ARM::VLD4DUPd16Pseudo,
2810 ARM::VLD4DUPd32Pseudo };
2811 return SelectVLDDup(N, false, 4, Opcodes);
2812 }
2813
2814 case ARMISD::VLD2DUP_UPD: {
2815 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8wb_fixed,
2816 ARM::VLD2DUPd16wb_fixed,
2817 ARM::VLD2DUPd32wb_fixed };
2818 return SelectVLDDup(N, true, 2, Opcodes);
2819 }
2820
2821 case ARMISD::VLD3DUP_UPD: {
2822 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo_UPD,
2823 ARM::VLD3DUPd16Pseudo_UPD,
2824 ARM::VLD3DUPd32Pseudo_UPD };
2825 return SelectVLDDup(N, true, 3, Opcodes);
2826 }
2827
2828 case ARMISD::VLD4DUP_UPD: {
2829 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo_UPD,
2830 ARM::VLD4DUPd16Pseudo_UPD,
2831 ARM::VLD4DUPd32Pseudo_UPD };
2832 return SelectVLDDup(N, true, 4, Opcodes);
2833 }
2834
2835 case ARMISD::VLD1_UPD: {
2836 static const uint16_t DOpcodes[] = { ARM::VLD1d8wb_fixed,
2837 ARM::VLD1d16wb_fixed,
2838 ARM::VLD1d32wb_fixed,
2839 ARM::VLD1d64wb_fixed };
2840 static const uint16_t QOpcodes[] = { ARM::VLD1q8wb_fixed,
2841 ARM::VLD1q16wb_fixed,
2842 ARM::VLD1q32wb_fixed,
2843 ARM::VLD1q64wb_fixed };
2844 return SelectVLD(N, true, 1, DOpcodes, QOpcodes, nullptr);
2845 }
2846
2847 case ARMISD::VLD2_UPD: {
2848 static const uint16_t DOpcodes[] = { ARM::VLD2d8wb_fixed,
2849 ARM::VLD2d16wb_fixed,
2850 ARM::VLD2d32wb_fixed,
2851 ARM::VLD1q64wb_fixed};
2852 static const uint16_t QOpcodes[] = { ARM::VLD2q8PseudoWB_fixed,
2853 ARM::VLD2q16PseudoWB_fixed,
2854 ARM::VLD2q32PseudoWB_fixed };
2855 return SelectVLD(N, true, 2, DOpcodes, QOpcodes, nullptr);
2856 }
2857
2858 case ARMISD::VLD3_UPD: {
2859 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo_UPD,
2860 ARM::VLD3d16Pseudo_UPD,
2861 ARM::VLD3d32Pseudo_UPD,
2862 ARM::VLD1d64TPseudoWB_fixed};
2863 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
2864 ARM::VLD3q16Pseudo_UPD,
2865 ARM::VLD3q32Pseudo_UPD };
2866 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
2867 ARM::VLD3q16oddPseudo_UPD,
2868 ARM::VLD3q32oddPseudo_UPD };
2869 return SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
2870 }
2871
2872 case ARMISD::VLD4_UPD: {
2873 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo_UPD,
2874 ARM::VLD4d16Pseudo_UPD,
2875 ARM::VLD4d32Pseudo_UPD,
2876 ARM::VLD1d64QPseudoWB_fixed};
2877 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
2878 ARM::VLD4q16Pseudo_UPD,
2879 ARM::VLD4q32Pseudo_UPD };
2880 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD,
2881 ARM::VLD4q16oddPseudo_UPD,
2882 ARM::VLD4q32oddPseudo_UPD };
2883 return SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
2884 }
2885
2886 case ARMISD::VLD2LN_UPD: {
2887 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD,
2888 ARM::VLD2LNd16Pseudo_UPD,
2889 ARM::VLD2LNd32Pseudo_UPD };
2890 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
2891 ARM::VLD2LNq32Pseudo_UPD };
2892 return SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
2893 }
2894
2895 case ARMISD::VLD3LN_UPD: {
2896 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD,
2897 ARM::VLD3LNd16Pseudo_UPD,
2898 ARM::VLD3LNd32Pseudo_UPD };
2899 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
2900 ARM::VLD3LNq32Pseudo_UPD };
2901 return SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
2902 }
2903
2904 case ARMISD::VLD4LN_UPD: {
2905 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD,
2906 ARM::VLD4LNd16Pseudo_UPD,
2907 ARM::VLD4LNd32Pseudo_UPD };
2908 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
2909 ARM::VLD4LNq32Pseudo_UPD };
2910 return SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
2911 }
2912
2913 case ARMISD::VST1_UPD: {
2914 static const uint16_t DOpcodes[] = { ARM::VST1d8wb_fixed,
2915 ARM::VST1d16wb_fixed,
2916 ARM::VST1d32wb_fixed,
2917 ARM::VST1d64wb_fixed };
2918 static const uint16_t QOpcodes[] = { ARM::VST1q8wb_fixed,
2919 ARM::VST1q16wb_fixed,
2920 ARM::VST1q32wb_fixed,
2921 ARM::VST1q64wb_fixed };
2922 return SelectVST(N, true, 1, DOpcodes, QOpcodes, nullptr);
2923 }
2924
2925 case ARMISD::VST2_UPD: {
2926 static const uint16_t DOpcodes[] = { ARM::VST2d8wb_fixed,
2927 ARM::VST2d16wb_fixed,
2928 ARM::VST2d32wb_fixed,
2929 ARM::VST1q64wb_fixed};
2930 static const uint16_t QOpcodes[] = { ARM::VST2q8PseudoWB_fixed,
2931 ARM::VST2q16PseudoWB_fixed,
2932 ARM::VST2q32PseudoWB_fixed };
2933 return SelectVST(N, true, 2, DOpcodes, QOpcodes, nullptr);
2934 }
2935
2936 case ARMISD::VST3_UPD: {
2937 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo_UPD,
2938 ARM::VST3d16Pseudo_UPD,
2939 ARM::VST3d32Pseudo_UPD,
2940 ARM::VST1d64TPseudoWB_fixed};
2941 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
2942 ARM::VST3q16Pseudo_UPD,
2943 ARM::VST3q32Pseudo_UPD };
2944 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
2945 ARM::VST3q16oddPseudo_UPD,
2946 ARM::VST3q32oddPseudo_UPD };
2947 return SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
2948 }
2949
2950 case ARMISD::VST4_UPD: {
2951 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo_UPD,
2952 ARM::VST4d16Pseudo_UPD,
2953 ARM::VST4d32Pseudo_UPD,
2954 ARM::VST1d64QPseudoWB_fixed};
2955 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
2956 ARM::VST4q16Pseudo_UPD,
2957 ARM::VST4q32Pseudo_UPD };
2958 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD,
2959 ARM::VST4q16oddPseudo_UPD,
2960 ARM::VST4q32oddPseudo_UPD };
2961 return SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
2962 }
2963
2964 case ARMISD::VST2LN_UPD: {
2965 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD,
2966 ARM::VST2LNd16Pseudo_UPD,
2967 ARM::VST2LNd32Pseudo_UPD };
2968 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
2969 ARM::VST2LNq32Pseudo_UPD };
2970 return SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
2971 }
2972
2973 case ARMISD::VST3LN_UPD: {
2974 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD,
2975 ARM::VST3LNd16Pseudo_UPD,
2976 ARM::VST3LNd32Pseudo_UPD };
2977 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
2978 ARM::VST3LNq32Pseudo_UPD };
2979 return SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
2980 }
2981
2982 case ARMISD::VST4LN_UPD: {
2983 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD,
2984 ARM::VST4LNd16Pseudo_UPD,
2985 ARM::VST4LNd32Pseudo_UPD };
2986 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
2987 ARM::VST4LNq32Pseudo_UPD };
2988 return SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
2989 }
2990
2991 case ISD::INTRINSIC_VOID:
2992 case ISD::INTRINSIC_W_CHAIN: {
2993 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2994 switch (IntNo) {
2995 default:
2996 break;
2997
2998 case Intrinsic::arm_ldaexd:
2999 case Intrinsic::arm_ldrexd: {
3000 SDLoc dl(N);
3001 SDValue Chain = N->getOperand(0);
3002 SDValue MemAddr = N->getOperand(2);
3003 bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
3004
3005 bool IsAcquire = IntNo == Intrinsic::arm_ldaexd;
3006 unsigned NewOpc = isThumb ? (IsAcquire ? ARM::t2LDAEXD : ARM::t2LDREXD)
3007 : (IsAcquire ? ARM::LDAEXD : ARM::LDREXD);
3008
3009 // arm_ldrexd returns an i64 value in {i32, i32}
3010 std::vector<EVT> ResTys;
3011 if (isThumb) {
3012 ResTys.push_back(MVT::i32);
3013 ResTys.push_back(MVT::i32);
3014 } else
3015 ResTys.push_back(MVT::Untyped);
3016 ResTys.push_back(MVT::Other);
3017
3018 // Place arguments in the right order.
3019 SmallVector<SDValue, 7> Ops;
3020 Ops.push_back(MemAddr);
3021 Ops.push_back(getAL(CurDAG));
3022 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3023 Ops.push_back(Chain);
3024 SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
3025 // Transfer memoperands.
3026 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3027 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3028 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
3029
3030 // Remap uses.
3031 SDValue OutChain = isThumb ? SDValue(Ld, 2) : SDValue(Ld, 1);
3032 if (!SDValue(N, 0).use_empty()) {
3033 SDValue Result;
3034 if (isThumb)
3035 Result = SDValue(Ld, 0);
3036 else {
3037 SDValue SubRegIdx = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32);
3038 SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3039 dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
3041 Result = SDValue(ResNode, 0);
3041 }
3042 ReplaceUses(SDValue(N, 0), Result);
3043 }
3044 if (!SDValue(N, 1).use_empty()) {
3045 SDValue Result;
3046 if (isThumb)
3047 Result = SDValue(Ld, 1);
3048 else {
3049 SDValue SubRegIdx = CurDAG->getTargetConstant(ARM::gsub_1, MVT::i32);
3050 SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3051 dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
3053 Result = SDValue(ResNode, 0);
3053 }
3054 ReplaceUses(SDValue(N, 1), Result);
3055 }
3056 ReplaceUses(SDValue(N, 2), OutChain);
3057 return nullptr;
3058 }
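// [Editorial note, hedged.] IR reaching this path looks roughly like
//
//   %pair = call { i32, i32 } @llvm.arm.ldrexd(i8* %addr)
//
// In ARM mode the machine node produces a single Untyped GPRPair that is
// split with EXTRACT_SUBREG (gsub_0/gsub_1) above; in Thumb2 mode t2LDREXD
// already yields two separate i32 results, which is why the two remap paths
// differ.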
3059 case Intrinsic::arm_stlexd:
3060 case Intrinsic::arm_strexd: {
3061 SDLoc dl(N);
3062 SDValue Chain = N->getOperand(0);
3063 SDValue Val0 = N->getOperand(2);
3064 SDValue Val1 = N->getOperand(3);
3065 SDValue MemAddr = N->getOperand(4);
3066
3067 // Store-exclusive double returns an i32 value giving the status of
3068 // the issued store.
3069 EVT ResTys[] = { MVT::i32, MVT::Other };
3070
3071 bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
3072 // Place arguments in the right order.
3073 SmallVector<SDValue, 7> Ops;
3074 if (isThumb) {
3075 Ops.push_back(Val0);
3076 Ops.push_back(Val1);
3077 } else
3078 // arm_strexd uses GPRPair.
3079 Ops.push_back(SDValue(createGPRPairNode(MVT::Untyped, Val0, Val1), 0));
3080 Ops.push_back(MemAddr);
3081 Ops.push_back(getAL(CurDAG));
3082 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3083 Ops.push_back(Chain);
3084
3085 bool IsRelease = IntNo == Intrinsic::arm_stlexd;
3086 unsigned NewOpc = isThumb ? (IsRelease ? ARM::t2STLEXD : ARM::t2STREXD)
3087 : (IsRelease ? ARM::STLEXD : ARM::STREXD);
3088
3089 SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
3090 // Transfer memoperands.
3091 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3092 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3093 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
3094
3095 return St;
3096 }
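// [Editorial note, hedged.] The corresponding store side looks roughly like
//
//   %status = call i32 @llvm.arm.strexd(i32 %lo, i32 %hi, i8* %addr)
//
// where %status is 0 on success. In ARM mode the two value halves are first
// fused into a GPRPair, matching STREXD's even/even+1 register requirement.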
3097
3098 case Intrinsic::arm_neon_vld1: {
3099 static const uint16_t DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
3100 ARM::VLD1d32, ARM::VLD1d64 };
3101 static const uint16_t QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
3102 ARM::VLD1q32, ARM::VLD1q64};
3103 return SelectVLD(N, false, 1, DOpcodes, QOpcodes, nullptr);
3104 }
3105
3106 case Intrinsic::arm_neon_vld2: {
3107 static const uint16_t DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
3108 ARM::VLD2d32, ARM::VLD1q64 };
3109 static const uint16_t QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
3110 ARM::VLD2q32Pseudo };
3111 return SelectVLD(N, false, 2, DOpcodes, QOpcodes, nullptr);
3112 }
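// [Editorial example, hedged.] These cases are reached from front-end NEON
// intrinsics, e.g. with arm_neon.h:
//
//   uint8x8x2_t  v = vld2_u8(p);   // @llvm.arm.neon.vld2.v8i8  -> ARM::VLD2d8
//   uint8x16x2_t w = vld2q_u8(p);  // q-register form           -> VLD2q8Pseudo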
3113
3114 case Intrinsic::arm_neon_vld3: {
3115 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo,
3116 ARM::VLD3d16Pseudo,
3117 ARM::VLD3d32Pseudo,
3118 ARM::VLD1d64TPseudo };
3119 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
3120 ARM::VLD3q16Pseudo_UPD,
3121 ARM::VLD3q32Pseudo_UPD };
3122 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo,
3123 ARM::VLD3q16oddPseudo,
3124 ARM::VLD3q32oddPseudo };
3125 return SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
3126 }
3127
3128 case Intrinsic::arm_neon_vld4: {
3129 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo,
3130 ARM::VLD4d16Pseudo,
3131 ARM::VLD4d32Pseudo,
3132 ARM::VLD1d64QPseudo };
3133 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
3134 ARM::VLD4q16Pseudo_UPD,
3135 ARM::VLD4q32Pseudo_UPD };
3136 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo,
3137 ARM::VLD4q16oddPseudo,
3138 ARM::VLD4q32oddPseudo };
3139 return SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
3140 }
3141
3142 case Intrinsic::arm_neon_vld2lane: {
3143 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo,
3144 ARM::VLD2LNd16Pseudo,
3145 ARM::VLD2LNd32Pseudo };
3146 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo,
3147 ARM::VLD2LNq32Pseudo };
3148 return SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
3149 }
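// [Editorial example, hedged.] The *lane intrinsics replace one element in
// each of the N registers, e.g.
//
//   uint16x4x2_t r = vld2_lane_u16(p, v, 1);  // reload lane 1 of both d regs
//
// The QOpcodes tables start at 16 bits because ACLE defines no 8-bit
// q-register lane forms (there is no vld2q_lane_u8).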
3150
3151 case Intrinsic::arm_neon_vld3lane: {
3152 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo,
3153 ARM::VLD3LNd16Pseudo,
3154 ARM::VLD3LNd32Pseudo };
3155 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo,
3156 ARM::VLD3LNq32Pseudo };
3157 return SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
3158 }
3159
3160 case Intrinsic::arm_neon_vld4lane: {
3161 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo,
3162 ARM::VLD4LNd16Pseudo,
3163 ARM::VLD4LNd32Pseudo };
3164 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo,
3165 ARM::VLD4LNq32Pseudo };
3166 return SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
3167 }
3168
3169 case Intrinsic::arm_neon_vst1: {
3170 static const uint16_t DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
3171 ARM::VST1d32, ARM::VST1d64 };
3172 static const uint16_t QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
3173 ARM::VST1q32, ARM::VST1q64 };
3174 return SelectVST(N, false, 1, DOpcodes, QOpcodes, nullptr);
3175 }
3176
3177 case Intrinsic::arm_neon_vst2: {
3178 static const uint16_t DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
3179 ARM::VST2d32, ARM::VST1q64 };
3180 static const uint16_t QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
3181 ARM::VST2q32Pseudo };
3182 return SelectVST(N, false, 2, DOpcodes, QOpcodes, nullptr);
3183 }
3184
3185 case Intrinsic::arm_neon_vst3: {
3186 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo,
3187 ARM::VST3d16Pseudo,
3188 ARM::VST3d32Pseudo,
3189 ARM::VST1d64TPseudo };
3190 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
3191 ARM::VST3q16Pseudo_UPD,
3192 ARM::VST3q32Pseudo_UPD };
3193 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo,
3194 ARM::VST3q16oddPseudo,
3195 ARM::VST3q32oddPseudo };
3196 return SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
3197 }
3198
3199 case Intrinsic::arm_neon_vst4: {
3200 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo,
3201 ARM::VST4d16Pseudo,
3202 ARM::VST4d32Pseudo,
3203 ARM::VST1d64QPseudo };
3204 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
3205 ARM::VST4q16Pseudo_UPD,
3206 ARM::VST4q32Pseudo_UPD };
3207 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo,
3208 ARM::VST4q16oddPseudo,
3209 ARM::VST4q32oddPseudo };
3210 return SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
3211 }
3212
3213 case Intrinsic::arm_neon_vst2lane: {
3214 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo,
3215 ARM::VST2LNd16Pseudo,
3216 ARM::VST2LNd32Pseudo };
3217 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo,
3218 ARM::VST2LNq32Pseudo };
3219 return SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
3220 }
3221
3222 case Intrinsic::arm_neon_vst3lane: {
3223 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo,
3224 ARM::VST3LNd16Pseudo,
3225 ARM::VST3LNd32Pseudo };
3226 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo,
3227 ARM::VST3LNq32Pseudo };
3228 return SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
3229 }
3230
3231 case Intrinsic::arm_neon_vst4lane: {
3232 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo,
3233 ARM::VST4LNd16Pseudo,
3234 ARM::VST4LNd32Pseudo };
3235 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo,
3236 ARM::VST4LNq32Pseudo };
3237 return SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
3238 }
3239 }
3240 break;
3241 }
3242
3243 case ISD::INTRINSIC_WO_CHAIN: {
3244 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3245 switch (IntNo) {
3246 default:
3247 break;
3248
3249 case Intrinsic::arm_neon_vtbl2:
3250 return SelectVTBL(N, false, 2, ARM::VTBL2);
3251 case Intrinsic::arm_neon_vtbl3:
3252 return SelectVTBL(N, false, 3, ARM::VTBL3Pseudo);
3253 case Intrinsic::arm_neon_vtbl4:
3254 return SelectVTBL(N, false, 4, ARM::VTBL4Pseudo);
3255
3256 case Intrinsic::arm_neon_vtbx2:
3257 return SelectVTBL(N, true, 2, ARM::VTBX2);
3258 case Intrinsic::arm_neon_vtbx3:
3259 return SelectVTBL(N, true, 3, ARM::VTBX3Pseudo);
3260 case Intrinsic::arm_neon_vtbx4:
3261 return SelectVTBL(N, true, 4, ARM::VTBX4Pseudo);
3262 }
3263 break;
3264 }
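// [Editorial example, hedged.] VTBL/VTBX are byte-wise table lookups, e.g.
//
//   uint8x8_t r = vtbl2_u8(tab, idx);  // @llvm.arm.neon.vtbl2
//
// The boolean passed to SelectVTBL distinguishes the "extended" vtbx forms,
// which take the destination as an extra operand and leave bytes with
// out-of-range indices unchanged instead of zeroing them.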
3265
3266 case ARMISD::VTBL1: {
3267 SDLoc dl(N);
3268 EVT VT = N->getValueType(0);
3269 SmallVector<SDValue, 6> Ops;
3270
3271 Ops.push_back(N->getOperand(0));
3272 Ops.push_back(N->getOperand(1));
3273 Ops.push_back(getAL(CurDAG)); // Predicate
3274 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
3275 return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops);
3276 }
3277 case ARMISD::VTBL2: {
3278 SDLoc dl(N);
3279 EVT VT = N->getValueType(0);
3280
3281 // Form a REG_SEQUENCE to force register allocation.
3282 SDValue V0 = N->getOperand(0);
3283 SDValue V1 = N->getOperand(1);
3284 SDValue RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0);
3285
3286 SmallVector<SDValue, 6> Ops;
3287 Ops.push_back(RegSeq);
3288 Ops.push_back(N->getOperand(2));
3289 Ops.push_back(getAL(CurDAG)); // Predicate
3290 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
3291 return CurDAG->getMachineNode(ARM::VTBL2, dl, VT, Ops);
3292 }
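// [Editorial note, hedged.] VTBL2 reads its table from two consecutive d
// registers, so the two table operands are fused into one v16i8 value via
// createDRegPairNode; the resulting REG_SEQUENCE is what forces the
// allocator to pick an adjacent register pair.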
3293
3294 case ISD::CONCAT_VECTORS:
3295 return SelectConcatVector(N);
3296 }
3297
3298 return SelectCode(N);
3299}
3300
3301SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N) {
3302 std::vector<SDValue> AsmNodeOperands;
3303 unsigned Flag, Kind;
3304 bool Changed = false;
3305 unsigned NumOps = N->getNumOperands();
3306
3307 // Normally, i64 data is bound to two arbitrary GPRs for the "%r" constraint.
3308 // However, some instructions (e.g. ldrexd/strexd in ARM mode) require
3309 // (even/even+1) GPRs and use %n and %Hn to refer to the individual regs
3310 // respectively. Since there is no constraint to explicitly specify a
3311 // reg pair, we use the GPRPair reg class for "%r" for 64-bit data. For Thumb,
3312 // the 64-bit data may be referred to by H, Q, R modifiers, so we still pack
3313 // them into a GPRPair.
3314
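// [Editorial example, hedged.] A source pattern that exercises this rewrite:
//
//   long long v;
//   asm volatile("ldrexd %0, %H0, [%1]" : "=&r"(v) : "r"(addr));
//
// %0 and %H0 name the low and high halves of the 64-bit operand, which
// ldrexd requires to be an even/even+1 register pair.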
3315 SDLoc dl(N);
3316 SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps-1)
3317 : SDValue(nullptr, 0);
3318
3319 SmallVector<bool, 8> OpChanged;
3320 // Glue node will be appended late.
3321 for (unsigned i = 0, e = N->getGluedNode() ? NumOps - 1 : NumOps; i < e; ++i) {
3322 SDValue op = N->getOperand(i);
3323 AsmNodeOperands.push_back(op);
3324
3325 if (i < InlineAsm::Op_FirstOperand)
3326 continue;
3327
3328 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(i))) {
3329 Flag = C->getZExtValue();
3330 Kind = InlineAsm::getKind(Flag);
3331 }
3332 else
3333 continue;
3334
3335 // Immediate operands to inline asm in the SelectionDAG are modeled with
3336 // two operands. The first is a constant of value InlineAsm::Kind_Imm, and
3337 // the second is a constant with the value of the immediate. If we get here
3338 // and we have a Kind_Imm, skip the next operand, and continue.
3339 if (Kind == InlineAsm::Kind_Imm) {
3340 SDValue op = N->getOperand(++i);
3341 AsmNodeOperands.push_back(op);
3342 continue;
3343 }
3344
3345 unsigned NumRegs = InlineAsm::getNumOperandRegisters(Flag);
3346 if (NumRegs)
3347 OpChanged.push_back(false);
3348
3349 unsigned DefIdx = 0;
3350 bool IsTiedToChangedOp = false;
3351 // If it's a use that is tied with a previous def, it has no
3352 // reg class constraint.
3353 if (Changed && InlineAsm::isUseOperandTiedToDef(Flag, DefIdx))
3354 IsTiedToChangedOp = OpChanged[DefIdx];
3355
3356 if (Kind != InlineAsm::Kind_RegUse && Kind != InlineAsm::Kind_RegDef
3357 && Kind != InlineAsm::Kind_RegDefEarlyClobber)
3358 continue;
3359
3360 unsigned RC;
3361 bool HasRC = InlineAsm::hasRegClassConstraint(Flag, RC);
3362 if ((!IsTiedToChangedOp && (!HasRC || RC != ARM::GPRRegClassID))
3363 || NumRegs != 2)
3364 continue;
3365
3366 assert((i+2 < NumOps) && "Invalid number of operands in inline asm");
3367 SDValue V0 = N->getOperand(i+1);
3368 SDValue V1 = N->getOperand(i+2);
3369 unsigned Reg0 = cast<RegisterSDNode>(V0)->getReg();
3370 unsigned Reg1 = cast<RegisterSDNode>(V1)->getReg();
3371 SDValue PairedReg;
3372 MachineRegisterInfo &MRI = MF->getRegInfo();
3373
3374 if (Kind == InlineAsm::Kind_RegDef ||
3375 Kind == InlineAsm::Kind_RegDefEarlyClobber) {
3376 // Replace the two GPRs with 1 GPRPair and copy values from GPRPair to
3377 // the original GPRs.
3378
3379 unsigned GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
3380 PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
3381 SDValue Chain = SDValue(N,0);
3382
3383 SDNode *GU = N->getGluedUser();
3384 SDValue RegCopy = CurDAG->getCopyFromReg(Chain, dl, GPVR, MVT::Untyped,
3385 Chain.getValue(1));
3386
3387 // Extract values from a GPRPair reg and copy to the original GPR reg.
3388 SDValue Sub0 = CurDAG->getTargetExtractSubreg(ARM::gsub_0, dl, MVT::i32,
3389 RegCopy);
3390 SDValue Sub1 = CurDAG->getTargetExtractSubreg(ARM::gsub_1, dl, MVT::i32,
3391 RegCopy);
3392 SDValue T0 = CurDAG->getCopyToReg(Sub0, dl, Reg0, Sub0,
3393 RegCopy.getValue(1));
3394 SDValue T1 = CurDAG->getCopyToReg(Sub1, dl, Reg1, Sub1, T0.getValue(1));
3395
3396 // Update the original glue user.
3397 std::vector<SDValue> Ops(GU->op_begin(), GU->op_end()-1);
3398 Ops.push_back(T1.getValue(1));
3399 CurDAG->UpdateNodeOperands(GU, Ops);
3400 GU = T1.getNode();
3401 }
3402 else {
3403 // For Kind == InlineAsm::Kind_RegUse, we first copy two GPRs into a
3404 // GPRPair and then pass the GPRPair to the inline asm.
3405 SDValue Chain = AsmNodeOperands[InlineAsm::Op_InputChain];
3406
3407 // As REG_SEQ doesn't take RegisterSDNode, we copy them first.
3408 SDValue T0 = CurDAG->getCopyFromReg(Chain, dl, Reg0, MVT::i32,
3409 Chain.getValue(1));
3410 SDValue T1 = CurDAG->getCopyFromReg(Chain, dl, Reg1, MVT::i32,
3411 T0.getValue(1));
3412 SDValue Pair = SDValue(createGPRPairNode(MVT::Untyped, T0, T1), 0);
3413
3414 // Copy REG_SEQ into a GPRPair-typed VR and replace the original two
3415 // i32 VRs of inline asm with it.
3416 unsigned GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
3417 PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
3418 Chain = CurDAG->getCopyToReg(T1, dl, GPVR, Pair, T1.getValue(1));
3419
3420 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
3421 Glue = Chain.getValue(1);
3422 }
3423
3424 Changed = true;
3425
3426 if (PairedReg.getNode()) {
3427 OpChanged[OpChanged.size() - 1] = true;
3428 Flag = InlineAsm::getFlagWord(Kind, 1 /* RegNum*/);
3429 if (IsTiedToChangedOp)
3430 Flag = InlineAsm::getFlagWordForMatchingOp(Flag, DefIdx);
3431 else
3432 Flag = InlineAsm::getFlagWordForRegClass(Flag, ARM::GPRPairRegClassID);
3433 // Replace the current flag.
3434 AsmNodeOperands[AsmNodeOperands.size() - 1] = CurDAG->getTargetConstant(
3435 Flag, MVT::i32);
3436 // Add the new register node.
3437 AsmNodeOperands.push_back(PairedReg);
3438 // Skip the next two GPRs.
3439 i += 2;
3440 }
3441 }
3442
3443 if (Glue.getNode())
3444 AsmNodeOperands.push_back(Glue);
3445 if (!Changed)
3446 return nullptr;
3447
3448 SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N),
3449 CurDAG->getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
3450 New->setNodeId(-1);
3451 return New.getNode();
3452}
3453
3454
3455bool ARMDAGToDAGISel::
3456SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
3457 std::vector<SDValue> &OutOps) {
3458 assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
3459 // Require the address to be in a register. That is safe for all ARM
3460 // variants and it is hard to do anything much smarter without knowing
3461 // how the operand is used.
3462 OutOps.push_back(Op);
3463 return false;
3464}
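// [Editorial example, hedged.] For an "m"-constrained operand the address is
// simply materialized in a base register, e.g.
//
//   asm("ldr %0, %1" : "=r"(x) : "m"(*p));
//
// selects the operand as a [Rn]-style reference with the address of *p in a
// register.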
3465
3466/// createARMISelDag - This pass converts a legalized DAG into a
3467/// ARM-specific DAG, ready for instruction scheduling.
3468///
3469FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
3470 CodeGenOpt::Level OptLevel) {
3471 return new ARMDAGToDAGISel(TM, OptLevel);
3472}