1//===- AArch64InstructionSelector.cpp ----------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the InstructionSelector class for
10/// AArch64.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
15#include "AArch64InstrInfo.h"
18#include "AArch64RegisterInfo.h"
19#include "AArch64Subtarget.h"
40#include "llvm/IR/Constants.h"
43#include "llvm/IR/IntrinsicsAArch64.h"
45#include "llvm/IR/Type.h"
46#include "llvm/Pass.h"
47#include "llvm/Support/Debug.h"
49#include <optional>
50
51#define DEBUG_TYPE "aarch64-isel"
52
53using namespace llvm;
54using namespace MIPatternMatch;
55using namespace AArch64GISelUtils;
56
57namespace llvm {
58class BlockFrequencyInfo;
59class ProfileSummaryInfo;
60}
61
62namespace {
63
64#define GET_GLOBALISEL_PREDICATE_BITSET
65#include "AArch64GenGlobalISel.inc"
66#undef GET_GLOBALISEL_PREDICATE_BITSET
67
68
69class AArch64InstructionSelector : public InstructionSelector {
70public:
71 AArch64InstructionSelector(const AArch64TargetMachine &TM,
72 const AArch64Subtarget &STI,
73 const AArch64RegisterBankInfo &RBI);
74
75 bool select(MachineInstr &I) override;
76 static const char *getName() { return DEBUG_TYPE; }
77
78 void setupMF(MachineFunction &MF, GISelKnownBits *KB,
79 CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
80 BlockFrequencyInfo *BFI) override {
81 InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
82 MIB.setMF(MF);
83
84 // hasFnAttribute() is expensive to call on every BRCOND selection, so
85 // cache it here for each run of the selector.
86 ProduceNonFlagSettingCondBr =
87 !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening);
88 MFReturnAddr = Register();
89
90 processPHIs(MF);
91 }
92
93private:
94 /// tblgen-erated 'select' implementation, used as the initial selector for
95 /// the patterns that don't require complex C++.
96 bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
97
98 // A lowering phase that runs before any selection attempts.
99 // Returns true if the instruction was modified.
100 bool preISelLower(MachineInstr &I);
101
102 // An early selection function that runs before the selectImpl() call.
103 bool earlySelect(MachineInstr &I);
104
105 // Do some preprocessing of G_PHIs before we begin selection.
106 void processPHIs(MachineFunction &MF);
107
108 bool earlySelectSHL(MachineInstr &I, MachineRegisterInfo &MRI);
109
110 /// Eliminate same-sized cross-bank copies into stores before selectImpl().
111 bool contractCrossBankCopyIntoStore(MachineInstr &I,
112 MachineRegisterInfo &MRI);
113
114 bool convertPtrAddToAdd(MachineInstr &I, MachineRegisterInfo &MRI);
115
116 bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
117 MachineRegisterInfo &MRI) const;
118 bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
119 MachineRegisterInfo &MRI) const;
120
121 ///@{
122 /// Helper functions for selectCompareBranch.
123 bool selectCompareBranchFedByFCmp(MachineInstr &I, MachineInstr &FCmp,
124 MachineIRBuilder &MIB) const;
125 bool selectCompareBranchFedByICmp(MachineInstr &I, MachineInstr &ICmp,
126 MachineIRBuilder &MIB) const;
127 bool tryOptCompareBranchFedByICmp(MachineInstr &I, MachineInstr &ICmp,
128 MachineIRBuilder &MIB) const;
129 bool tryOptAndIntoCompareBranch(MachineInstr &AndInst, bool Invert,
130 MachineBasicBlock *DstMBB,
131 MachineIRBuilder &MIB) const;
132 ///@}
133
134 bool selectCompareBranch(MachineInstr &I, MachineFunction &MF,
135 MachineRegisterInfo &MRI);
136
137 bool selectVectorAshrLshr(MachineInstr &I, MachineRegisterInfo &MRI);
138 bool selectVectorSHL(MachineInstr &I, MachineRegisterInfo &MRI);
139
140 // Helper to generate an equivalent of scalar_to_vector into a new register,
141 // returned via 'Dst'.
142 MachineInstr *emitScalarToVector(unsigned EltSize,
143 const TargetRegisterClass *DstRC,
144 Register Scalar,
145 MachineIRBuilder &MIRBuilder) const;
146 /// Helper to narrow a vector that was widened by emitScalarToVector.
147 /// Copies the lowest part of a 128-bit or 64-bit vector to a 64-bit or
148 /// 32-bit vector, respectively.
149 MachineInstr *emitNarrowVector(Register DstReg, Register SrcReg,
150 MachineIRBuilder &MIRBuilder,
151 MachineRegisterInfo &MRI) const;
152
153 /// Emit a lane insert into \p DstReg, or a new vector register if
154 /// std::nullopt is provided.
155 ///
156 /// The lane inserted into is defined by \p LaneIdx. The vector source
157 /// register is given by \p SrcReg. The register containing the element is
158 /// given by \p EltReg.
159 MachineInstr *emitLaneInsert(std::optional<Register> DstReg, Register SrcReg,
160 Register EltReg, unsigned LaneIdx,
161 const RegisterBank &RB,
162 MachineIRBuilder &MIRBuilder) const;
163
164 /// Emit a sequence of instructions representing a constant \p CV for a
165 /// vector register \p Dst. (E.g. a MOV, or a load from a constant pool.)
166 ///
167 /// \returns the last instruction in the sequence on success, and nullptr
168 /// otherwise.
169 MachineInstr *emitConstantVector(Register Dst, Constant *CV,
170 MachineIRBuilder &MIRBuilder,
171 MachineRegisterInfo &MRI);
172
173 bool selectInsertElt(MachineInstr &I, MachineRegisterInfo &MRI);
174 bool tryOptConstantBuildVec(MachineInstr &MI, LLT DstTy,
175 MachineRegisterInfo &MRI);
176 /// \returns true if a G_BUILD_VECTOR instruction \p MI can be selected as a
177 /// SUBREG_TO_REG.
178 bool tryOptBuildVecToSubregToReg(MachineInstr &MI, MachineRegisterInfo &MRI);
179 bool selectBuildVector(MachineInstr &I, MachineRegisterInfo &MRI);
180 bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI);
181 bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI);
182
183 bool selectShuffleVector(MachineInstr &I, MachineRegisterInfo &MRI);
184 bool selectExtractElt(MachineInstr &I, MachineRegisterInfo &MRI);
185 bool selectConcatVectors(MachineInstr &I, MachineRegisterInfo &MRI);
186 bool selectSplitVectorUnmerge(MachineInstr &I, MachineRegisterInfo &MRI);
187
188 /// Helper function to select vector load intrinsics like
189 /// @llvm.aarch64.neon.ld2.*, @llvm.aarch64.neon.ld4.*, etc.
190 /// \p Opc is the opcode that the selected instruction should use.
191 /// \p NumVecs is the number of vector destinations for the instruction.
192 /// \p I is the original G_INTRINSIC_W_SIDE_EFFECTS instruction.
193 bool selectVectorLoadIntrinsic(unsigned Opc, unsigned NumVecs,
194 MachineInstr &I);
195 bool selectVectorLoadLaneIntrinsic(unsigned Opc, unsigned NumVecs,
196 MachineInstr &I);
197 void selectVectorStoreIntrinsic(MachineInstr &I, unsigned NumVecs,
198 unsigned Opc);
199 bool selectVectorStoreLaneIntrinsic(MachineInstr &I, unsigned NumVecs,
200 unsigned Opc);
201 bool selectIntrinsicWithSideEffects(MachineInstr &I,
202 MachineRegisterInfo &MRI);
203 bool selectIntrinsic(MachineInstr &I, MachineRegisterInfo &MRI);
204 bool selectVectorICmp(MachineInstr &I, MachineRegisterInfo &MRI);
205 bool selectJumpTable(MachineInstr &I, MachineRegisterInfo &MRI);
206 bool selectBrJT(MachineInstr &I, MachineRegisterInfo &MRI);
207 bool selectTLSGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI);
208 bool selectReduction(MachineInstr &I, MachineRegisterInfo &MRI);
209 bool selectMOPS(MachineInstr &I, MachineRegisterInfo &MRI);
210 bool selectUSMovFromExtend(MachineInstr &I, MachineRegisterInfo &MRI);
211
212 unsigned emitConstantPoolEntry(const Constant *CPVal,
213 MachineFunction &MF) const;
214 MachineInstr *emitLoadFromConstantPool(const Constant *CPVal,
215 MachineIRBuilder &MIRBuilder) const;
216
217 // Emit a vector concat operation.
218 MachineInstr *emitVectorConcat(std::optional<Register> Dst, Register Op1,
219 Register Op2,
220 MachineIRBuilder &MIRBuilder) const;
221
222 // Emit an integer compare between LHS and RHS, which checks for Predicate.
223 MachineInstr *emitIntegerCompare(MachineOperand &LHS, MachineOperand &RHS,
224 MachineOperand &Predicate,
225 MachineIRBuilder &MIRBuilder) const;
226
227 /// Emit a floating point comparison between \p LHS and \p RHS.
228 /// \p Pred, if given, is the intended predicate to use.
229 MachineInstr *
230 emitFPCompare(Register LHS, Register RHS, MachineIRBuilder &MIRBuilder,
231 std::optional<CmpInst::Predicate> = std::nullopt) const;
232
233 MachineInstr *
234 emitInstr(unsigned Opcode, std::initializer_list<llvm::DstOp> DstOps,
235 std::initializer_list<llvm::SrcOp> SrcOps,
236 MachineIRBuilder &MIRBuilder,
237 const ComplexRendererFns &RenderFns = std::nullopt) const;
238 /// Helper function to emit an add or sub instruction.
239 ///
240 /// \p AddrModeAndSizeToOpcode must contain each of the opcode variants above
241 /// in a specific order.
242 ///
243 /// Below is an example of the expected input to \p AddrModeAndSizeToOpcode.
244 ///
245 /// \code
246 /// const std::array<std::array<unsigned, 2>, 4> Table {
247 /// {{AArch64::ADDXri, AArch64::ADDWri},
248 /// {AArch64::ADDXrs, AArch64::ADDWrs},
249 /// {AArch64::ADDXrr, AArch64::ADDWrr},
250 /// {AArch64::SUBXri, AArch64::SUBWri},
251 /// {AArch64::ADDXrx, AArch64::ADDWrx}}};
252 /// \endcode
253 ///
254 /// Each row in the table corresponds to a different addressing mode. Each
255 /// column corresponds to a different register size.
256 ///
257 /// \attention Rows must be structured as follows:
258 /// - Row 0: The ri opcode variants
259 /// - Row 1: The rs opcode variants
260 /// - Row 2: The rr opcode variants
261 /// - Row 3: The ri opcode variants for negative immediates
262 /// - Row 4: The rx opcode variants
263 ///
264 /// \attention Columns must be structured as follows:
265 /// - Column 0: The 64-bit opcode variants
266 /// - Column 1: The 32-bit opcode variants
267 ///
268 /// \p Dst is the destination register of the binop to emit.
269 /// \p LHS is the left-hand operand of the binop to emit.
270 /// \p RHS is the right-hand operand of the binop to emit.
271 MachineInstr *emitAddSub(
272 const std::array<std::array<unsigned, 2>, 5> &AddrModeAndSizeToOpcode,
273 Register Dst, MachineOperand &LHS, MachineOperand &RHS,
274 MachineIRBuilder &MIRBuilder) const;
275 MachineInstr *emitADD(Register DefReg, MachineOperand &LHS,
276 MachineOperand &RHS,
277 MachineIRBuilder &MIRBuilder) const;
278 MachineInstr *emitADDS(Register Dst, MachineOperand &LHS, MachineOperand &RHS,
279 MachineIRBuilder &MIRBuilder) const;
280 MachineInstr *emitSUBS(Register Dst, MachineOperand &LHS, MachineOperand &RHS,
281 MachineIRBuilder &MIRBuilder) const;
282 MachineInstr *emitADCS(Register Dst, MachineOperand &LHS, MachineOperand &RHS,
283 MachineIRBuilder &MIRBuilder) const;
284 MachineInstr *emitSBCS(Register Dst, MachineOperand &LHS, MachineOperand &RHS,
285 MachineIRBuilder &MIRBuilder) const;
286 MachineInstr *emitCMN(MachineOperand &LHS, MachineOperand &RHS,
287 MachineIRBuilder &MIRBuilder) const;
288 MachineInstr *emitTST(MachineOperand &LHS, MachineOperand &RHS,
289 MachineIRBuilder &MIRBuilder) const;
290 MachineInstr *emitSelect(Register Dst, Register LHS, Register RHS,
291 AArch64CC::CondCode CC,
292 MachineIRBuilder &MIRBuilder) const;
293 MachineInstr *emitExtractVectorElt(std::optional<Register> DstReg,
294 const RegisterBank &DstRB, LLT ScalarTy,
295 Register VecReg, unsigned LaneIdx,
296 MachineIRBuilder &MIRBuilder) const;
297 MachineInstr *emitCSINC(Register Dst, Register Src1, Register Src2,
298 AArch64CC::CondCode Pred,
299 MachineIRBuilder &MIRBuilder) const;
300 /// Emit a CSet for a FP compare.
301 ///
302 /// \p Dst is expected to be a 32-bit scalar register.
303 MachineInstr *emitCSetForFCmp(Register Dst, CmpInst::Predicate Pred,
304 MachineIRBuilder &MIRBuilder) const;
305
306 /// Emit an instruction that sets NZCV to the carry-in expected by \p I.
307 /// Might elide the instruction if the previous instruction already sets NZCV
308 /// correctly.
309 MachineInstr *emitCarryIn(MachineInstr &I, Register CarryReg);
310
311 /// Emit the overflow op for \p Opcode.
312 ///
313 /// \p Opcode is expected to be an overflow op's opcode, e.g. G_UADDO,
314 /// G_USUBO, etc.
315 std::pair<MachineInstr *, AArch64CC::CondCode>
316 emitOverflowOp(unsigned Opcode, Register Dst, MachineOperand &LHS,
317 MachineOperand &RHS, MachineIRBuilder &MIRBuilder) const;
318
319 bool selectOverflowOp(MachineInstr &I, MachineRegisterInfo &MRI);
320
321 /// Emit expression as a conjunction (a series of CCMP/CFCMP ops).
322 /// In some cases this is even possible with OR operations in the expression.
323 MachineInstr *emitConjunction(Register Val, AArch64CC::CondCode &OutCC,
324 MachineIRBuilder &MIB) const;
325 MachineInstr *emitConditionalComparison(Register LHS, Register RHS,
326 CmpInst::Predicate CC,
327 AArch64CC::CondCode Predicate,
328 AArch64CC::CondCode OutCC,
329 MachineIRBuilder &MIB) const;
330 MachineInstr *emitConjunctionRec(Register Val, AArch64CC::CondCode &OutCC,
331 bool Negate, Register CCOp,
332 AArch64CC::CondCode Predicate,
333 MachineIRBuilder &MIB) const;
334
335 /// Emit a TB(N)Z instruction which tests \p Bit in \p TestReg.
336 /// \p IsNegative is true if the test should be "not zero".
337 /// This will also optimize the test bit instruction when possible.
338 MachineInstr *emitTestBit(Register TestReg, uint64_t Bit, bool IsNegative,
339 MachineBasicBlock *DstMBB,
340 MachineIRBuilder &MIB) const;
341
342 /// Emit a CB(N)Z instruction which branches to \p DestMBB.
343 MachineInstr *emitCBZ(Register CompareReg, bool IsNegative,
344 MachineBasicBlock *DestMBB,
345 MachineIRBuilder &MIB) const;
346
347 // Equivalent to the i32shift_a and friends from AArch64InstrInfo.td.
348 // We use these manually instead of using the importer since it doesn't
349 // support SDNodeXForm.
350 ComplexRendererFns selectShiftA_32(const MachineOperand &Root) const;
351 ComplexRendererFns selectShiftB_32(const MachineOperand &Root) const;
352 ComplexRendererFns selectShiftA_64(const MachineOperand &Root) const;
353 ComplexRendererFns selectShiftB_64(const MachineOperand &Root) const;
354
355 ComplexRendererFns select12BitValueWithLeftShift(uint64_t Immed) const;
356 ComplexRendererFns selectArithImmed(MachineOperand &Root) const;
357 ComplexRendererFns selectNegArithImmed(MachineOperand &Root) const;
358
359 ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root,
360 unsigned Size) const;
361
362 ComplexRendererFns selectAddrModeUnscaled8(MachineOperand &Root) const {
363 return selectAddrModeUnscaled(Root, 1);
364 }
365 ComplexRendererFns selectAddrModeUnscaled16(MachineOperand &Root) const {
366 return selectAddrModeUnscaled(Root, 2);
367 }
368 ComplexRendererFns selectAddrModeUnscaled32(MachineOperand &Root) const {
369 return selectAddrModeUnscaled(Root, 4);
370 }
371 ComplexRendererFns selectAddrModeUnscaled64(MachineOperand &Root) const {
372 return selectAddrModeUnscaled(Root, 8);
373 }
374 ComplexRendererFns selectAddrModeUnscaled128(MachineOperand &Root) const {
375 return selectAddrModeUnscaled(Root, 16);
376 }
377
378 /// Helper to try to fold in a GISEL_ADD_LOW into an immediate, to be used
379 /// from complex pattern matchers like selectAddrModeIndexed().
380 ComplexRendererFns tryFoldAddLowIntoImm(MachineInstr &RootDef, unsigned Size,
381 MachineRegisterInfo &MRI) const;
382
383 ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root,
384 unsigned Size) const;
385 template <int Width>
386 ComplexRendererFns selectAddrModeIndexed(MachineOperand &Root) const {
387 return selectAddrModeIndexed(Root, Width / 8);
388 }
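// Note (editor annotation, not in the original source): the Width template
// argument is a size in bits, so the tablegen-bound instantiations map onto
// byte sizes here; e.g. selectAddrModeIndexed<64>(Root) simply forwards to
// selectAddrModeIndexed(Root, /*Size=*/8).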
389
390 bool isWorthFoldingIntoExtendedReg(MachineInstr &MI,
391 const MachineRegisterInfo &MRI) const;
392 ComplexRendererFns
393 selectAddrModeShiftedExtendXReg(MachineOperand &Root,
394 unsigned SizeInBytes) const;
395
396 /// Returns a \p ComplexRendererFns which contains a base, offset, and whether
397 /// or not a shift + extend should be folded into an addressing mode. Returns
398 /// None when this is not profitable or possible.
399 ComplexRendererFns
400 selectExtendedSHL(MachineOperand &Root, MachineOperand &Base,
401 MachineOperand &Offset, unsigned SizeInBytes,
402 bool WantsExt) const;
403 ComplexRendererFns selectAddrModeRegisterOffset(MachineOperand &Root) const;
404 ComplexRendererFns selectAddrModeXRO(MachineOperand &Root,
405 unsigned SizeInBytes) const;
406 template <int Width>
407 ComplexRendererFns selectAddrModeXRO(MachineOperand &Root) const {
408 return selectAddrModeXRO(Root, Width / 8);
409 }
410
411 ComplexRendererFns selectAddrModeWRO(MachineOperand &Root,
412 unsigned SizeInBytes) const;
413 template <int Width>
414 ComplexRendererFns selectAddrModeWRO(MachineOperand &Root) const {
415 return selectAddrModeWRO(Root, Width / 8);
416 }
417
418 ComplexRendererFns selectShiftedRegister(MachineOperand &Root,
419 bool AllowROR = false) const;
420
421 ComplexRendererFns selectArithShiftedRegister(MachineOperand &Root) const {
422 return selectShiftedRegister(Root);
423 }
424
425 ComplexRendererFns selectLogicalShiftedRegister(MachineOperand &Root) const {
426 return selectShiftedRegister(Root, true);
427 }
428
429 /// Given an extend instruction, determine the correct shift-extend type for
430 /// that instruction.
431 ///
432 /// If the instruction is going to be used in a load or store, pass
433 /// \p IsLoadStore = true.
434 AArch64_AM::ShiftExtendType
435 getExtendTypeForInst(MachineInstr &MI, MachineRegisterInfo &MRI,
436 bool IsLoadStore = false) const;
437
438 /// Move \p Reg to \p RC if \p Reg is not already on \p RC.
439 ///
440 /// \returns Either \p Reg if no change was necessary, or the new register
441 /// created by moving \p Reg.
442 ///
443 /// Note: This uses emitCopy right now.
444 Register moveScalarRegClass(Register Reg, const TargetRegisterClass &RC,
445 MachineIRBuilder &MIB) const;
446
447 ComplexRendererFns selectArithExtendedRegister(MachineOperand &Root) const;
448
449 ComplexRendererFns selectExtractHigh(MachineOperand &Root) const;
450
451 void renderTruncImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
452 int OpIdx = -1) const;
453 void renderLogicalImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
454 int OpIdx = -1) const;
455 void renderLogicalImm64(MachineInstrBuilder &MIB, const MachineInstr &I,
456 int OpIdx = -1) const;
457 void renderFPImm16(MachineInstrBuilder &MIB, const MachineInstr &MI,
458 int OpIdx = -1) const;
459 void renderFPImm32(MachineInstrBuilder &MIB, const MachineInstr &MI,
460 int OpIdx = -1) const;
461 void renderFPImm64(MachineInstrBuilder &MIB, const MachineInstr &MI,
462 int OpIdx = -1) const;
463 void renderFPImm32SIMDModImmType4(MachineInstrBuilder &MIB,
464 const MachineInstr &MI,
465 int OpIdx = -1) const;
466
467 // Materialize a GlobalValue or BlockAddress using a movz+movk sequence.
468 void materializeLargeCMVal(MachineInstr &I, const Value *V, unsigned OpFlags);
469
470 // Optimization methods.
471 bool tryOptSelect(GSelect &Sel);
472 bool tryOptSelectConjunction(GSelect &Sel, MachineInstr &CondMI);
473 MachineInstr *tryFoldIntegerCompare(MachineOperand &LHS, MachineOperand &RHS,
474 MachineOperand &Predicate,
475 MachineIRBuilder &MIRBuilder) const;
476
477 /// Return true if \p MI is a load or store of \p NumBytes bytes.
478 bool isLoadStoreOfNumBytes(const MachineInstr &MI, unsigned NumBytes) const;
479
480 /// Returns true if \p MI is guaranteed to have the high-half of a 64-bit
481 /// register zeroed out. In other words, the result of MI has been explicitly
482 /// zero extended.
483 bool isDef32(const MachineInstr &MI) const;
484
485 const AArch64TargetMachine &TM;
486 const AArch64Subtarget &STI;
487 const AArch64InstrInfo &TII;
488 const AArch64RegisterInfo &TRI;
489 const AArch64RegisterBankInfo &RBI;
490
491 bool ProduceNonFlagSettingCondBr = false;
492
493 // Some cached values used during selection.
494 // We use LR as a live-in register, and we keep track of it here as it can be
495 // clobbered by calls.
496 Register MFReturnAddr;
497
498 MachineIRBuilder MIB;
499
500#define GET_GLOBALISEL_PREDICATES_DECL
501#include "AArch64GenGlobalISel.inc"
502#undef GET_GLOBALISEL_PREDICATES_DECL
503
504// We declare the temporaries used by selectImpl() in the class to minimize the
505// cost of constructing placeholder values.
506#define GET_GLOBALISEL_TEMPORARIES_DECL
507#include "AArch64GenGlobalISel.inc"
508#undef GET_GLOBALISEL_TEMPORARIES_DECL
509};
510
511} // end anonymous namespace
512
513#define GET_GLOBALISEL_IMPL
514#include "AArch64GenGlobalISel.inc"
515#undef GET_GLOBALISEL_IMPL
516
517AArch64InstructionSelector::AArch64InstructionSelector(
518 const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
519 const AArch64RegisterBankInfo &RBI)
520 : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
521 RBI(RBI),
522#define GET_GLOBALISEL_PREDICATES_INIT
523#include "AArch64GenGlobalISel.inc"
524#undef GET_GLOBALISEL_PREDICATES_INIT
525#define GET_GLOBALISEL_TEMPORARIES_INIT
526#include "AArch64GenGlobalISel.inc"
527#undef GET_GLOBALISEL_TEMPORARIES_INIT
528{
529}
530
531// FIXME: This should be target-independent, inferred from the types declared
532// for each class in the bank.
533//
534/// Given a register bank, and a type, return the smallest register class that
535/// can represent that combination.
536static const TargetRegisterClass *
537getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB,
538 bool GetAllRegSet = false) {
539 if (RB.getID() == AArch64::GPRRegBankID) {
540 if (Ty.getSizeInBits() <= 32)
541 return GetAllRegSet ? &AArch64::GPR32allRegClass
542 : &AArch64::GPR32RegClass;
543 if (Ty.getSizeInBits() == 64)
544 return GetAllRegSet ? &AArch64::GPR64allRegClass
545 : &AArch64::GPR64RegClass;
546 if (Ty.getSizeInBits() == 128)
547 return &AArch64::XSeqPairsClassRegClass;
548 return nullptr;
549 }
550
551 if (RB.getID() == AArch64::FPRRegBankID) {
552 switch (Ty.getSizeInBits()) {
553 case 8:
554 return &AArch64::FPR8RegClass;
555 case 16:
556 return &AArch64::FPR16RegClass;
557 case 32:
558 return &AArch64::FPR32RegClass;
559 case 64:
560 return &AArch64::FPR64RegClass;
561 case 128:
562 return &AArch64::FPR128RegClass;
563 }
564 return nullptr;
565 }
566
567 return nullptr;
568}
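// Illustrative sketch (editor annotation, not in the original source) of how
// this helper is typically used; the LLT value is an assumption made for the
// example, and RBI is assumed to be the selector's AArch64RegisterBankInfo:
//   LLT S64 = LLT::scalar(64);
//   const TargetRegisterClass *RC =
//       getRegClassForTypeOnBank(S64, RBI.getRegBank(AArch64::GPRRegBankID));
//   // RC == &AArch64::GPR64RegClass; the same size on the FPR bank would
//   // yield &AArch64::FPR64RegClass instead.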
569
570/// Given a register bank, and size in bits, return the smallest register class
571/// that can represent that combination.
572static const TargetRegisterClass *
573getMinClassForRegBank(const RegisterBank &RB, unsigned SizeInBits,
574 bool GetAllRegSet = false) {
575 unsigned RegBankID = RB.getID();
576
577 if (RegBankID == AArch64::GPRRegBankID) {
578 if (SizeInBits <= 32)
579 return GetAllRegSet ? &AArch64::GPR32allRegClass
580 : &AArch64::GPR32RegClass;
581 if (SizeInBits == 64)
582 return GetAllRegSet ? &AArch64::GPR64allRegClass
583 : &AArch64::GPR64RegClass;
584 if (SizeInBits == 128)
585 return &AArch64::XSeqPairsClassRegClass;
586 }
587
588 if (RegBankID == AArch64::FPRRegBankID) {
589 switch (SizeInBits) {
590 default:
591 return nullptr;
592 case 8:
593 return &AArch64::FPR8RegClass;
594 case 16:
595 return &AArch64::FPR16RegClass;
596 case 32:
597 return &AArch64::FPR32RegClass;
598 case 64:
599 return &AArch64::FPR64RegClass;
600 case 128:
601 return &AArch64::FPR128RegClass;
602 }
603 }
604
605 return nullptr;
606}
607
608/// Returns the correct subregister to use for a given register class.
609static bool getSubRegForClass(const TargetRegisterClass *RC,
610 const TargetRegisterInfo &TRI, unsigned &SubReg) {
611 switch (TRI.getRegSizeInBits(*RC)) {
612 case 8:
613 SubReg = AArch64::bsub;
614 break;
615 case 16:
616 SubReg = AArch64::hsub;
617 break;
618 case 32:
619 if (RC != &AArch64::FPR32RegClass)
620 SubReg = AArch64::sub_32;
621 else
622 SubReg = AArch64::ssub;
623 break;
624 case 64:
625 SubReg = AArch64::dsub;
626 break;
627 default:
628 LLVM_DEBUG(
629 dbgs() << "Couldn't find appropriate subregister for register class.");
630 return false;
631 }
632
633 return true;
634}
635
636/// Returns the minimum size the given register bank can hold.
637static unsigned getMinSizeForRegBank(const RegisterBank &RB) {
638 switch (RB.getID()) {
639 case AArch64::GPRRegBankID:
640 return 32;
641 case AArch64::FPRRegBankID:
642 return 8;
643 default:
644 llvm_unreachable("Tried to get minimum size for unknown register bank.");
645 }
646}
647
648/// Create a REG_SEQUENCE instruction using the registers in \p Regs.
649/// Helper function for functions like createDTuple and createQTuple.
650///
651/// \p RegClassIDs - The list of register class IDs available for some tuple of
652/// a scalar class. E.g. QQRegClassID, QQQRegClassID, QQQQRegClassID. This is
653/// expected to contain between 2 and 4 tuple classes.
654///
655/// \p SubRegs - The list of subregister classes associated with each register
656/// class ID in \p RegClassIDs. E.g., QQRegClassID should use the qsub0
657/// subregister class. The index of each subregister class is expected to
658/// correspond with the index of each register class.
659///
660/// \returns Either the destination register of REG_SEQUENCE instruction that
661/// was created, or the 0th element of \p Regs if \p Regs contains a single
662/// element.
663static Register createTuple(ArrayRef<Register> Regs,
664 const unsigned RegClassIDs[],
665 const unsigned SubRegs[], MachineIRBuilder &MIB) {
666 unsigned NumRegs = Regs.size();
667 if (NumRegs == 1)
668 return Regs[0];
669 assert(NumRegs >= 2 && NumRegs <= 4 &&
670 "Only support between two and 4 registers in a tuple!");
671 const TargetRegisterInfo *TRI = MIB.getMF().getSubtarget().getRegisterInfo();
672 auto *DesiredClass = TRI->getRegClass(RegClassIDs[NumRegs - 2]);
673 auto RegSequence =
674 MIB.buildInstr(TargetOpcode::REG_SEQUENCE, {DesiredClass}, {});
675 for (unsigned I = 0, E = Regs.size(); I < E; ++I) {
676 RegSequence.addUse(Regs[I]);
677 RegSequence.addImm(SubRegs[I]);
678 }
679 return RegSequence.getReg(0);
680}
681
682/// Create a tuple of D-registers using the registers in \p Regs.
683static Register createDTuple(ArrayRef<Register> Regs, MachineIRBuilder &MIB) {
684 static const unsigned RegClassIDs[] = {
685 AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
686 static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
687 AArch64::dsub2, AArch64::dsub3};
688 return createTuple(Regs, RegClassIDs, SubRegs, MIB);
689}
690
691/// Create a tuple of Q-registers using the registers in \p Regs.
692static Register createQTuple(ArrayRef<Register> Regs, MachineIRBuilder &MIB) {
693 static const unsigned RegClassIDs[] = {
694 AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
695 static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
696 AArch64::qsub2, AArch64::qsub3};
697 return createTuple(Regs, RegClassIDs, SubRegs, MIB);
698}
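// Illustrative sketch (editor annotation, not in the original source), using
// hypothetical vregs Reg0/Reg1: for a two-register tuple, e.g. the
// destinations of an @llvm.aarch64.neon.ld2 of Q registers,
//   Register Tuple = createQTuple({Reg0, Reg1}, MIB);
// emits "Tuple = REG_SEQUENCE Reg0, qsub0, Reg1, qsub1" on the QQ register
// class, while a single-element Regs array is returned unchanged.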
699
700static std::optional<uint64_t> getImmedFromMO(const MachineOperand &Root) {
701 auto &MI = *Root.getParent();
702 auto &MBB = *MI.getParent();
703 auto &MF = *MBB.getParent();
704 auto &MRI = MF.getRegInfo();
705 uint64_t Immed;
706 if (Root.isImm())
707 Immed = Root.getImm();
708 else if (Root.isCImm())
709 Immed = Root.getCImm()->getZExtValue();
710 else if (Root.isReg()) {
711 auto ValAndVReg =
712 getIConstantVRegValWithLookThrough(Root.getReg(), MRI);
713 if (!ValAndVReg)
714 return std::nullopt;
715 Immed = ValAndVReg->Value.getSExtValue();
716 } else
717 return std::nullopt;
718 return Immed;
719}
720
721/// Check whether \p I is a currently unsupported binary operation:
722/// - it has an unsized type
723/// - an operand is not a vreg
724/// - its operands are not all in the same bank
725/// These are checks that should someday live in the verifier, but right now,
726/// these are mostly limitations of the aarch64 selector.
727static bool unsupportedBinOp(const MachineInstr &I,
728 const AArch64RegisterBankInfo &RBI,
729 const MachineRegisterInfo &MRI,
730 const AArch64RegisterInfo &TRI) {
731 LLT Ty = MRI.getType(I.getOperand(0).getReg());
732 if (!Ty.isValid()) {
733 LLVM_DEBUG(dbgs() << "Generic binop register should be typed\n");
734 return true;
735 }
736
737 const RegisterBank *PrevOpBank = nullptr;
738 for (auto &MO : I.operands()) {
739 // FIXME: Support non-register operands.
740 if (!MO.isReg()) {
741 LLVM_DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
742 return true;
743 }
744
745 // FIXME: Can generic operations have physical registers operands? If
746 // so, this will need to be taught about that, and we'll need to get the
747 // bank out of the minimal class for the register.
748 // Either way, this needs to be documented (and possibly verified).
749 if (!MO.getReg().isVirtual()) {
750 LLVM_DEBUG(dbgs() << "Generic inst has physical register operand\n");
751 return true;
752 }
753
754 const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
755 if (!OpBank) {
756 LLVM_DEBUG(dbgs() << "Generic register has no bank or class\n");
757 return true;
758 }
759
760 if (PrevOpBank && OpBank != PrevOpBank) {
761 LLVM_DEBUG(dbgs() << "Generic inst operands have different banks\n");
762 return true;
763 }
764 PrevOpBank = OpBank;
765 }
766 return false;
767}
768
769/// Select the AArch64 opcode for the basic binary operation \p GenericOpc
770/// (such as G_OR or G_SDIV), appropriate for the register bank \p RegBankID
771/// and of size \p OpSize.
772/// \returns \p GenericOpc if the combination is unsupported.
773static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
774 unsigned OpSize) {
775 switch (RegBankID) {
776 case AArch64::GPRRegBankID:
777 if (OpSize == 32) {
778 switch (GenericOpc) {
779 case TargetOpcode::G_SHL:
780 return AArch64::LSLVWr;
781 case TargetOpcode::G_LSHR:
782 return AArch64::LSRVWr;
783 case TargetOpcode::G_ASHR:
784 return AArch64::ASRVWr;
785 default:
786 return GenericOpc;
787 }
788 } else if (OpSize == 64) {
789 switch (GenericOpc) {
790 case TargetOpcode::G_PTR_ADD:
791 return AArch64::ADDXrr;
792 case TargetOpcode::G_SHL:
793 return AArch64::LSLVXr;
794 case TargetOpcode::G_LSHR:
795 return AArch64::LSRVXr;
796 case TargetOpcode::G_ASHR:
797 return AArch64::ASRVXr;
798 default:
799 return GenericOpc;
800 }
801 }
802 break;
803 case AArch64::FPRRegBankID:
804 switch (OpSize) {
805 case 32:
806 switch (GenericOpc) {
807 case TargetOpcode::G_FADD:
808 return AArch64::FADDSrr;
809 case TargetOpcode::G_FSUB:
810 return AArch64::FSUBSrr;
811 case TargetOpcode::G_FMUL:
812 return AArch64::FMULSrr;
813 case TargetOpcode::G_FDIV:
814 return AArch64::FDIVSrr;
815 default:
816 return GenericOpc;
817 }
818 case 64:
819 switch (GenericOpc) {
820 case TargetOpcode::G_FADD:
821 return AArch64::FADDDrr;
822 case TargetOpcode::G_FSUB:
823 return AArch64::FSUBDrr;
824 case TargetOpcode::G_FMUL:
825 return AArch64::FMULDrr;
826 case TargetOpcode::G_FDIV:
827 return AArch64::FDIVDrr;
828 case TargetOpcode::G_OR:
829 return AArch64::ORRv8i8;
830 default:
831 return GenericOpc;
832 }
833 }
834 break;
835 }
836 return GenericOpc;
837}
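// Note (editor annotation, not in the original source): sample mappings made
// by selectBinaryOp above: a 32-bit G_SHL on the GPR bank becomes LSLVWr, a
// 64-bit G_PTR_ADD becomes ADDXrr, and a 64-bit G_FADD on the FPR bank becomes
// FADDDrr. Any combination not covered falls through and returns GenericOpc
// unchanged so the caller can detect the unsupported case.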
838
839/// Select the AArch64 opcode for the G_LOAD or G_STORE operation \p GenericOpc,
840/// appropriate for the (value) register bank \p RegBankID and of memory access
841/// size \p OpSize. This returns the variant with the base+unsigned-immediate
842/// addressing mode (e.g., LDRXui).
843/// \returns \p GenericOpc if the combination is unsupported.
844static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
845 unsigned OpSize) {
846 const bool isStore = GenericOpc == TargetOpcode::G_STORE;
847 switch (RegBankID) {
848 case AArch64::GPRRegBankID:
849 switch (OpSize) {
850 case 8:
851 return isStore ? AArch64::STRBBui : AArch64::LDRBBui;
852 case 16:
853 return isStore ? AArch64::STRHHui : AArch64::LDRHHui;
854 case 32:
855 return isStore ? AArch64::STRWui : AArch64::LDRWui;
856 case 64:
857 return isStore ? AArch64::STRXui : AArch64::LDRXui;
858 }
859 break;
860 case AArch64::FPRRegBankID:
861 switch (OpSize) {
862 case 8:
863 return isStore ? AArch64::STRBui : AArch64::LDRBui;
864 case 16:
865 return isStore ? AArch64::STRHui : AArch64::LDRHui;
866 case 32:
867 return isStore ? AArch64::STRSui : AArch64::LDRSui;
868 case 64:
869 return isStore ? AArch64::STRDui : AArch64::LDRDui;
870 case 128:
871 return isStore ? AArch64::STRQui : AArch64::LDRQui;
872 }
873 break;
874 }
875 return GenericOpc;
876}
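// Note (editor annotation, not in the original source): selectLoadStoreUIOp
// picks the base + unsigned-immediate ("ui") variants, e.g. a 32-bit G_LOAD on
// the GPR bank maps to LDRWui while the same load on the FPR bank maps to
// LDRSui; 128-bit accesses are only available on the FPR bank (LDRQui/STRQui).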
877
878/// Helper function for selectCopy. Inserts a subregister copy from \p SrcReg
879/// to \p *To.
880///
881/// E.g. "To = COPY SrcReg:SubReg"
882static bool copySubReg(MachineInstr &I, MachineRegisterInfo &MRI,
883 const RegisterBankInfo &RBI, Register SrcReg,
884 const TargetRegisterClass *To, unsigned SubReg) {
885 assert(SrcReg.isValid() && "Expected a valid source register?");
886 assert(To && "Destination register class cannot be null");
887 assert(SubReg && "Expected a valid subregister");
888
889 MachineIRBuilder MIB(I);
890 auto SubRegCopy =
891 MIB.buildInstr(TargetOpcode::COPY, {To}, {}).addReg(SrcReg, 0, SubReg);
892 MachineOperand &RegOp = I.getOperand(1);
893 RegOp.setReg(SubRegCopy.getReg(0));
894
895 // It's possible that the destination register won't be constrained. Make
896 // sure that happens.
897 if (!I.getOperand(0).getReg().isPhysical())
898 RBI.constrainGenericRegister(I.getOperand(0).getReg(), *To, MRI);
899
900 return true;
901}
902
903/// Helper function to get the source and destination register classes for a
904/// copy. Returns a std::pair containing the source register class for the
905/// copy, and the destination register class for the copy. If a register class
906/// cannot be determined, then it will be nullptr.
907static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
908getRegClassesForCopy(MachineInstr &I, const TargetInstrInfo &TII,
909 MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
910 const RegisterBankInfo &RBI) {
911 Register DstReg = I.getOperand(0).getReg();
912 Register SrcReg = I.getOperand(1).getReg();
913 const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
914 const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
915 unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
916 unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
917
918 // Special casing for cross-bank copies of s1s. We can technically represent
919 // a 1-bit value with any size of register. The minimum size for a GPR is 32
920 // bits. So, we need to put the FPR on 32 bits as well.
921 //
922 // FIXME: I'm not sure if this case holds true outside of copies. If it does,
923 // then we can pull it into the helpers that get the appropriate class for a
924 // register bank. Or make a new helper that carries along some constraint
925 // information.
926 if (SrcRegBank != DstRegBank && (DstSize == 1 && SrcSize == 1))
927 SrcSize = DstSize = 32;
928
929 return {getMinClassForRegBank(SrcRegBank, SrcSize, true),
930 getMinClassForRegBank(DstRegBank, DstSize, true)};
931}
932
933// FIXME: We need some sort of API in RBI/TRI to allow generic code to
934// constrain operands of simple instructions given a TargetRegisterClass
935// and LLT
936static bool selectDebugInstr(MachineInstr &I, MachineRegisterInfo &MRI,
937 const RegisterBankInfo &RBI) {
938 for (MachineOperand &MO : I.operands()) {
939 if (!MO.isReg())
940 continue;
941 Register Reg = MO.getReg();
942 if (!Reg)
943 continue;
944 if (Reg.isPhysical())
945 continue;
946 LLT Ty = MRI.getType(Reg);
947 const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
948 const TargetRegisterClass *RC =
949 RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
950 if (!RC) {
951 const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
952 RC = getRegClassForTypeOnBank(Ty, RB);
953 if (!RC) {
955 dbgs() << "Warning: DBG_VALUE operand has unexpected size/bank\n");
956 break;
957 }
958 }
959 RBI.constrainGenericRegister(Reg, *RC, MRI);
960 }
961
962 return true;
963}
964
965static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
966 MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
967 const RegisterBankInfo &RBI) {
968 Register DstReg = I.getOperand(0).getReg();
969 Register SrcReg = I.getOperand(1).getReg();
970 const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
971 const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
972
973 // Find the correct register classes for the source and destination registers.
974 const TargetRegisterClass *SrcRC;
975 const TargetRegisterClass *DstRC;
976 std::tie(SrcRC, DstRC) = getRegClassesForCopy(I, TII, MRI, TRI, RBI);
977
978 if (!DstRC) {
979 LLVM_DEBUG(dbgs() << "Unexpected dest size "
980 << RBI.getSizeInBits(DstReg, MRI, TRI) << '\n');
981 return false;
982 }
983
984 // Is this a copy? If so, then we may need to insert a subregister copy.
985 if (I.isCopy()) {
986 // Yes. Check if there's anything to fix up.
987 if (!SrcRC) {
988 LLVM_DEBUG(dbgs() << "Couldn't determine source register class\n");
989 return false;
990 }
991
992 unsigned SrcSize = TRI.getRegSizeInBits(*SrcRC);
993 unsigned DstSize = TRI.getRegSizeInBits(*DstRC);
994 unsigned SubReg;
995
996 // If the source bank doesn't support a subregister copy small enough,
997 // then we first need to copy to the destination bank.
998 if (getMinSizeForRegBank(SrcRegBank) > DstSize) {
999 const TargetRegisterClass *DstTempRC =
1000 getMinClassForRegBank(DstRegBank, SrcSize, /* GetAllRegSet */ true);
1001 getSubRegForClass(DstRC, TRI, SubReg);
1002
1003 MachineIRBuilder MIB(I);
1004 auto Copy = MIB.buildCopy({DstTempRC}, {SrcReg});
1005 copySubReg(I, MRI, RBI, Copy.getReg(0), DstRC, SubReg);
1006 } else if (SrcSize > DstSize) {
1007 // If the source register is bigger than the destination we need to
1008 // perform a subregister copy.
1009 const TargetRegisterClass *SubRegRC =
1010 getMinClassForRegBank(SrcRegBank, DstSize, /* GetAllRegSet */ true);
1011 getSubRegForClass(SubRegRC, TRI, SubReg);
1012 copySubReg(I, MRI, RBI, SrcReg, DstRC, SubReg);
1013 } else if (DstSize > SrcSize) {
1014 // If the destination register is bigger than the source we need to do
1015 // a promotion using SUBREG_TO_REG.
1016 const TargetRegisterClass *PromotionRC =
1017 getMinClassForRegBank(SrcRegBank, DstSize, /* GetAllRegSet */ true);
1018 getSubRegForClass(SrcRC, TRI, SubReg);
1019
1020 Register PromoteReg = MRI.createVirtualRegister(PromotionRC);
1021 BuildMI(*I.getParent(), I, I.getDebugLoc(),
1022 TII.get(AArch64::SUBREG_TO_REG), PromoteReg)
1023 .addImm(0)
1024 .addUse(SrcReg)
1025 .addImm(SubReg);
1026 MachineOperand &RegOp = I.getOperand(1);
1027 RegOp.setReg(PromoteReg);
1028 }
1029
1030 // If the destination is a physical register, then there's nothing to
1031 // change, so we're done.
1032 if (DstReg.isPhysical())
1033 return true;
1034 }
1035
1036 // No need to constrain SrcReg. It will get constrained when we hit another
1037 // of its uses or defs. Copies do not have constraints.
1038 if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
1039 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
1040 << " operand\n");
1041 return false;
1042 }
1043
1044 // If this is a GPR ZEXT, we just want to reduce it down into a copy.
1045 // The sizes will be mismatched with the source < 32b, but that's ok.
1046 if (I.getOpcode() == TargetOpcode::G_ZEXT) {
1047 I.setDesc(TII.get(AArch64::COPY));
1048 assert(SrcRegBank.getID() == AArch64::GPRRegBankID);
1049 return selectCopy(I, TII, MRI, TRI, RBI);
1050 }
1051
1052 I.setDesc(TII.get(AArch64::COPY));
1053 return true;
1054}
1055
1056static unsigned selectFPConvOpc(unsigned GenericOpc, LLT DstTy, LLT SrcTy) {
1057 if (!DstTy.isScalar() || !SrcTy.isScalar())
1058 return GenericOpc;
1059
1060 const unsigned DstSize = DstTy.getSizeInBits();
1061 const unsigned SrcSize = SrcTy.getSizeInBits();
1062
1063 switch (DstSize) {
1064 case 32:
1065 switch (SrcSize) {
1066 case 32:
1067 switch (GenericOpc) {
1068 case TargetOpcode::G_SITOFP:
1069 return AArch64::SCVTFUWSri;
1070 case TargetOpcode::G_UITOFP:
1071 return AArch64::UCVTFUWSri;
1072 case TargetOpcode::G_FPTOSI:
1073 return AArch64::FCVTZSUWSr;
1074 case TargetOpcode::G_FPTOUI:
1075 return AArch64::FCVTZUUWSr;
1076 default:
1077 return GenericOpc;
1078 }
1079 case 64:
1080 switch (GenericOpc) {
1081 case TargetOpcode::G_SITOFP:
1082 return AArch64::SCVTFUXSri;
1083 case TargetOpcode::G_UITOFP:
1084 return AArch64::UCVTFUXSri;
1085 case TargetOpcode::G_FPTOSI:
1086 return AArch64::FCVTZSUWDr;
1087 case TargetOpcode::G_FPTOUI:
1088 return AArch64::FCVTZUUWDr;
1089 default:
1090 return GenericOpc;
1091 }
1092 default:
1093 return GenericOpc;
1094 }
1095 case 64:
1096 switch (SrcSize) {
1097 case 32:
1098 switch (GenericOpc) {
1099 case TargetOpcode::G_SITOFP:
1100 return AArch64::SCVTFUWDri;
1101 case TargetOpcode::G_UITOFP:
1102 return AArch64::UCVTFUWDri;
1103 case TargetOpcode::G_FPTOSI:
1104 return AArch64::FCVTZSUXSr;
1105 case TargetOpcode::G_FPTOUI:
1106 return AArch64::FCVTZUUXSr;
1107 default:
1108 return GenericOpc;
1109 }
1110 case 64:
1111 switch (GenericOpc) {
1112 case TargetOpcode::G_SITOFP:
1113 return AArch64::SCVTFUXDri;
1114 case TargetOpcode::G_UITOFP:
1115 return AArch64::UCVTFUXDri;
1116 case TargetOpcode::G_FPTOSI:
1117 return AArch64::FCVTZSUXDr;
1118 case TargetOpcode::G_FPTOUI:
1119 return AArch64::FCVTZUUXDr;
1120 default:
1121 return GenericOpc;
1122 }
1123 default:
1124 return GenericOpc;
1125 }
1126 default:
1127 return GenericOpc;
1128 };
1129 return GenericOpc;
1130}
1131
1132MachineInstr *
1133AArch64InstructionSelector::emitSelect(Register Dst, Register True,
1134 Register False, AArch64CC::CondCode CC,
1135 MachineIRBuilder &MIB) const {
1136 MachineRegisterInfo &MRI = *MIB.getMRI();
1137 assert(RBI.getRegBank(False, MRI, TRI)->getID() ==
1138 RBI.getRegBank(True, MRI, TRI)->getID() &&
1139 "Expected both select operands to have the same regbank?");
1140 LLT Ty = MRI.getType(True);
1141 if (Ty.isVector())
1142 return nullptr;
1143 const unsigned Size = Ty.getSizeInBits();
1144 assert((Size == 32 || Size == 64) &&
1145 "Expected 32 bit or 64 bit select only?");
1146 const bool Is32Bit = Size == 32;
1147 if (RBI.getRegBank(True, MRI, TRI)->getID() != AArch64::GPRRegBankID) {
1148 unsigned Opc = Is32Bit ? AArch64::FCSELSrrr : AArch64::FCSELDrrr;
1149 auto FCSel = MIB.buildInstr(Opc, {Dst}, {True, False}).addImm(CC);
1150 constrainSelectedInstRegOperands(*FCSel, TII, TRI, RBI);
1151 return &*FCSel;
1152 }
1153
1154 // By default, we'll try and emit a CSEL.
1155 unsigned Opc = Is32Bit ? AArch64::CSELWr : AArch64::CSELXr;
1156 bool Optimized = false;
1157 auto TryFoldBinOpIntoSelect = [&Opc, Is32Bit, &CC, &MRI,
1158 &Optimized](Register &Reg, Register &OtherReg,
1159 bool Invert) {
1160 if (Optimized)
1161 return false;
1162
1163 // Attempt to fold:
1164 //
1165 // %sub = G_SUB 0, %x
1166 // %select = G_SELECT cc, %reg, %sub
1167 //
1168 // Into:
1169 // %select = CSNEG %reg, %x, cc
1170 Register MatchReg;
1171 if (mi_match(Reg, MRI, m_Neg(m_Reg(MatchReg)))) {
1172 Opc = Is32Bit ? AArch64::CSNEGWr : AArch64::CSNEGXr;
1173 Reg = MatchReg;
1174 if (Invert) {
1175 CC = AArch64CC::getInvertedCondCode(CC);
1176 std::swap(Reg, OtherReg);
1177 }
1178 return true;
1179 }
1180
1181 // Attempt to fold:
1182 //
1183 // %xor = G_XOR %x, -1
1184 // %select = G_SELECT cc, %reg, %xor
1185 //
1186 // Into:
1187 // %select = CSINV %reg, %x, cc
1188 if (mi_match(Reg, MRI, m_Not(m_Reg(MatchReg)))) {
1189 Opc = Is32Bit ? AArch64::CSINVWr : AArch64::CSINVXr;
1190 Reg = MatchReg;
1191 if (Invert) {
1192 CC = AArch64CC::getInvertedCondCode(CC);
1193 std::swap(Reg, OtherReg);
1194 }
1195 return true;
1196 }
1197
1198 // Attempt to fold:
1199 //
1200 // %add = G_ADD %x, 1
1201 // %select = G_SELECT cc, %reg, %add
1202 //
1203 // Into:
1204 // %select = CSINC %reg, %x, cc
1205 if (mi_match(Reg, MRI,
1206 m_any_of(m_GAdd(m_Reg(MatchReg), m_SpecificICst(1)),
1207 m_GPtrAdd(m_Reg(MatchReg), m_SpecificICst(1))))) {
1208 Opc = Is32Bit ? AArch64::CSINCWr : AArch64::CSINCXr;
1209 Reg = MatchReg;
1210 if (Invert) {
1211 CC = AArch64CC::getInvertedCondCode(CC);
1212 std::swap(Reg, OtherReg);
1213 }
1214 return true;
1215 }
1216
1217 return false;
1218 };
1219
1220 // Helper lambda which tries to use CSINC/CSINV for the instruction when its
1221 // true/false values are constants.
1222 // FIXME: All of these patterns already exist in tablegen. We should be
1223 // able to import these.
1224 auto TryOptSelectCst = [&Opc, &True, &False, &CC, Is32Bit, &MRI,
1225 &Optimized]() {
1226 if (Optimized)
1227 return false;
1228 auto TrueCst = getIConstantVRegValWithLookThrough(True, MRI);
1229 auto FalseCst = getIConstantVRegValWithLookThrough(False, MRI);
1230 if (!TrueCst && !FalseCst)
1231 return false;
1232
1233 Register ZReg = Is32Bit ? AArch64::WZR : AArch64::XZR;
1234 if (TrueCst && FalseCst) {
1235 int64_t T = TrueCst->Value.getSExtValue();
1236 int64_t F = FalseCst->Value.getSExtValue();
1237
1238 if (T == 0 && F == 1) {
1239 // G_SELECT cc, 0, 1 -> CSINC zreg, zreg, cc
1240 Opc = Is32Bit ? AArch64::CSINCWr : AArch64::CSINCXr;
1241 True = ZReg;
1242 False = ZReg;
1243 return true;
1244 }
1245
1246 if (T == 0 && F == -1) {
1247 // G_SELECT cc 0, -1 -> CSINV zreg, zreg cc
1248 Opc = Is32Bit ? AArch64::CSINVWr : AArch64::CSINVXr;
1249 True = ZReg;
1250 False = ZReg;
1251 return true;
1252 }
1253 }
1254
1255 if (TrueCst) {
1256 int64_t T = TrueCst->Value.getSExtValue();
1257 if (T == 1) {
1258 // G_SELECT cc, 1, f -> CSINC f, zreg, inv_cc
1259 Opc = Is32Bit ? AArch64::CSINCWr : AArch64::CSINCXr;
1260 True = False;
1261 False = ZReg;
1262 CC = AArch64CC::getInvertedCondCode(CC);
1263 return true;
1264 }
1265
1266 if (T == -1) {
1267 // G_SELECT cc, -1, f -> CSINV f, zreg, inv_cc
1268 Opc = Is32Bit ? AArch64::CSINVWr : AArch64::CSINVXr;
1269 True = False;
1270 False = ZReg;
1271 CC = AArch64CC::getInvertedCondCode(CC);
1272 return true;
1273 }
1274 }
1275
1276 if (FalseCst) {
1277 int64_t F = FalseCst->Value.getSExtValue();
1278 if (F == 1) {
1279 // G_SELECT cc, t, 1 -> CSINC t, zreg, cc
1280 Opc = Is32Bit ? AArch64::CSINCWr : AArch64::CSINCXr;
1281 False = ZReg;
1282 return true;
1283 }
1284
1285 if (F == -1) {
1286 // G_SELECT cc, t, -1 -> CSINC t, zreg, cc
1287 Opc = Is32Bit ? AArch64::CSINVWr : AArch64::CSINVXr;
1288 False = ZReg;
1289 return true;
1290 }
1291 }
1292 return false;
1293 };
1294
1295 Optimized |= TryFoldBinOpIntoSelect(False, True, /*Invert = */ false);
1296 Optimized |= TryFoldBinOpIntoSelect(True, False, /*Invert = */ true);
1297 Optimized |= TryOptSelectCst();
1298 auto SelectInst = MIB.buildInstr(Opc, {Dst}, {True, False}).addImm(CC);
1299 constrainSelectedInstRegOperands(*SelectInst, TII, TRI, RBI);
1300 return &*SelectInst;
1301}
1302
1303static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
1304 switch (P) {
1305 default:
1306 llvm_unreachable("Unknown condition code!");
1307 case CmpInst::ICMP_NE:
1308 return AArch64CC::NE;
1309 case CmpInst::ICMP_EQ:
1310 return AArch64CC::EQ;
1311 case CmpInst::ICMP_SGT:
1312 return AArch64CC::GT;
1313 case CmpInst::ICMP_SGE:
1314 return AArch64CC::GE;
1315 case CmpInst::ICMP_SLT:
1316 return AArch64CC::LT;
1317 case CmpInst::ICMP_SLE:
1318 return AArch64CC::LE;
1319 case CmpInst::ICMP_UGT:
1320 return AArch64CC::HI;
1321 case CmpInst::ICMP_UGE:
1322 return AArch64CC::HS;
1323 case CmpInst::ICMP_ULT:
1324 return AArch64CC::LO;
1325 case CmpInst::ICMP_ULE:
1326 return AArch64CC::LS;
1327 }
1328}
1329
1330/// changeFPCCToORAArch64CC - Convert an IR fp condition code to an AArch64 CC.
1331static void changeFPCCToORAArch64CC(CmpInst::Predicate CC,
1332 AArch64CC::CondCode &CondCode,
1333 AArch64CC::CondCode &CondCode2) {
1334 CondCode2 = AArch64CC::AL;
1335 switch (CC) {
1336 default:
1337 llvm_unreachable("Unknown FP condition!");
1338 case CmpInst::FCMP_OEQ:
1339 CondCode = AArch64CC::EQ;
1340 break;
1341 case CmpInst::FCMP_OGT:
1342 CondCode = AArch64CC::GT;
1343 break;
1344 case CmpInst::FCMP_OGE:
1345 CondCode = AArch64CC::GE;
1346 break;
1347 case CmpInst::FCMP_OLT:
1348 CondCode = AArch64CC::MI;
1349 break;
1350 case CmpInst::FCMP_OLE:
1351 CondCode = AArch64CC::LS;
1352 break;
1353 case CmpInst::FCMP_ONE:
1354 CondCode = AArch64CC::MI;
1355 CondCode2 = AArch64CC::GT;
1356 break;
1357 case CmpInst::FCMP_ORD:
1358 CondCode = AArch64CC::VC;
1359 break;
1360 case CmpInst::FCMP_UNO:
1361 CondCode = AArch64CC::VS;
1362 break;
1363 case CmpInst::FCMP_UEQ:
1364 CondCode = AArch64CC::EQ;
1365 CondCode2 = AArch64CC::VS;
1366 break;
1367 case CmpInst::FCMP_UGT:
1368 CondCode = AArch64CC::HI;
1369 break;
1370 case CmpInst::FCMP_UGE:
1371 CondCode = AArch64CC::PL;
1372 break;
1373 case CmpInst::FCMP_ULT:
1374 CondCode = AArch64CC::LT;
1375 break;
1376 case CmpInst::FCMP_ULE:
1377 CondCode = AArch64CC::LE;
1378 break;
1379 case CmpInst::FCMP_UNE:
1380 CondCode = AArch64CC::NE;
1381 break;
1382 }
1383}
1384
1385/// Convert an IR fp condition code to an AArch64 CC.
1386/// This differs from changeFPCCToAArch64CC in that it returns cond codes that
1387/// should be AND'ed instead of OR'ed.
1388static void changeFCMPPredToANDAArch64CC(CmpInst::Predicate CC,
1389 AArch64CC::CondCode &CondCode,
1390 AArch64CC::CondCode &CondCode2) {
1391 CondCode2 = AArch64CC::AL;
1392 switch (CC) {
1393 default:
1394 changeFPCCToORAArch64CC(CC, CondCode, CondCode2);
1395 assert(CondCode2 == AArch64CC::AL);
1396 break;
1397 case CmpInst::FCMP_ONE:
1398 // (a one b)
1399 // == ((a olt b) || (a ogt b))
1400 // == ((a ord b) && (a une b))
1401 CondCode = AArch64CC::VC;
1402 CondCode2 = AArch64CC::NE;
1403 break;
1404 case CmpInst::FCMP_UEQ:
1405 // (a ueq b)
1406 // == ((a uno b) || (a oeq b))
1407 // == ((a ule b) && (a uge b))
1408 CondCode = AArch64CC::PL;
1409 CondCode2 = AArch64CC::LE;
1410 break;
1411 }
1412}
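// Note (editor annotation, not in the original source): the OR'ed and AND'ed
// decompositions differ only for the two-condition predicates. For FCMP_ONE,
// changeFPCCToORAArch64CC produces (MI, GT), which selectCompareBranchFedByFCmp
// lowers to two Bcc branches, whereas the AND'ed form above produces (VC, NE)
// for use by the CCMP-based conjunction emitters.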
1413
1414/// Return a register which can be used as a bit to test in a TB(N)Z.
1415static Register getTestBitReg(Register Reg, uint64_t &Bit, bool &Invert,
1416 MachineRegisterInfo &MRI) {
1417 assert(Reg.isValid() && "Expected valid register!");
1418 bool HasZext = false;
1419 while (MachineInstr *MI = getDefIgnoringCopies(Reg, MRI)) {
1420 unsigned Opc = MI->getOpcode();
1421
1422 if (!MI->getOperand(0).isReg() ||
1423 !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
1424 break;
1425
1426 // (tbz (any_ext x), b) -> (tbz x, b) if we don't use the extended bits.
1427 //
1428 // (tbz (trunc x), b) -> (tbz x, b) is always safe, because the bit number
1429 // on the truncated x is the same as the bit number on x.
1430 if (Opc == TargetOpcode::G_ANYEXT || Opc == TargetOpcode::G_ZEXT ||
1431 Opc == TargetOpcode::G_TRUNC) {
1432 if (Opc == TargetOpcode::G_ZEXT)
1433 HasZext = true;
1434
1435 Register NextReg = MI->getOperand(1).getReg();
1436 // Did we find something worth folding?
1437 if (!NextReg.isValid() || !MRI.hasOneNonDBGUse(NextReg))
1438 break;
1439
1440 // NextReg is worth folding. Keep looking.
1441 Reg = NextReg;
1442 continue;
1443 }
1444
1445 // Attempt to find a suitable operation with a constant on one side.
1446 std::optional<uint64_t> C;
1447 Register TestReg;
1448 switch (Opc) {
1449 default:
1450 break;
1451 case TargetOpcode::G_AND:
1452 case TargetOpcode::G_XOR: {
1453 TestReg = MI->getOperand(1).getReg();
1454 Register ConstantReg = MI->getOperand(2).getReg();
1455 auto VRegAndVal = getIConstantVRegValWithLookThrough(ConstantReg, MRI);
1456 if (!VRegAndVal) {
1457 // AND commutes, check the other side for a constant.
1458 // FIXME: Can we canonicalize the constant so that it's always on the
1459 // same side at some point earlier?
1460 std::swap(ConstantReg, TestReg);
1461 VRegAndVal = getIConstantVRegValWithLookThrough(ConstantReg, MRI);
1462 }
1463 if (VRegAndVal) {
1464 if (HasZext)
1465 C = VRegAndVal->Value.getZExtValue();
1466 else
1467 C = VRegAndVal->Value.getSExtValue();
1468 }
1469 break;
1470 }
1471 case TargetOpcode::G_ASHR:
1472 case TargetOpcode::G_LSHR:
1473 case TargetOpcode::G_SHL: {
1474 TestReg = MI->getOperand(1).getReg();
1475 auto VRegAndVal =
1476 getIConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI);
1477 if (VRegAndVal)
1478 C = VRegAndVal->Value.getSExtValue();
1479 break;
1480 }
1481 }
1482
1483 // Didn't find a constant or viable register. Bail out of the loop.
1484 if (!C || !TestReg.isValid())
1485 break;
1486
1487 // We found a suitable instruction with a constant. Check to see if we can
1488 // walk through the instruction.
1489 Register NextReg;
1490 unsigned TestRegSize = MRI.getType(TestReg).getSizeInBits();
1491 switch (Opc) {
1492 default:
1493 break;
1494 case TargetOpcode::G_AND:
1495 // (tbz (and x, m), b) -> (tbz x, b) when the b-th bit of m is set.
1496 if ((*C >> Bit) & 1)
1497 NextReg = TestReg;
1498 break;
1499 case TargetOpcode::G_SHL:
1500 // (tbz (shl x, c), b) -> (tbz x, b-c) when b-c is positive and fits in
1501 // the type of the register.
1502 if (*C <= Bit && (Bit - *C) < TestRegSize) {
1503 NextReg = TestReg;
1504 Bit = Bit - *C;
1505 }
1506 break;
1507 case TargetOpcode::G_ASHR:
1508 // (tbz (ashr x, c), b) -> (tbz x, b+c) or (tbz x, msb) if b+c is > # bits
1509 // in x
1510 NextReg = TestReg;
1511 Bit = Bit + *C;
1512 if (Bit >= TestRegSize)
1513 Bit = TestRegSize - 1;
1514 break;
1515 case TargetOpcode::G_LSHR:
1516 // (tbz (lshr x, c), b) -> (tbz x, b+c) when b + c is < # bits in x
1517 if ((Bit + *C) < TestRegSize) {
1518 NextReg = TestReg;
1519 Bit = Bit + *C;
1520 }
1521 break;
1522 case TargetOpcode::G_XOR:
1523 // We can walk through a G_XOR by inverting whether we use tbz/tbnz when
1524 // appropriate.
1525 //
1526 // e.g. If x' = xor x, c, and the b-th bit is set in c then
1527 //
1528 // tbz x', b -> tbnz x, b
1529 //
1530 // Because x' only has the b-th bit set if x does not.
1531 if ((*C >> Bit) & 1)
1532 Invert = !Invert;
1533 NextReg = TestReg;
1534 break;
1535 }
1536
1537 // Check if we found anything worth folding.
1538 if (!NextReg.isValid())
1539 return Reg;
1540 Reg = NextReg;
1541 }
1542
1543 return Reg;
1544}
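// Worked example (editor annotation, not in the original source), using
// hypothetical vregs: when asked to test bit 3 of %t where %t = G_SHL %x, 2,
// the G_SHL case above rewrites the test to bit 1 (3 - 2) of %x; if %x were in
// turn %x = G_XOR %y, %m with bit 1 of the constant %m set, Invert would be
// flipped so that a TBZ on %t becomes a TBNZ on %y.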
1545
1546MachineInstr *AArch64InstructionSelector::emitTestBit(
1547 Register TestReg, uint64_t Bit, bool IsNegative, MachineBasicBlock *DstMBB,
1548 MachineIRBuilder &MIB) const {
1549 assert(TestReg.isValid());
1550 assert(ProduceNonFlagSettingCondBr &&
1551 "Cannot emit TB(N)Z with speculation tracking!");
1552 MachineRegisterInfo &MRI = *MIB.getMRI();
1553
1554 // Attempt to optimize the test bit by walking over instructions.
1555 TestReg = getTestBitReg(TestReg, Bit, IsNegative, MRI);
1556 LLT Ty = MRI.getType(TestReg);
1557 unsigned Size = Ty.getSizeInBits();
1558 assert(!Ty.isVector() && "Expected a scalar!");
1559 assert(Bit < 64 && "Bit is too large!");
1560
1561 // When the test register is a 64-bit register, we have to narrow to make
1562 // TBNZW work.
1563 bool UseWReg = Bit < 32;
1564 unsigned NecessarySize = UseWReg ? 32 : 64;
1565 if (Size != NecessarySize)
1566 TestReg = moveScalarRegClass(
1567 TestReg, UseWReg ? AArch64::GPR32RegClass : AArch64::GPR64RegClass,
1568 MIB);
1569
1570 static const unsigned OpcTable[2][2] = {{AArch64::TBZX, AArch64::TBNZX},
1571 {AArch64::TBZW, AArch64::TBNZW}};
1572 unsigned Opc = OpcTable[UseWReg][IsNegative];
1573 auto TestBitMI =
1574 MIB.buildInstr(Opc).addReg(TestReg).addImm(Bit).addMBB(DstMBB);
1575 constrainSelectedInstRegOperands(*TestBitMI, TII, TRI, RBI);
1576 return &*TestBitMI;
1577}
1578
1579bool AArch64InstructionSelector::tryOptAndIntoCompareBranch(
1580 MachineInstr &AndInst, bool Invert, MachineBasicBlock *DstMBB,
1581 MachineIRBuilder &MIB) const {
1582 assert(AndInst.getOpcode() == TargetOpcode::G_AND && "Expected G_AND only?");
1583 // Given something like this:
1584 //
1585 // %x = ...Something...
1586 // %one = G_CONSTANT i64 1
1587 // %zero = G_CONSTANT i64 0
1588 // %and = G_AND %x, %one
1589 // %cmp = G_ICMP intpred(ne), %and, %zero
1590 // %cmp_trunc = G_TRUNC %cmp
1591 // G_BRCOND %cmp_trunc, %bb.3
1592 //
1593 // We want to try and fold the AND into the G_BRCOND and produce either a
1594 // TBNZ (when we have intpred(ne)) or a TBZ (when we have intpred(eq)).
1595 //
1596 // In this case, we'd get
1597 //
1598 // TBNZ %x %bb.3
1599 //
1600
1601 // Check if the AND has a constant on its RHS which we can use as a mask.
1602 // If it's a power of 2, then it's the same as checking a specific bit.
1603 // (e.g, ANDing with 8 == ANDing with 000...100 == testing if bit 3 is set)
1604 auto MaybeBit = getIConstantVRegValWithLookThrough(
1605 AndInst.getOperand(2).getReg(), *MIB.getMRI());
1606 if (!MaybeBit)
1607 return false;
1608
1609 int32_t Bit = MaybeBit->Value.exactLogBase2();
1610 if (Bit < 0)
1611 return false;
1612
1613 Register TestReg = AndInst.getOperand(1).getReg();
1614
1615 // Emit a TB(N)Z.
1616 emitTestBit(TestReg, Bit, Invert, DstMBB, MIB);
1617 return true;
1618}
1619
1620MachineInstr *AArch64InstructionSelector::emitCBZ(Register CompareReg,
1621 bool IsNegative,
1622 MachineBasicBlock *DestMBB,
1623 MachineIRBuilder &MIB) const {
1624 assert(ProduceNonFlagSettingCondBr && "CBZ does not set flags!");
1625 MachineRegisterInfo &MRI = *MIB.getMRI();
1626 assert(RBI.getRegBank(CompareReg, MRI, TRI)->getID() ==
1627 AArch64::GPRRegBankID &&
1628 "Expected GPRs only?");
1629 auto Ty = MRI.getType(CompareReg);
1630 unsigned Width = Ty.getSizeInBits();
1631 assert(!Ty.isVector() && "Expected scalar only?");
1632 assert(Width <= 64 && "Expected width to be at most 64?");
1633 static const unsigned OpcTable[2][2] = {{AArch64::CBZW, AArch64::CBZX},
1634 {AArch64::CBNZW, AArch64::CBNZX}};
1635 unsigned Opc = OpcTable[IsNegative][Width == 64];
1636 auto BranchMI = MIB.buildInstr(Opc, {}, {CompareReg}).addMBB(DestMBB);
1637 constrainSelectedInstRegOperands(*BranchMI, TII, TRI, RBI);
1638 return &*BranchMI;
1639}
1640
1641bool AArch64InstructionSelector::selectCompareBranchFedByFCmp(
1642 MachineInstr &I, MachineInstr &FCmp, MachineIRBuilder &MIB) const {
1643 assert(FCmp.getOpcode() == TargetOpcode::G_FCMP);
1644 assert(I.getOpcode() == TargetOpcode::G_BRCOND);
1645 // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't
1646 // totally clean. Some of them require two branches to implement.
1647 auto Pred = (CmpInst::Predicate)FCmp.getOperand(1).getPredicate();
1648 emitFPCompare(FCmp.getOperand(2).getReg(), FCmp.getOperand(3).getReg(), MIB,
1649 Pred);
1650 AArch64CC::CondCode CC1, CC2;
1651 changeFCMPPredToAArch64CC(static_cast<CmpInst::Predicate>(Pred), CC1, CC2);
1652 MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
1653 MIB.buildInstr(AArch64::Bcc, {}, {}).addImm(CC1).addMBB(DestMBB);
1654 if (CC2 != AArch64CC::AL)
1655 MIB.buildInstr(AArch64::Bcc, {}, {}).addImm(CC2).addMBB(DestMBB);
1656 I.eraseFromParent();
1657 return true;
1658}
1659
1660bool AArch64InstructionSelector::tryOptCompareBranchFedByICmp(
1661 MachineInstr &I, MachineInstr &ICmp, MachineIRBuilder &MIB) const {
1662 assert(ICmp.getOpcode() == TargetOpcode::G_ICMP);
1663 assert(I.getOpcode() == TargetOpcode::G_BRCOND);
1664 // Attempt to optimize the G_BRCOND + G_ICMP into a TB(N)Z/CB(N)Z.
1665 //
1666 // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z
1667 // instructions will not be produced, as they are conditional branch
1668 // instructions that do not set flags.
1669 if (!ProduceNonFlagSettingCondBr)
1670 return false;
1671
1672 MachineRegisterInfo &MRI = *MIB.getMRI();
1673 MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
1674 auto Pred =
1675 static_cast<CmpInst::Predicate>(ICmp.getOperand(1).getPredicate());
1676 Register LHS = ICmp.getOperand(2).getReg();
1677 Register RHS = ICmp.getOperand(3).getReg();
1678
1679 // We're allowed to emit a TB(N)Z/CB(N)Z. Try to do that.
1680 auto VRegAndVal = getIConstantVRegValWithLookThrough(RHS, MRI);
1681 MachineInstr *AndInst = getOpcodeDef(TargetOpcode::G_AND, LHS, MRI);
1682
1683 // When we can emit a TB(N)Z, prefer that.
1684 //
1685 // Handle non-commutative condition codes first.
1686 // Note that we don't want to do this when we have a G_AND because it can
1687 // become a tst. The tst will make the test bit in the TB(N)Z redundant.
1688 if (VRegAndVal && !AndInst) {
1689 int64_t C = VRegAndVal->Value.getSExtValue();
1690
1691 // When we have a signed greater-than comparison against -1, we can just test
1692 // if the msb is zero.
1693 if (C == -1 && Pred == CmpInst::ICMP_SGT) {
1694 uint64_t Bit = MRI.getType(LHS).getSizeInBits() - 1;
1695 emitTestBit(LHS, Bit, /*IsNegative = */ false, DestMBB, MIB);
1696 I.eraseFromParent();
1697 return true;
1698 }
1699
1700 // When we have a signed less-than-zero comparison, we can just test if the
1701 // msb is not zero.
1702 if (C == 0 && Pred == CmpInst::ICMP_SLT) {
1703 uint64_t Bit = MRI.getType(LHS).getSizeInBits() - 1;
1704 emitTestBit(LHS, Bit, /*IsNegative = */ true, DestMBB, MIB);
1705 I.eraseFromParent();
1706 return true;
1707 }
1708
1709 // Inversely, if we have a signed greater-than-or-equal comparison to zero,
1710 // we can test if the msb is zero.
1711 if (C == 0 && Pred == CmpInst::ICMP_SGE) {
1712 uint64_t Bit = MRI.getType(LHS).getSizeInBits() - 1;
1713 emitTestBit(LHS, Bit, /*IsNegative = */ false, DestMBB, MIB);
1714 I.eraseFromParent();
1715 return true;
1716 }
1717 }
1718
1719 // Attempt to handle commutative condition codes. Right now, that's only
1720 // eq/ne.
1721 if (ICmpInst::isEquality(Pred)) {
1722 if (!VRegAndVal) {
1723 std::swap(RHS, LHS);
1724 VRegAndVal = getIConstantVRegValWithLookThrough(RHS, MRI);
1725 AndInst = getOpcodeDef(TargetOpcode::G_AND, LHS, MRI);
1726 }
1727
1728 if (VRegAndVal && VRegAndVal->Value == 0) {
1729 // If there's a G_AND feeding into this branch, try to fold it away by
1730 // emitting a TB(N)Z instead.
1731 //
1732 // Note: If we have LT, then it *is* possible to fold, but it wouldn't be
1733 // beneficial. When we have an AND and LT, we need a TST/ANDS, so folding
1734 // would be redundant.
1735 if (AndInst &&
1736 tryOptAndIntoCompareBranch(
1737 *AndInst, /*Invert = */ Pred == CmpInst::ICMP_NE, DestMBB, MIB)) {
1738 I.eraseFromParent();
1739 return true;
1740 }
1741
1742 // Otherwise, try to emit a CB(N)Z instead.
1743 auto LHSTy = MRI.getType(LHS);
1744 if (!LHSTy.isVector() && LHSTy.getSizeInBits() <= 64) {
1745 emitCBZ(LHS, /*IsNegative = */ Pred == CmpInst::ICMP_NE, DestMBB, MIB);
1746 I.eraseFromParent();
1747 return true;
1748 }
1749 }
1750 }
1751
1752 return false;
1753}
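// To illustrate the cases above with a 64-bit %x (names are placeholders):
//   G_BRCOND (G_ICMP slt %x, 0)  -> TBNZ %x, #63  (msb set)
//   G_BRCOND (G_ICMP sge %x, 0)  -> TBZ  %x, #63  (msb clear)
//   G_BRCOND (G_ICMP eq  %x, 0)  -> CBZ  %x       (no feeding G_AND)
//   G_BRCOND (G_ICMP ne  %x, 0)  -> CBNZ %x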
1754
1755bool AArch64InstructionSelector::selectCompareBranchFedByICmp(
1756 MachineInstr &I, MachineInstr &ICmp, MachineIRBuilder &MIB) const {
1757 assert(ICmp.getOpcode() == TargetOpcode::G_ICMP);
1758 assert(I.getOpcode() == TargetOpcode::G_BRCOND);
1759 if (tryOptCompareBranchFedByICmp(I, ICmp, MIB))
1760 return true;
1761
1762 // Couldn't optimize. Emit a compare + a Bcc.
1763 MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
1764 auto PredOp = ICmp.getOperand(1);
1765 emitIntegerCompare(ICmp.getOperand(2), ICmp.getOperand(3), PredOp, MIB);
1766 const AArch64CC::CondCode CC = changeICMPPredToAArch64CC(
1767 static_cast<CmpInst::Predicate>(PredOp.getPredicate()));
1768 MIB.buildInstr(AArch64::Bcc, {}, {}).addImm(CC).addMBB(DestMBB);
1769 I.eraseFromParent();
1770 return true;
1771}
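// When none of the optimizations apply, e.g. for G_BRCOND (G_ICMP sgt %a, %b),
// this falls back to a flag-setting integer compare followed by a conditional
// branch, roughly "SUBS; B.GT %bb" (register and block names are placeholders).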
1772
1773bool AArch64InstructionSelector::selectCompareBranch(
1774 MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) {
1775 Register CondReg = I.getOperand(0).getReg();
1776 MachineInstr *CCMI = MRI.getVRegDef(CondReg);
1777 // Try to select the G_BRCOND using whatever is feeding the condition if
1778 // possible.
1779 unsigned CCMIOpc = CCMI->getOpcode();
1780 if (CCMIOpc == TargetOpcode::G_FCMP)
1781 return selectCompareBranchFedByFCmp(I, *CCMI, MIB);
1782 if (CCMIOpc == TargetOpcode::G_ICMP)
1783 return selectCompareBranchFedByICmp(I, *CCMI, MIB);
1784
1785 // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z
1786 // instructions will not be produced, as they are conditional branch
1787 // instructions that do not set flags.
1788 if (ProduceNonFlagSettingCondBr) {
1789 emitTestBit(CondReg, /*Bit = */ 0, /*IsNegative = */ true,
1790 I.getOperand(1).getMBB(), MIB);
1791 I.eraseFromParent();
1792 return true;
1793 }
1794
1795 // Can't emit TB(N)Z/CB(N)Z. Emit a tst + bcc instead.
1796 auto TstMI =
1797 MIB.buildInstr(AArch64::ANDSWri, {LLT::scalar(32)}, {CondReg}).addImm(1);
1798 constrainSelectedInstRegOperands(*TstMI, TII, TRI, RBI);
1799 auto Bcc = MIB.buildInstr(AArch64::Bcc)
1800 .addImm(AArch64CC::NE)
1801 .addMBB(I.getOperand(1).getMBB());
1802 I.eraseFromParent();
1803 return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
1804}
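// In the fallback above the boolean condition itself is tested with a
// flag-setting ANDS (a TST of the condition register) and the branch is taken
// when the result is non-zero, i.e. the Bcc uses the NE condition.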
1805
1806/// Returns the element immediate value of a vector shift operand if found.
1807/// This needs to detect a splat-like operation, e.g. a G_BUILD_VECTOR.
1808static std::optional<int64_t> getVectorShiftImm(Register Reg,
1809 MachineRegisterInfo &MRI) {
1810 assert(MRI.getType(Reg).isVector() && "Expected a *vector* shift operand");
1811 MachineInstr *OpMI = MRI.getVRegDef(Reg);
1812 return getAArch64VectorSplatScalar(*OpMI, MRI);
1813}
1814
1815/// Matches and returns the shift immediate value for a SHL instruction given
1816/// a shift operand.
1817static std::optional<int64_t> getVectorSHLImm(LLT SrcTy, Register Reg,
1818 MachineRegisterInfo &MRI) {
1819 std::optional<int64_t> ShiftImm = getVectorShiftImm(Reg, MRI);
1820 if (!ShiftImm)
1821 return std::nullopt;
1822 // Check the immediate is in range for a SHL.
1823 int64_t Imm = *ShiftImm;
1824 if (Imm < 0)
1825 return std::nullopt;
1826 switch (SrcTy.getElementType().getSizeInBits()) {
1827 default:
1828 LLVM_DEBUG(dbgs() << "Unhandled element type for vector shift");
1829 return std::nullopt;
1830 case 8:
1831 if (Imm > 7)
1832 return std::nullopt;
1833 break;
1834 case 16:
1835 if (Imm > 15)
1836 return std::nullopt;
1837 break;
1838 case 32:
1839 if (Imm > 31)
1840 return std::nullopt;
1841 break;
1842 case 64:
1843 if (Imm > 63)
1844 return std::nullopt;
1845 break;
1846 }
1847 return Imm;
1848}
1849
1850bool AArch64InstructionSelector::selectVectorSHL(MachineInstr &I,
1851 MachineRegisterInfo &MRI) {
1852 assert(I.getOpcode() == TargetOpcode::G_SHL);
1853 Register DstReg = I.getOperand(0).getReg();
1854 const LLT Ty = MRI.getType(DstReg);
1855 Register Src1Reg = I.getOperand(1).getReg();
1856 Register Src2Reg = I.getOperand(2).getReg();
1857
1858 if (!Ty.isVector())
1859 return false;
1860
1861 // Check if we have a vector of constants on RHS that we can select as the
1862 // immediate form.
1863 std::optional<int64_t> ImmVal = getVectorSHLImm(Ty, Src2Reg, MRI);
1864
1865 unsigned Opc = 0;
1866 if (Ty == LLT::fixed_vector(2, 64)) {
1867 Opc = ImmVal ? AArch64::SHLv2i64_shift : AArch64::USHLv2i64;
1868 } else if (Ty == LLT::fixed_vector(4, 32)) {
1869 Opc = ImmVal ? AArch64::SHLv4i32_shift : AArch64::USHLv4i32;
1870 } else if (Ty == LLT::fixed_vector(2, 32)) {
1871 Opc = ImmVal ? AArch64::SHLv2i32_shift : AArch64::USHLv2i32;
1872 } else if (Ty == LLT::fixed_vector(4, 16)) {
1873 Opc = ImmVal ? AArch64::SHLv4i16_shift : AArch64::USHLv4i16;
1874 } else if (Ty == LLT::fixed_vector(8, 16)) {
1875 Opc = ImmVal ? AArch64::SHLv8i16_shift : AArch64::USHLv8i16;
1876 } else if (Ty == LLT::fixed_vector(16, 8)) {
1877 Opc = ImmVal ? AArch64::SHLv16i8_shift : AArch64::USHLv16i8;
1878 } else if (Ty == LLT::fixed_vector(8, 8)) {
1879 Opc = ImmVal ? AArch64::SHLv8i8_shift : AArch64::USHLv8i8;
1880 } else {
1881 LLVM_DEBUG(dbgs() << "Unhandled G_SHL type");
1882 return false;
1883 }
1884
1885 auto Shl = MIB.buildInstr(Opc, {DstReg}, {Src1Reg});
1886 if (ImmVal)
1887 Shl.addImm(*ImmVal);
1888 else
1889 Shl.addUse(Src2Reg);
1890 constrainSelectedInstRegOperands(*Shl, TII, TRI, RBI);
1891 I.eraseFromParent();
1892 return true;
1893}
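// For instance, a <4 x s32> G_SHL whose shift operand is a constant splat of 3
// is expected to become "SHLv4i32_shift %src, #3", while a non-constant shift
// amount falls back to the register form "USHLv4i32 %src, %amt".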
1894
1895bool AArch64InstructionSelector::selectVectorAshrLshr(
1896 MachineInstr &I, MachineRegisterInfo &MRI) {
1897 assert(I.getOpcode() == TargetOpcode::G_ASHR ||
1898 I.getOpcode() == TargetOpcode::G_LSHR);
1899 Register DstReg = I.getOperand(0).getReg();
1900 const LLT Ty = MRI.getType(DstReg);
1901 Register Src1Reg = I.getOperand(1).getReg();
1902 Register Src2Reg = I.getOperand(2).getReg();
1903
1904 if (!Ty.isVector())
1905 return false;
1906
1907 bool IsASHR = I.getOpcode() == TargetOpcode::G_ASHR;
1908
1909 // We expect the immediate case to be lowered in the PostLegalCombiner to
1910 // AArch64ISD::VASHR or AArch64ISD::VLSHR equivalents.
1911
1912 // There is no shift-right-by-register instruction, but the shift-left-by-
1913 // register instruction takes a signed value, where a negative amount
1914 // specifies a right shift.
1915
1916 unsigned Opc = 0;
1917 unsigned NegOpc = 0;
1918 const TargetRegisterClass *RC =
1919 getRegClassForTypeOnBank(Ty, RBI.getRegBank(AArch64::FPRRegBankID));
1920 if (Ty == LLT::fixed_vector(2, 64)) {
1921 Opc = IsASHR ? AArch64::SSHLv2i64 : AArch64::USHLv2i64;
1922 NegOpc = AArch64::NEGv2i64;
1923 } else if (Ty == LLT::fixed_vector(4, 32)) {
1924 Opc = IsASHR ? AArch64::SSHLv4i32 : AArch64::USHLv4i32;
1925 NegOpc = AArch64::NEGv4i32;
1926 } else if (Ty == LLT::fixed_vector(2, 32)) {
1927 Opc = IsASHR ? AArch64::SSHLv2i32 : AArch64::USHLv2i32;
1928 NegOpc = AArch64::NEGv2i32;
1929 } else if (Ty == LLT::fixed_vector(4, 16)) {
1930 Opc = IsASHR ? AArch64::SSHLv4i16 : AArch64::USHLv4i16;
1931 NegOpc = AArch64::NEGv4i16;
1932 } else if (Ty == LLT::fixed_vector(8, 16)) {
1933 Opc = IsASHR ? AArch64::SSHLv8i16 : AArch64::USHLv8i16;
1934 NegOpc = AArch64::NEGv8i16;
1935 } else if (Ty == LLT::fixed_vector(16, 8)) {
1936 Opc = IsASHR ? AArch64::SSHLv16i8 : AArch64::USHLv16i8;
1937 NegOpc = AArch64::NEGv16i8;
1938 } else if (Ty == LLT::fixed_vector(8, 8)) {
1939 Opc = IsASHR ? AArch64::SSHLv8i8 : AArch64::USHLv8i8;
1940 NegOpc = AArch64::NEGv8i8;
1941 } else {
1942 LLVM_DEBUG(dbgs() << "Unhandled G_ASHR type");
1943 return false;
1944 }
1945
1946 auto Neg = MIB.buildInstr(NegOpc, {RC}, {Src2Reg});
1947 constrainSelectedInstRegOperands(*Neg, TII, TRI, RBI);
1948 auto SShl = MIB.buildInstr(Opc, {DstReg}, {Src1Reg, Neg});
1949 constrainSelectedInstRegOperands(*SShl, TII, TRI, RBI);
1950 I.eraseFromParent();
1951 return true;
1952}
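// e.g. a <2 x s64> G_ASHR by a register amount is selected as a NEGv2i64 of
// the shift amount followed by SSHLv2i64 (USHLv2i64 for G_LSHR), since
// SSHL/USHL shift left for positive amounts and right for negative ones.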
1953
1954bool AArch64InstructionSelector::selectVaStartAAPCS(
1955 MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
1956 return false;
1957}
1958
1959bool AArch64InstructionSelector::selectVaStartDarwin(
1960 MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
1961 AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
1962 Register ListReg = I.getOperand(0).getReg();
1963
1964 Register ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
1965
1966 int FrameIdx = FuncInfo->getVarArgsStackIndex();
1967 if (MF.getSubtarget<AArch64Subtarget>().isCallingConvWin64(
1968 MF.getFunction().getCallingConv())) {
1969 FrameIdx = FuncInfo->getVarArgsGPRSize() > 0
1970 ? FuncInfo->getVarArgsGPRIndex()
1971 : FuncInfo->getVarArgsStackIndex();
1972 }
1973
1974 auto MIB =
1975 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri))
1976 .addDef(ArgsAddrReg)
1977 .addFrameIndex(FrameIdx)
1978 .addImm(0)
1979 .addImm(0);
1980
1981 constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1982
1983 MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::STRXui))
1984 .addUse(ArgsAddrReg)
1985 .addUse(ListReg)
1986 .addImm(0)
1987 .addMemOperand(*I.memoperands_begin());
1988
1989 constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1990 I.eraseFromParent();
1991 return true;
1992}
1993
1994void AArch64InstructionSelector::materializeLargeCMVal(
1995 MachineInstr &I, const Value *V, unsigned OpFlags) {
1996 MachineBasicBlock &MBB = *I.getParent();
1997 MachineFunction &MF = *MBB.getParent();
1998 MachineRegisterInfo &MRI = MF.getRegInfo();
1999
2000 auto MovZ = MIB.buildInstr(AArch64::MOVZXi, {&AArch64::GPR64RegClass}, {});
2001 MovZ->addOperand(MF, I.getOperand(1));
2002 MovZ->getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_G0 |
2003 AArch64II::MO_NC);
2004 MovZ->addOperand(MF, MachineOperand::CreateImm(0));
2005 constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI);
2006
2007 auto BuildMovK = [&](Register SrcReg, unsigned char Flags, unsigned Offset,
2008 Register ForceDstReg) {
2009 Register DstReg = ForceDstReg
2010 ? ForceDstReg
2011 : MRI.createVirtualRegister(&AArch64::GPR64RegClass);
2012 auto MovI = MIB.buildInstr(AArch64::MOVKXi).addDef(DstReg).addUse(SrcReg);
2013 if (auto *GV = dyn_cast<GlobalValue>(V)) {
2014 MovI->addOperand(MF, MachineOperand::CreateGA(
2015 GV, MovZ->getOperand(1).getOffset(), Flags));
2016 } else {
2017 MovI->addOperand(
2018 MF, MachineOperand::CreateBA(cast<BlockAddress>(V),
2019 MovZ->getOperand(1).getOffset(), Flags));
2020 }
2021 MovI->addOperand(MF, MachineOperand::CreateImm(Offset));
2022 constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI);
2023 return DstReg;
2024 };
2025 Register DstReg = BuildMovK(MovZ.getReg(0),
2026 AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0);
2027 DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0);
2028 BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg());
2029}
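// The net effect is the usual large-code-model materialization: a MOVZ of the
// low 16 bits of the symbol followed by three MOVKs patching bits 16-31,
// 32-47 and 48-63 (the g1/g2/g3 chunks), the last one writing the final
// destination register.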
2030
2031bool AArch64InstructionSelector::preISelLower(MachineInstr &I) {
2032 MachineBasicBlock &MBB = *I.getParent();
2033 MachineFunction &MF = *MBB.getParent();
2034 MachineRegisterInfo &MRI = MF.getRegInfo();
2035
2036 switch (I.getOpcode()) {
2037 case TargetOpcode::G_STORE: {
2038 bool Changed = contractCrossBankCopyIntoStore(I, MRI);
2039 MachineOperand &SrcOp = I.getOperand(0);
2040 if (MRI.getType(SrcOp.getReg()).isPointer()) {
2041 // Allow matching with imported patterns for stores of pointers. Unlike
2042 // G_LOAD/G_PTR_ADD, we may not have selected all users. So, emit a copy
2043 // and constrain.
2044 auto Copy = MIB.buildCopy(LLT::scalar(64), SrcOp);
2045 Register NewSrc = Copy.getReg(0);
2046 SrcOp.setReg(NewSrc);
2047 RBI.constrainGenericRegister(NewSrc, AArch64::GPR64RegClass, MRI);
2048 Changed = true;
2049 }
2050 return Changed;
2051 }
2052 case TargetOpcode::G_PTR_ADD:
2053 return convertPtrAddToAdd(I, MRI);
2054 case TargetOpcode::G_LOAD: {
2055 // For scalar loads of pointers, we try to convert the dest type from p0
2056 // to s64 so that our imported patterns can match. Like with the G_PTR_ADD
2057 // conversion, this should be ok because all users should have been
2058 // selected already, so the type doesn't matter for them.
2059 Register DstReg = I.getOperand(0).getReg();
2060 const LLT DstTy = MRI.getType(DstReg);
2061 if (!DstTy.isPointer())
2062 return false;
2063 MRI.setType(DstReg, LLT::scalar(64));
2064 return true;
2065 }
2066 case AArch64::G_DUP: {
2067 // Convert the type from p0 to s64 to help selection.
2068 LLT DstTy = MRI.getType(I.getOperand(0).getReg());
2069 if (!DstTy.getElementType().isPointer())
2070 return false;
2071 auto NewSrc = MIB.buildCopy(LLT::scalar(64), I.getOperand(1).getReg());
2072 MRI.setType(I.getOperand(0).getReg(),
2073 DstTy.changeElementType(LLT::scalar(64)));
2074 MRI.setRegClass(NewSrc.getReg(0), &AArch64::GPR64RegClass);
2075 I.getOperand(1).setReg(NewSrc.getReg(0));
2076 return true;
2077 }
2078 case TargetOpcode::G_UITOFP:
2079 case TargetOpcode::G_SITOFP: {
2080 // If both source and destination regbanks are FPR, then convert the opcode
2081 // to G_SITOF so that the importer can select it to an fpr variant.
2082 // Otherwise, it ends up matching an fpr/gpr variant and adding a cross-bank
2083 // copy.
2084 Register SrcReg = I.getOperand(1).getReg();
2085 LLT SrcTy = MRI.getType(SrcReg);
2086 LLT DstTy = MRI.getType(I.getOperand(0).getReg());
2087 if (SrcTy.isVector() || SrcTy.getSizeInBits() != DstTy.getSizeInBits())
2088 return false;
2089
2090 if (RBI.getRegBank(SrcReg, MRI, TRI)->getID() == AArch64::FPRRegBankID) {
2091 if (I.getOpcode() == TargetOpcode::G_SITOFP)
2092 I.setDesc(TII.get(AArch64::G_SITOF));
2093 else
2094 I.setDesc(TII.get(AArch64::G_UITOF));
2095 return true;
2096 }
2097 return false;
2098 }
2099 default:
2100 return false;
2101 }
2102}
2103
2104/// This lowering tries to look for G_PTR_ADD instructions and then converts
2105/// them to a standard G_ADD with a COPY on the source.
2106///
2107/// The motivation behind this is to expose the add semantics to the imported
2108/// tablegen patterns. We shouldn't need to check for uses being loads/stores,
2109/// because the selector works bottom up, uses before defs. By the time we
2110 /// end up trying to select a G_PTR_ADD, we should already have attempted to
2111 /// fold it into an addressing mode and failed.
2112bool AArch64InstructionSelector::convertPtrAddToAdd(
2113 MachineInstr &I, MachineRegisterInfo &MRI) {
2114 assert(I.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
2115 Register DstReg = I.getOperand(0).getReg();
2116 Register AddOp1Reg = I.getOperand(1).getReg();
2117 const LLT PtrTy = MRI.getType(DstReg);
2118 if (PtrTy.getAddressSpace() != 0)
2119 return false;
2120
2121 const LLT CastPtrTy =
2122 PtrTy.isVector() ? LLT::fixed_vector(2, 64) : LLT::scalar(64);
2123 auto PtrToInt = MIB.buildPtrToInt(CastPtrTy, AddOp1Reg);
2124 // Set regbanks on the registers.
2125 if (PtrTy.isVector())
2126 MRI.setRegBank(PtrToInt.getReg(0), RBI.getRegBank(AArch64::FPRRegBankID));
2127 else
2128 MRI.setRegBank(PtrToInt.getReg(0), RBI.getRegBank(AArch64::GPRRegBankID));
2129
2130 // Now turn the %dst(p0) = G_PTR_ADD %base, off into:
2131 // %dst(intty) = G_ADD %intbase, off
2132 I.setDesc(TII.get(TargetOpcode::G_ADD));
2133 MRI.setType(DstReg, CastPtrTy);
2134 I.getOperand(1).setReg(PtrToInt.getReg(0));
2135 if (!select(*PtrToInt)) {
2136 LLVM_DEBUG(dbgs() << "Failed to select G_PTRTOINT in convertPtrAddToAdd");
2137 return false;
2138 }
2139
2140 // Also take the opportunity here to try to do some optimization.
2141 // Try to convert this into a G_SUB if the offset is a 0-x negate idiom.
2142 Register NegatedReg;
2143 if (!mi_match(I.getOperand(2).getReg(), MRI, m_Neg(m_Reg(NegatedReg))))
2144 return true;
2145 I.getOperand(2).setReg(NegatedReg);
2146 I.setDesc(TII.get(TargetOpcode::G_SUB));
2147 return true;
2148}
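// Roughly, "%d:_(p0) = G_PTR_ADD %base, %off" becomes
//   %b:_(s64) = G_PTRTOINT %base
//   %d:_(s64) = G_ADD %b, %off
// and, if %off was a (G_SUB 0, %x) negate idiom, the G_ADD is further turned
// into "%d:_(s64) = G_SUB %b, %x". (Virtual register names are placeholders.)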
2149
2150bool AArch64InstructionSelector::earlySelectSHL(MachineInstr &I,
2151 MachineRegisterInfo &MRI) {
2152 // We try to match the immediate variant of LSL, which is actually an alias
2153 // for a special case of UBFM. Otherwise, we fall back to the imported
2154 // selector which will match the register variant.
2155 assert(I.getOpcode() == TargetOpcode::G_SHL && "unexpected op");
2156 const auto &MO = I.getOperand(2);
2157 auto VRegAndVal = getIConstantVRegVal(MO.getReg(), MRI);
2158 if (!VRegAndVal)
2159 return false;
2160
2161 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
2162 if (DstTy.isVector())
2163 return false;
2164 bool Is64Bit = DstTy.getSizeInBits() == 64;
2165 auto Imm1Fn = Is64Bit ? selectShiftA_64(MO) : selectShiftA_32(MO);
2166 auto Imm2Fn = Is64Bit ? selectShiftB_64(MO) : selectShiftB_32(MO);
2167
2168 if (!Imm1Fn || !Imm2Fn)
2169 return false;
2170
2171 auto NewI =
2172 MIB.buildInstr(Is64Bit ? AArch64::UBFMXri : AArch64::UBFMWri,
2173 {I.getOperand(0).getReg()}, {I.getOperand(1).getReg()});
2174
2175 for (auto &RenderFn : *Imm1Fn)
2176 RenderFn(NewI);
2177 for (auto &RenderFn : *Imm2Fn)
2178 RenderFn(NewI);
2179
2180 I.eraseFromParent();
2181 return constrainSelectedInstRegOperands(*NewI, TII, TRI, RBI);
2182}
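// This relies on the standard LSL-as-UBFM alias: for a 32-bit value,
// "lsl w0, w1, #s" is "ubfm w0, w1, #((32 - s) % 32), #(31 - s)", which is the
// pair of immediates the selectShiftA/selectShiftB renderers are expected to
// produce here (64-bit shifts use the analogous UBFMXri form).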
2183
2184bool AArch64InstructionSelector::contractCrossBankCopyIntoStore(
2185 MachineInstr &I, MachineRegisterInfo &MRI) {
2186 assert(I.getOpcode() == TargetOpcode::G_STORE && "Expected G_STORE");
2187 // If we're storing a scalar, it doesn't matter what register bank that
2188 // scalar is on. All that matters is the size.
2189 //
2190 // So, if we see something like this (with a 32-bit scalar as an example):
2191 //
2192 // %x:gpr(s32) = ... something ...
2193 // %y:fpr(s32) = COPY %x:gpr(s32)
2194 // G_STORE %y:fpr(s32)
2195 //
2196 // We can fix this up into something like this:
2197 //
2198 // G_STORE %x:gpr(s32)
2199 //
2200 // And then continue the selection process normally.
2201 Register DefDstReg = getSrcRegIgnoringCopies(I.getOperand(0).getReg(), MRI);
2202 if (!DefDstReg.isValid())
2203 return false;
2204 LLT DefDstTy = MRI.getType(DefDstReg);
2205 Register StoreSrcReg = I.getOperand(0).getReg();
2206 LLT StoreSrcTy = MRI.getType(StoreSrcReg);
2207
2208 // If we get something strange like a physical register, then we shouldn't
2209 // go any further.
2210 if (!DefDstTy.isValid())
2211 return false;
2212
2213 // Are the source and dst types the same size?
2214 if (DefDstTy.getSizeInBits() != StoreSrcTy.getSizeInBits())
2215 return false;
2216
2217 if (RBI.getRegBank(StoreSrcReg, MRI, TRI) ==
2218 RBI.getRegBank(DefDstReg, MRI, TRI))
2219 return false;
2220
2221 // We have a cross-bank copy, which is entering a store. Let's fold it.
2222 I.getOperand(0).setReg(DefDstReg);
2223 return true;
2224}
2225
2226bool AArch64InstructionSelector::earlySelect(MachineInstr &I) {
2227 assert(I.getParent() && "Instruction should be in a basic block!");
2228 assert(I.getParent()->getParent() && "Instruction should be in a function!");
2229
2230 MachineBasicBlock &MBB = *I.getParent();
2231 MachineFunction &MF = *MBB.getParent();
2232 MachineRegisterInfo &MRI = MF.getRegInfo();
2233
2234 switch (I.getOpcode()) {
2235 case AArch64::G_DUP: {
2236 // Before selecting a DUP instruction, check if it is better selected as a
2237 // MOV or load from a constant pool.
2238 Register Src = I.getOperand(1).getReg();
2239 auto ValAndVReg = getIConstantVRegValWithLookThrough(Src, MRI);
2240 if (!ValAndVReg)
2241 return false;
2242 LLVMContext &Ctx = MF.getFunction().getContext();
2243 Register Dst = I.getOperand(0).getReg();
2244 auto *CV = ConstantDataVector::getSplat(
2245 MRI.getType(Dst).getNumElements(),
2246 ConstantInt::get(Type::getIntNTy(Ctx, MRI.getType(Src).getSizeInBits()),
2247 ValAndVReg->Value));
2248 if (!emitConstantVector(Dst, CV, MIB, MRI))
2249 return false;
2250 I.eraseFromParent();
2251 return true;
2252 }
2253 case TargetOpcode::G_SEXT:
2254 // Check for i64 sext(i32 vector_extract) prior to tablegen to select SMOV
2255 // over a normal extend.
2256 if (selectUSMovFromExtend(I, MRI))
2257 return true;
2258 return false;
2259 case TargetOpcode::G_BR:
2260 return false;
2261 case TargetOpcode::G_SHL:
2262 return earlySelectSHL(I, MRI);
2263 case TargetOpcode::G_CONSTANT: {
2264 bool IsZero = false;
2265 if (I.getOperand(1).isCImm())
2266 IsZero = I.getOperand(1).getCImm()->isZero();
2267 else if (I.getOperand(1).isImm())
2268 IsZero = I.getOperand(1).getImm() == 0;
2269
2270 if (!IsZero)
2271 return false;
2272
2273 Register DefReg = I.getOperand(0).getReg();
2274 LLT Ty = MRI.getType(DefReg);
2275 if (Ty.getSizeInBits() == 64) {
2276 I.getOperand(1).ChangeToRegister(AArch64::XZR, false);
2277 RBI.constrainGenericRegister(DefReg, AArch64::GPR64RegClass, MRI);
2278 } else if (Ty.getSizeInBits() == 32) {
2279 I.getOperand(1).ChangeToRegister(AArch64::WZR, false);
2280 RBI.constrainGenericRegister(DefReg, AArch64::GPR32RegClass, MRI);
2281 } else
2282 return false;
2283
2284 I.setDesc(TII.get(TargetOpcode::COPY));
2285 return true;
2286 }
2287
2288 case TargetOpcode::G_ADD: {
2289 // Check if this is being fed by a G_ICMP on either side.
2290 //
2291 // (cmp pred, x, y) + z
2292 //
2293 // In the above case, when the cmp is true, we increment z by 1. So, we can
2294 // fold the add into the cset for the cmp by using cinc.
2295 //
2296 // FIXME: This would probably be a lot nicer in PostLegalizerLowering.
2297 Register AddDst = I.getOperand(0).getReg();
2298 Register AddLHS = I.getOperand(1).getReg();
2299 Register AddRHS = I.getOperand(2).getReg();
2300 // Only handle scalars.
2301 LLT Ty = MRI.getType(AddLHS);
2302 if (Ty.isVector())
2303 return false;
2304 // Since G_ICMP is modeled as ADDS/SUBS/ANDS, we can handle 32 bits or 64
2305 // bits.
2306 unsigned Size = Ty.getSizeInBits();
2307 if (Size != 32 && Size != 64)
2308 return false;
2309 auto MatchCmp = [&](Register Reg) -> MachineInstr * {
2310 if (!MRI.hasOneNonDBGUse(Reg))
2311 return nullptr;
2312 // If the LHS of the add is 32 bits, then we want to fold a 32-bit
2313 // compare.
2314 if (Size == 32)
2315 return getOpcodeDef(TargetOpcode::G_ICMP, Reg, MRI);
2316 // We model scalar compares using 32-bit destinations right now.
2317 // If it's a 64-bit compare, it'll have 64-bit sources.
2318 Register ZExt;
2319 if (!mi_match(Reg, MRI,
2320 m_OneNonDBGUse(m_GZExt(m_OneNonDBGUse(m_Reg(ZExt))))))
2321 return nullptr;
2322 auto *Cmp = getOpcodeDef(TargetOpcode::G_ICMP, ZExt, MRI);
2323 if (!Cmp ||
2324 MRI.getType(Cmp->getOperand(2).getReg()).getSizeInBits() != 64)
2325 return nullptr;
2326 return Cmp;
2327 };
2328 // Try to match
2329 // z + (cmp pred, x, y)
2330 MachineInstr *Cmp = MatchCmp(AddRHS);
2331 if (!Cmp) {
2332 // (cmp pred, x, y) + z
2333 std::swap(AddLHS, AddRHS);
2334 Cmp = MatchCmp(AddRHS);
2335 if (!Cmp)
2336 return false;
2337 }
2338 auto &PredOp = Cmp->getOperand(1);
2339 auto Pred = static_cast<CmpInst::Predicate>(PredOp.getPredicate());
2340 const AArch64CC::CondCode InvCC =
2341 changeICMPPredToAArch64CC(CmpInst::getInversePredicate(Pred));
2343 emitIntegerCompare(/*LHS=*/Cmp->getOperand(2),
2344 /*RHS=*/Cmp->getOperand(3), PredOp, MIB);
2345 emitCSINC(/*Dst=*/AddDst, /*Src =*/AddLHS, /*Src2=*/AddLHS, InvCC, MIB);
2346 I.eraseFromParent();
2347 return true;
2348 }
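// The G_ADD + G_ICMP fold above selects to roughly (placeholder names):
//   SUBS  xzr, %x, %y            ; compare
//   CSINC %sum, %z, %z, ne       ; i.e. CINC %sum, %z, eq
// using the inverted condition so %z is incremented exactly when the compare
// predicate holds.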
2349 case TargetOpcode::G_OR: {
2350 // Look for operations that take the lower `Width=Size-ShiftImm` bits of
2351 // `ShiftSrc` and insert them into the upper `Width` bits of `MaskSrc` via
2352 // shifting and masking that we can replace with a BFI (encoded as a BFM).
2353 Register Dst = I.getOperand(0).getReg();
2354 LLT Ty = MRI.getType(Dst);
2355
2356 if (!Ty.isScalar())
2357 return false;
2358
2359 unsigned Size = Ty.getSizeInBits();
2360 if (Size != 32 && Size != 64)
2361 return false;
2362
2363 Register ShiftSrc;
2364 int64_t ShiftImm;
2365 Register MaskSrc;
2366 int64_t MaskImm;
2367 if (!mi_match(
2368 Dst, MRI,
2369 m_GOr(m_OneNonDBGUse(m_GShl(m_Reg(ShiftSrc), m_ICst(ShiftImm))),
2370 m_OneNonDBGUse(m_GAnd(m_Reg(MaskSrc), m_ICst(MaskImm))))))
2371 return false;
2372
2373 if (ShiftImm > Size || ((1ULL << ShiftImm) - 1ULL) != uint64_t(MaskImm))
2374 return false;
2375
2376 int64_t Immr = Size - ShiftImm;
2377 int64_t Imms = Size - ShiftImm - 1;
2378 unsigned Opc = Size == 32 ? AArch64::BFMWri : AArch64::BFMXri;
2379 emitInstr(Opc, {Dst}, {MaskSrc, ShiftSrc, Immr, Imms}, MIB);
2380 I.eraseFromParent();
2381 return true;
2382 }
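// For example, with Size = 32, ShiftImm = 8 and MaskImm = 0xff, the pattern
//   %dst = G_OR (G_SHL %hi, 8), (G_AND %lo, 0xff)
// becomes BFMWri with immr = 24 and imms = 23, effectively a BFI that inserts
// the low 24 bits of %hi at bit 8 of %lo (placeholder names).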
2383 case TargetOpcode::G_FENCE: {
2384 if (I.getOperand(1).getImm() == 0)
2385 BuildMI(MBB, I, MIMetadata(I), TII.get(TargetOpcode::MEMBARRIER));
2386 else
2387 BuildMI(MBB, I, MIMetadata(I), TII.get(AArch64::DMB))
2388 .addImm(I.getOperand(0).getImm() == 4 ? 0x9 : 0xb);
2389 I.eraseFromParent();
2390 return true;
2391 }
2392 default:
2393 return false;
2394 }
2395}
2396
2397bool AArch64InstructionSelector::select(MachineInstr &I) {
2398 assert(I.getParent() && "Instruction should be in a basic block!");
2399 assert(I.getParent()->getParent() && "Instruction should be in a function!");
2400
2401 MachineBasicBlock &MBB = *I.getParent();
2402 MachineFunction &MF = *MBB.getParent();
2403 MachineRegisterInfo &MRI = MF.getRegInfo();
2404
2405 const AArch64Subtarget *Subtarget = &MF.getSubtarget<AArch64Subtarget>();
2406 if (Subtarget->requiresStrictAlign()) {
2407 // We don't support this feature yet.
2408 LLVM_DEBUG(dbgs() << "AArch64 GISel does not support strict-align yet\n");
2409 return false;
2410 }
2411
2412 MIB.setInstrAndDebugLoc(I);
2413
2414 unsigned Opcode = I.getOpcode();
2415 // G_PHI requires same handling as PHI
2416 if (!I.isPreISelOpcode() || Opcode == TargetOpcode::G_PHI) {
2417 // Certain non-generic instructions also need some special handling.
2418
2419 if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
2420 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2421
2422 if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) {
2423 const Register DefReg = I.getOperand(0).getReg();
2424 const LLT DefTy = MRI.getType(DefReg);
2425
2426 const RegClassOrRegBank &RegClassOrBank =
2427 MRI.getRegClassOrRegBank(DefReg);
2428
2429 const TargetRegisterClass *DefRC
2430 = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
2431 if (!DefRC) {
2432 if (!DefTy.isValid()) {
2433 LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
2434 return false;
2435 }
2436 const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
2437 DefRC = getRegClassForTypeOnBank(DefTy, RB);
2438 if (!DefRC) {
2439 LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
2440 return false;
2441 }
2442 }
2443
2444 I.setDesc(TII.get(TargetOpcode::PHI));
2445
2446 return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
2447 }
2448
2449 if (I.isCopy())
2450 return selectCopy(I, TII, MRI, TRI, RBI);
2451
2452 if (I.isDebugInstr())
2453 return selectDebugInstr(I, MRI, RBI);
2454
2455 return true;
2456 }
2457
2458
2459 if (I.getNumOperands() != I.getNumExplicitOperands()) {
2460 LLVM_DEBUG(
2461 dbgs() << "Generic instruction has unexpected implicit operands\n");
2462 return false;
2463 }
2464
2465 // Try to do some lowering before we start instruction selecting. These
2466 // lowerings are purely transformations on the input G_MIR and so selection
2467 // must continue after any modification of the instruction.
2468 if (preISelLower(I)) {
2469 Opcode = I.getOpcode(); // The opcode may have been modified, refresh it.
2470 }
2471
2472 // There may be patterns that the importer can't deal with optimally, but
2473 // still selects to a suboptimal sequence, so our custom C++ selection code
2474 // later never has a chance to work on them. Therefore, we have an early
2475 // selection attempt here to give priority to certain selection routines
2476 // over the imported ones.
2477 if (earlySelect(I))
2478 return true;
2479
2480 if (selectImpl(I, *CoverageInfo))
2481 return true;
2482
2483 LLT Ty =
2484 I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{};
2485
2486 switch (Opcode) {
2487 case TargetOpcode::G_SBFX:
2488 case TargetOpcode::G_UBFX: {
2489 static const unsigned OpcTable[2][2] = {
2490 {AArch64::UBFMWri, AArch64::UBFMXri},
2491 {AArch64::SBFMWri, AArch64::SBFMXri}};
2492 bool IsSigned = Opcode == TargetOpcode::G_SBFX;
2493 unsigned Size = Ty.getSizeInBits();
2494 unsigned Opc = OpcTable[IsSigned][Size == 64];
2495 auto Cst1 =
2496 getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), MRI);
2497 assert(Cst1 && "Should have gotten a constant for src 1?");
2498 auto Cst2 =
2499 getIConstantVRegValWithLookThrough(I.getOperand(3).getReg(), MRI);
2500 assert(Cst2 && "Should have gotten a constant for src 2?");
2501 auto LSB = Cst1->Value.getZExtValue();
2502 auto Width = Cst2->Value.getZExtValue();
2503 auto BitfieldInst =
2504 MIB.buildInstr(Opc, {I.getOperand(0)}, {I.getOperand(1)})
2505 .addImm(LSB)
2506 .addImm(LSB + Width - 1);
2507 I.eraseFromParent();
2508 return constrainSelectedInstRegOperands(*BitfieldInst, TII, TRI, RBI);
2509 }
2510 case TargetOpcode::G_BRCOND:
2511 return selectCompareBranch(I, MF, MRI);
2512
2513 case TargetOpcode::G_BRINDIRECT: {
2514 I.setDesc(TII.get(AArch64::BR));
2515 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2516 }
2517
2518 case TargetOpcode::G_BRJT:
2519 return selectBrJT(I, MRI);
2520
2521 case AArch64::G_ADD_LOW: {
2522 // This op may have been separated from its ADRP companion by the localizer
2523 // or some other code motion pass. Given that many CPUs will try to
2524 // macro fuse these operations anyway, select this into a MOVaddr pseudo
2525 // which will later be expanded into an ADRP+ADD pair after scheduling.
2526 MachineInstr *BaseMI = MRI.getVRegDef(I.getOperand(1).getReg());
2527 if (BaseMI->getOpcode() != AArch64::ADRP) {
2528 I.setDesc(TII.get(AArch64::ADDXri));
2529 I.addOperand(MachineOperand::CreateImm(0));
2530 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2531 }
2532 assert(TM.getCodeModel() == CodeModel::Small &&
2533 "Expected small code model");
2534 auto Op1 = BaseMI->getOperand(1);
2535 auto Op2 = I.getOperand(2);
2536 auto MovAddr = MIB.buildInstr(AArch64::MOVaddr, {I.getOperand(0)}, {})
2537 .addGlobalAddress(Op1.getGlobal(), Op1.getOffset(),
2538 Op1.getTargetFlags())
2539 .addGlobalAddress(Op2.getGlobal(), Op2.getOffset(),
2540 Op2.getTargetFlags());
2541 I.eraseFromParent();
2542 return constrainSelectedInstRegOperands(*MovAddr, TII, TRI, RBI);
2543 }
2544
2545 case TargetOpcode::G_BSWAP: {
2546 // Handle vector types for G_BSWAP directly.
2547 Register DstReg = I.getOperand(0).getReg();
2548 LLT DstTy = MRI.getType(DstReg);
2549
2550 // We should only get vector types here; everything else is handled by the
2551 // importer right now.
2552 if (!DstTy.isVector() || DstTy.getSizeInBits() > 128) {
2553 LLVM_DEBUG(dbgs() << "Dst type for G_BSWAP currently unsupported.\n");
2554 return false;
2555 }
2556
2557 // Only handle 4 and 2 element vectors for now.
2558 // TODO: 16-bit elements.
2559 unsigned NumElts = DstTy.getNumElements();
2560 if (NumElts != 4 && NumElts != 2) {
2561 LLVM_DEBUG(dbgs() << "Unsupported number of elements for G_BSWAP.\n");
2562 return false;
2563 }
2564
2565 // Choose the correct opcode for the supported types. Right now, that's
2566 // v2s32, v4s32, and v2s64.
2567 unsigned Opc = 0;
2568 unsigned EltSize = DstTy.getElementType().getSizeInBits();
2569 if (EltSize == 32)
2570 Opc = (DstTy.getNumElements() == 2) ? AArch64::REV32v8i8
2571 : AArch64::REV32v16i8;
2572 else if (EltSize == 64)
2573 Opc = AArch64::REV64v16i8;
2574
2575 // We should always get something by the time we get here...
2576 assert(Opc != 0 && "Didn't get an opcode for G_BSWAP?");
2577
2578 I.setDesc(TII.get(Opc));
2579 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2580 }
2581
2582 case TargetOpcode::G_FCONSTANT:
2583 case TargetOpcode::G_CONSTANT: {
2584 const bool isFP = Opcode == TargetOpcode::G_FCONSTANT;
2585
2586 const LLT s8 = LLT::scalar(8);
2587 const LLT s16 = LLT::scalar(16);
2588 const LLT s32 = LLT::scalar(32);
2589 const LLT s64 = LLT::scalar(64);
2590 const LLT s128 = LLT::scalar(128);
2591 const LLT p0 = LLT::pointer(0, 64);
2592
2593 const Register DefReg = I.getOperand(0).getReg();
2594 const LLT DefTy = MRI.getType(DefReg);
2595 const unsigned DefSize = DefTy.getSizeInBits();
2596 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
2597
2598 // FIXME: Redundant check, but even less readable when factored out.
2599 if (isFP) {
2600 if (Ty != s16 && Ty != s32 && Ty != s64 && Ty != s128) {
2601 LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
2602 << " constant, expected: " << s16 << " or " << s32
2603 << " or " << s64 << " or " << s128 << '\n');
2604 return false;
2605 }
2606
2607 if (RB.getID() != AArch64::FPRRegBankID) {
2608 LLVM_DEBUG(dbgs() << "Unable to materialize FP " << Ty
2609 << " constant on bank: " << RB
2610 << ", expected: FPR\n");
2611 return false;
2612 }
2613
2614 // The 0.0 case is covered by tablegen for everything except FP128. Reject it
2615 // here so we can be sure tablegen works correctly and isn't rescued by this
2616 // code. FP128 0.0 is not covered by tablegen, so we handle that scenario in
2617 // the code below.
2618 if (DefSize != 128 && I.getOperand(1).getFPImm()->isExactlyValue(0.0))
2619 return false;
2620 } else {
2621 // s32 and s64 are covered by tablegen.
2622 if (Ty != p0 && Ty != s8 && Ty != s16) {
2623 LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
2624 << " constant, expected: " << s32 << ", " << s64
2625 << ", or " << p0 << '\n');
2626 return false;
2627 }
2628
2629 if (RB.getID() != AArch64::GPRRegBankID) {
2630 LLVM_DEBUG(dbgs() << "Unable to materialize integer " << Ty
2631 << " constant on bank: " << RB
2632 << ", expected: GPR\n");
2633 return false;
2634 }
2635 }
2636
2637 if (isFP) {
2638 const TargetRegisterClass &FPRRC = *getRegClassForTypeOnBank(DefTy, RB);
2639 // For 16, 64, and 128b values, emit a constant pool load.
2640 switch (DefSize) {
2641 default:
2642 llvm_unreachable("Unexpected destination size for G_FCONSTANT?");
2643 case 32:
2644 case 64: {
2645 bool OptForSize = shouldOptForSize(&MF);
2646 const auto &TLI = MF.getSubtarget().getTargetLowering();
2647 // If TLI says that this fpimm is illegal, then we'll expand to a
2648 // constant pool load.
2649 if (TLI->isFPImmLegal(I.getOperand(1).getFPImm()->getValueAPF(),
2650 EVT::getFloatingPointVT(DefSize), OptForSize))
2651 break;
2652 [[fallthrough]];
2653 }
2654 case 16:
2655 case 128: {
2656 auto *FPImm = I.getOperand(1).getFPImm();
2657 auto *LoadMI = emitLoadFromConstantPool(FPImm, MIB);
2658 if (!LoadMI) {
2659 LLVM_DEBUG(dbgs() << "Failed to load double constant pool entry\n");
2660 return false;
2661 }
2662 MIB.buildCopy({DefReg}, {LoadMI->getOperand(0).getReg()});
2663 I.eraseFromParent();
2664 return RBI.constrainGenericRegister(DefReg, FPRRC, MRI);
2665 }
2666 }
2667
2668 assert((DefSize == 32 || DefSize == 64) && "Unexpected const def size");
2669 // Either emit a FMOV, or emit a copy to emit a normal mov.
2670 const Register DefGPRReg = MRI.createVirtualRegister(
2671 DefSize == 32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
2672 MachineOperand &RegOp = I.getOperand(0);
2673 RegOp.setReg(DefGPRReg);
2674 MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
2675 MIB.buildCopy({DefReg}, {DefGPRReg});
2676
2677 if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
2678 LLVM_DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
2679 return false;
2680 }
2681
2682 MachineOperand &ImmOp = I.getOperand(1);
2683 // FIXME: Is going through int64_t always correct?
2684 ImmOp.ChangeToImmediate(
2685 ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
2686 } else if (I.getOperand(1).isCImm()) {
2687 uint64_t Val = I.getOperand(1).getCImm()->getZExtValue();
2688 I.getOperand(1).ChangeToImmediate(Val);
2689 } else if (I.getOperand(1).isImm()) {
2690 uint64_t Val = I.getOperand(1).getImm();
2691 I.getOperand(1).ChangeToImmediate(Val);
2692 }
2693
2694 const unsigned MovOpc =
2695 DefSize == 64 ? AArch64::MOVi64imm : AArch64::MOVi32imm;
2696 I.setDesc(TII.get(MovOpc));
2697 constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2698 return true;
2699 }
2700 case TargetOpcode::G_EXTRACT: {
2701 Register DstReg = I.getOperand(0).getReg();
2702 Register SrcReg = I.getOperand(1).getReg();
2703 LLT SrcTy = MRI.getType(SrcReg);
2704 LLT DstTy = MRI.getType(DstReg);
2705 (void)DstTy;
2706 unsigned SrcSize = SrcTy.getSizeInBits();
2707
2708 if (SrcTy.getSizeInBits() > 64) {
2709 // This should be an extract of an s128, which is like a vector extract.
2710 if (SrcTy.getSizeInBits() != 128)
2711 return false;
2712 // Only support extracting 64 bits from an s128 at the moment.
2713 if (DstTy.getSizeInBits() != 64)
2714 return false;
2715
2716 unsigned Offset = I.getOperand(2).getImm();
2717 if (Offset % 64 != 0)
2718 return false;
2719
2720 // Check we have the right regbank always.
2721 const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
2722 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
2723 assert(SrcRB.getID() == DstRB.getID() && "Wrong extract regbank!");
2724
2725 if (SrcRB.getID() == AArch64::GPRRegBankID) {
2726 auto NewI =
2727 MIB.buildInstr(TargetOpcode::COPY, {DstReg}, {})
2728 .addUse(SrcReg, 0,
2729 Offset == 0 ? AArch64::sube64 : AArch64::subo64);
2730 constrainOperandRegClass(MF, TRI, MRI, TII, RBI, *NewI,
2731 AArch64::GPR64RegClass, NewI->getOperand(0));
2732 I.eraseFromParent();
2733 return true;
2734 }
2735
2736 // Emit the same code as a vector extract.
2737 // Offset must be a multiple of 64.
2738 unsigned LaneIdx = Offset / 64;
2739 MachineInstr *Extract = emitExtractVectorElt(
2740 DstReg, DstRB, LLT::scalar(64), SrcReg, LaneIdx, MIB);
2741 if (!Extract)
2742 return false;
2743 I.eraseFromParent();
2744 return true;
2745 }
2746
2747 I.setDesc(TII.get(SrcSize == 64 ? AArch64::UBFMXri : AArch64::UBFMWri));
2748 MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() +
2749 Ty.getSizeInBits() - 1);
2750
2751 if (SrcSize < 64) {
2752 assert(SrcSize == 32 && DstTy.getSizeInBits() == 16 &&
2753 "unexpected G_EXTRACT types");
2754 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2755 }
2756
2757 DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
2758 MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
2759 MIB.buildInstr(TargetOpcode::COPY, {I.getOperand(0).getReg()}, {})
2760 .addReg(DstReg, 0, AArch64::sub_32);
2761 RBI.constrainGenericRegister(I.getOperand(0).getReg(),
2762 AArch64::GPR32RegClass, MRI);
2763 I.getOperand(0).setReg(DstReg);
2764
2765 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2766 }
2767
2768 case TargetOpcode::G_INSERT: {
2769 LLT SrcTy = MRI.getType(I.getOperand(2).getReg());
2770 LLT DstTy = MRI.getType(I.getOperand(0).getReg());
2771 unsigned DstSize = DstTy.getSizeInBits();
2772 // Larger inserts are vectors, same-size ones should be something else by
2773 // now (split up or turned into COPYs).
2774 if (Ty.getSizeInBits() > 64 || SrcTy.getSizeInBits() > 32)
2775 return false;
2776
2777 I.setDesc(TII.get(DstSize == 64 ? AArch64::BFMXri : AArch64::BFMWri));
2778 unsigned LSB = I.getOperand(3).getImm();
2779 unsigned Width = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
2780 I.getOperand(3).setImm((DstSize - LSB) % DstSize);
2781 MachineInstrBuilder(MF, I).addImm(Width - 1);
2782
2783 if (DstSize < 64) {
2784 assert(DstSize == 32 && SrcTy.getSizeInBits() == 16 &&
2785 "unexpected G_INSERT types");
2786 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2787 }
2788
2789 Register SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
2790 BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
2791 TII.get(AArch64::SUBREG_TO_REG))
2792 .addDef(SrcReg)
2793 .addImm(0)
2794 .addUse(I.getOperand(2).getReg())
2795 .addImm(AArch64::sub_32);
2796 RBI.constrainGenericRegister(I.getOperand(2).getReg(),
2797 AArch64::GPR32RegClass, MRI);
2798 I.getOperand(2).setReg(SrcReg);
2799
2800 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2801 }
2802 case TargetOpcode::G_FRAME_INDEX: {
2803 // allocas and G_FRAME_INDEX are only supported in addrspace(0).
2804 if (Ty != LLT::pointer(0, 64)) {
2805 LLVM_DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
2806 << ", expected: " << LLT::pointer(0, 64) << '\n');
2807 return false;
2808 }
2809 I.setDesc(TII.get(AArch64::ADDXri));
2810
2811 // MOs for a #0 shifted immediate.
2812 I.addOperand(MachineOperand::CreateImm(0));
2813 I.addOperand(MachineOperand::CreateImm(0));
2814
2815 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2816 }
2817
2818 case TargetOpcode::G_GLOBAL_VALUE: {
2819 auto GV = I.getOperand(1).getGlobal();
2820 if (GV->isThreadLocal())
2821 return selectTLSGlobalValue(I, MRI);
2822
2823 unsigned OpFlags = STI.ClassifyGlobalReference(GV, TM);
2824 if (OpFlags & AArch64II::MO_GOT) {
2825 I.setDesc(TII.get(AArch64::LOADgot));
2826 I.getOperand(1).setTargetFlags(OpFlags);
2827 } else if (TM.getCodeModel() == CodeModel::Large) {
2828 // Materialize the global using movz/movk instructions.
2829 materializeLargeCMVal(I, GV, OpFlags);
2830 I.eraseFromParent();
2831 return true;
2832 } else if (TM.getCodeModel() == CodeModel::Tiny) {
2833 I.setDesc(TII.get(AArch64::ADR));
2834 I.getOperand(1).setTargetFlags(OpFlags);
2835 } else {
2836 I.setDesc(TII.get(AArch64::MOVaddr));
2837 I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE);
2838 MachineInstrBuilder MIB(MF, I);
2839 MIB.addGlobalAddress(GV, I.getOperand(1).getOffset(),
2840 OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2841 }
2842 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2843 }
2844
2845 case TargetOpcode::G_ZEXTLOAD:
2846 case TargetOpcode::G_LOAD:
2847 case TargetOpcode::G_STORE: {
2848 GLoadStore &LdSt = cast<GLoadStore>(I);
2849 bool IsZExtLoad = I.getOpcode() == TargetOpcode::G_ZEXTLOAD;
2850 LLT PtrTy = MRI.getType(LdSt.getPointerReg());
2851
2852 if (PtrTy != LLT::pointer(0, 64)) {
2853 LLVM_DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
2854 << ", expected: " << LLT::pointer(0, 64) << '\n');
2855 return false;
2856 }
2857
2858 uint64_t MemSizeInBytes = LdSt.getMemSize();
2859 unsigned MemSizeInBits = LdSt.getMemSizeInBits();
2860 AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering();
2861
2862 // Need special instructions for atomics that affect ordering.
2863 if (Order != AtomicOrdering::NotAtomic &&
2864 Order != AtomicOrdering::Unordered &&
2865 Order != AtomicOrdering::Monotonic) {
2866 assert(!isa<GZExtLoad>(LdSt));
2867 if (MemSizeInBytes > 64)
2868 return false;
2869
2870 if (isa<GLoad>(LdSt)) {
2871 static constexpr unsigned LDAPROpcodes[] = {
2872 AArch64::LDAPRB, AArch64::LDAPRH, AArch64::LDAPRW, AArch64::LDAPRX};
2873 static constexpr unsigned LDAROpcodes[] = {
2874 AArch64::LDARB, AArch64::LDARH, AArch64::LDARW, AArch64::LDARX};
2875 ArrayRef<unsigned> Opcodes =
2876 STI.hasRCPC() && Order != AtomicOrdering::SequentiallyConsistent
2877 ? LDAPROpcodes
2878 : LDAROpcodes;
2879 I.setDesc(TII.get(Opcodes[Log2_32(MemSizeInBytes)]));
2880 } else {
2881 static constexpr unsigned Opcodes[] = {AArch64::STLRB, AArch64::STLRH,
2882 AArch64::STLRW, AArch64::STLRX};
2883 Register ValReg = LdSt.getReg(0);
2884 if (MRI.getType(ValReg).getSizeInBits() == 64 && MemSizeInBits != 64) {
2885 // Emit a subreg copy of 32 bits.
2886 Register NewVal = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
2887 MIB.buildInstr(TargetOpcode::COPY, {NewVal}, {})
2888 .addReg(I.getOperand(0).getReg(), 0, AArch64::sub_32);
2889 I.getOperand(0).setReg(NewVal);
2890 }
2891 I.setDesc(TII.get(Opcodes[Log2_32(MemSizeInBytes)]));
2892 }
2893 constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2894 return true;
2895 }
2896
2897#ifndef NDEBUG
2898 const Register PtrReg = LdSt.getPointerReg();
2899 const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
2900 // Check that the pointer register is valid.
2901 assert(PtrRB.getID() == AArch64::GPRRegBankID &&
2902 "Load/Store pointer operand isn't a GPR");
2903 assert(MRI.getType(PtrReg).isPointer() &&
2904 "Load/Store pointer operand isn't a pointer");
2905#endif
2906
2907 const Register ValReg = LdSt.getReg(0);
2908 const LLT ValTy = MRI.getType(ValReg);
2909 const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI);
2910
2911 // The code below doesn't support truncating stores, so we need to split it
2912 // again.
2913 if (isa<GStore>(LdSt) && ValTy.getSizeInBits() > MemSizeInBits) {
2914 unsigned SubReg;
2915 LLT MemTy = LdSt.getMMO().getMemoryType();
2916 auto *RC = getRegClassForTypeOnBank(MemTy, RB);
2917 if (!getSubRegForClass(RC, TRI, SubReg))
2918 return false;
2919
2920 // Generate a subreg copy.
2921 auto Copy = MIB.buildInstr(TargetOpcode::COPY, {MemTy}, {})
2922 .addReg(ValReg, 0, SubReg)
2923 .getReg(0);
2924 RBI.constrainGenericRegister(Copy, *RC, MRI);
2925 LdSt.getOperand(0).setReg(Copy);
2926 } else if (isa<GLoad>(LdSt) && ValTy.getSizeInBits() > MemSizeInBits) {
2927 // If this is an any-extending load from the FPR bank, split it into a regular
2928 // load + extend.
2929 if (RB.getID() == AArch64::FPRRegBankID) {
2930 unsigned SubReg;
2931 LLT MemTy = LdSt.getMMO().getMemoryType();
2932 auto *RC = getRegClassForTypeOnBank(MemTy, RB);
2933 if (!getSubRegForClass(RC, TRI, SubReg))
2934 return false;
2935 Register OldDst = LdSt.getReg(0);
2936 Register NewDst =
2937 MRI.createGenericVirtualRegister(LdSt.getMMO().getMemoryType());
2938 LdSt.getOperand(0).setReg(NewDst);
2939 MRI.setRegBank(NewDst, RB);
2940 // Generate a SUBREG_TO_REG to extend it.
2941 MIB.setInsertPt(MIB.getMBB(), std::next(LdSt.getIterator()));
2942 MIB.buildInstr(AArch64::SUBREG_TO_REG, {OldDst}, {})
2943 .addImm(0)
2944 .addUse(NewDst)
2945 .addImm(SubReg);
2946 auto SubRegRC = getRegClassForTypeOnBank(MRI.getType(OldDst), RB);
2947 RBI.constrainGenericRegister(OldDst, *SubRegRC, MRI);
2948 MIB.setInstr(LdSt);
2949 }
2950 }
2951
2952 // Helper lambda for partially selecting I. Either returns the original
2953 // instruction with an updated opcode, or a new instruction.
2954 auto SelectLoadStoreAddressingMode = [&]() -> MachineInstr * {
2955 bool IsStore = isa<GStore>(I);
2956 const unsigned NewOpc =
2957 selectLoadStoreUIOp(I.getOpcode(), RB.getID(), MemSizeInBits);
2958 if (NewOpc == I.getOpcode())
2959 return nullptr;
2960 // Check if we can fold anything into the addressing mode.
2961 auto AddrModeFns =
2962 selectAddrModeIndexed(I.getOperand(1), MemSizeInBytes);
2963 if (!AddrModeFns) {
2964 // Can't fold anything. Use the original instruction.
2965 I.setDesc(TII.get(NewOpc));
2966 I.addOperand(MachineOperand::CreateImm(0));
2967 return &I;
2968 }
2969
2970 // Folded something. Create a new instruction and return it.
2971 auto NewInst = MIB.buildInstr(NewOpc, {}, {}, I.getFlags());
2972 Register CurValReg = I.getOperand(0).getReg();
2973 IsStore ? NewInst.addUse(CurValReg) : NewInst.addDef(CurValReg);
2974 NewInst.cloneMemRefs(I);
2975 for (auto &Fn : *AddrModeFns)
2976 Fn(NewInst);
2977 I.eraseFromParent();
2978 return &*NewInst;
2979 };
2980
2981 MachineInstr *LoadStore = SelectLoadStoreAddressingMode();
2982 if (!LoadStore)
2983 return false;
2984
2985 // If we're storing a 0, use WZR/XZR.
2986 if (Opcode == TargetOpcode::G_STORE) {
2987 auto CVal = getIConstantVRegValWithLookThrough(
2988 LoadStore->getOperand(0).getReg(), MRI);
2989 if (CVal && CVal->Value == 0) {
2990 switch (LoadStore->getOpcode()) {
2991 case AArch64::STRWui:
2992 case AArch64::STRHHui:
2993 case AArch64::STRBBui:
2994 LoadStore->getOperand(0).setReg(AArch64::WZR);
2995 break;
2996 case AArch64::STRXui:
2997 LoadStore->getOperand(0).setReg(AArch64::XZR);
2998 break;
2999 }
3000 }
3001 }
3002
3003 if (IsZExtLoad) {
3004 // The zextload from a smaller type to i32 should be handled by the
3005 // importer.
3006 if (MRI.getType(LoadStore->getOperand(0).getReg()).getSizeInBits() != 64)
3007 return false;
3008 // If we have a ZEXTLOAD then change the load's type to be a narrower reg
3009 // and zero_extend with SUBREG_TO_REG.
3010 Register LdReg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
3011 Register DstReg = LoadStore->getOperand(0).getReg();
3012 LoadStore->getOperand(0).setReg(LdReg);
3013
3014 MIB.setInsertPt(MIB.getMBB(), std::next(LoadStore->getIterator()));
3015 MIB.buildInstr(AArch64::SUBREG_TO_REG, {DstReg}, {})
3016 .addImm(0)
3017 .addUse(LdReg)
3018 .addImm(AArch64::sub_32);
3019 constrainSelectedInstRegOperands(*LoadStore, TII, TRI, RBI);
3020 return RBI.constrainGenericRegister(DstReg, AArch64::GPR64allRegClass,
3021 MRI);
3022 }
3023 return constrainSelectedInstRegOperands(*LoadStore, TII, TRI, RBI);
3024 }
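// As a sketch of the G_ZEXTLOAD path above (names are placeholders):
//   %x:gpr(s64) = G_ZEXTLOAD %p :: (load (s32))
// selects to a 32-bit LDRWui into a temporary W register followed by a
// SUBREG_TO_REG of that temporary into %x via sub_32, relying on 32-bit loads
// implicitly zeroing the upper half of the X register.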
3025
3026 case TargetOpcode::G_SMULH:
3027 case TargetOpcode::G_UMULH: {
3028 // Reject the various things we don't support yet.
3029 if (unsupportedBinOp(I, RBI, MRI, TRI))
3030 return false;
3031
3032 const Register DefReg = I.getOperand(0).getReg();
3033 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
3034
3035 if (RB.getID() != AArch64::GPRRegBankID) {
3036 LLVM_DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
3037 return false;
3038 }
3039
3040 if (Ty != LLT::scalar(64)) {
3041 LLVM_DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
3042 << ", expected: " << LLT::scalar(64) << '\n');
3043 return false;
3044 }
3045
3046 unsigned NewOpc = I.getOpcode() == TargetOpcode::G_SMULH ? AArch64::SMULHrr
3047 : AArch64::UMULHrr;
3048 I.setDesc(TII.get(NewOpc));
3049
3050 // Now that we selected an opcode, we need to constrain the register
3051 // operands to use appropriate classes.
3052 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
3053 }
3054 case TargetOpcode::G_LSHR:
3055 case TargetOpcode::G_ASHR:
3056 if (MRI.getType(I.getOperand(0).getReg()).isVector())
3057 return selectVectorAshrLshr(I, MRI);
3058 [[fallthrough]];
3059 case TargetOpcode::G_SHL:
3060 if (Opcode == TargetOpcode::G_SHL &&
3061 MRI.getType(I.getOperand(0).getReg()).isVector())
3062 return selectVectorSHL(I, MRI);
3063
3064 // These shifts were legalized to have 64 bit shift amounts because we
3065 // want to take advantage of the selection patterns that assume the
3066 // immediates are s64s. However, selectBinaryOp will assume both operands
3067 // have the same bit size.
3068 {
3069 Register SrcReg = I.getOperand(1).getReg();
3070 Register ShiftReg = I.getOperand(2).getReg();
3071 const LLT ShiftTy = MRI.getType(ShiftReg);
3072 const LLT SrcTy = MRI.getType(SrcReg);
3073 if (!SrcTy.isVector() && SrcTy.getSizeInBits() == 32 &&
3074 ShiftTy.getSizeInBits() == 64) {
3075 assert(!ShiftTy.isVector() && "unexpected vector shift ty");
3076 // Insert a subregister copy to implement a 64->32 trunc
3077 auto Trunc = MIB.buildInstr(TargetOpcode::COPY, {SrcTy}, {})
3078 .addReg(ShiftReg, 0, AArch64::sub_32);
3079 MRI.setRegBank(Trunc.getReg(0), RBI.getRegBank(AArch64::GPRRegBankID));
3080 I.getOperand(2).setReg(Trunc.getReg(0));
3081 }
3082 }
3083 [[fallthrough]];
3084 case TargetOpcode::G_OR: {
3085 // Reject the various things we don't support yet.
3086 if (unsupportedBinOp(I, RBI, MRI, TRI))
3087 return false;
3088
3089 const unsigned OpSize = Ty.getSizeInBits();
3090
3091 const Register DefReg = I.getOperand(0).getReg();
3092 const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
3093
3094 const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize);
3095 if (NewOpc == I.getOpcode())
3096 return false;
3097
3098 I.setDesc(TII.get(NewOpc));
3099 // FIXME: Should the type be always reset in setDesc?
3100
3101 // Now that we selected an opcode, we need to constrain the register
3102 // operands to use appropriate classes.
3103 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
3104 }
3105
3106 case TargetOpcode::G_PTR_ADD: {
3107 emitADD(I.getOperand(0).getReg(), I.getOperand(1), I.getOperand(2), MIB);
3108 I.eraseFromParent();
3109 return true;
3110 }
3111
3112 case TargetOpcode::G_SADDE:
3113 case TargetOpcode::G_UADDE:
3114 case TargetOpcode::G_SSUBE:
3115 case TargetOpcode::G_USUBE:
3116 case TargetOpcode::G_SADDO:
3117 case TargetOpcode::G_UADDO:
3118 case TargetOpcode::G_SSUBO:
3119 case TargetOpcode::G_USUBO:
3120 return selectOverflowOp(I, MRI);
3121
3122 case TargetOpcode::G_PTRMASK: {
3123 Register MaskReg = I.getOperand(2).getReg();
3124 std::optional<int64_t> MaskVal = getIConstantVRegSExtVal(MaskReg, MRI);
3125 // TODO: Implement arbitrary cases
3126 if (!MaskVal || !isShiftedMask_64(*MaskVal))
3127 return false;
3128
3129 uint64_t Mask = *MaskVal;
3130 I.setDesc(TII.get(AArch64::ANDXri));
3131 I.getOperand(2).ChangeToImmediate(
3132 AArch64_AM::encodeLogicalImmediate(Mask, 64));
3133
3134 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
3135 }
3136 case TargetOpcode::G_PTRTOINT:
3137 case TargetOpcode::G_TRUNC: {
3138 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
3139 const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
3140
3141 const Register DstReg = I.getOperand(0).getReg();
3142 const Register SrcReg = I.getOperand(1).getReg();
3143
3144 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
3145 const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
3146
3147 if (DstRB.getID() != SrcRB.getID()) {
3148 LLVM_DEBUG(
3149 dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
3150 return false;
3151 }
3152
3153 if (DstRB.getID() == AArch64::GPRRegBankID) {
3154 const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(DstTy, DstRB);
3155 if (!DstRC)
3156 return false;
3157
3158 const TargetRegisterClass *SrcRC = getRegClassForTypeOnBank(SrcTy, SrcRB);
3159 if (!SrcRC)
3160 return false;
3161
3162 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
3163 !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
3164 LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
3165 return false;
3166 }
3167
3168 if (DstRC == SrcRC) {
3169 // Nothing to be done
3170 } else if (Opcode == TargetOpcode::G_TRUNC && DstTy == LLT::scalar(32) &&
3171 SrcTy == LLT::scalar(64)) {
3172 llvm_unreachable("TableGen can import this case");
3173 return false;
3174 } else if (DstRC == &AArch64::GPR32RegClass &&
3175 SrcRC == &AArch64::GPR64RegClass) {
3176 I.getOperand(1).setSubReg(AArch64::sub_32);
3177 } else {
3178 LLVM_DEBUG(
3179 dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
3180 return false;
3181 }
3182
3183 I.setDesc(TII.get(TargetOpcode::COPY));
3184 return true;
3185 } else if (DstRB.getID() == AArch64::FPRRegBankID) {
3186 if (DstTy == LLT::fixed_vector(4, 16) &&
3187 SrcTy == LLT::fixed_vector(4, 32)) {
3188 I.setDesc(TII.get(AArch64::XTNv4i16));
3189 constrainSelectedInstRegOperands(I, TII, TRI, RBI);
3190 return true;
3191 }
3192
3193 if (!SrcTy.isVector() && SrcTy.getSizeInBits() == 128) {
3194 MachineInstr *Extract = emitExtractVectorElt(
3195 DstReg, DstRB, LLT::scalar(DstTy.getSizeInBits()), SrcReg, 0, MIB);
3196 if (!Extract)
3197 return false;
3198 I.eraseFromParent();
3199 return true;
3200 }
3201
3202 // We might have a vector G_PTRTOINT, in which case just emit a COPY.
3203 if (Opcode == TargetOpcode::G_PTRTOINT) {
3204 assert(DstTy.isVector() && "Expected an FPR ptrtoint to be a vector");
3205 I.setDesc(TII.get(TargetOpcode::COPY));
3206 return selectCopy(I, TII, MRI, TRI, RBI);
3207 }
3208 }
3209
3210 return false;
3211 }
3212
3213 case TargetOpcode::G_ANYEXT: {
3214 if (selectUSMovFromExtend(I, MRI))
3215 return true;
3216
3217 const Register DstReg = I.getOperand(0).getReg();
3218 const Register SrcReg = I.getOperand(1).getReg();
3219
3220 const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
3221 if (RBDst.getID() != AArch64::GPRRegBankID) {
3222 LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst
3223 << ", expected: GPR\n");
3224 return false;
3225 }
3226
3227 const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI);
3228 if (RBSrc.getID() != AArch64::GPRRegBankID) {
3229 LLVM_DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc
3230 << ", expected: GPR\n");
3231 return false;
3232 }
3233
3234 const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
3235
3236 if (DstSize == 0) {
3237 LLVM_DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
3238 return false;
3239 }
3240
3241 if (DstSize != 64 && DstSize > 32) {
3242 LLVM_DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
3243 << ", expected: 32 or 64\n");
3244 return false;
3245 }
3246 // At this point G_ANYEXT is just like a plain COPY, but we need
3247 // to explicitly form the 64-bit value if any.
3248 if (DstSize > 32) {
3249 Register ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
3250 BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
3251 .addDef(ExtSrc)
3252 .addImm(0)
3253 .addUse(SrcReg)
3254 .addImm(AArch64::sub_32);
3255 I.getOperand(1).setReg(ExtSrc);
3256 }
3257 return selectCopy(I, TII, MRI, TRI, RBI);
3258 }
3259
3260 case TargetOpcode::G_ZEXT:
3261 case TargetOpcode::G_SEXT_INREG:
3262 case TargetOpcode::G_SEXT: {
3263 if (selectUSMovFromExtend(I, MRI))
3264 return true;
3265
3266 unsigned Opcode = I.getOpcode();
3267 const bool IsSigned = Opcode != TargetOpcode::G_ZEXT;
3268 const Register DefReg = I.getOperand(0).getReg();
3269 Register SrcReg = I.getOperand(1).getReg();
3270 const LLT DstTy = MRI.getType(DefReg);
3271 const LLT SrcTy = MRI.getType(SrcReg);
3272 unsigned DstSize = DstTy.getSizeInBits();
3273 unsigned SrcSize = SrcTy.getSizeInBits();
3274
3275 // SEXT_INREG has the same src reg size as dst, the size of the value to be
3276 // extended is encoded in the imm.
3277 if (Opcode == TargetOpcode::G_SEXT_INREG)
3278 SrcSize = I.getOperand(2).getImm();
3279
3280 if (DstTy.isVector())
3281 return false; // Should be handled by imported patterns.
3282
3283 assert((*RBI.getRegBank(DefReg, MRI, TRI)).getID() ==
3284 AArch64::GPRRegBankID &&
3285 "Unexpected ext regbank");
3286
3287 MachineInstr *ExtI;
3288
3289 // If we're extending the result of a load whose destination type is
3290 // smaller than 32 bits, this zext is redundant: GPR32 is the smallest GPR
3291 // register on AArch64, and all smaller loads automatically zero-extend the
3292 // upper bits. E.g.
3293 // %v(s8) = G_LOAD %p, :: (load 1)
3294 // %v2(s32) = G_ZEXT %v(s8)
3295 if (!IsSigned) {
3296 auto *LoadMI = getOpcodeDef(TargetOpcode::G_LOAD, SrcReg, MRI);
3297 bool IsGPR =
3298 RBI.getRegBank(SrcReg, MRI, TRI)->getID() == AArch64::GPRRegBankID;
3299 if (LoadMI && IsGPR) {
3300 const MachineMemOperand *MemOp = *LoadMI->memoperands_begin();
3301 unsigned BytesLoaded = MemOp->getSize();
3302 if (BytesLoaded < 4 && SrcTy.getSizeInBytes() == BytesLoaded)
3303 return selectCopy(I, TII, MRI, TRI, RBI);
3304 }
3305
3306 // For the 32-bit -> 64-bit case, we can emit a mov (ORRWrs)
3307 // + SUBREG_TO_REG.
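// ORRWrs with WZR as the first source is the canonical 32-bit register move
// (MOV Wd, Wm), and writing a W register implicitly zeroes the upper 32 bits
// of the X register, so the SUBREG_TO_REG below yields a correctly
// zero-extended 64-bit value without an explicit UBFM.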
3308 if (IsGPR && SrcSize == 32 && DstSize == 64) {
3309 Register SubregToRegSrc =
3310 MRI.createVirtualRegister(&AArch64::GPR32RegClass);
3311 const Register ZReg = AArch64::WZR;
3312 MIB.buildInstr(AArch64::ORRWrs, {SubregToRegSrc}, {ZReg, SrcReg})
3313 .addImm(0);
3314
3315 MIB.buildInstr(AArch64::SUBREG_TO_REG, {DefReg}, {})
3316 .addImm(0)
3317 .addUse(SubregToRegSrc)
3318 .addImm(AArch64::sub_32);
3319
3320 if (!RBI.constrainGenericRegister(DefReg, AArch64::GPR64RegClass,
3321 MRI)) {
3322 LLVM_DEBUG(dbgs() << "Failed to constrain G_ZEXT destination\n");
3323 return false;
3324 }
3325
3326 if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass,
3327 MRI)) {
3328 LLVM_DEBUG(dbgs() << "Failed to constrain G_ZEXT source\n");
3329 return false;
3330 }
3331
3332 I.eraseFromParent();
3333 return true;
3334 }
3335 }
3336
3337 if (DstSize == 64) {
3338 if (Opcode != TargetOpcode::G_SEXT_INREG) {
3339 // FIXME: Can we avoid manually doing this?
3340 if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass,
3341 MRI)) {
3342 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
3343 << " operand\n");
3344 return false;
3345 }
3346 SrcReg = MIB.buildInstr(AArch64::SUBREG_TO_REG,
3347 {&AArch64::GPR64RegClass}, {})
3348 .addImm(0)
3349 .addUse(SrcReg)
3350 .addImm(AArch64::sub_32)
3351 .getReg(0);
3352 }
3353
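// SBFM/UBFM with immr = 0 and imms = SrcSize - 1 copy bits [SrcSize-1:0] of
// the source and sign- or zero-extend them to the full register width; these
// encodings are the SXTB/SXTH/SXTW and UXTB/UXTH aliases.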
3354 ExtI = MIB.buildInstr(IsSigned ? AArch64::SBFMXri : AArch64::UBFMXri,
3355 {DefReg}, {SrcReg})
3356 .addImm(0)
3357 .addImm(SrcSize - 1);
3358 } else if (DstSize <= 32) {
3359 ExtI = MIB.buildInstr(IsSigned ? AArch64::SBFMWri : AArch64::UBFMWri,
3360 {DefReg}, {SrcReg})
3361 .addImm(0)
3362 .addImm(SrcSize - 1);
3363 } else {
3364 return false;
3365 }
3366
3367 constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
3368 I.eraseFromParent();
3369 return true;
3370 }
3371
3372 case TargetOpcode::G_SITOFP:
3373 case TargetOpcode::G_UITOFP:
3374 case TargetOpcode::G_FPTOSI:
3375 case TargetOpcode::G_FPTOUI: {
3376 const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
3377 SrcTy = MRI.getType(I.getOperand(1).getReg());
3378 const unsigned NewOpc = selectFPConvOpc(Opcode, DstTy, SrcTy);
3379 if (NewOpc == Opcode)
3380 return false;
3381
3382 I.setDesc(TII.get(NewOpc));
3383 constrainSelectedInstRegOperands(I, TII, TRI, RBI);
3384 I.setFlags(MachineInstr::NoFPExcept);
3385
3386 return true;
3387 }
3388
3389 case TargetOpcode::G_FREEZE:
3390 return selectCopy(I, TII, MRI, TRI, RBI);
3391
3392 case TargetOpcode::G_INTTOPTR:
3393 // The importer is currently unable to import pointer types since they
3394 // didn't exist in SelectionDAG.
3395 return selectCopy(I, TII, MRI, TRI, RBI);
3396
3397 case TargetOpcode::G_BITCAST:
3398 // Imported SelectionDAG rules can handle every bitcast except those that
3399 // bitcast from a type to the same type. Ideally, these shouldn't occur
3400 // but we might not run an optimizer that deletes them. The other exception
3401 // is bitcasts involving pointer types, as SelectionDAG has no knowledge
3402 // of them.
3403 return selectCopy(I, TII, MRI, TRI, RBI);
3404
3405 case TargetOpcode::G_SELECT: {
3406 auto &Sel = cast<GSelect>(I);
3407 const Register CondReg = Sel.getCondReg();
3408 const Register TReg = Sel.getTrueReg();
3409 const Register FReg = Sel.getFalseReg();
3410
3411 if (tryOptSelect(Sel))
3412 return true;
3413
3414 // Make sure to use an unused vreg instead of wzr, so that the peephole
3415 // optimizations will be able to optimize these.
3416 Register DeadVReg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
3417 auto TstMI = MIB.buildInstr(AArch64::ANDSWri, {DeadVReg}, {CondReg})
3418 .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
3419 constrainSelectedInstRegOperands(*TstMI, TII, TRI, RBI);
3420 if (!emitSelect(Sel.getReg(0), TReg, FReg, AArch64CC::NE, MIB))
3421 return false;
3422 Sel.eraseFromParent();
3423 return true;
3424 }
3425 case TargetOpcode::G_ICMP: {
3426 if (Ty.isVector())
3427 return selectVectorICmp(I, MRI);
3428
3429 if (Ty != LLT::scalar(32)) {
3430 LLVM_DEBUG(dbgs() << "G_ICMP result has type: " << Ty
3431 << ", expected: " << LLT::scalar(32) << '\n');
3432 return false;
3433 }
3434
3435 auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());
3436 const AArch64CC::CondCode InvCC =
3437 changeICMPPredToAArch64CC(CmpInst::getInversePredicate(Pred));
3438 emitIntegerCompare(I.getOperand(2), I.getOperand(3), I.getOperand(1), MIB);
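// CSINC Wd, WZR, WZR, cc returns 0 when cc holds and 1 otherwise, so passing
// the inverted condition (the CSET idiom) materializes a 1 exactly when the
// original predicate is satisfied by the compare above.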
3439 emitCSINC(/*Dst=*/I.getOperand(0).getReg(), /*Src1=*/AArch64::WZR,
3440 /*Src2=*/AArch64::WZR, InvCC, MIB);
3441 I.eraseFromParent();
3442 return true;
3443 }
3444
3445 case TargetOpcode::G_FCMP: {
3446 CmpInst::Predicate Pred =
3447 static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());
3448 if (!emitFPCompare(I.getOperand(2).getReg(), I.getOperand(3).getReg(), MIB,
3449 Pred) ||
3450 !emitCSetForFCmp(I.getOperand(0).getReg(), Pred, MIB))
3451 return false;
3452 I.eraseFromParent();
3453 return true;
3454 }
3455 case TargetOpcode::G_VASTART:
3456 return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI)
3457 : selectVaStartAAPCS(I, MF, MRI);
3458 case TargetOpcode::G_INTRINSIC:
3459 return selectIntrinsic(I, MRI);
3460 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3461 return selectIntrinsicWithSideEffects(I, MRI);
3462 case TargetOpcode::G_IMPLICIT_DEF: {
3463 I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
3464 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
3465 const Register DstReg = I.getOperand(0).getReg();
3466 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
3467 const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(DstTy, DstRB);
3468 RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
3469 return true;
3470 }
3471 case TargetOpcode::G_BLOCK_ADDR: {
3472 if (TM.getCodeModel() == CodeModel::Large) {
3473 materializeLargeCMVal(I, I.getOperand(1).getBlockAddress(), 0);
3474 I.eraseFromParent();
3475 return true;
3476 } else {
3477 I.setDesc(TII.get(AArch64::MOVaddrBA));
3478 auto MovMI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::MOVaddrBA),
3479 I.getOperand(0).getReg())
3480 .addBlockAddress(I.getOperand(1).getBlockAddress(),
3481 /* Offset */ 0, AArch64II::MO_PAGE)
3482 .addBlockAddress(
3483 I.getOperand(1).getBlockAddress(), /* Offset */ 0,
3484 AArch64II::MO_NC | AArch64II::MO_PAGEOFF);
3485 I.eraseFromParent();
3486 return constrainSelectedInstRegOperands(*MovMI, TII, TRI, RBI);
3487 }
3488 }
3489 case AArch64::G_DUP: {
3490 // When the scalar of G_DUP is an s8/s16 GPR, it can't be selected by the
3491 // imported patterns, so do it manually here. Avoiding the s16 GPR case
3492 // entirely is difficult because at RegBankSelect we may end up pessimizing
3493 // the FPR case if we add an anyextend to fix this. Manual selection is the
3494 // most robust solution for now.
3495 if (RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI)->getID() !=
3496 AArch64::GPRRegBankID)
3497 return false; // We expect the fpr regbank case to be imported.
3498 LLT VecTy = MRI.getType(I.getOperand(0).getReg());
3499 if (VecTy == LLT::fixed_vector(8, 8))
3500 I.setDesc(TII.get(AArch64::DUPv8i8gpr));
3501 else if (VecTy == LLT::fixed_vector(16, 8))
3502 I.setDesc(TII.get(AArch64::DUPv16i8gpr));
3503 else if (VecTy == LLT::fixed_vector(4, 16))
3504 I.setDesc(TII.get(AArch64::DUPv4i16gpr));
3505 else if (VecTy == LLT::fixed_vector(8, 16))
3506 I.setDesc(TII.get(AArch64::DUPv8i16gpr));
3507 else
3508 return false;
3509 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
3510 }
3511 case TargetOpcode::G_BUILD_VECTOR:
3512 return selectBuildVector(I, MRI);
3513 case TargetOpcode::G_MERGE_VALUES:
3514 return selectMergeValues(I, MRI);
3515 case TargetOpcode::G_UNMERGE_VALUES:
3516 return selectUnmergeValues(I, MRI);
3517 case TargetOpcode::G_SHUFFLE_VECTOR:
3518 return selectShuffleVector(I, MRI);
3519 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3520 return selectExtractElt(I, MRI);
3521 case TargetOpcode::G_INSERT_VECTOR_ELT:
3522 return selectInsertElt(I, MRI);
3523 case TargetOpcode::G_CONCAT_VECTORS:
3524 return selectConcatVectors(I, MRI);
3525 case TargetOpcode::G_JUMP_TABLE:
3526 return selectJumpTable(I, MRI);
3527 case TargetOpcode::G_VECREDUCE_ADD:
3528 return selectReduction(I, MRI);
3529 case TargetOpcode::G_MEMCPY:
3530 case TargetOpcode::G_MEMCPY_INLINE:
3531 case TargetOpcode::G_MEMMOVE:
3532 case TargetOpcode::G_MEMSET:
3533 assert(STI.hasMOPS() && "Shouldn't get here without +mops feature");
3534 return selectMOPS(I, MRI);
3535 }
3536
3537 return false;
3538}
3539
3540bool AArch64InstructionSelector::selectReduction(MachineInstr &I,
3541 MachineRegisterInfo &MRI) {
3542 Register VecReg = I.getOperand(1).getReg();
3543 LLT VecTy = MRI.getType(VecReg);
3544 if (I.getOpcode() == TargetOpcode::G_VECREDUCE_ADD) {
3545 // For <2 x i32> ADDPv2i32 generates an FPR64 value, so we need to emit
3546 // a subregister copy afterwards.
3547 if (VecTy == LLT::fixed_vector(2, 32)) {
3548 Register DstReg = I.getOperand(0).getReg();
3549 auto AddP = MIB.buildInstr(AArch64::ADDPv2i32, {&AArch64::FPR64RegClass},
3550 {VecReg, VecReg});
3551 auto Copy = MIB.buildInstr(TargetOpcode::COPY, {DstReg}, {})
3552 .addReg(AddP.getReg(0), 0, AArch64::ssub)
3553 .getReg(0);
3554 RBI.constrainGenericRegister(Copy, AArch64::FPR32RegClass, MRI);
3555 I.eraseFromParent();
3556 return constrainSelectedInstRegOperands(*AddP, TII, TRI, RBI);
3557 }
3558
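// ADDV performs a horizontal add of all lanes into the lowest scalar
// element. There is no ADDV form for v2i64, so the pairwise ADDPv2i64p is
// used for that case instead.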
3559 unsigned Opc = 0;
3560 if (VecTy == LLT::fixed_vector(16, 8))
3561 Opc = AArch64::ADDVv16i8v;
3562 else if (VecTy == LLT::fixed_vector(8, 16))
3563 Opc = AArch64::ADDVv8i16v;
3564 else if (VecTy == LLT::fixed_vector(4, 32))
3565 Opc = AArch64::ADDVv4i32v;
3566 else if (VecTy == LLT::fixed_vector(2, 64))
3567 Opc = AArch64::ADDPv2i64p;
3568 else {
3569 LLVM_DEBUG(dbgs() << "Unhandled type for add reduction");
3570 return false;
3571 }
3572 I.setDesc(TII.get(Opc));
3573 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
3574 }
3575
3576 return false;
3577}
3578
3579bool AArch64InstructionSelector::selectMOPS(MachineInstr &GI,
3580 MachineRegisterInfo &MRI) {
3581 unsigned Mopcode;
3582 switch (GI.getOpcode()) {
3583 case TargetOpcode::G_MEMCPY:
3584 case TargetOpcode::G_MEMCPY_INLINE:
3585 Mopcode = AArch64::MOPSMemoryCopyPseudo;
3586 break;
3587 case TargetOpcode::G_MEMMOVE:
3588 Mopcode = AArch64::MOPSMemoryMovePseudo;
3589 break;
3590 case TargetOpcode::G_MEMSET:
3591 // For tagged memset see llvm.aarch64.mops.memset.tag
3592 Mopcode = AArch64::MOPSMemorySetPseudo;
3593 break;
3594 }
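// These pseudos carry the MOPS semantics through register allocation; they
// are expected to be expanded later into the three-instruction
// prologue/main/epilogue MOPS sequences (e.g. CPYFP/CPYFM/CPYFE for memcpy).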
3595
3596 auto &DstPtr = GI.getOperand(0);
3597 auto &SrcOrVal = GI.getOperand(1);
3598 auto &Size = GI.getOperand(2);
3599
3600 // Create copies of the registers that can be clobbered.
3601 const Register DstPtrCopy = MRI.cloneVirtualRegister(DstPtr.getReg());
3602 const Register SrcValCopy = MRI.cloneVirtualRegister(SrcOrVal.getReg());
3603 const Register SizeCopy = MRI.cloneVirtualRegister(Size.getReg());
3604
3605 const bool IsSet = Mopcode == AArch64::MOPSMemorySetPseudo;
3606 const auto &SrcValRegClass =
3607 IsSet ? AArch64::GPR64RegClass : AArch64::GPR64commonRegClass;
3608
3609 // Constrain to specific registers
3610 RBI.constrainGenericRegister(DstPtrCopy, AArch64::GPR64commonRegClass, MRI);
3611 RBI.constrainGenericRegister(SrcValCopy, SrcValRegClass, MRI);
3612 RBI.constrainGenericRegister(SizeCopy, AArch64::GPR64RegClass, MRI);
3613
3614 MIB.buildCopy(DstPtrCopy, DstPtr);
3615 MIB.buildCopy(SrcValCopy, SrcOrVal);
3616 MIB.buildCopy(SizeCopy, Size);
3617
3618 // New instruction uses the copied registers because it must update them.
3619 // The defs are not used since they don't exist in G_MEM*. They are still
3620 // tied.
3621 // Note: order of operands is different from G_MEMSET, G_MEMCPY, G_MEMMOVE
3622 Register DefDstPtr = MRI.createVirtualRegister(&AArch64::GPR64commonRegClass);
3623 Register DefSize = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
3624 if (IsSet) {
3625 MIB.buildInstr(Mopcode, {DefDstPtr, DefSize},
3626 {DstPtrCopy, SizeCopy, SrcValCopy});
3627 } else {
3628 Register DefSrcPtr = MRI.createVirtualRegister(&SrcValRegClass);
3629 MIB.buildInstr(Mopcode, {DefDstPtr, DefSrcPtr, DefSize},
3630 {DstPtrCopy, SrcValCopy, SizeCopy});
3631 }
3632
3633 GI.eraseFromParent();
3634 return true;
3635}
3636
3637bool AArch64InstructionSelector::selectBrJT(MachineInstr &I,
3638 MachineRegisterInfo &MRI) {
3639 assert(I.getOpcode() == TargetOpcode::G_BRJT && "Expected G_BRJT");
3640 Register JTAddr = I.getOperand(0).getReg();
3641 unsigned JTI = I.getOperand(1).getIndex();
3642 Register Index = I.getOperand(2).getReg();
3643
3644 Register TargetReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
3645 Register ScratchReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
3646
3647 MF->getInfo<AArch64FunctionInfo>()->setJumpTableEntryInfo(JTI, 4, nullptr);
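// JumpTableDest32 computes the branch target from the jump-table base plus
// the selected 32-bit entry (scaled by the 4-byte entry size registered
// above), producing the address in TargetReg and using ScratchReg as a
// temporary.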
3648 auto JumpTableInst = MIB.buildInstr(AArch64::JumpTableDest32,
3649 {TargetReg, ScratchReg}, {JTAddr, Index})
3650 .addJumpTableIndex(JTI);
3651 // Save the jump table info.
3652 MIB.buildInstr(TargetOpcode::JUMP_TABLE_DEBUG_INFO, {},
3653 {static_cast<int64_t>(JTI)});
3654 // Build the indirect branch.
3655 MIB.buildInstr(AArch64::BR, {}, {TargetReg});
3656 I.eraseFromParent();
3657 return constrainSelectedInstRegOperands(*JumpTableInst, TII, TRI, RBI);
3658}
3659
3660bool AArch64InstructionSelector::selectJumpTable(MachineInstr &I,
3661 MachineRegisterInfo &MRI) {
3662 assert(I.getOpcode() == TargetOpcode::G_JUMP_TABLE && "Expected jump table");
3663 assert(I.getOperand(1).isJTI() && "Jump table op should have a JTI!");
3664
3665 Register DstReg = I.getOperand(0).getReg();
3666 unsigned JTI = I.getOperand(1).getIndex();
3667 // We generate a MOVaddrJT which will get expanded to an ADRP + ADD later.
3668 auto MovMI =
3669 MIB.buildInstr(AArch64::MOVaddrJT, {DstReg}, {})
3670 .addJumpTableIndex(JTI, AArch64II::MO_PAGE)
3671 .addJumpTableIndex(JTI, AArch64II::MO_NC | AArch64II::MO_PAGEOFF);
3672 I.eraseFromParent();
3673 return constrainSelectedInstRegOperands(*MovMI, TII, TRI, RBI);
3674}
3675
3676bool AArch64InstructionSelector::selectTLSGlobalValue(
3677 MachineInstr &I, MachineRegisterInfo &MRI) {
3678 if (!STI.isTargetMachO())
3679 return false;
3680 MachineFunction &MF = *I.getParent()->getParent();
3681 MF.getFrameInfo().setAdjustsStack(true);
3682
3683 const auto &GlobalOp = I.getOperand(1);
3684 assert(GlobalOp.getOffset() == 0 &&
3685 "Shouldn't have an offset on TLS globals!");
3686 const GlobalValue &GV = *GlobalOp.getGlobal();
3687
3688 auto LoadGOT =
3689 MIB.buildInstr(AArch64::LOADgot, {&AArch64::GPR64commonRegClass}, {})
3690 .addGlobalAddress(&GV, 0, AArch64II::MO_TLS);
3691
3692 auto Load = MIB.buildInstr(AArch64::LDRXui, {&AArch64::GPR64commonRegClass},
3693 {LoadGOT.getReg(0)})
3694 .addImm(0);
3695
3696 MIB.buildCopy(Register(AArch64::X0), LoadGOT.getReg(0));
3697 // TLS calls preserve all registers except those that absolutely must be
3698 // trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not be
3699 // silly).
3700 MIB.buildInstr(getBLRCallOpcode(MF), {}, {Load})
3701 .addUse(AArch64::X0, RegState::Implicit)
3702 .addDef(AArch64::X0, RegState::Implicit)
3703 .addRegMask(TRI.getTLSCallPreservedMask());
3704
3705 MIB.buildCopy(I.getOperand(0).getReg(), Register(AArch64::X0));
3706 RBI.constrainGenericRegister(I.getOperand(0).getReg(), AArch64::GPR64RegClass,
3707 MRI);
3708 I.eraseFromParent();
3709 return true;
3710}
3711
3712bool AArch64InstructionSelector::selectVectorICmp(
3713 MachineInstr &I, MachineRegisterInfo &MRI) {
3714 Register DstReg = I.getOperand(0).getReg();
3715 LLT DstTy = MRI.getType(DstReg);
3716 Register SrcReg = I.getOperand(2).getReg();
3717 Register Src2Reg = I.getOperand(3).getReg();
3718 LLT SrcTy = MRI.getType(SrcReg);
3719
3720 unsigned SrcEltSize = SrcTy.getElementType().getSizeInBits();
3721 unsigned NumElts = DstTy.getNumElements();
3722
3723 // First index is element size, 0 == 8b, 1 == 16b, 2 == 32b, 3 == 64b
3724 // Second index is num elts, 0 == v2, 1 == v4, 2 == v8, 3 == v16
3725 // Third index is cc opcode:
3726 // 0 == eq
3727 // 1 == ugt
3728 // 2 == uge
3729 // 3 == ult
3730 // 4 == ule
3731 // 5 == sgt
3732 // 6 == sge
3733 // 7 == slt
3734 // 8 == sle
3735 // ne is done by negating 'eq' result.
3736
3737 // This table below assumes that for some comparisons the operands will be
3738 // commuted.
3739 // ult op == commute + ugt op
3740 // ule op == commute + uge op
3741 // slt op == commute + sgt op
3742 // sle op == commute + sge op
3743 unsigned PredIdx = 0;
3744 bool SwapOperands = false;
3745 CmpInst::Predicate Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
3746 switch (Pred) {
3747 case CmpInst::ICMP_NE:
3748 case CmpInst::ICMP_EQ:
3749 PredIdx = 0;
3750 break;
3751 case CmpInst::ICMP_UGT:
3752 PredIdx = 1;
3753 break;
3754 case CmpInst::ICMP_UGE:
3755 PredIdx = 2;
3756 break;
3757 case CmpInst::ICMP_ULT:
3758 PredIdx = 3;
3759 SwapOperands = true;
3760 break;
3761 case CmpInst::ICMP_ULE:
3762 PredIdx = 4;
3763 SwapOperands = true;
3764 break;
3765 case CmpInst::ICMP_SGT:
3766 PredIdx = 5;
3767 break;
3768 case CmpInst::ICMP_SGE:
3769 PredIdx = 6;
3770 break;
3771 case CmpInst::ICMP_SLT:
3772 PredIdx = 7;
3773 SwapOperands = true;
3774 break;
3775 case CmpInst::ICMP_SLE:
3776 PredIdx = 8;
3777 SwapOperands = true;
3778 break;
3779 default:
3780 llvm_unreachable("Unhandled icmp predicate");
3781 return false;
3782 }
3783
3784 // This table obviously should be tablegen'd when we have our GISel native
3785 // tablegen selector.
3786
3787 static const unsigned OpcTable[4][4][9] = {
3788 {
3789 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3790 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3791 0 /* invalid */},
3792 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3793 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3794 0 /* invalid */},
3795 {AArch64::CMEQv8i8, AArch64::CMHIv8i8, AArch64::CMHSv8i8,
3796 AArch64::CMHIv8i8, AArch64::CMHSv8i8, AArch64::CMGTv8i8,
3797 AArch64::CMGEv8i8, AArch64::CMGTv8i8, AArch64::CMGEv8i8},
3798 {AArch64::CMEQv16i8, AArch64::CMHIv16i8, AArch64::CMHSv16i8,
3799 AArch64::CMHIv16i8, AArch64::CMHSv16i8, AArch64::CMGTv16i8,
3800 AArch64::CMGEv16i8, AArch64::CMGTv16i8, AArch64::CMGEv16i8}
3801 },
3802 {
3803 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3804 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3805 0 /* invalid */},
3806 {AArch64::CMEQv4i16, AArch64::CMHIv4i16, AArch64::CMHSv4i16,
3807 AArch64::CMHIv4i16, AArch64::CMHSv4i16, AArch64::CMGTv4i16,
3808 AArch64::CMGEv4i16, AArch64::CMGTv4i16, AArch64::CMGEv4i16},
3809 {AArch64::CMEQv8i16, AArch64::CMHIv8i16, AArch64::CMHSv8i16,
3810 AArch64::CMHIv8i16, AArch64::CMHSv8i16, AArch64::CMGTv8i16,
3811 AArch64::CMGEv8i16, AArch64::CMGTv8i16, AArch64::CMGEv8i16},
3812 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3813 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3814 0 /* invalid */}
3815 },
3816 {
3817 {AArch64::CMEQv2i32, AArch64::CMHIv2i32, AArch64::CMHSv2i32,
3818 AArch64::CMHIv2i32, AArch64::CMHSv2i32, AArch64::CMGTv2i32,
3819 AArch64::CMGEv2i32, AArch64::CMGTv2i32, AArch64::CMGEv2i32},
3820 {AArch64::CMEQv4i32, AArch64::CMHIv4i32, AArch64::CMHSv4i32,
3821 AArch64::CMHIv4i32, AArch64::CMHSv4i32, AArch64::CMGTv4i32,
3822 AArch64::CMGEv4i32, AArch64::CMGTv4i32, AArch64::CMGEv4i32},
3823 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3824 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3825 0 /* invalid */},
3826 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3827 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3828 0 /* invalid */}
3829 },
3830 {
3831 {AArch64::CMEQv2i64, AArch64::CMHIv2i64, AArch64::CMHSv2i64,
3832 AArch64::CMHIv2i64, AArch64::CMHSv2i64, AArch64::CMGTv2i64,
3833 AArch64::CMGEv2i64, AArch64::CMGTv2i64, AArch64::CMGEv2i64},
3834 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3835 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3836 0 /* invalid */},
3837 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3838 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3839 0 /* invalid */},
3840 {0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3841 0 /* invalid */, 0 /* invalid */, 0 /* invalid */, 0 /* invalid */,
3842 0 /* invalid */}
3843 },
3844 };
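// Index the table by element size (8/16/32/64 bits -> 0..3) and by lane
// count (2/4/8/16 -> 0..3) using Log2_32, then by the predicate index chosen
// above.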
3845 unsigned EltIdx = Log2_32(SrcEltSize / 8);
3846 unsigned NumEltsIdx = Log2_32(NumElts / 2);
3847 unsigned Opc = OpcTable[EltIdx][NumEltsIdx][PredIdx];
3848 if (!Opc) {
3849 LLVM_DEBUG(dbgs() << "Could not map G_ICMP to cmp opcode");
3850 return false;
3851 }
3852
3853 const RegisterBank &VecRB = *RBI.getRegBank(SrcReg, MRI, TRI);
3854 const TargetRegisterClass *SrcRC =
3855 getRegClassForTypeOnBank(SrcTy, VecRB, true);
3856 if (!SrcRC) {
3857 LLVM_DEBUG(dbgs() << "Could not determine source register class.\n");
3858 return false;
3859 }
3860
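// The CM* instructions set each lane to all ones when the comparison holds
// and all zeros otherwise, so 'ne' is produced by emitting CMEQ and then
// inverting the result with a vector NOT.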
3861 unsigned NotOpc = Pred == ICmpInst::ICMP_NE ? AArch64::NOTv8i8 : 0;
3862 if (SrcTy.getSizeInBits() == 128)
3863 NotOpc = NotOpc ? AArch64::NOTv16i8 : 0;
3864
3865 if (SwapOperands)
3866 std::swap(SrcReg, Src2Reg);
3867
3868 auto Cmp = MIB.buildInstr(Opc, {SrcRC}, {SrcReg, Src2Reg});
3869 constrainSelectedInstRegOperands(*Cmp, TII, TRI, RBI);
3870
3871 // Invert if we had a 'ne' cc.
3872 if (NotOpc) {
3873 Cmp = MIB.buildInstr(NotOpc, {DstReg}, {Cmp});
3874 constrainSelectedInstRegOperands(*Cmp, TII, TRI, RBI);
3875 } else {
3876 MIB.buildCopy(DstReg, Cmp.getReg(0));
3877 }
3878 RBI.constrainGenericRegister(DstReg, *SrcRC, MRI);
3879 I.eraseFromParent();
3880 return true;
3881}
3882
3883MachineInstr *AArch64InstructionSelector::emitScalarToVector(
3884 unsigned EltSize, const TargetRegisterClass *DstRC, Register Scalar,
3885 MachineIRBuilder &MIRBuilder) const {
3886 auto Undef = MIRBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF, {DstRC}, {});
3887
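// The scalar is placed into lane 0 by inserting it into the appropriate
// low subregister (bsub/hsub/ssub/dsub) of an IMPLICIT_DEF vector register.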
3888 auto BuildFn = [&](unsigned SubregIndex) {
3889 auto Ins =
3890 MIRBuilder
3891 .buildInstr(TargetOpcode::INSERT_SUBREG, {DstRC}, {Undef, Scalar})
3892 .addImm(SubregIndex);
3893 constrainSelectedInstRegOperands(*Undef, TII, TRI, RBI);
3894 constrainSelectedInstRegOperands(*Ins, TII, TRI, RBI);
3895 return &*Ins;
3896 };
3897
3898 switch (EltSize) {
3899 case 8:
3900 return BuildFn(AArch64::bsub);
3901 case 16:
3902 return BuildFn(AArch64::hsub);
3903 case 32:
3904 return BuildFn(AArch64::ssub);
3905 case 64:
3906 return BuildFn(AArch64::dsub);
3907 default:
3908 return nullptr;
3909 }
3910}
3911
3912MachineInstr *
3913AArch64InstructionSelector::emitNarrowVector(Register DstReg, Register SrcReg,
3914 MachineIRBuilder &MIB,
3915 MachineRegisterInfo &MRI) const {
3916 LLT DstTy = MRI.getType(DstReg);
3917 const TargetRegisterClass *RC =
3918 getRegClassForTypeOnBank(DstTy, *RBI.getRegBank(SrcReg, MRI, TRI));
3919 if (RC != &AArch64::FPR32RegClass && RC != &AArch64::FPR64RegClass) {
3920 LLVM_DEBUG(dbgs() << "Unsupported register class!\n");
3921 return nullptr;
3922 }
3923 unsigned SubReg = 0;
3924 if (!getSubRegForClass(RC, TRI, SubReg))
3925 return nullptr;
3926 if (SubReg != AArch64::ssub && SubReg != AArch64::dsub) {
3927 LLVM_DEBUG(dbgs() << "Unsupported destination size! ("
3928 << DstTy.getSizeInBits() << "\n");
3929 return nullptr;
3930 }
3931 auto Copy = MIB.buildInstr(TargetOpcode::COPY, {DstReg}, {})
3932 .addReg(SrcReg, 0, SubReg);
3933 RBI.constrainGenericRegister(DstReg, *RC, MRI);
3934 return Copy;
3935}
3936
3937bool AArch64InstructionSelector::selectMergeValues(
3938 MachineInstr &I, MachineRegisterInfo &MRI) {
3939 assert(I.getOpcode() == TargetOpcode::G_MERGE_VALUES && "unexpected opcode");
3940 const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
3941 const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
3942 assert(!DstTy.isVector() && !SrcTy.isVector() && "invalid merge operation");
3943 const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI);
3944
3945 if (I.getNumOperands() != 3)
3946 return false;
3947
3948 // Merging 2 s64s into an s128.
3949 if (DstTy == LLT::scalar(128)) {
3950 if (SrcTy.getSizeInBits() != 64)
3951 return false;
3952 Register DstReg = I.getOperand(0).getReg();
3953 Register Src1Reg = I.getOperand(1).getReg();
3954 Register Src2Reg = I.getOperand(2).getReg();
3955 auto Tmp = MIB.buildInstr(TargetOpcode::IMPLICIT_DEF, {DstTy}, {});
3956 MachineInstr *InsMI = emitLaneInsert(std::nullopt, Tmp.getReg(0), Src1Reg,
3957 /* LaneIdx */ 0, RB, MIB);
3958 if (!InsMI)
3959 return false;
3960 MachineInstr *Ins2MI = emitLaneInsert(DstReg, InsMI->getOperand(0).getReg(),
3961 Src2Reg, /* LaneIdx */ 1, RB, MIB);
3962 if (!Ins2MI)
3963 return false;
3964 constrainSelectedInstRegOperands(*InsMI, TII, TRI, RBI);
3965 constrainSelectedInstRegOperands(*Ins2MI, TII, TRI, RBI);
3966 I.eraseFromParent();
3967 return true;
3968 }
3969
3970 if (RB.getID() != AArch64::GPRRegBankID)
3971 return false;
3972
3973 if (DstTy.getSizeInBits() != 64 || SrcTy.getSizeInBits() != 32)
3974 return false;
3975
3976 auto *DstRC = &AArch64::GPR64RegClass;
3977 Register SubToRegDef = MRI.createVirtualRegister(DstRC);
3978 MachineInstr &SubRegMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
3979 TII.get(TargetOpcode::SUBREG_TO_REG))
3980 .addDef(SubToRegDef)
3981 .addImm(0)
3982 .addUse(I.getOperand(1).getReg())
3983 .addImm(AArch64::sub_32);
3984 Register SubToRegDef2 = MRI.createVirtualRegister(DstRC);
3985 // Need to anyext the second scalar before we can use bfm
3986 MachineInstr &SubRegMI2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
3987 TII.get(TargetOpcode::SUBREG_TO_REG))
3988 .addDef(SubToRegDef2)
3989 .addImm(0)
3990 .addUse(I.getOperand(2).getReg())
3991 .addImm(AArch64::sub_32);
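// BFMXri with immr = 32 and imms = 31 acts as BFI #32, #32: it keeps the low
// 32 bits from the tied first operand and inserts the low 32 bits of the
// second operand into bits [63:32], forming the merged 64-bit value.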
3992 MachineInstr &BFM =
3993 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::BFMXri))
3994 .addDef(I.getOperand(0).getReg())
3995 .addUse(SubToRegDef)
3996 .addUse(SubToRegDef2)
3997 .addImm(32)
3998 .addImm(31);
3999 constrainSelectedInstRegOperands(SubRegMI, TII, TRI, RBI);
4000 constrainSelectedInstRegOperands(SubRegMI2, TII, TRI, RBI);
4001 constrainSelectedInstRegOperands(BFM, TII, TRI, RBI);
4002 I.eraseFromParent();
4003 return true;
4004}
4005
4006static bool getLaneCopyOpcode(unsigned &CopyOpc, unsigned &ExtractSubReg,
4007 const unsigned EltSize) {
4008 // Choose a lane copy opcode and subregister based off of the size of the
4009 // vector's elements.
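// DUPi8/16/32/64 copy a single lane of a 128-bit vector register into a
// scalar FPR; the subregister index is used instead when lane 0 can be read
// with a plain subregister copy.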
4010 switch (EltSize) {
4011 case 8:
4012 CopyOpc = AArch64::DUPi8;
4013 ExtractSubReg = AArch64::bsub;
4014 break;
4015 case 16:
4016 CopyOpc = AArch64::DUPi16;
4017 ExtractSubReg = AArch64::hsub;
4018 break;
4019 case 32:
4020 CopyOpc = AArch64::DUPi32;
4021 ExtractSubReg = AArch64::ssub;
4022 break;
4023 case 64:
4024 CopyOpc = AArch64::DUPi64;
4025 ExtractSubReg = AArch64::dsub;
4026 break;
4027 default:
4028 // Unknown size, bail out.
4029 LLVM_DEBUG(dbgs() << "Elt size '" << EltSize << "' unsupported.\n");
4030 return false;
4031 }
4032 return true;
4033}
4034
4035MachineInstr *AArch64InstructionSelector::emitExtractVectorElt(
4036 std::optional<Register> DstReg, const RegisterBank &DstRB, LLT ScalarTy,
4037 Register VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const {
4038 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
4039 unsigned CopyOpc = 0;
4040 unsigned ExtractSubReg = 0;
4041 if (!getLaneCopyOpcode(CopyOpc, ExtractSubReg, ScalarTy.getSizeInBits())) {
4042 LLVM_DEBUG(
4043 dbgs() << "Couldn't determine lane copy opcode for instruction.\n");
4044 return nullptr;
4045 }
4046
4047 const TargetRegisterClass *DstRC =
4048 getRegClassForTypeOnBank(ScalarTy, DstRB, true);
4049 if (!DstRC) {
4050 LLVM_DEBUG(dbgs() << "Could not determine destination register class.\n");
4051 return nullptr;
4052 }
4053
4054 const RegisterBank &VecRB = *RBI.getRegBank(VecReg, MRI, TRI);
4055 const LLT &VecTy = MRI.getType(VecReg);
4056 const TargetRegisterClass *VecRC =
4057 getRegClassForTypeOnBank(VecTy, VecRB, true);
4058 if (!VecRC) {
4059 LLVM_DEBUG(dbgs() << "Could not determine source register class.\n");
4060 return nullptr;
4061 }
4062
4063 // The register that we're going to copy into.
4064 Register InsertReg = VecReg;
4065 if (!DstReg)
4066 DstReg = MRI.createVirtualRegister(DstRC);
4067 // If the lane index is 0, we just use a subregister COPY.
4068 if (LaneIdx == 0) {
4069 auto Copy = MIRBuilder.buildInstr(TargetOpcode::COPY, {*DstReg}, {})
4070 .addReg(VecReg, 0, ExtractSubReg);
4071 RBI.constrainGenericRegister(*DstReg, *DstRC, MRI);
4072 return &*Copy;
4073 }
4074
4075 // Lane copies require 128-bit wide registers. If we're dealing with an
4076 // unpacked vector, then we need to move up to that width. Insert an implicit
4077 // def and a subregister insert to get us there.
4078 if (VecTy.getSizeInBits() != 128) {
4079 MachineInstr *ScalarToVector = emitScalarToVector(
4080 VecTy.getSizeInBits(), &AArch64::FPR128RegClass, VecReg, MIRBuilder);
4081 if (!ScalarToVector)
4082 return nullptr;
4083 InsertReg = ScalarToVector->getOperand(0).getReg();
4084 }
4085
4086 MachineInstr *LaneCopyMI =
4087 MIRBuilder.buildInstr(CopyOpc, {*DstReg}, {InsertReg}).addImm(LaneIdx);
4088 constrainSelectedInstRegOperands(*LaneCopyMI, TII, TRI, RBI);
4089
4090 // Make sure that we actually constrain the initial copy.
4091 RBI.constrainGenericRegister(*DstReg, *DstRC, MRI);
4092 return LaneCopyMI;
4093}
4094
4095bool AArch64InstructionSelector::selectExtractElt(
4096 MachineInstr &I, MachineRegisterInfo &MRI) {
4097 assert(I.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT &&
4098 "unexpected opcode!");
4099 Register DstReg = I.getOperand(0).getReg();
4100 const LLT NarrowTy = MRI.getType(DstReg);
4101 const Register SrcReg = I.getOperand(1).getReg();
4102 const LLT WideTy = MRI.getType(SrcReg);
4103 (void)WideTy;
4104 assert(WideTy.getSizeInBits() >= NarrowTy.getSizeInBits() &&
4105 "source register size too small!");
4106 assert(!NarrowTy.isVector() && "cannot extract vector into vector!");
4107
4108 // Need the lane index to determine the correct copy opcode.
4109 MachineOperand &LaneIdxOp = I.getOperand(2);
4110 assert(LaneIdxOp.isReg() && "Lane index operand was not a register?");
4111
4112 if (RBI.getRegBank(DstReg, MRI, TRI)->getID() != AArch64::FPRRegBankID) {
4113 LLVM_DEBUG(dbgs() << "Cannot extract into GPR.\n");
4114 return false;
4115 }
4116
4117 // Find the index to extract from.
4118 auto VRegAndVal = getIConstantVRegValWithLookThrough(LaneIdxOp.getReg(), MRI);
4119 if (!VRegAndVal)
4120 return false;
4121 unsigned LaneIdx = VRegAndVal->Value.getSExtValue();
4122
4123
4124 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
4125 MachineInstr *Extract = emitExtractVectorElt(DstReg, DstRB, NarrowTy, SrcReg,
4126 LaneIdx, MIB);
4127 if (!Extract)
4128 return false;
4129
4130 I.eraseFromParent();
4131 return true;
4132}
4133
4134bool AArch64InstructionSelector::selectSplitVectorUnmerge(
4135 MachineInstr &I, MachineRegisterInfo &MRI) {
4136 unsigned NumElts = I.getNumOperands() - 1;
4137 Register SrcReg = I.getOperand(NumElts).getReg();
4138 const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
4139 const LLT SrcTy = MRI.getType(SrcReg);
4140
4141 assert(NarrowTy.isVector() && "Expected an unmerge into vectors");
4142 if (SrcTy.getSizeInBits() > 128) {
4143 LLVM_DEBUG(dbgs() << "Unexpected vector type for vec split unmerge");
4144 return false;
4145 }
4146
4147 // We implement a split vector operation by treating the sub-vectors as
4148 // scalars and extracting them.
4149 const RegisterBank &DstRB =
4150 *RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI);
4151 for (unsigned OpIdx = 0; OpIdx < NumElts; ++OpIdx) {
4152 Register Dst = I.getOperand(OpIdx).getReg();
4153 MachineInstr *Extract =
4154 emitExtractVectorElt(Dst, DstRB, NarrowTy, SrcReg, OpIdx, MIB);
4155 if (!Extract)
4156 return false;
4157 }
4158 I.eraseFromParent();
4159 return true;
4160}
4161
4162bool AArch64InstructionSelector::selectUnmergeValues(MachineInstr &I,
4163 MachineRegisterInfo &MRI) {
4164 assert(I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
4165 "unexpected opcode");
4166
4167 // TODO: Handle unmerging into GPRs and from scalars to scalars.
4168 if (RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI)->getID() !=
4169 AArch64::FPRRegBankID ||
4170 RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI)->getID() !=
4171 AArch64::FPRRegBankID) {
4172 LLVM_DEBUG(dbgs() << "Unmerging vector-to-gpr and scalar-to-scalar "
4173 "currently unsupported.\n");
4174 return false;
4175 }
4176
4177 // The last operand is the vector source register, and every other operand is
4178 // a register to unpack into.
4179 unsigned NumElts = I.getNumOperands() - 1;
4180 Register SrcReg = I.getOperand(NumElts).getReg();
4181 const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
4182 const LLT WideTy = MRI.getType(SrcReg);
4183 (void)WideTy;
4184 assert((WideTy.isVector() || WideTy.getSizeInBits() == 128) &&
4185 "can only unmerge from vector or s128 types!");
4186 assert(WideTy.getSizeInBits() > NarrowTy.getSizeInBits() &&
4187 "source register size too small!");
4188
4189 if (!NarrowTy.isScalar())
4190 return selectSplitVectorUnmerge(I, MRI);
4191
4192 // Choose a lane copy opcode and subregister based off of the size of the
4193 // vector's elements.
4194 unsigned CopyOpc = 0;
4195 unsigned ExtractSubReg = 0;
4196 if (!getLaneCopyOpcode(CopyOpc, ExtractSubReg, NarrowTy.getSizeInBits()))
4197 return false;
4198
4199 // Set up for the lane copies.
4200 MachineBasicBlock &MBB = *I.getParent();
4201
4202 // Stores the registers we'll be copying from.
4203 SmallVector<Register, 4> InsertRegs;
4204
4205 // We'll use the first register twice, so we only need NumElts-1 registers.
4206 unsigned NumInsertRegs = NumElts - 1;
4207
4208 // If our elements fit into exactly 128 bits, then we can copy from the source
4209 // directly. Otherwise, we need to do a bit of setup with some subregister
4210 // inserts.
4211 if (NarrowTy.getSizeInBits() * NumElts == 128) {
4212 InsertRegs = SmallVector<Register, 4>(NumInsertRegs, SrcReg);
4213 } else {
4214 // No. We have to perform subregister inserts. For each insert, create an
4215 // implicit def and a subregister insert, and save the register we create.
4216 const TargetRegisterClass *RC = getRegClassForTypeOnBank(
4217 LLT::fixed_vector(NumElts, WideTy.getScalarSizeInBits()),
4218 *RBI.getRegBank(SrcReg, MRI, TRI));
4219 unsigned SubReg = 0;
4220 bool Found = getSubRegForClass(RC, TRI, SubReg);
4221 (void)Found;
4222 assert(Found && "expected to find last operand's subreg idx");
4223 for (unsigned Idx = 0; Idx < NumInsertRegs; ++Idx) {
4224 Register ImpDefReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
4225 MachineInstr &ImpDefMI =
4226 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(TargetOpcode::IMPLICIT_DEF),
4227 ImpDefReg);
4228
4229 // Now, create the subregister insert from SrcReg.
4230 Register InsertReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
4231 MachineInstr &InsMI =
4232 *BuildMI(MBB, I, I.getDebugLoc(),
4233 TII.get(TargetOpcode::INSERT_SUBREG), InsertReg)
4234 .addUse(ImpDefReg)
4235 .addUse(SrcReg)
4236 .addImm(SubReg);
4237
4238 constrainSelectedInstRegOperands(ImpDefMI, TII, TRI, RBI);
4239 constrainSelectedInstRegOperands(InsMI, TII, TRI, RBI);
4240
4241 // Save the register so that we can copy from it after.
4242 InsertRegs.push_back(InsertReg);
4243 }
4244 }
4245
4246 // Now that we've created any necessary subregister inserts, we can
4247 // create the copies.
4248 //
4249 // Perform the first copy separately as a subregister copy.
4250 Register CopyTo = I.getOperand(0).getReg();
4251 auto FirstCopy = MIB.buildInstr(TargetOpcode::COPY, {CopyTo}, {})
4252 .addReg(InsertRegs[0], 0, ExtractSubReg);
4253 constrainSelectedInstRegOperands(*FirstCopy, TII, TRI, RBI);
4254
4255 // Now, perform the remaining copies as vector lane copies.
4256 unsigned LaneIdx = 1;
4257 for (Register InsReg : InsertRegs) {
4258 Register CopyTo = I.getOperand(LaneIdx).getReg();
4259 MachineInstr &CopyInst =
4260 *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CopyOpc), CopyTo)
4261 .addUse(InsReg)
4262 .addImm(LaneIdx);
4263 constrainSelectedInstRegOperands(CopyInst, TII, TRI, RBI);
4264 ++LaneIdx;
4265 }
4266
4267 // Separately constrain the first copy's destination. Because of the
4268 // limitation in constrainOperandRegClass, we can't guarantee that this will
4269 // actually be constrained. So, do it ourselves using the second operand.
4270 const TargetRegisterClass *RC =
4271 MRI.getRegClassOrNull(I.getOperand(1).getReg());
4272 if (!RC) {
4273 LLVM_DEBUG(dbgs() << "Couldn't constrain copy destination.\n");
4274 return false;
4275 }
4276
4277 RBI.constrainGenericRegister(CopyTo, *RC, MRI);
4278 I.eraseFromParent();
4279 return true;
4280}
4281
4282bool AArch64InstructionSelector::selectConcatVectors(
4283 MachineInstr &I, MachineRegisterInfo &MRI) {
4284 assert(I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
4285 "Unexpected opcode");
4286 Register Dst = I.getOperand(0).getReg();
4287 Register Op1 = I.getOperand(1).getReg();
4288 Register Op2 = I.getOperand(2).getReg();
4289 MachineInstr *ConcatMI = emitVectorConcat(Dst, Op1, Op2, MIB);
4290 if (!ConcatMI)
4291 return false;
4292 I.eraseFromParent();
4293 return true;
4294}
4295
4296unsigned
4297AArch64InstructionSelector::emitConstantPoolEntry(const Constant *CPVal,
4298 MachineFunction &MF) const {
4299 Type *CPTy = CPVal->getType();
4300 Align Alignment = MF.getDataLayout().getPrefTypeAlign(CPTy);
4301
4302 MachineConstantPool *MCP = MF.getConstantPool();
4303 return MCP->getConstantPoolIndex(CPVal, Alignment);
4304}
4305
4306MachineInstr *AArch64InstructionSelector::emitLoadFromConstantPool(
4307 const Constant *CPVal, MachineIRBuilder &MIRBuilder) const {
4308 const TargetRegisterClass *RC;
4309 unsigned Opc;
4310 bool IsTiny = TM.getCodeModel() == CodeModel::Tiny;
4311 unsigned Size = MIRBuilder.getDataLayout().getTypeStoreSize(CPVal->getType());
4312 switch (Size) {
4313 case 16:
4314 RC = &AArch64::FPR128RegClass;
4315 Opc = IsTiny ? AArch64::LDRQl : AArch64::LDRQui;
4316 break;
4317 case 8:
4318 RC = &AArch64::FPR64RegClass;
4319 Opc = IsTiny ? AArch64::LDRDl : AArch64::LDRDui;
4320 break;
4321 case 4:
4322 RC = &AArch64::FPR32RegClass;
4323 Opc = IsTiny ? AArch64::LDRSl : AArch64::LDRSui;
4324 break;
4325 case 2:
4326 RC = &AArch64::FPR16RegClass;
4327 Opc = AArch64::LDRHui;
4328 break;
4329 default:
4330 LLVM_DEBUG(dbgs() << "Could not load from constant pool of type "
4331 << *CPVal->getType());
4332 return nullptr;
4333 }
4334
4335 MachineInstr *LoadMI = nullptr;
4336 auto &MF = MIRBuilder.getMF();
4337 unsigned CPIdx = emitConstantPoolEntry(CPVal, MF);
4338 if (IsTiny && (Size == 16 || Size == 8 || Size == 4)) {
4339 // Use load(literal) for tiny code model.
4340 LoadMI = &*MIRBuilder.buildInstr(Opc, {RC}, {}).addConstantPoolIndex(CPIdx);
4341 } else {
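// Otherwise form the address with ADRP (page address of the constant-pool
// entry) and fold the low 12 bits into the load's page-offset operand.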
4342 auto Adrp =
4343 MIRBuilder.buildInstr(AArch64::ADRP, {&AArch64::GPR64RegClass}, {})
4344 .addConstantPoolIndex(CPIdx, 0, AArch64II::MO_PAGE);
4345
4346 LoadMI = &*MIRBuilder.buildInstr(Opc, {RC}, {Adrp})
4347 .addConstantPoolIndex(
4348 CPIdx, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
4349
4350 constrainSelectedInstRegOperands(*Adrp, TII, TRI, RBI);
4351 }
4352
4353 MachinePointerInfo PtrInfo = MachinePointerInfo::getConstantPool(MF);
4354 LoadMI->addMemOperand(MF, MF.getMachineMemOperand(PtrInfo,
4355 MachineMemOperand::MOLoad,
4356 Size, Align(Size)));
4357 constrainSelectedInstRegOperands(*LoadMI, TII, TRI, RBI);
4358 return LoadMI;
4359}
4360
4361/// Return an <Opcode, SubregIndex> pair to do a vector elt insert of a given
4362/// size and RB.
4363static std::pair<unsigned, unsigned>
4364getInsertVecEltOpInfo(const RegisterBank &RB, unsigned EltSize) {
4365 unsigned Opc, SubregIdx;
4366 if (RB.getID() == AArch64::GPRRegBankID) {
4367 if (EltSize == 8) {
4368 Opc = AArch64::INSvi8gpr;
4369 SubregIdx = AArch64::bsub;
4370 } else if (EltSize == 16) {
4371 Opc = AArch64::INSvi16gpr;
4372 SubregIdx = AArch64::ssub;
4373 } else if (EltSize == 32) {
4374 Opc = AArch64::INSvi32gpr;
4375 SubregIdx = AArch64::ssub;
4376 } else if (EltSize == 64) {
4377 Opc = AArch64::INSvi64gpr;
4378 SubregIdx = AArch64::dsub;
4379 } else {
4380 llvm_unreachable("invalid elt size!");
4381 }
4382 } else {
4383 if (EltSize == 8) {
4384 Opc = AArch64::INSvi8lane;
4385 SubregIdx = AArch64::bsub;
4386 } else if (EltSize == 16) {
4387 Opc = AArch64::INSvi16lane;
4388 SubregIdx = AArch64::hsub;
4389 } else if (EltSize == 32) {
4390 Opc = AArch64::INSvi32lane;
4391 SubregIdx = AArch64::ssub;
4392 } else if (EltSize == 64) {
4393 Opc = AArch64::INSvi64lane;
4394 SubregIdx = AArch64::dsub;
4395 } else {
4396 llvm_unreachable("invalid elt size!");
4397 }
4398 }
4399 return std::make_pair(Opc, SubregIdx);
4400}
4401
4402MachineInstr *AArch64InstructionSelector::emitInstr(
4403 unsigned Opcode, std::initializer_list<llvm::DstOp> DstOps,
4404 std::initializer_list<llvm::SrcOp> SrcOps, MachineIRBuilder &MIRBuilder,
4405 const ComplexRendererFns &RenderFns) const {
4406 assert(Opcode && "Expected an opcode?");
4407 assert(!isPreISelGenericOpcode(Opcode) &&
4408 "Function should only be used to produce selected instructions!");
4409 auto MI = MIRBuilder.buildInstr(Opcode, DstOps, SrcOps);
4410 if (RenderFns)
4411 for (auto &Fn : *RenderFns)
4412 Fn(MI);
4413 constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
4414 return &*MI;
4415}
4416
4417MachineInstr *AArch64InstructionSelector::emitAddSub(
4418 const std::array<std::array<unsigned, 2>, 5> &AddrModeAndSizeToOpcode,
4419 Register Dst, MachineOperand &LHS, MachineOperand &RHS,
4420 MachineIRBuilder &MIRBuilder) const {
4421 MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
4422 assert(LHS.isReg() && RHS.isReg() && "Expected register operands?");
4423 auto Ty = MRI.getType(LHS.getReg());
4424 assert(!Ty.isVector() && "Expected a scalar or pointer?");
4425 unsigned Size = Ty.getSizeInBits();
4426 assert((Size == 32 || Size == 64) && "Expected a 32-bit or 64-bit type only");
4427 bool Is32Bit = Size == 32;
4428
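// Operand forms are tried in order: 12-bit arithmetic immediate, negated
// immediate (which flips to the opposite ADD/SUB opcode), extended register,
// shifted register, and finally plain register-register.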
4429 // INSTRri form with positive arithmetic immediate.
4430 if (auto Fns = selectArithImmed(RHS))
4431 return emitInstr(AddrModeAndSizeToOpcode[0][Is32Bit], {Dst}, {LHS},
4432 MIRBuilder, Fns);
4433
4434 // INSTRri form with negative arithmetic immediate.
4435 if (auto Fns = selectNegArithImmed(RHS))
4436 return emitInstr(AddrModeAndSizeToOpcode[3][Is32Bit], {Dst}, {LHS},
4437 MIRBuilder, Fns);
4438
4439 // INSTRrx form.
4440 if (auto Fns = selectArithExtendedRegister(RHS))
4441 return emitInstr(AddrModeAndSizeToOpcode[4][Is32Bit], {Dst}, {LHS},
4442 MIRBuilder, Fns);
4443
4444 // INSTRrs form.
4445 if (auto Fns = selectShiftedRegister(RHS))
4446 return emitInstr(AddrModeAndSizeToOpcode[1][Is32Bit], {Dst}, {LHS},
4447 MIRBuilder, Fns);
4448 return emitInstr(AddrModeAndSizeToOpcode[2][Is32Bit], {Dst}, {LHS, RHS},
4449 MIRBuilder);
4450}
4451
4452MachineInstr *
4453AArch64InstructionSelector::emitADD(Register DefReg, MachineOperand &LHS,
4454 MachineOperand &RHS,
4455 MachineIRBuilder &MIRBuilder) const {
4456 const std::array<std::array<unsigned, 2>, 5> OpcTable{
4457 {{AArch64::ADDXri, AArch64::ADDWri},
4458 {AArch64::ADDXrs, AArch64::ADDWrs},
4459 {AArch64::ADDXrr, AArch64::ADDWrr},
4460 {AArch64::SUBXri, AArch64::SUBWri},
4461 {AArch64::ADDXrx, AArch64::ADDWrx}}};
4462 return emitAddSub(OpcTable, DefReg, LHS, RHS, MIRBuilder);
4463}
4464
4465MachineInstr *
4466AArch64InstructionSelector::emitADDS(Register Dst, MachineOperand &LHS,
4467 MachineOperand &RHS,
4468 MachineIRBuilder &MIRBuilder) const {
4469 const std::array<std::array<unsigned, 2>, 5> OpcTable{
4470 {{AArch64::ADDSXri, AArch64::ADDSWri},
4471 {AArch64::ADDSXrs, AArch64::ADDSWrs},
4472 {AArch64::ADDSXrr, AArch64::ADDSWrr},
4473 {AArch64::SUBSXri, AArch64::SUBSWri},
4474 {AArch64::ADDSXrx, AArch64::ADDSWrx}}};
4475 return emitAddSub(OpcTable, Dst, LHS, RHS, MIRBuilder);
4476}
4477
4478MachineInstr *
4479AArch64InstructionSelector::emitSUBS(Register Dst, MachineOperand &LHS,
4480 MachineOperand &RHS,
4481 MachineIRBuilder &MIRBuilder) const {
4482 const std::array<std::array<unsigned, 2>, 5> OpcTable{
4483 {{AArch64::SUBSXri, AArch64::SUBSWri},
4484 {AArch64::SUBSXrs, AArch64::SUBSWrs},
4485 {AArch64::SUBSXrr, AArch64::SUBSWrr},
4486 {AArch64::ADDSXri, AArch64::ADDSWri},
4487 {AArch64::SUBSXrx, AArch64::SUBSWrx}}};
4488 return emitAddSub(OpcTable, Dst, LHS, RHS, MIRBuilder);
4489}
4490
4491MachineInstr *
4492AArch64InstructionSelector::emitADCS(Register Dst, MachineOperand &LHS,
4493 MachineOperand &RHS,
4494 MachineIRBuilder &MIRBuilder) const {
4495 assert(LHS.isReg() && RHS.isReg() && "Expected register operands?");
4496 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
4497 bool Is32Bit = (MRI->getType(LHS.getReg()).getSizeInBits() == 32);
4498 static const unsigned OpcTable[2] = {AArch64::ADCSXr, AArch64::ADCSWr};
4499 return emitInstr(OpcTable[Is32Bit], {Dst}, {LHS, RHS}, MIRBuilder);
4500}
4501
4502MachineInstr *
4503AArch64InstructionSelector::emitSBCS(Register Dst, MachineOperand &LHS,
4504 MachineOperand &RHS,
4505 MachineIRBuilder &MIRBuilder) const {
4506 assert(LHS.isReg() && RHS.isReg() && "Expected register operands?");
4507 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
4508 bool Is32Bit = (MRI->getType(LHS.getReg()).getSizeInBits() == 32);
4509 static const unsigned OpcTable[2] = {AArch64::SBCSXr, AArch64::SBCSWr};
4510 return emitInstr(OpcTable[Is32Bit], {Dst}, {LHS, RHS}, MIRBuilder);
4511}
4512
4513MachineInstr *
4514AArch64InstructionSelector::emitCMN(MachineOperand &LHS, MachineOperand &RHS,
4515 MachineIRBuilder &MIRBuilder) const {
4516 MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
4517 bool Is32Bit = (MRI.getType(LHS.getReg()).getSizeInBits() == 32);
4518 auto RC = Is32Bit ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass;
4519 return emitADDS(MRI.createVirtualRegister(RC), LHS, RHS, MIRBuilder);
4520}
4521
4522MachineInstr *
4523AArch64InstructionSelector::emitTST(MachineOperand &LHS, MachineOperand &RHS,
4524 MachineIRBuilder &MIRBuilder) const {
4525 assert(LHS.isReg() && RHS.isReg() && "Expected register operands?");
4526 MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
4527 LLT Ty = MRI.getType(LHS.getReg());
4528 unsigned RegSize = Ty.getSizeInBits();
4529 bool Is32Bit = (RegSize == 32);
4530 const unsigned OpcTable[3][2] = {{AArch64::ANDSXri, AArch64::ANDSWri},
4531 {AArch64::ANDSXrs, AArch64::ANDSWrs},
4532 {AArch64::ANDSXrr, AArch64::ANDSWrr}};
4533 // ANDS needs a logical immediate for its immediate form. Check if we can
4534 // fold one in.
4535 if (auto ValAndVReg = getIConstantVRegValWithLookThrough(RHS.getReg(), MRI)) {
4536 int64_t Imm = ValAndVReg->Value.getSExtValue();
4537
4538 if (AArch64_AM::isLogicalImmediate(Imm, RegSize)) {
4539 auto TstMI = MIRBuilder.buildInstr(OpcTable[0][Is32Bit], {Ty}, {LHS});
4540 TstMI.addImm(AArch64_AM::encodeLogicalImmediate(Imm, RegSize));
4541 constrainSelectedInstRegOperands(*TstMI, TII, TRI, RBI);
4542 return &*TstMI;
4543 }
4544 }
4545
4546 if (auto Fns = selectLogicalShiftedRegister(RHS))
4547 return emitInstr(OpcTable[1][Is32Bit], {Ty}, {LHS}, MIRBuilder, Fns);
4548 return emitInstr(OpcTable[2][Is32Bit], {Ty}, {LHS, RHS}, MIRBuilder);
4549}
4550
4551MachineInstr *AArch64InstructionSelector::emitIntegerCompare(
4552 MachineOperand &LHS, MachineOperand &RHS, MachineOperand &Predicate,
4553 MachineIRBuilder &MIRBuilder) const {
4554 assert(LHS.isReg() && RHS.isReg() && "Expected LHS and RHS to be registers!");
4555 assert(Predicate.isPredicate() && "Expected predicate?");
4556 MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
4557 LLT CmpTy = MRI.getType(LHS.getReg());
4558 assert(!CmpTy.isVector() && "Expected scalar or pointer");
4559 unsigned Size = CmpTy.getSizeInBits();
4560 (void)Size;
4561 assert((Size == 32 || Size == 64) && "Expected a 32-bit or 64-bit LHS/RHS?");
4562 // Fold the compare into a cmn or tst if possible.
4563 if (auto FoldCmp = tryFoldIntegerCompare(LHS, RHS, Predicate, MIRBuilder))
4564 return FoldCmp;
4565 auto Dst = MRI.cloneVirtualRegister(LHS.getReg());
4566 return emitSUBS(Dst, LHS, RHS, MIRBuilder);
4567}
4568
4569MachineInstr *AArch64InstructionSelector::emitCSetForFCmp(
4570 Register Dst, CmpInst::Predicate Pred, MachineIRBuilder &MIRBuilder) const {
4571 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
4572#ifndef NDEBUG
4573 LLT Ty = MRI.getType(Dst);
4574 assert(!Ty.isVector() && Ty.getSizeInBits() == 32 &&
4575 "Expected a 32-bit scalar register?");
4576#endif
4577 const Register ZReg = AArch64::WZR;
4578 AArch64CC::CondCode CC1, CC2;
4579 changeFCMPPredToAArch64CC(Pred, CC1, CC2);
4580 auto InvCC1 = AArch64CC::getInvertedCondCode(CC1);
4581 if (CC2 == AArch64CC::AL)
4582 return emitCSINC(/*Dst=*/Dst, /*Src1=*/ZReg, /*Src2=*/ZReg, InvCC1,
4583 MIRBuilder);
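// Predicates that need two flag tests (e.g. ONE, UEQ) are handled by
// materializing each condition as 0/1 with a CSINC against WZR and ORing
// the two results together.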
4584 const TargetRegisterClass *RC = &AArch64::GPR32RegClass;
4585 Register Def1Reg = MRI.createVirtualRegister(RC);
4586 Register Def2Reg = MRI.createVirtualRegister(RC);
4587 auto InvCC2 = AArch64CC::getInvertedCondCode(CC2);
4588 emitCSINC(/*Dst=*/Def1Reg, /*Src1=*/ZReg, /*Src2=*/ZReg, InvCC1, MIRBuilder);
4589 emitCSINC(/*Dst=*/Def2Reg, /*Src1=*/ZReg, /*Src2=*/ZReg, InvCC2, MIRBuilder);
4590 auto OrMI = MIRBuilder.buildInstr(AArch64::ORRWrr, {Dst}, {Def1Reg, Def2Reg});
4591 constrainSelectedInstRegOperands(*OrMI, TII, TRI, RBI);
4592 return &*OrMI;
4593}
4594
4595MachineInstr *AArch64InstructionSelector::emitFPCompare(
4596 Register LHS, Register RHS, MachineIRBuilder &MIRBuilder,
4597 std::optional<CmpInst::Predicate> Pred) const {
4598 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
4599 LLT Ty = MRI.getType(LHS);
4600 if (Ty.isVector())
4601 return nullptr;
4602 unsigned OpSize = Ty.getSizeInBits();
4603 if (OpSize != 32 && OpSize != 64)
4604 return nullptr;
4605
4606 // If this is a compare against +0.0, then we don't have
4607 // to explicitly materialize a constant.
4608 const ConstantFP *FPImm = getConstantFPVRegVal(RHS, MRI);
4609 bool ShouldUseImm = FPImm && (FPImm->isZero() && !FPImm->isNegative());
4610
4611 auto IsEqualityPred = [](CmpInst::Predicate P) {
4612 return P == CmpInst::FCMP_OEQ || P == CmpInst::FCMP_ONE ||
4613 P == CmpInst::FCMP_UEQ || P == CmpInst::FCMP_UNE;
4614 };
4615 if (!ShouldUseImm && Pred && IsEqualityPred(*Pred)) {
4616 // Try commutating the operands.
4617 const ConstantFP *LHSImm = getConstantFPVRegVal(LHS, MRI);
4618 if (LHSImm && (LHSImm->isZero() && !LHSImm->isNegative())) {
4619 ShouldUseImm = true;
4620 std::swap(LHS, RHS);
4621 }
4622 }
4623 unsigned CmpOpcTbl[2][2] = {{AArch64::FCMPSrr, AArch64::FCMPDrr},
4624 {AArch64::FCMPSri, AArch64::FCMPDri}};
4625 unsigned CmpOpc = CmpOpcTbl[ShouldUseImm][OpSize == 64];
4626
4627 // Partially build the compare. Decide if we need to add a use for the
4628 // third operand based off whether or not we're comparing against 0.0.
4629 auto CmpMI = MIRBuilder.buildInstr(CmpOpc).addUse(LHS);
4630 CmpMI.setMIFlags(MachineInstr::NoFPExcept);
4631 if (!ShouldUseImm)
4632 CmpMI.addUse(RHS);
4633 constrainSelectedInstRegOperands(*CmpMI, TII, TRI, RBI);
4634 return &*CmpMI;
4635}
4636
4637MachineInstr *AArch64InstructionSelector::emitVectorConcat(
4638 std::optional<Register> Dst, Register Op1, Register Op2,
4639 MachineIRBuilder &MIRBuilder) const {
4640 // We implement a vector concat by:
4641 // 1. Use scalar_to_vector to insert the lower vector into the larger dest
4642 // 2. Insert the upper vector into the destination's upper element
4643 // TODO: some of this code is common with G_BUILD_VECTOR handling.
4644 MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
4645
4646 const LLT Op1Ty = MRI.getType(Op1);
4647 const LLT Op2Ty = MRI.getType(Op2);
4648
4649 if (Op1Ty != Op2Ty) {
4650 LLVM_DEBUG(dbgs() << "Could not do vector concat of differing vector tys");
4651 return nullptr;
4652 }
4653 assert(Op1Ty.isVector() && "Expected a vector for vector concat");
4654
4655 if (Op1Ty.getSizeInBits() >= 128) {
4656 LLVM_DEBUG(dbgs() << "Vector concat not supported for full size vectors");
4657 return nullptr;
4658 }
4659
4660 // At the moment we just support 64 bit vector concats.
4661 if (Op1Ty.getSizeInBits() != 64) {
4662 LLVM_DEBUG(dbgs() << "Vector concat only supported for 64b vectors");
4663 return nullptr;
4664 }
4665
4666 const LLT ScalarTy = LLT::scalar(Op1Ty.getSizeInBits());
4667 const RegisterBank &FPRBank = *RBI.getRegBank(Op1, MRI, TRI);
4668 const TargetRegisterClass *DstRC =
4669 getRegClassForTypeOnBank(Op1Ty.multiplyElements(2), FPRBank);
4670
4671 MachineInstr *WidenedOp1 =
4672 emitScalarToVector(ScalarTy.getSizeInBits(), DstRC, Op1, MIRBuilder);
4673 MachineInstr *WidenedOp2 =
4674 emitScalarToVector(ScalarTy.getSizeInBits(), DstRC, Op2, MIRBuilder);
4675 if (!WidenedOp1 || !WidenedOp2) {
4676 LLVM_DEBUG(dbgs() << "Could not emit a vector from scalar value");
4677 return nullptr;
4678 }
4679
4680 // Now do the insert of the upper element.
4681 unsigned InsertOpc, InsSubRegIdx;
4682 std::tie(InsertOpc, InsSubRegIdx) =
4683 getInsertVecEltOpInfo(FPRBank, ScalarTy.getSizeInBits());
4684
4685 if (!Dst)
4686 Dst = MRI.createVirtualRegister(DstRC);
4687 auto InsElt =
4688 MIRBuilder
4689 .buildInstr(InsertOpc, {*Dst}, {WidenedOp1->getOperand(0).getReg()})
4690 .addImm(1) /* Lane index */
4691 .addUse(WidenedOp2->getOperand(0).getReg())
4692 .addImm(0);
4693 constrainSelectedInstRegOperands(*InsElt, TII, TRI, RBI);
4694 return &*InsElt;
4695}
4696
4697MachineInstr *
4698AArch64InstructionSelector::emitCSINC(Register Dst, Register Src1,
4699 Register Src2, AArch64CC::CondCode Pred,
4700 MachineIRBuilder &MIRBuilder) const {
4701 auto &MRI = *MIRBuilder.getMRI();
4702 const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Dst);
4703 // If we used a register class, then this won't necessarily have an LLT.
4704 // Compute the size based off whether or not we have a class or bank.
4705 unsigned Size;
4706 if (const auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
4707 Size = TRI.getRegSizeInBits(*RC);
4708 else
4709 Size = MRI.getType(Dst).getSizeInBits();
4710 // Some opcodes use s1.
4711 assert(Size <= 64 && "Expected 64 bits or less only!");
4712 static const unsigned OpcTable[2] = {AArch64::CSINCWr, AArch64::CSINCXr};
4713 unsigned Opc = OpcTable[Size == 64];
4714 auto CSINC = MIRBuilder.buildInstr(Opc, {Dst}, {Src1, Src2}).addImm(Pred);
4715 constrainSelectedInstRegOperands(*CSINC, TII, TRI, RBI);
4716