LLVM 23.0.0git
AArch64RegisterBankInfo.cpp
Go to the documentation of this file.
1//===- AArch64RegisterBankInfo.cpp ----------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the RegisterBankInfo class for
10/// AArch64.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
15#include "AArch64ExpandImm.h"
16#include "AArch64RegisterInfo.h"
17#include "AArch64Subtarget.h"
20#include "llvm/ADT/APInt.h"
21#include "llvm/ADT/STLExtras.h"
37#include "llvm/IR/Constants.h"
38#include "llvm/IR/IntrinsicsAArch64.h"
41#include <cassert>
42
43#define GET_TARGET_REGBANK_IMPL
44#include "AArch64GenRegisterBank.inc"
45
46// This file will be TableGen'ed at some point.
47#include "AArch64GenRegisterBankInfo.def"
48
49using namespace llvm;
50static const unsigned CustomMappingID = 1;
51
// Tail of the AArch64RegisterBankInfo constructor (the opening line with the
// constructor name, doxygen line 52, is elided in this dump). Its only job is
// a one-time, assert-only sanity check that the TableGen'ed register-bank
// tables (banks, partial mappings, value mappings, copy and fpext mappings)
// match this file's expectations; in NDEBUG builds it does no observable work.
// NOTE(review): several assert lines (e.g. 101-102, 104-106, 160-164,
// 188, 190-194) are elided by the scrape — restore from the original source
// before editing by hand.
53 const TargetRegisterInfo &TRI) {
54 static llvm::once_flag InitializeRegisterBankFlag;
55
56 static auto InitializeRegisterBankOnce = [&]() {
57 // We have only one set of register banks, whatever the subtarget
58 // is. Therefore, the initialization of the RegBanks table should be
59 // done only once. Indeed the table of all register banks
60 // (AArch64::RegBanks) is unique in the compiler. At some point, it
61 // will get tablegen'ed and the whole constructor becomes empty.
62
63 const RegisterBank &RBGPR = getRegBank(AArch64::GPRRegBankID);
64 (void)RBGPR;
65 assert(&AArch64::GPRRegBank == &RBGPR &&
66 "The order in RegBanks is messed up");
67
68 const RegisterBank &RBFPR = getRegBank(AArch64::FPRRegBankID);
69 (void)RBFPR;
70 assert(&AArch64::FPRRegBank == &RBFPR &&
71 "The order in RegBanks is messed up");
72
73 const RegisterBank &RBCCR = getRegBank(AArch64::CCRegBankID);
74 (void)RBCCR;
75 assert(&AArch64::CCRegBank == &RBCCR &&
76 "The order in RegBanks is messed up");
77
78 // The GPR register bank is fully defined by all the registers in
79 // GR64all + its subclasses.
80 assert(RBGPR.covers(*TRI.getRegClass(AArch64::GPR32RegClassID)) &&
81 "Subclass not added?");
82 assert(getMaximumSize(RBGPR.getID()) == 128 &&
83 "GPRs should hold up to 128-bit");
84
85 // The FPR register bank is fully defined by all the registers in
86 // GR64all + its subclasses.
87 assert(RBFPR.covers(*TRI.getRegClass(AArch64::QQRegClassID)) &&
88 "Subclass not added?");
89 assert(RBFPR.covers(*TRI.getRegClass(AArch64::FPR64RegClassID)) &&
90 "Subclass not added?");
91 assert(getMaximumSize(RBFPR.getID()) == 512 &&
92 "FPRs should hold up to 512-bit via QQQQ sequence");
93
94 assert(RBCCR.covers(*TRI.getRegClass(AArch64::CCRRegClassID)) &&
95 "Class not added?");
96 assert(getMaximumSize(RBCCR.getID()) == 32 &&
97 "CCR should hold up to 32-bit");
98
99 // Check that the TableGen'ed like file is in sync we our expectations.
100 // First, the Idx.
103 "PartialMappingIdx's are incorrectly ordered");
107 "PartialMappingIdx's are incorrectly ordered");
108// Now, the content.
109// Check partial mapping.
#define CHECK_PARTIALMAP(Idx, ValStartIdx, ValLength, RB) \
111 do { \
112 assert( \
113 checkPartialMap(PartialMappingIdx::Idx, ValStartIdx, ValLength, RB) && \
114 #Idx " is incorrectly initialized"); \
115 } while (false)
116
117 CHECK_PARTIALMAP(PMI_GPR32, 0, 32, RBGPR);
118 CHECK_PARTIALMAP(PMI_GPR64, 0, 64, RBGPR);
119 CHECK_PARTIALMAP(PMI_GPR128, 0, 128, RBGPR);
120 CHECK_PARTIALMAP(PMI_FPR16, 0, 16, RBFPR);
121 CHECK_PARTIALMAP(PMI_FPR32, 0, 32, RBFPR);
122 CHECK_PARTIALMAP(PMI_FPR64, 0, 64, RBFPR);
123 CHECK_PARTIALMAP(PMI_FPR128, 0, 128, RBFPR);
124 CHECK_PARTIALMAP(PMI_FPR256, 0, 256, RBFPR);
125 CHECK_PARTIALMAP(PMI_FPR512, 0, 512, RBFPR);
126
127// Check value mapping.
128#define CHECK_VALUEMAP_IMPL(RBName, Size, Offset) \
129 do { \
130 assert(checkValueMapImpl(PartialMappingIdx::PMI_##RBName##Size, \
131 PartialMappingIdx::PMI_First##RBName, Size, \
132 Offset) && \
133 #RBName #Size " " #Offset " is incorrectly initialized"); \
134 } while (false)
135
136#define CHECK_VALUEMAP(RBName, Size) CHECK_VALUEMAP_IMPL(RBName, Size, 0)
137
138 CHECK_VALUEMAP(GPR, 32);
139 CHECK_VALUEMAP(GPR, 64);
140 CHECK_VALUEMAP(GPR, 128);
141 CHECK_VALUEMAP(FPR, 16);
142 CHECK_VALUEMAP(FPR, 32);
143 CHECK_VALUEMAP(FPR, 64);
144 CHECK_VALUEMAP(FPR, 128);
145 CHECK_VALUEMAP(FPR, 256);
146 CHECK_VALUEMAP(FPR, 512);
147
148// Check the value mapping for 3-operands instructions where all the operands
149// map to the same value mapping.
150#define CHECK_VALUEMAP_3OPS(RBName, Size) \
151 do { \
152 CHECK_VALUEMAP_IMPL(RBName, Size, 0); \
153 CHECK_VALUEMAP_IMPL(RBName, Size, 1); \
154 CHECK_VALUEMAP_IMPL(RBName, Size, 2); \
155 } while (false)
156
157 CHECK_VALUEMAP_3OPS(GPR, 32);
158 CHECK_VALUEMAP_3OPS(GPR, 64);
159 CHECK_VALUEMAP_3OPS(GPR, 128);
165
166#define CHECK_VALUEMAP_CROSSREGCPY(RBNameDst, RBNameSrc, Size) \
167 do { \
168 unsigned PartialMapDstIdx = PMI_##RBNameDst##Size - PMI_Min; \
169 unsigned PartialMapSrcIdx = PMI_##RBNameSrc##Size - PMI_Min; \
170 (void)PartialMapDstIdx; \
171 (void)PartialMapSrcIdx; \
172 const ValueMapping *Map = getCopyMapping(AArch64::RBNameDst##RegBankID, \
173 AArch64::RBNameSrc##RegBankID, \
174 TypeSize::getFixed(Size)); \
175 (void)Map; \
176 assert(Map[0].BreakDown == \
177 &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] && \
178 Map[0].NumBreakDowns == 1 && \
179 #RBNameDst #Size " Dst is incorrectly initialized"); \
180 assert(Map[1].BreakDown == \
181 &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] && \
182 Map[1].NumBreakDowns == 1 && \
183 #RBNameSrc #Size " Src is incorrectly initialized"); \
184 \
185 } while (false)
186
187 CHECK_VALUEMAP_CROSSREGCPY(GPR, GPR, 32);
189 CHECK_VALUEMAP_CROSSREGCPY(GPR, GPR, 64);
195
196#define CHECK_VALUEMAP_FPEXT(DstSize, SrcSize) \
197 do { \
198 unsigned PartialMapDstIdx = PMI_FPR##DstSize - PMI_Min; \
199 unsigned PartialMapSrcIdx = PMI_FPR##SrcSize - PMI_Min; \
200 (void)PartialMapDstIdx; \
201 (void)PartialMapSrcIdx; \
202 const ValueMapping *Map = getFPExtMapping(DstSize, SrcSize); \
203 (void)Map; \
204 assert(Map[0].BreakDown == \
205 &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] && \
206 Map[0].NumBreakDowns == 1 && "FPR" #DstSize \
207 " Dst is incorrectly initialized"); \
208 assert(Map[1].BreakDown == \
209 &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] && \
210 Map[1].NumBreakDowns == 1 && "FPR" #SrcSize \
211 " Src is incorrectly initialized"); \
212 \
213 } while (false)
214
215 CHECK_VALUEMAP_FPEXT(32, 16);
216 CHECK_VALUEMAP_FPEXT(64, 16);
217 CHECK_VALUEMAP_FPEXT(64, 32);
218 CHECK_VALUEMAP_FPEXT(128, 64);
219
220 assert(verify(TRI) && "Invalid register bank information");
221 };
222
// llvm::call_once guarantees the checks above run at most once per process.
223 llvm::call_once(InitializeRegisterBankFlag, InitializeRegisterBankOnce);
224}
225
// copyCost: returns the relative cost of copying a value of `Size` bits from
// bank A to bank B. Cross-bank GPR<->FPR copies need an FMOV and are priced
// higher than the base cost.
// NOTE(review): doxygen scrape — line 226 (qualified function name and the
// `A` parameter) and line 244 (presumably the fallthrough
// `return RegisterBankInfo::copyCost(A, B, Size);`) are elided here.
227 const RegisterBank &B,
228 const TypeSize Size) const {
229 // What do we do with different size?
230 // copy are same size.
231 // Will introduce other hooks for different size:
232 // * extract cost.
233 // * build_sequence cost.
234
235 // Copy from (resp. to) GPR to (resp. from) FPR involves FMOV.
236 // FIXME: This should be deduced from the scheduling model.
237 if (&A == &AArch64::GPRRegBank && &B == &AArch64::FPRRegBank)
238 // FMOVXDr or FMOVWSr.
239 return 5;
240 if (&A == &AArch64::FPRRegBank && &B == &AArch64::GPRRegBank)
241 // FMOVDXr or FMOVSWr.
242 return 4;
243
245}
246
// getRegBankFromRegClass: maps a target register class to its register bank.
// The only special case visible here is GPR64sponly, which maps to the GPR
// bank.
// NOTE(review): doxygen scrape — line 248 (qualified function name and the
// `RC` parameter) and line 254 (presumably the default-case delegation to
// RegisterBankInfo::getRegBankFromRegClass) are elided here.
247const RegisterBank &
249 LLT Ty) const {
250 switch (RC.getID()) {
251 case AArch64::GPR64sponlyRegClassID:
252 return getRegBank(AArch64::GPRRegBankID);
253 default:
255 }
256}
257
// getInstrAlternativeMappings: for a few opcodes (G_OR, G_BITCAST, G_LOAD)
// returns the list of alternative bank assignments RegBankSelect may choose
// from (GPR-only, FPR-only, and for bitcasts the two cross-bank variants).
// The mapping IDs used here (1..3) must stay in sync with the assert in
// applyMappingImpl.
// NOTE(review): doxygen scrape — the signature lines (258-259), parts of the
// GPRToFPR/FPRToGPR cost arguments (313, 320), the G_LOAD operand-mapping
// lines (343-346, 350-353), and the final fallback return (363) are elided.
260 const MachineInstr &MI) const {
261 const MachineFunction &MF = *MI.getParent()->getParent();
262 const TargetSubtargetInfo &STI = MF.getSubtarget();
263 const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
264 const MachineRegisterInfo &MRI = MF.getRegInfo();
265
266 switch (MI.getOpcode()) {
267 case TargetOpcode::G_OR: {
268 // 32 and 64-bit or can be mapped on either FPR or
269 // GPR for the same cost.
270 TypeSize Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
271 if (Size != 32 && Size != 64)
272 break;
273
274 // If the instruction has any implicit-defs or uses,
275 // do not mess with it.
276 if (MI.getNumOperands() != 3)
277 break;
278 InstructionMappings AltMappings;
279 const InstructionMapping &GPRMapping = getInstructionMapping(
280 /*ID*/ 1, /*Cost*/ 1, getValueMapping(PMI_FirstGPR, Size),
281 /*NumOperands*/ 3);
282 const InstructionMapping &FPRMapping = getInstructionMapping(
283 /*ID*/ 2, /*Cost*/ 1, getValueMapping(PMI_FirstFPR, Size),
284 /*NumOperands*/ 3);
285
286 AltMappings.push_back(&GPRMapping);
287 AltMappings.push_back(&FPRMapping);
288 return AltMappings;
289 }
290 case TargetOpcode::G_BITCAST: {
291 TypeSize Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
292 if (Size != 32 && Size != 64)
293 break;
294
295 // If the instruction has any implicit-defs or uses,
296 // do not mess with it.
297 if (MI.getNumOperands() != 2)
298 break;
299
300 InstructionMappings AltMappings;
301 const InstructionMapping &GPRMapping = getInstructionMapping(
302 /*ID*/ 1, /*Cost*/ 1,
303 getCopyMapping(AArch64::GPRRegBankID, AArch64::GPRRegBankID, Size),
304 /*NumOperands*/ 2);
305 const InstructionMapping &FPRMapping = getInstructionMapping(
306 /*ID*/ 2, /*Cost*/ 1,
307 getCopyMapping(AArch64::FPRRegBankID, AArch64::FPRRegBankID, Size),
308 /*NumOperands*/ 2);
309 const InstructionMapping &GPRToFPRMapping = getInstructionMapping(
310 /*ID*/ 3,
311 /*Cost*/
312 copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank,
314 getCopyMapping(AArch64::FPRRegBankID, AArch64::GPRRegBankID, Size),
315 /*NumOperands*/ 2);
316 const InstructionMapping &FPRToGPRMapping = getInstructionMapping(
317 /*ID*/ 3,
318 /*Cost*/
319 copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank,
321 getCopyMapping(AArch64::GPRRegBankID, AArch64::FPRRegBankID, Size),
322 /*NumOperands*/ 2);
323
324 AltMappings.push_back(&GPRMapping);
325 AltMappings.push_back(&FPRMapping);
326 AltMappings.push_back(&GPRToFPRMapping);
327 AltMappings.push_back(&FPRToGPRMapping);
328 return AltMappings;
329 }
330 case TargetOpcode::G_LOAD: {
331 TypeSize Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
332 if (Size != 64)
333 break;
334
335 // If the instruction has any implicit-defs or uses,
336 // do not mess with it.
337 if (MI.getNumOperands() != 2)
338 break;
339
340 InstructionMappings AltMappings;
341 const InstructionMapping &GPRMapping = getInstructionMapping(
342 /*ID*/ 1, /*Cost*/ 1,
345 // Addresses are GPR 64-bit.
347 /*NumOperands*/ 2);
348 const InstructionMapping &FPRMapping = getInstructionMapping(
349 /*ID*/ 2, /*Cost*/ 1,
352 // Addresses are GPR 64-bit.
354 /*NumOperands*/ 2);
355
356 AltMappings.push_back(&GPRMapping);
357 AltMappings.push_back(&FPRMapping);
358 return AltMappings;
359 }
360 default:
361 break;
362 }
364}
365
// Helper deciding whether a G_FCONSTANT should be materialized on the GPR
// bank rather than FPR: returns true when all uses are stores, or when the
// value is fmov-illegal yet still legal as an FP immediate (so the GPR path
// avoids a constant-pool load). Only 16/32/64-bit scalars are considered.
// NOTE(review): doxygen scrape — line 366 (function name and first
// parameter), line 377 (presumably the `MVT VT` initialization used below),
// and line 394 (the movi-encodability condition guarding the
// `return false`) are elided here.
367 const MachineRegisterInfo &MRI,
368 const AArch64Subtarget &STI) {
369 assert(MI.getOpcode() == TargetOpcode::G_FCONSTANT);
370 Register Dst = MI.getOperand(0).getReg();
371 LLT Ty = MRI.getType(Dst);
372
373 unsigned Size = Ty.getSizeInBits();
374 if (Size != 16 && Size != 32 && Size != 64)
375 return false;
376
378 const AArch64TargetLowering *TLI = STI.getTargetLowering();
379
380 const APFloat Imm = MI.getOperand(1).getFPImm()->getValueAPF();
381 const APInt ImmBits = Imm.bitcastToAPInt();
382
383 // If all the uses are stores use a gpr constant
384 if (all_of(MRI.use_nodbg_instructions(Dst), [&](const MachineInstr &UseMI) {
385 return UseMI.getOpcode() == TargetOpcode::G_STORE &&
386 UseMI.getOperand(0).getReg() == Dst;
387 }))
388 return true;
389
390 // Check if we can encode this as a movi. Note, we only have one pattern so
391 // far for movis, hence the one check.
392 if (Size == 32) {
393 uint64_t Val = APInt::getSplat(64, ImmBits).getZExtValue();
395 return false;
396 }
397
398 // We want to use GPR when the value cannot be encoded as the immediate value
399 // of a fmov and when it will not result in a constant pool load. As
400 // AArch64TargetLowering::isFPImmLegal is used by the instruction selector
401 // to choose whether to emit a constant pool load, negating this check will
402 // ensure it would not have become a constant pool load.
403 bool OptForSize =
404 shouldOptimizeForSize(&MI.getMF()->getFunction(), nullptr, nullptr);
405 bool IsLegal = TLI->isFPImmLegal(Imm, VT, OptForSize);
406 bool IsFMov = TLI->isFPImmLegalAsFMov(Imm, VT);
407 return !IsFMov && IsLegal;
408}
409
410// Some of the instructions in applyMappingImpl attempt to anyext small values.
411// It may be that these values come from a G_CONSTANT that has been expanded to
412// 32 bits and then truncated. If this is the case, we shouldn't insert an
413// anyext and should instead make use of the G_CONSTANT directly, deleting the
414// trunc if possible.
//
// Returns true (and rewires operand OpIdx of MI to the 32-bit constant's
// vreg, assigning it the GPR bank) when operand OpIdx is a G_TRUNC of a
// scalar-s32 G_CONSTANT; the now-dead G_TRUNC is erased. Returns false,
// leaving MI untouched, otherwise.
// NOTE(review): doxygen scrape — the signature lines 415-416 are elided;
// the name `foldTruncOfI32Constant(MI, OpIdx, MRI, RBI)` is grounded by the
// call sites in applyMappingImpl.
417 const AArch64RegisterBankInfo &RBI) {
418 MachineOperand &Op = MI.getOperand(OpIdx);
419
420 Register ScalarReg = Op.getReg();
421 MachineInstr *TruncMI = MRI.getVRegDef(ScalarReg);
// Operand must be produced by a G_TRUNC ...
422 if (!TruncMI || TruncMI->getOpcode() != TargetOpcode::G_TRUNC)
423 return false;
424
// ... whose source is a G_CONSTANT ...
425 Register TruncSrc = TruncMI->getOperand(1).getReg();
426 MachineInstr *SrcDef = MRI.getVRegDef(TruncSrc);
427 if (!SrcDef || SrcDef->getOpcode() != TargetOpcode::G_CONSTANT)
428 return false;
429
// ... of scalar type exactly 32 bits wide.
430 LLT TruncSrcTy = MRI.getType(TruncSrc);
431 if (!TruncSrcTy.isScalar() || TruncSrcTy.getSizeInBits() != 32)
432 return false;
433
434 // Avoid truncating and extending a constant, this helps with selection.
435 Op.setReg(TruncSrc);
436 MRI.setRegBank(TruncSrc, RBI.getRegBank(AArch64::GPRRegBankID));
437
// Clean up the trunc if this was its only user.
438 if (MRI.use_empty(ScalarReg))
439 TruncMI->eraseFromParent();
440
441 return true;
442}
443
// applyMappingImpl: materializes the custom mappings chosen by
// getInstrMapping / getInstrAlternativeMappings. The recurring pattern is to
// widen sub-32-bit scalars on GPR to 32 bits (anyext/zext + trunc), since
// AArch64 GPRs have no sub-32-bit subregisters at this level.
// NOTE(review): doxygen scrape — lines 457, 476 and 509 are elided; each
// presumably defines the `ExtReg` (32-bit GPR vreg) consumed by the
// following buildTrunc/buildConstant calls.
444void AArch64RegisterBankInfo::applyMappingImpl(
445 MachineIRBuilder &Builder, const OperandsMapper &OpdMapper) const {
446 MachineInstr &MI = OpdMapper.getMI();
447 MachineRegisterInfo &MRI = OpdMapper.getMRI();
448
449 switch (MI.getOpcode()) {
// Small G_CONSTANT on GPR: widen the constant to s32 and trunc back.
450 case TargetOpcode::G_CONSTANT: {
451 Register Dst = MI.getOperand(0).getReg();
452 [[maybe_unused]] LLT DstTy = MRI.getType(Dst);
453 assert(MRI.getRegBank(Dst) == &AArch64::GPRRegBank && DstTy.isScalar() &&
454 DstTy.getSizeInBits() < 32 &&
455 "Expected a scalar smaller than 32 bits on a GPR.");
456 Builder.setInsertPt(*MI.getParent(), std::next(MI.getIterator()));
458 Builder.buildTrunc(Dst, ExtReg);
459
460 APInt Val = MI.getOperand(1).getCImm()->getValue().zext(32);
461 LLVMContext &Ctx = Builder.getMF().getFunction().getContext();
462 MI.getOperand(1).setCImm(ConstantInt::get(Ctx, Val));
463 MI.getOperand(0).setReg(ExtReg);
464 MRI.setRegBank(ExtReg, AArch64::GPRRegBank);
465
466 return applyDefaultMapping(OpdMapper);
467 }
// G_FCONSTANT on GPR: replace with an integer G_CONSTANT of the bit pattern.
468 case TargetOpcode::G_FCONSTANT: {
469 Register Dst = MI.getOperand(0).getReg();
470 assert(MRI.getRegBank(Dst) == &AArch64::GPRRegBank &&
471 "Expected Dst to be on a GPR.");
472 const APFloat &Imm = MI.getOperand(1).getFPImm()->getValueAPF();
473 APInt Bits = Imm.bitcastToAPInt();
474 Builder.setInsertPt(*MI.getParent(), MI.getIterator());
475 if (Bits.getBitWidth() < 32) {
477 Builder.buildConstant(ExtReg, Bits.zext(32));
478 Builder.buildTrunc(Dst, ExtReg);
479 MRI.setRegBank(ExtReg, AArch64::GPRRegBank);
480 } else {
481 Builder.buildConstant(Dst, Bits);
482 }
483 MI.eraseFromParent();
484 return;
485 }
// Small scalar store from GPR: feed the store a 32-bit anyext of the value
// (or the original 32-bit constant, if foldTruncOfI32Constant applies).
486 case TargetOpcode::G_STORE: {
487 Register Dst = MI.getOperand(0).getReg();
488 LLT Ty = MRI.getType(Dst);
489
490 if (MRI.getRegBank(Dst) == &AArch64::GPRRegBank && Ty.isScalar() &&
491 Ty.getSizeInBits() < 32) {
492
493 if (foldTruncOfI32Constant(MI, 0, MRI, *this))
494 return applyDefaultMapping(OpdMapper);
495
496 Builder.setInsertPt(*MI.getParent(), MI.getIterator());
497 auto Ext = Builder.buildAnyExt(LLT::integer(32), Dst);
498 MI.getOperand(0).setReg(Ext.getReg(0));
499 MRI.setRegBank(Ext.getReg(0), AArch64::GPRRegBank);
500 }
501 return applyDefaultMapping(OpdMapper);
502 }
// Small scalar load into GPR: load 32 bits, then trunc to the original type.
503 case TargetOpcode::G_LOAD: {
504 Register Dst = MI.getOperand(0).getReg();
505 LLT Ty = MRI.getType(Dst);
506 if (MRI.getRegBank(Dst) == &AArch64::GPRRegBank && Ty.isScalar() &&
507 Ty.getSizeInBits() < 32) {
508 Builder.setInsertPt(*MI.getParent(), std::next(MI.getIterator()));
510 Builder.buildTrunc(Dst, ExtReg);
511 MI.getOperand(0).setReg(ExtReg);
512 MRI.setRegBank(ExtReg, AArch64::GPRRegBank);
513 }
514 [[fallthrough]];
515 }
516 case TargetOpcode::G_OR:
517 case TargetOpcode::G_BITCAST:
518 // Those ID must match getInstrAlternativeMappings.
519 assert((OpdMapper.getInstrMapping().getID() >= 1 &&
520 OpdMapper.getInstrMapping().getID() <= 4) &&
521 "Don't know how to handle that ID");
522 return applyDefaultMapping(OpdMapper);
// Scalar operand of an insert: widen to 32-bit GPR before inserting.
523 case TargetOpcode::G_INSERT_VECTOR_ELT: {
524 if (foldTruncOfI32Constant(MI, 2, MRI, *this))
525 return applyDefaultMapping(OpdMapper);
526
527 // Extend smaller gpr operands to 32 bit.
528 Builder.setInsertPt(*MI.getParent(), MI.getIterator());
529 LLT OperandType = MRI.getType(MI.getOperand(2).getReg());
530 auto Ext = Builder.buildAnyExt(OperandType.changeElementSize(32),
531 MI.getOperand(2).getReg());
532 MRI.setRegBank(Ext.getReg(0), getRegBank(AArch64::GPRRegBankID));
533 MI.getOperand(2).setReg(Ext.getReg(0));
534 return applyDefaultMapping(OpdMapper);
535 }
536 case AArch64::G_DUP: {
537 if (foldTruncOfI32Constant(MI, 1, MRI, *this))
538 return applyDefaultMapping(OpdMapper);
539
540 // Extend smaller gpr to 32-bits
541 assert(MRI.getType(MI.getOperand(1).getReg()).getSizeInBits() < 32 &&
542 "Expected sources smaller than 32-bits");
543 Builder.setInsertPt(*MI.getParent(), MI.getIterator());
544
545 Register ConstReg =
546 Builder.buildAnyExt(LLT::integer(32), MI.getOperand(1).getReg())
547 .getReg(0);
548 MRI.setRegBank(ConstReg, getRegBank(AArch64::GPRRegBankID));
549 MI.getOperand(1).setReg(ConstReg);
550
551 return applyDefaultMapping(OpdMapper);
552 }
553 default:
554 llvm_unreachable("Don't know how to handle that operation");
555 }
556}
557
// getSameKindOfOperandsMapping: builds an instruction mapping where every
// operand (up to 3) shares one value mapping derived from operand 0's type,
// asserting in debug builds that all operands agree in size and GPR/FPR kind.
// NOTE(review): doxygen scrape — line 558 (the return type line), lines
// 571/573 (presumably the `RBIdx` and `IsFPR` initializations used below),
// parts of the operand-size assert (587, 589), and line 597 (the
// getInstructionMapping call head) are elided here.
559AArch64RegisterBankInfo::getSameKindOfOperandsMapping(
560 const MachineInstr &MI) const {
561 const unsigned Opc = MI.getOpcode();
562 const MachineFunction &MF = *MI.getParent()->getParent();
563 const MachineRegisterInfo &MRI = MF.getRegInfo();
564
565 unsigned NumOperands = MI.getNumOperands();
566 assert(NumOperands <= 3 &&
567 "This code is for instructions with 3 or less operands");
568
569 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
570 TypeSize Size = Ty.getSizeInBits();
572
574
575#ifndef NDEBUG
576 // Make sure all the operands are using similar size and type.
577 // Should probably be checked by the machine verifier.
578 // This code won't catch cases where the number of lanes is
579 // different between the operands.
580 // If we want to go to that level of details, it is probably
581 // best to check that the types are the same, period.
582 // Currently, we just check that the register banks are the same
583 // for each types.
584 for (unsigned Idx = 1; Idx != NumOperands; ++Idx) {
585 LLT OpTy = MRI.getType(MI.getOperand(Idx).getReg());
586 assert(
588 RBIdx, OpTy.getSizeInBits()) ==
590 "Operand has incompatible size");
591 bool OpIsFPR = OpTy.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
592 (void)OpIsFPR;
593 assert(IsFPR == OpIsFPR && "Operand has incompatible type");
594 }
595#endif // End NDEBUG.
596
598 getValueMapping(RBIdx, Size), NumOperands);
599}
600
601/// \returns true if a given intrinsic only uses and defines FPRs.
// NOTE(review): doxygen scrape — line 605 is elided; given the case labels
// and `default:` below, it is presumably the switch over the intrinsic ID
// (e.g. `switch (cast<GIntrinsic>(MI).getIntrinsicID()) {`) — confirm
// against the original source.
602static bool isFPIntrinsic(const MachineRegisterInfo &MRI,
603 const MachineInstr &MI) {
604 // TODO: Add more intrinsics.
606 default:
607 return false;
// Reductions, FP arithmetic/estimates, saturating ops, shifts/narrows and
// SHA1 crypto ops below all operate purely on SIMD&FP registers.
608 case Intrinsic::aarch64_neon_uaddlv:
609 case Intrinsic::aarch64_neon_uaddv:
610 case Intrinsic::aarch64_neon_saddv:
611 case Intrinsic::aarch64_neon_umaxv:
612 case Intrinsic::aarch64_neon_smaxv:
613 case Intrinsic::aarch64_neon_uminv:
614 case Intrinsic::aarch64_neon_sminv:
615 case Intrinsic::aarch64_neon_faddv:
616 case Intrinsic::aarch64_neon_fmaxv:
617 case Intrinsic::aarch64_neon_fminv:
618 case Intrinsic::aarch64_neon_fmaxnmv:
619 case Intrinsic::aarch64_neon_fminnmv:
620 case Intrinsic::aarch64_neon_fmulx:
621 case Intrinsic::aarch64_neon_frecpe:
622 case Intrinsic::aarch64_neon_frecps:
623 case Intrinsic::aarch64_neon_frecpx:
624 case Intrinsic::aarch64_neon_frsqrte:
625 case Intrinsic::aarch64_neon_frsqrts:
626 case Intrinsic::aarch64_neon_facge:
627 case Intrinsic::aarch64_neon_facgt:
628 case Intrinsic::aarch64_neon_fabd:
629 case Intrinsic::aarch64_neon_sqrdmlah:
630 case Intrinsic::aarch64_neon_sqrdmlsh:
631 case Intrinsic::aarch64_neon_sqrdmulh:
632 case Intrinsic::aarch64_neon_suqadd:
633 case Intrinsic::aarch64_neon_usqadd:
634 case Intrinsic::aarch64_neon_uqadd:
635 case Intrinsic::aarch64_neon_sqadd:
636 case Intrinsic::aarch64_neon_uqsub:
637 case Intrinsic::aarch64_neon_sqsub:
638 case Intrinsic::aarch64_neon_sqdmulls_scalar:
639 case Intrinsic::aarch64_neon_srshl:
640 case Intrinsic::aarch64_neon_urshl:
641 case Intrinsic::aarch64_neon_sqshl:
642 case Intrinsic::aarch64_neon_uqshl:
643 case Intrinsic::aarch64_neon_sqrshl:
644 case Intrinsic::aarch64_neon_uqrshl:
645 case Intrinsic::aarch64_neon_ushl:
646 case Intrinsic::aarch64_neon_sshl:
647 case Intrinsic::aarch64_neon_sqshrn:
648 case Intrinsic::aarch64_neon_sqshrun:
649 case Intrinsic::aarch64_neon_sqrshrn:
650 case Intrinsic::aarch64_neon_sqrshrun:
651 case Intrinsic::aarch64_neon_uqshrn:
652 case Intrinsic::aarch64_neon_uqrshrn:
653 case Intrinsic::aarch64_crypto_sha1h:
654 case Intrinsic::aarch64_crypto_sha1c:
655 case Intrinsic::aarch64_crypto_sha1p:
656 case Intrinsic::aarch64_crypto_sha1m:
657 case Intrinsic::aarch64_sisd_fcvtxn:
658 case Intrinsic::aarch64_sisd_fabd:
659 return true;
// saddlv is only FPR-only for sufficiently wide element/lane counts.
660 case Intrinsic::aarch64_neon_saddlv: {
661 const LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
662 return SrcTy.getElementType().getSizeInBits() >= 16 &&
663 SrcTy.getElementCount().getFixedValue() >= 4;
664 }
665 }
666}
667
668bool AArch64RegisterBankInfo::isPHIWithFPConstraints(
669 const MachineInstr &MI, const MachineRegisterInfo &MRI,
670 const AArch64RegisterInfo &TRI, const unsigned Depth) const {
671 if (!MI.isPHI() || Depth > MaxFPRSearchDepth)
672 return false;
673
674 return any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
675 [&](const MachineInstr &UseMI) {
676 if (onlyUsesFP(UseMI, MRI, TRI, Depth + 1))
677 return true;
678 return isPHIWithFPConstraints(UseMI, MRI, TRI, Depth + 1);
679 });
680}
681
// hasFPConstraints: best-effort check of whether \p MI produces or consumes
// FP values — via FP intrinsics, an already-assigned FPR bank, or (for PHIs)
// recursive inspection of its inputs up to MaxFPRSearchDepth.
// NOTE(review): doxygen scrape — line 684 (presumably the
// `const AArch64RegisterInfo &TRI,` parameter, as in the sibling
// functions), line 691 (the explicit-FP-opcode condition guarding the first
// `return true`), and line 697 (the rest of the copy-like condition) are
// elided here.
682bool AArch64RegisterBankInfo::hasFPConstraints(const MachineInstr &MI,
683 const MachineRegisterInfo &MRI,
685 unsigned Depth) const {
686 unsigned Op = MI.getOpcode();
687 if (Op == TargetOpcode::G_INTRINSIC && isFPIntrinsic(MRI, MI))
688 return true;
689
690 // Do we have an explicit floating point instruction?
692 return true;
693
694 // No. Check if we have a copy-like instruction. If we do, then we could
695 // still be fed by floating point instructions.
696 if (Op != TargetOpcode::COPY && !MI.isPHI() &&
698 return false;
699
700 // Check if we already know the register bank.
701 auto *RB = getRegBank(MI.getOperand(0).getReg(), MRI, TRI);
702 if (RB == &AArch64::FPRRegBank)
703 return true;
704 if (RB == &AArch64::GPRRegBank)
705 return false;
706
707 // We don't know anything.
708 //
709 // If we have a phi, we may be able to infer that it will be assigned a FPR
710 // based off of its inputs.
711 if (!MI.isPHI() || Depth > MaxFPRSearchDepth)
712 return false;
713
714 return any_of(MI.explicit_uses(), [&](const MachineOperand &Op) {
715 return Op.isReg() &&
716 onlyDefinesFP(*MRI.getVRegDef(Op.getReg()), MRI, TRI, Depth + 1);
717 });
718}
719
// onlyUsesFP: \returns true if \p MI strictly consumes FP values (FP->int
// conversions, FP compares, FP-consuming intrinsics, or a G_BITCAST whose
// every non-debug user does); otherwise falls back to hasFPConstraints.
// NOTE(review): doxygen scrape — line 722 (presumably the
// `const AArch64RegisterInfo &TRI,` parameter) and line 747 (presumably the
// nested switch over the intrinsic ID) are elided here.
720bool AArch64RegisterBankInfo::onlyUsesFP(const MachineInstr &MI,
721 const MachineRegisterInfo &MRI,
723 unsigned Depth) const {
724 switch (MI.getOpcode()) {
// A bitcast only "uses FP" if everything downstream of it does.
725 case TargetOpcode::G_BITCAST: {
726 Register DstReg = MI.getOperand(0).getReg();
727 return all_of(MRI.use_nodbg_instructions(DstReg),
728 [&](const MachineInstr &UseMI) {
729 return onlyUsesFP(UseMI, MRI, TRI, Depth + 1) ||
730 prefersFPUse(UseMI, MRI, TRI);
731 });
732 }
733
734 case TargetOpcode::G_FPTOSI:
735 case TargetOpcode::G_FPTOUI:
736 case TargetOpcode::G_FPTOSI_SAT:
737 case TargetOpcode::G_FPTOUI_SAT:
738 case TargetOpcode::G_FCMP:
739 case TargetOpcode::G_LROUND:
740 case TargetOpcode::G_LLROUND:
741 case AArch64::G_PMULL:
742 case AArch64::G_SLI:
743 case AArch64::G_SRI:
744 case AArch64::G_FPTRUNC_ODD:
745 return true;
// FP->int converting NEON intrinsics consume FPR sources.
746 case TargetOpcode::G_INTRINSIC:
748 case Intrinsic::aarch64_neon_fcvtas:
749 case Intrinsic::aarch64_neon_fcvtau:
750 case Intrinsic::aarch64_neon_fcvtzs:
751 case Intrinsic::aarch64_neon_fcvtzu:
752 case Intrinsic::aarch64_neon_fcvtms:
753 case Intrinsic::aarch64_neon_fcvtmu:
754 case Intrinsic::aarch64_neon_fcvtns:
755 case Intrinsic::aarch64_neon_fcvtnu:
756 case Intrinsic::aarch64_neon_fcvtps:
757 case Intrinsic::aarch64_neon_fcvtpu:
758 return true;
759 default:
760 break;
761 }
762 break;
763 default:
764 break;
765 }
766 return hasFPConstraints(MI, MRI, TRI, Depth);
767}
768
// onlyDefinesFP: \returns true if \p MI strictly produces FP/vector values
// (int->FP conversions, vector builds/inserts/extracts, NEON structured
// loads); otherwise falls back to hasFPConstraints.
// NOTE(review): doxygen scrape — line 771 (presumably the
// `const AArch64RegisterInfo &TRI,` parameter) and line 788 (presumably the
// nested switch over the intrinsic ID) are elided here.
769bool AArch64RegisterBankInfo::onlyDefinesFP(const MachineInstr &MI,
770 const MachineRegisterInfo &MRI,
772 unsigned Depth) const {
773 switch (MI.getOpcode()) {
774 case AArch64::G_DUP:
775 case AArch64::G_SADDLP:
776 case AArch64::G_UADDLP:
777 case TargetOpcode::G_SITOFP:
778 case TargetOpcode::G_UITOFP:
779 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
780 case TargetOpcode::G_INSERT_VECTOR_ELT:
781 case TargetOpcode::G_BUILD_VECTOR:
782 case TargetOpcode::G_BUILD_VECTOR_TRUNC:
783 case AArch64::G_SLI:
784 case AArch64::G_SRI:
785 case AArch64::G_FPTRUNC_ODD:
786 return true;
// NEON structured loads (ld1x*/ld2/ld3/ld4 and lane/replicate variants)
// define SIMD&FP registers.
787 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
789 case Intrinsic::aarch64_neon_ld1x2:
790 case Intrinsic::aarch64_neon_ld1x3:
791 case Intrinsic::aarch64_neon_ld1x4:
792 case Intrinsic::aarch64_neon_ld2:
793 case Intrinsic::aarch64_neon_ld2lane:
794 case Intrinsic::aarch64_neon_ld2r:
795 case Intrinsic::aarch64_neon_ld3:
796 case Intrinsic::aarch64_neon_ld3lane:
797 case Intrinsic::aarch64_neon_ld3r:
798 case Intrinsic::aarch64_neon_ld4:
799 case Intrinsic::aarch64_neon_ld4lane:
800 case Intrinsic::aarch64_neon_ld4r:
801 return true;
802 default:
803 break;
804 }
805 break;
806 default:
807 break;
808 }
809 return hasFPConstraints(MI, MRI, TRI, Depth);
810}
811
// prefersFPUse: like onlyDefinesFP, but additionally treats same-width
// G_SITOFP/G_UITOFP as preferring an FPR source (the conversion can then be
// done entirely on the FP side).
// NOTE(review): doxygen scrape — line 814 (presumably the
// `const AArch64RegisterInfo &TRI,` parameter) is elided here.
812bool AArch64RegisterBankInfo::prefersFPUse(const MachineInstr &MI,
813 const MachineRegisterInfo &MRI,
815 unsigned Depth) const {
816 switch (MI.getOpcode()) {
817 case TargetOpcode::G_SITOFP:
818 case TargetOpcode::G_UITOFP:
// Same source/destination width: no cross-bank widening needed.
819 return MRI.getType(MI.getOperand(0).getReg()).getSizeInBits() ==
820 MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
821 }
822 return onlyDefinesFP(MI, MRI, TRI, Depth);
823}
824
825bool AArch64RegisterBankInfo::isLoadFromFPType(const MachineInstr &MI) const {
826 // GMemOperation because we also want to match indexed loads.
827 auto *MemOp = cast<GMemOperation>(&MI);
828 const Value *LdVal = MemOp->getMMO().getValue();
829 if (!LdVal)
830 return false;
831
832 Type *EltTy = nullptr;
833 if (const GlobalValue *GV = dyn_cast<GlobalValue>(LdVal)) {
834 EltTy = GV->getValueType();
835 // Look at the first element of the struct to determine the type we are
836 // loading
837 while (StructType *StructEltTy = dyn_cast<StructType>(EltTy)) {
838 if (StructEltTy->getNumElements() == 0)
839 break;
840 EltTy = StructEltTy->getTypeAtIndex(0U);
841 }
842 // Look at the first element of the array to determine its type
843 if (isa<ArrayType>(EltTy))
844 EltTy = EltTy->getArrayElementType();
845 } else if (!isa<Constant>(LdVal)) {
846 // FIXME: grubbing around uses is pretty ugly, but with no more
847 // `getPointerElementType` there's not much else we can do.
848 for (const auto *LdUser : LdVal->users()) {
849 if (isa<LoadInst>(LdUser)) {
850 EltTy = LdUser->getType();
851 break;
852 }
853 if (isa<StoreInst>(LdUser) && LdUser->getOperand(1) == LdVal) {
854 EltTy = LdUser->getOperand(0)->getType();
855 break;
856 }
857 }
858 }
859 return EltTy && EltTy->isFPOrFPVectorTy();
860}
861
864 const unsigned Opc = MI.getOpcode();
865
866 // Try the default logic for non-generic instructions that are either copies
867 // or already have some operands assigned to banks.
868 if ((Opc != TargetOpcode::COPY && !isPreISelGenericOpcode(Opc)) ||
869 Opc == TargetOpcode::G_PHI) {
872 if (Mapping.isValid())
873 return Mapping;
874 }
875
876 const MachineFunction &MF = *MI.getParent()->getParent();
877 const MachineRegisterInfo &MRI = MF.getRegInfo();
880
881 switch (Opc) {
882 // G_{F|S|U}REM are not listed because they are not legal.
883 // Arithmetic ops.
884 case TargetOpcode::G_ADD:
885 case TargetOpcode::G_SUB:
886 case TargetOpcode::G_PTR_ADD:
887 case TargetOpcode::G_MUL:
888 case TargetOpcode::G_SDIV:
889 case TargetOpcode::G_UDIV:
890 // Bitwise ops.
891 case TargetOpcode::G_AND:
892 case TargetOpcode::G_OR:
893 case TargetOpcode::G_XOR:
894 // Floating point ops.
895 case TargetOpcode::G_FADD:
896 case TargetOpcode::G_FSUB:
897 case TargetOpcode::G_FMUL:
898 case TargetOpcode::G_FDIV:
899 case TargetOpcode::G_FMAXIMUM:
900 case TargetOpcode::G_FMINIMUM:
901 return getSameKindOfOperandsMapping(MI);
902 case TargetOpcode::G_FPEXT: {
903 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
904 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
906 DefaultMappingID, /*Cost*/ 1,
907 getFPExtMapping(DstTy.getSizeInBits(), SrcTy.getSizeInBits()),
908 /*NumOperands*/ 2);
909 }
910 // Shifts.
911 case TargetOpcode::G_SHL:
912 case TargetOpcode::G_LSHR:
913 case TargetOpcode::G_ASHR: {
914 LLT ShiftAmtTy = MRI.getType(MI.getOperand(2).getReg());
915 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
916 if (ShiftAmtTy.getSizeInBits() == 64 && SrcTy.getSizeInBits() == 32)
919 return getSameKindOfOperandsMapping(MI);
920 }
921 case TargetOpcode::G_BITCAST: {
922 Register SrcReg = MI.getOperand(1).getReg();
923 const RegisterBank *SrcRB = getRegBank(SrcReg, MRI, TRI);
924 if (SrcRB) {
925 TypeSize Size = getSizeInBits(SrcReg, MRI, TRI);
928 getCopyMapping(SrcRB->getID(), SrcRB->getID(), Size),
929 // We only care about the mapping of the destination.
930 /*NumOperands=*/2);
931 }
932 [[fallthrough]];
933 }
934 case TargetOpcode::COPY: {
935 Register DstReg = MI.getOperand(0).getReg();
936 Register SrcReg = MI.getOperand(1).getReg();
937 // Check if one of the register is not a generic register.
938 if ((DstReg.isPhysical() || !MRI.getType(DstReg).isValid()) ||
939 (SrcReg.isPhysical() || !MRI.getType(SrcReg).isValid())) {
940 const RegisterBank *DstRB = getRegBank(DstReg, MRI, TRI);
941 const RegisterBank *SrcRB = getRegBank(SrcReg, MRI, TRI);
942 if (!DstRB)
943 DstRB = SrcRB;
944 else if (!SrcRB)
945 SrcRB = DstRB;
946 // If both RB are null that means both registers are generic.
947 // We shouldn't be here.
948 assert(DstRB && SrcRB && "Both RegBank were nullptr");
949 TypeSize Size = getSizeInBits(DstReg, MRI, TRI);
951 DefaultMappingID, copyCost(*DstRB, *SrcRB, Size),
952 getCopyMapping(DstRB->getID(), SrcRB->getID(), Size),
953 // We only care about the mapping of the destination.
954 /*NumOperands*/ 1);
955 }
956 // Both registers are generic
957 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
958 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
959 TypeSize Size = DstTy.getSizeInBits();
960 bool DstIsGPR = !DstTy.isVector() && DstTy.getSizeInBits() <= 64;
961 bool SrcIsGPR = !SrcTy.isVector() && SrcTy.getSizeInBits() <= 64;
962 const RegisterBank &DstRB =
963 DstIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
964 const RegisterBank &SrcRB =
965 SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
967 DefaultMappingID, copyCost(DstRB, SrcRB, Size),
968 getCopyMapping(DstRB.getID(), SrcRB.getID(), Size),
969 // We only care about the mapping of the destination for COPY.
970 /*NumOperands*/ Opc == TargetOpcode::G_BITCAST ? 2 : 1);
971 }
972 case TargetOpcode::G_CONSTANT: {
973 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
974 TypeSize Size = DstTy.getSizeInBits();
975 if (!DstTy.isPointer() && (!DstTy.isScalar() || Size < 32 || Size > 64))
976 break;
977 // Scalar constants materialize in GPRs.
978 [[fallthrough]];
979 }
980 case TargetOpcode::G_BRCOND:
981 case TargetOpcode::G_FRAME_INDEX: {
982 // Operand 0 is the only banked operand and is mapped to GPR.
984 DefaultMappingID, /*Cost=*/1,
988 MRI.getType(MI.getOperand(0).getReg()).getSizeInBits()),
989 nullptr}),
990 /*NumOperands=*/2);
991 }
992 default:
993 break;
994 }
995
996 unsigned NumOperands = MI.getNumOperands();
997 unsigned MappingID = DefaultMappingID;
998
999 // Track the size and bank of each register. We don't do partial mappings.
1000 SmallVector<unsigned, 4> OpSize(NumOperands);
1001 SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
1002 for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
1003 auto &MO = MI.getOperand(Idx);
1004 if (!MO.isReg() || !MO.getReg())
1005 continue;
1006
1007 LLT Ty = MRI.getType(MO.getReg());
1008 if (!Ty.isValid())
1009 continue;
1010 OpSize[Idx] = Ty.getSizeInBits().getKnownMinValue();
1011
1012 // As a top-level guess, vectors including both scalable and non-scalable
1013 // ones go in FPRs, scalars and pointers in GPRs.
1014 // For floating-point instructions, scalars go in FPRs.
1015 if (Ty.isVector())
1016 OpRegBankIdx[Idx] = PMI_FirstFPR;
1018 (MO.isDef() && onlyDefinesFP(MI, MRI, TRI)) ||
1019 (MO.isUse() && onlyUsesFP(MI, MRI, TRI)) ||
1020 Ty.getSizeInBits() > 64)
1021 OpRegBankIdx[Idx] = PMI_FirstFPR;
1022 else
1023 OpRegBankIdx[Idx] = PMI_FirstGPR;
1024 }
1025
1026 unsigned Cost = 1;
1027 // Some of the floating-point instructions have mixed GPR and FPR operands:
1028 // fine-tune the computed mapping.
1029 switch (Opc) {
1030 case TargetOpcode::G_CONSTANT: {
1031 Register Dst = MI.getOperand(0).getReg();
1032 LLT DstTy = MRI.getType(Dst);
1033 if (DstTy.isScalar() && DstTy.getSizeInBits() < 32)
1034 MappingID = CustomMappingID;
1035 break;
1036 }
1037 case TargetOpcode::G_FCONSTANT: {
1038 if (preferGPRForFPImm(MI, MRI, STI)) {
1039 // Materialize in GPR and rely on later bank copies for FP uses.
1040 MappingID = CustomMappingID;
1041 OpRegBankIdx = {PMI_FirstGPR};
1042 }
1043 break;
1044 }
1045 case AArch64::G_DUP: {
1046 Register ScalarReg = MI.getOperand(1).getReg();
1047 LLT ScalarTy = MRI.getType(ScalarReg);
1048 auto ScalarDef = MRI.getVRegDef(ScalarReg);
1049 // We want to select dup(load) into LD1R.
1050 if (ScalarDef->getOpcode() == TargetOpcode::G_LOAD)
1051 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
1052 // s8 is an exception for G_DUP, which we always want on gpr.
1053 else if (ScalarTy.getSizeInBits() != 8 &&
1054 (getRegBank(ScalarReg, MRI, TRI) == &AArch64::FPRRegBank ||
1055 onlyDefinesFP(*ScalarDef, MRI, TRI)))
1056 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
1057 else {
1058 if (ScalarTy.getSizeInBits() < 32 &&
1059 getRegBank(ScalarReg, MRI, TRI) == &AArch64::GPRRegBank) {
1060 // Calls applyMappingImpl()
1061 MappingID = CustomMappingID;
1062 }
1063 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
1064 }
1065 break;
1066 }
1067 case TargetOpcode::G_TRUNC: {
1068 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
1069 if (!SrcTy.isVector() && SrcTy.getSizeInBits() == 128)
1070 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
1071 break;
1072 }
1073 case TargetOpcode::G_SITOFP:
1074 case TargetOpcode::G_UITOFP: {
1075 if (MRI.getType(MI.getOperand(0).getReg()).isVector())
1076 break;
1077 // Integer to FP conversions don't necessarily happen between GPR -> FPR
1078 // regbanks. They can also be done within an FPR register.
1079 Register SrcReg = MI.getOperand(1).getReg();
1080 if (getRegBank(SrcReg, MRI, TRI) == &AArch64::FPRRegBank &&
1081 MRI.getType(SrcReg).getSizeInBits() ==
1082 MRI.getType(MI.getOperand(0).getReg()).getSizeInBits())
1083 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
1084 else
1085 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
1086 break;
1087 }
1088 case TargetOpcode::G_FPTOSI_SAT:
1089 case TargetOpcode::G_FPTOUI_SAT:
1090 case TargetOpcode::G_FPTOSI:
1091 case TargetOpcode::G_FPTOUI:
1092 case TargetOpcode::G_INTRINSIC_LRINT:
1093 case TargetOpcode::G_INTRINSIC_LLRINT:
1094 case TargetOpcode::G_LROUND:
1095 case TargetOpcode::G_LLROUND: {
1096 LLT DstType = MRI.getType(MI.getOperand(0).getReg());
1097 if (DstType.isVector())
1098 break;
1099 if (DstType == LLT::scalar(16)) {
1100 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
1101 break;
1102 }
1103 TypeSize DstSize = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
1104 TypeSize SrcSize = getSizeInBits(MI.getOperand(1).getReg(), MRI, TRI);
1105 if (((DstSize == SrcSize) || STI.hasFeature(AArch64::FeatureFPRCVT)) &&
1106 all_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
1107 [&](const MachineInstr &UseMI) {
1108 return onlyUsesFP(UseMI, MRI, TRI) ||
1109 prefersFPUse(UseMI, MRI, TRI);
1110 }))
1111 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
1112 else
1113 OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
1114 break;
1115 }
1116 case TargetOpcode::G_FCMP: {
1117 // If the result is a vector, it must use a FPR.
1119 MRI.getType(MI.getOperand(0).getReg()).isVector() ? PMI_FirstFPR
1120 : PMI_FirstGPR;
1121 OpRegBankIdx = {Idx0,
1122 /* Predicate */ PMI_None, PMI_FirstFPR, PMI_FirstFPR};
1123 break;
1124 }
1125 case TargetOpcode::G_BITCAST:
1126 // This is going to be a cross register bank copy and this is expensive.
1127 if (OpRegBankIdx[0] != OpRegBankIdx[1])
1128 Cost = copyCost(
1129 *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[0]].RegBank,
1130 *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[1]].RegBank,
1131 TypeSize::getFixed(OpSize[0]));
1132 break;
1133 case TargetOpcode::G_LOAD: {
1134 // Loading in vector unit is slightly more expensive.
1135 // This is actually only true for the LD1R and co instructions,
1136 // but anyway for the fast mode this number does not matter and
1137 // for the greedy mode the cost of the cross bank copy will
1138 // offset this number.
1139 // FIXME: Should be derived from the scheduling model.
1140 if (OpRegBankIdx[0] != PMI_FirstGPR) {
1141 Cost = 2;
1142 break;
1143 }
1144
1145 if (cast<GLoad>(MI).isAtomic()) {
1146 // Atomics always use GPR destinations. Don't refine any further.
1147 OpRegBankIdx[0] = PMI_FirstGPR;
1148 if (MRI.getType(MI.getOperand(0).getReg()).getSizeInBits() < 32)
1149 MappingID = CustomMappingID;
1150 break;
1151 }
1152
1153 // Try to guess the type of the load from the MMO.
1154 if (isLoadFromFPType(MI)) {
1155 OpRegBankIdx[0] = PMI_FirstFPR;
1156 break;
1157 }
1158
1159 // Check if that load feeds fp instructions.
1160 // In that case, we want the default mapping to be on FPR
1161 // instead of blind map every scalar to GPR.
1162 if (any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
1163 [&](const MachineInstr &UseMI) {
1164 // If we have at least one direct or indirect use
1165 // in a FP instruction,
1166 // assume this was a floating point load in the IR. If it was
1167 // not, we would have had a bitcast before reaching that
1168 // instruction.
1169 //
1170 // Int->FP conversion operations are also captured in
1171 // prefersFPUse().
1172
1173 if (isPHIWithFPConstraints(UseMI, MRI, TRI))
1174 return true;
1175
1176 return onlyUsesFP(UseMI, MRI, TRI) ||
1177 prefersFPUse(UseMI, MRI, TRI);
1178 }))
1179 OpRegBankIdx[0] = PMI_FirstFPR;
1180
1181 // On GPR, extend any load < 32bits to 32bit.
1182 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
1183 if (Ty.isScalar() && Ty.getSizeInBits() < 32)
1184 MappingID = CustomMappingID;
1185 break;
1186 }
1187 case TargetOpcode::G_STORE:
1188 // Check if that store is fed by fp instructions.
1189 if (OpRegBankIdx[0] == PMI_FirstGPR) {
1190 Register VReg = MI.getOperand(0).getReg();
1191 if (VReg) {
1192 MachineInstr *DefMI = MRI.getVRegDef(VReg);
1193 if (onlyDefinesFP(*DefMI, MRI, TRI)) {
1194 OpRegBankIdx[0] = PMI_FirstFPR;
1195 break;
1196 }
1197 }
1198
1199 // On GPR, extend any store < 32bits to 32bit.
1200 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
1201 if (Ty.isScalar() && Ty.getSizeInBits() < 32)
1202 MappingID = CustomMappingID;
1203 }
1204 break;
1205 case TargetOpcode::G_INDEXED_STORE:
1206 if (OpRegBankIdx[1] == PMI_FirstGPR) {
1207 Register VReg = MI.getOperand(1).getReg();
1208 if (!VReg)
1209 break;
1210 MachineInstr *DefMI = MRI.getVRegDef(VReg);
1211 if (onlyDefinesFP(*DefMI, MRI, TRI))
1212 OpRegBankIdx[1] = PMI_FirstFPR;
1213 break;
1214 }
1215 break;
1216 case TargetOpcode::G_INDEXED_SEXTLOAD:
1217 case TargetOpcode::G_INDEXED_ZEXTLOAD:
1218 // These should always be GPR.
1219 OpRegBankIdx[0] = PMI_FirstGPR;
1220 break;
1221 case TargetOpcode::G_INDEXED_LOAD: {
1222 if (isLoadFromFPType(MI))
1223 OpRegBankIdx[0] = PMI_FirstFPR;
1224 break;
1225 }
1226 case TargetOpcode::G_SELECT: {
1227 // If the destination is FPR, preserve that.
1228 if (OpRegBankIdx[0] != PMI_FirstGPR)
1229 break;
1230
1231 // If we're taking in vectors, we have no choice but to put everything on
1232 // FPRs, except for the condition. The condition must always be on a GPR.
1233 LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
1234 if (SrcTy.isVector()) {
1236 break;
1237 }
1238
1239 // Try to minimize the number of copies. If we have more floating point
1240 // constrained values than not, then we'll put everything on FPR. Otherwise,
1241 // everything has to be on GPR.
1242 unsigned NumFP = 0;
1243
1244 // Check if the uses of the result always produce floating point values.
1245 //
1246 // For example:
1247 //
1248 // %z = G_SELECT %cond %x %y
1249 // fpr = G_FOO %z ...
1250 if (any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
1251 [&](MachineInstr &MI) { return onlyUsesFP(MI, MRI, TRI); }))
1252 ++NumFP;
1253
1254 // Check if the defs of the source values always produce floating point
1255 // values.
1256 //
1257 // For example:
1258 //
1259 // %x = G_SOMETHING_ALWAYS_FLOAT %a ...
1260 // %z = G_SELECT %cond %x %y
1261 //
1262 // Also check whether or not the sources have already been decided to be
1263 // FPR. Keep track of this.
1264 //
1265 // This doesn't check the condition, since it's just whatever is in NZCV.
1266 // This isn't passed explicitly in a register to fcsel/csel.
1267 for (unsigned Idx = 2; Idx < 4; ++Idx) {
1268 Register VReg = MI.getOperand(Idx).getReg();
1269 MachineInstr *DefMI = MRI.getVRegDef(VReg);
1270 if (getRegBank(VReg, MRI, TRI) == &AArch64::FPRRegBank ||
1271 onlyDefinesFP(*DefMI, MRI, TRI))
1272 ++NumFP;
1273 }
1274
1275 // If we have more FP constraints than not, then move everything over to
1276 // FPR.
1277 if (NumFP >= 2)
1279
1280 break;
1281 }
1282 case TargetOpcode::G_UNMERGE_VALUES: {
1283 // If the first operand belongs to a FPR register bank, then make sure that
1284 // we preserve that.
1285 if (OpRegBankIdx[0] != PMI_FirstGPR)
1286 break;
1287
1288 LLT SrcTy = MRI.getType(MI.getOperand(MI.getNumOperands()-1).getReg());
1289 // UNMERGE into scalars from a vector should always use FPR.
1290 // Likewise if any of the uses are FP instructions.
1291 if (SrcTy.isVector() || SrcTy == LLT::scalar(128) ||
1292 any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
1293 [&](MachineInstr &MI) { return onlyUsesFP(MI, MRI, TRI); })) {
1294 // Set the register bank of every operand to FPR.
1295 for (unsigned Idx = 0, NumOperands = MI.getNumOperands();
1296 Idx < NumOperands; ++Idx)
1297 OpRegBankIdx[Idx] = PMI_FirstFPR;
1298 }
1299 break;
1300 }
1301 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
1302 // Destination and source need to be FPRs.
1303 OpRegBankIdx[0] = PMI_FirstFPR;
1304 OpRegBankIdx[1] = PMI_FirstFPR;
1305
1306 // Index needs to be a GPR.
1307 OpRegBankIdx[2] = PMI_FirstGPR;
1308 break;
1309 case AArch64::G_SQSHLU_I:
1310 // Destination and source need to be FPRs.
1311 OpRegBankIdx[0] = PMI_FirstFPR;
1312 OpRegBankIdx[1] = PMI_FirstFPR;
1313
1314 // Shift Index needs to be a GPR.
1315 OpRegBankIdx[2] = PMI_FirstGPR;
1316 break;
1317
1318 case TargetOpcode::G_INSERT_VECTOR_ELT:
1319 OpRegBankIdx[0] = PMI_FirstFPR;
1320 OpRegBankIdx[1] = PMI_FirstFPR;
1321
1322 // The element may be either a GPR or FPR. Preserve that behaviour.
1323 if (getRegBank(MI.getOperand(2).getReg(), MRI, TRI) == &AArch64::FPRRegBank)
1324 OpRegBankIdx[2] = PMI_FirstFPR;
1325 else {
1326 // If the type is i8/i16, and the regbank will be GPR, then we change the
1327 // type to i32 in applyMappingImpl.
1328 LLT Ty = MRI.getType(MI.getOperand(2).getReg());
1329 if (Ty.getSizeInBits() == 8 || Ty.getSizeInBits() == 16) {
1330 // Calls applyMappingImpl()
1331 MappingID = CustomMappingID;
1332 }
1333 OpRegBankIdx[2] = PMI_FirstGPR;
1334 }
1335
1336 // Index needs to be a GPR.
1337 OpRegBankIdx[3] = PMI_FirstGPR;
1338 break;
1339 case TargetOpcode::G_EXTRACT: {
1340 // For s128 sources we have to use fpr unless we know otherwise.
1341 auto Src = MI.getOperand(1).getReg();
1342 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
1343 if (SrcTy.getSizeInBits() != 128)
1344 break;
1345 auto Idx = MRI.getRegClassOrNull(Src) == &AArch64::XSeqPairsClassRegClass
1346 ? PMI_FirstGPR
1347 : PMI_FirstFPR;
1348 OpRegBankIdx[0] = Idx;
1349 OpRegBankIdx[1] = Idx;
1350 break;
1351 }
1352 case TargetOpcode::G_BUILD_VECTOR: {
1353 // If the first source operand belongs to a FPR register bank, then make
1354 // sure that we preserve that.
1355 if (OpRegBankIdx[1] != PMI_FirstGPR)
1356 break;
1357 Register VReg = MI.getOperand(1).getReg();
1358 if (!VReg)
1359 break;
1360
1361 // Get the instruction that defined the source operand reg, and check if
1362 // it's a floating point operation. Or, if it's a type like s16 which
1363 // doesn't have a exact size gpr register class. The exception is if the
1364 // build_vector has all constant operands, which may be better to leave as
1365 // gpr without copies, so it can be matched in imported patterns.
1366 MachineInstr *DefMI = MRI.getVRegDef(VReg);
1367 unsigned DefOpc = DefMI->getOpcode();
1368 const LLT SrcTy = MRI.getType(VReg);
1369 if (all_of(MI.operands(), [&](const MachineOperand &Op) {
1370 return Op.isDef() || MRI.getVRegDef(Op.getReg())->getOpcode() ==
1371 TargetOpcode::G_CONSTANT;
1372 }))
1373 break;
1375 SrcTy.getSizeInBits() < 32 ||
1376 getRegBank(VReg, MRI, TRI) == &AArch64::FPRRegBank) {
1377 // Have a floating point op.
1378 // Make sure every operand gets mapped to a FPR register class.
1379 unsigned NumOperands = MI.getNumOperands();
1380 for (unsigned Idx = 0; Idx < NumOperands; ++Idx)
1381 OpRegBankIdx[Idx] = PMI_FirstFPR;
1382 }
1383 break;
1384 }
1385 case TargetOpcode::G_VECREDUCE_FADD:
1386 case TargetOpcode::G_VECREDUCE_FMUL:
1387 case TargetOpcode::G_VECREDUCE_FMAX:
1388 case TargetOpcode::G_VECREDUCE_FMIN:
1389 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
1390 case TargetOpcode::G_VECREDUCE_FMINIMUM:
1391 case TargetOpcode::G_VECREDUCE_ADD:
1392 case TargetOpcode::G_VECREDUCE_MUL:
1393 case TargetOpcode::G_VECREDUCE_AND:
1394 case TargetOpcode::G_VECREDUCE_OR:
1395 case TargetOpcode::G_VECREDUCE_XOR:
1396 case TargetOpcode::G_VECREDUCE_SMAX:
1397 case TargetOpcode::G_VECREDUCE_SMIN:
1398 case TargetOpcode::G_VECREDUCE_UMAX:
1399 case TargetOpcode::G_VECREDUCE_UMIN:
1400 // Reductions produce a scalar value from a vector, the scalar should be on
1401 // FPR bank.
1402 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
1403 break;
1404 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
1405 case TargetOpcode::G_VECREDUCE_SEQ_FMUL:
1406 // These reductions also take a scalar accumulator input.
1407 // Assign them FPR for now.
1408 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR, PMI_FirstFPR};
1409 break;
1410 case TargetOpcode::G_INTRINSIC:
1411 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
1412 switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
1413 case Intrinsic::aarch64_neon_fcvtas:
1414 case Intrinsic::aarch64_neon_fcvtau:
1415 case Intrinsic::aarch64_neon_fcvtzs:
1416 case Intrinsic::aarch64_neon_fcvtzu:
1417 case Intrinsic::aarch64_neon_fcvtms:
1418 case Intrinsic::aarch64_neon_fcvtmu:
1419 case Intrinsic::aarch64_neon_fcvtns:
1420 case Intrinsic::aarch64_neon_fcvtnu:
1421 case Intrinsic::aarch64_neon_fcvtps:
1422 case Intrinsic::aarch64_neon_fcvtpu: {
1423 OpRegBankIdx[2] = PMI_FirstFPR;
1424 if (MRI.getType(MI.getOperand(0).getReg()).isVector()) {
1425 OpRegBankIdx[0] = PMI_FirstFPR;
1426 break;
1427 }
1428 TypeSize DstSize = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
1429 TypeSize SrcSize = getSizeInBits(MI.getOperand(2).getReg(), MRI, TRI);
1430 // Fp conversions to i16 must be kept on fp register banks to ensure
1431 // proper saturation, as there are no 16-bit gprs.
1432 // In addition, conversion intrinsics have fpr output when the input
1433 // size matches the output size, or FPRCVT is present.
1434 if (DstSize == 16 ||
1435 ((DstSize == SrcSize || STI.hasFeature(AArch64::FeatureFPRCVT)) &&
1436 all_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
1437 [&](const MachineInstr &UseMI) {
1438 return onlyUsesFP(UseMI, MRI, TRI) ||
1439 prefersFPUse(UseMI, MRI, TRI);
1440 })))
1441 OpRegBankIdx[0] = PMI_FirstFPR;
1442 else
1443 OpRegBankIdx[0] = PMI_FirstGPR;
1444 break;
1445 }
1446 case Intrinsic::aarch64_neon_vcvtfxs2fp:
1447 case Intrinsic::aarch64_neon_vcvtfxu2fp:
1448 case Intrinsic::aarch64_neon_vcvtfp2fxs:
1449 case Intrinsic::aarch64_neon_vcvtfp2fxu:
1450 // Override these intrinsics, because they would have a partial
1451 // mapping. This is needed for 'half' types, which otherwise don't
1452 // get legalised correctly.
1453 OpRegBankIdx[0] = PMI_FirstFPR;
1454 OpRegBankIdx[2] = PMI_FirstFPR;
1455 // OpRegBankIdx[1] is the intrinsic ID.
1456 // OpRegBankIdx[3] is an integer immediate.
1457 break;
1458 default: {
1459 // Check if we know that the intrinsic has any constraints on its register
1460 // banks. If it does, then update the mapping accordingly.
1461 unsigned Idx = 0;
1462 if (onlyDefinesFP(MI, MRI, TRI))
1463 for (const auto &Op : MI.defs()) {
1464 if (Op.isReg())
1465 OpRegBankIdx[Idx] = PMI_FirstFPR;
1466 ++Idx;
1467 }
1468 else
1469 Idx += MI.getNumExplicitDefs();
1470
1471 if (onlyUsesFP(MI, MRI, TRI))
1472 for (const auto &Op : MI.explicit_uses()) {
1473 if (Op.isReg())
1474 OpRegBankIdx[Idx] = PMI_FirstFPR;
1475 ++Idx;
1476 }
1477 break;
1478 }
1479 }
1480 break;
1481 }
1482 }
1483
1484 // Finally construct the computed mapping.
1485 SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
1486 for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
1487 if (MI.getOperand(Idx).isReg() && MI.getOperand(Idx).getReg()) {
1488 LLT Ty = MRI.getType(MI.getOperand(Idx).getReg());
1489 if (!Ty.isValid())
1490 continue;
1491 auto Mapping =
1492 getValueMapping(OpRegBankIdx[Idx], TypeSize::getFixed(OpSize[Idx]));
1493 if (!Mapping->isValid())
1495
1496 OpdsMapping[Idx] = Mapping;
1497 }
1498 }
1499
1500 return getInstructionMapping(MappingID, Cost, getOperandsMapping(OpdsMapping),
1501 NumOperands);
1502}
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
static unsigned getIntrinsicID(const SDNode *N)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
#define CHECK_VALUEMAP(RBName, Size)
static bool isFPIntrinsic(const MachineRegisterInfo &MRI, const MachineInstr &MI)
#define CHECK_VALUEMAP_3OPS(RBName, Size)
static bool foldTruncOfI32Constant(MachineInstr &MI, unsigned OpIdx, MachineRegisterInfo &MRI, const AArch64RegisterBankInfo &RBI)
static const unsigned CustomMappingID
#define CHECK_PARTIALMAP(Idx, ValStartIdx, ValLength, RB)
#define CHECK_VALUEMAP_CROSSREGCPY(RBNameDst, RBNameSrc, Size)
#define CHECK_VALUEMAP_FPEXT(DstSize, SrcSize)
static bool preferGPRForFPImm(const MachineInstr &MI, const MachineRegisterInfo &MRI, const AArch64Subtarget &STI)
This file declares the targeting of the RegisterBankInfo class for AArch64.
This file implements a class to represent arbitrary precision integral constant values and operations...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
IRTranslator LLVM IR MI
Implement a low-level type suitable for MachineInstr level instruction selection.
This file declares the MachineIRBuilder class.
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
ppc ctr loops verify
static const MCPhysReg FPR[]
FPR - The set of FP registers that should be allocated for arguments on Darwin and AIX.
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
static unsigned getRegBankBaseIdxOffset(unsigned RBIdx, TypeSize Size)
static const RegisterBankInfo::ValueMapping * getCopyMapping(unsigned DstBankID, unsigned SrcBankID, TypeSize Size)
Get the pointer to the ValueMapping of the operands of a copy instruction from the SrcBankID register...
static bool checkPartialMappingIdx(PartialMappingIdx FirstAlias, PartialMappingIdx LastAlias, ArrayRef< PartialMappingIdx > Order)
static const RegisterBankInfo::PartialMapping PartMappings[]
static const RegisterBankInfo::ValueMapping * getFPExtMapping(unsigned DstSize, unsigned SrcSize)
Get the instruction mapping for G_FPEXT.
static const RegisterBankInfo::ValueMapping * getValueMapping(PartialMappingIdx RBIdx, TypeSize Size)
Get the pointer to the ValueMapping representing the RegisterBank at RBIdx with a size of Size.
static const RegisterBankInfo::ValueMapping ValMappings[]
This class provides the information for the target register banks.
InstructionMappings getInstrAlternativeMappings(const MachineInstr &MI) const override
Get the alternative mappings for MI.
unsigned copyCost(const RegisterBank &A, const RegisterBank &B, TypeSize Size) const override
Get the cost of a copy from B to A, or put differently, get the cost of A = COPY B.
const RegisterBank & getRegBankFromRegClass(const TargetRegisterClass &RC, LLT Ty) const override
Get a register bank that covers RC.
AArch64RegisterBankInfo(const TargetRegisterInfo &TRI)
const InstructionMapping & getInstrMapping(const MachineInstr &MI) const override
Get the mapping of the different operands of MI on the register bank.
const AArch64RegisterInfo * getRegisterInfo() const override
const AArch64TargetLowering * getTargetLowering() const override
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
bool isFPImmLegalAsFMov(const APFloat &Imm, EVT VT) const
Class for arbitrary precision integers.
Definition APInt.h:78
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1563
static LLVM_ABI APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
Definition APInt.cpp:652
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:358
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr bool isVector() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
static LLT integer(unsigned SizeInBits)
LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II)
Set the insertion point before the specified position.
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_TRUNC Op.
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
Register getReg(unsigned Idx) const
Get the register for the operand index.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI MachineInstrBundleIterator< MachineInstr > eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
MachineOperand class - Representation of each machine instruction operand.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
const RegisterBank * getRegBank(Register Reg) const
Return the register bank of Reg.
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
LLVM_ABI void setRegBank(Register Reg, const RegisterBank &RegBank)
Set the register bank to RegBank for Reg.
iterator_range< use_instr_nodbg_iterator > use_nodbg_instructions(Register Reg) const
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
const TargetRegisterClass * getRegClassOrNull(Register Reg) const
Return the register class of Reg, or null if Reg has not been assigned a register class yet.
bool use_empty(Register RegNo) const
use_empty - Return true if there are no instructions using the specified register.
Helper class that represents how the value of an instruction may be mapped and what is the related co...
bool isValid() const
Check whether this object is valid.
virtual InstructionMappings getInstrAlternativeMappings(const MachineInstr &MI) const
Get the alternative mappings for MI.
const InstructionMapping & getInstructionMapping(unsigned ID, unsigned Cost, const ValueMapping *OperandsMapping, unsigned NumOperands) const
Method to get a uniquely generated InstructionMapping.
static void applyDefaultMapping(const OperandsMapper &OpdMapper)
Helper method to apply something that is like the default mapping.
const InstructionMapping & getInvalidInstructionMapping() const
Method to get a uniquely generated invalid InstructionMapping.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getMaximumSize(unsigned RegBankID) const
Get the maximum size in bits that fits in the given register bank.
TypeSize getSizeInBits(Register Reg, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) const
Get the size in bits of Reg.
virtual const RegisterBank & getRegBankFromRegClass(const TargetRegisterClass &RC, LLT Ty) const
Get a register bank that covers RC.
const ValueMapping * getOperandsMapping(Iterator Begin, Iterator End) const
Get the uniquely generated array of ValueMapping for the elements of between Begin and End.
static const unsigned DefaultMappingID
Identifier used when the related instruction mapping instance is generated by target independent code...
SmallVector< const InstructionMapping *, 4 > InstructionMappings
Convenient type to represent the alternatives for mapping an instruction.
virtual unsigned copyCost(const RegisterBank &A, const RegisterBank &B, TypeSize Size) const
Get the cost of a copy from B to A, or put differently, get the cost of A = COPY B.
const InstructionMapping & getInstrMappingImpl(const MachineInstr &MI) const
Try to get the mapping of MI.
This class implements the register bank concept.
LLVM_ABI bool covers(const TargetRegisterClass &RC) const
Check whether this register bank covers RC.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
unsigned getID() const
Return the register class ID number.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
Type * getArrayElementType() const
Definition Type.h:427
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:227
iterator_range< user_iterator > users()
Definition Value.h:426
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static bool isAdvSIMDModImmType4(uint64_t Imm)
OperandType
Operands are tagged with one of the values of this enum.
Definition MCInstrDesc.h:59
This is an optimization pass for GlobalISel generic memory operations.
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
InstructionCost Cost
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
bool isPreISelGenericOptimizationHint(unsigned Opcode)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1745
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
DWARFExpression::Operation Op
void call_once(once_flag &flag, Function &&F, Args &&... ArgList)
Execute the function specified as a parameter once.
Definition Threading.h:86
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI bool isPreISelGenericFloatingPointOpcode(unsigned Opc)
Returns whether opcode Opc is a pre-isel generic floating-point opcode, having only floating-point op...
Definition Utils.cpp:1706
Extended Value Type.
Definition ValueTypes.h:35
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
Definition ValueTypes.h:55
The llvm::once_flag structure.
Definition Threading.h:67