LLVM 20.0.0git
AArch64RegisterBankInfo.cpp
Go to the documentation of this file.
1//===- AArch64RegisterBankInfo.cpp ----------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the RegisterBankInfo class for
10/// AArch64.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
15#include "AArch64RegisterInfo.h"
17#include "llvm/ADT/STLExtras.h"
32#include "llvm/IR/IntrinsicsAArch64.h"
35#include <algorithm>
36#include <cassert>
37
38#define GET_TARGET_REGBANK_IMPL
39#include "AArch64GenRegisterBank.inc"
40
41// This file will be TableGen'ed at some point.
42#include "AArch64GenRegisterBankInfo.def"
43
44using namespace llvm;
// Mapping ID used to signal that applyMappingImpl() must run custom code
// (see G_DUP / G_INSERT_VECTOR_ELT handling in getInstrMapping).
// constexpr: this is a compile-time constant, not merely a const global.
static constexpr unsigned CustomMappingID = 1;
46
48 const TargetRegisterInfo &TRI) {
49 static llvm::once_flag InitializeRegisterBankFlag;
50
51 static auto InitializeRegisterBankOnce = [&]() {
52 // We have only one set of register banks, whatever the subtarget
53 // is. Therefore, the initialization of the RegBanks table should be
54 // done only once. Indeed the table of all register banks
55 // (AArch64::RegBanks) is unique in the compiler. At some point, it
56 // will get tablegen'ed and the whole constructor becomes empty.
57
58 const RegisterBank &RBGPR = getRegBank(AArch64::GPRRegBankID);
59 (void)RBGPR;
60 assert(&AArch64::GPRRegBank == &RBGPR &&
61 "The order in RegBanks is messed up");
62
63 const RegisterBank &RBFPR = getRegBank(AArch64::FPRRegBankID);
64 (void)RBFPR;
65 assert(&AArch64::FPRRegBank == &RBFPR &&
66 "The order in RegBanks is messed up");
67
68 const RegisterBank &RBCCR = getRegBank(AArch64::CCRegBankID);
69 (void)RBCCR;
70 assert(&AArch64::CCRegBank == &RBCCR &&
71 "The order in RegBanks is messed up");
72
73 // The GPR register bank is fully defined by all the registers in
74 // GR64all + its subclasses.
75 assert(RBGPR.covers(*TRI.getRegClass(AArch64::GPR32RegClassID)) &&
76 "Subclass not added?");
77 assert(getMaximumSize(RBGPR.getID()) == 128 &&
78 "GPRs should hold up to 128-bit");
79
80 // The FPR register bank is fully defined by all the registers in
81 // GR64all + its subclasses.
82 assert(RBFPR.covers(*TRI.getRegClass(AArch64::QQRegClassID)) &&
83 "Subclass not added?");
84 assert(RBFPR.covers(*TRI.getRegClass(AArch64::FPR64RegClassID)) &&
85 "Subclass not added?");
86 assert(getMaximumSize(RBFPR.getID()) == 512 &&
87 "FPRs should hold up to 512-bit via QQQQ sequence");
88
89 assert(RBCCR.covers(*TRI.getRegClass(AArch64::CCRRegClassID)) &&
90 "Class not added?");
91 assert(getMaximumSize(RBCCR.getID()) == 32 &&
92 "CCR should hold up to 32-bit");
93
94 // Check that the TableGen'ed like file is in sync we our expectations.
95 // First, the Idx.
98 "PartialMappingIdx's are incorrectly ordered");
102 "PartialMappingIdx's are incorrectly ordered");
103// Now, the content.
104// Check partial mapping.
105#define CHECK_PARTIALMAP(Idx, ValStartIdx, ValLength, RB) \
106 do { \
107 assert( \
108 checkPartialMap(PartialMappingIdx::Idx, ValStartIdx, ValLength, RB) && \
109 #Idx " is incorrectly initialized"); \
110 } while (false)
111
112 CHECK_PARTIALMAP(PMI_GPR32, 0, 32, RBGPR);
113 CHECK_PARTIALMAP(PMI_GPR64, 0, 64, RBGPR);
114 CHECK_PARTIALMAP(PMI_GPR128, 0, 128, RBGPR);
115 CHECK_PARTIALMAP(PMI_FPR16, 0, 16, RBFPR);
116 CHECK_PARTIALMAP(PMI_FPR32, 0, 32, RBFPR);
117 CHECK_PARTIALMAP(PMI_FPR64, 0, 64, RBFPR);
118 CHECK_PARTIALMAP(PMI_FPR128, 0, 128, RBFPR);
119 CHECK_PARTIALMAP(PMI_FPR256, 0, 256, RBFPR);
120 CHECK_PARTIALMAP(PMI_FPR512, 0, 512, RBFPR);
121
122// Check value mapping.
123#define CHECK_VALUEMAP_IMPL(RBName, Size, Offset) \
124 do { \
125 assert(checkValueMapImpl(PartialMappingIdx::PMI_##RBName##Size, \
126 PartialMappingIdx::PMI_First##RBName, Size, \
127 Offset) && \
128 #RBName #Size " " #Offset " is incorrectly initialized"); \
129 } while (false)
130
131#define CHECK_VALUEMAP(RBName, Size) CHECK_VALUEMAP_IMPL(RBName, Size, 0)
132
133 CHECK_VALUEMAP(GPR, 32);
134 CHECK_VALUEMAP(GPR, 64);
135 CHECK_VALUEMAP(GPR, 128);
136 CHECK_VALUEMAP(FPR, 16);
137 CHECK_VALUEMAP(FPR, 32);
138 CHECK_VALUEMAP(FPR, 64);
139 CHECK_VALUEMAP(FPR, 128);
140 CHECK_VALUEMAP(FPR, 256);
141 CHECK_VALUEMAP(FPR, 512);
142
143// Check the value mapping for 3-operands instructions where all the operands
144// map to the same value mapping.
145#define CHECK_VALUEMAP_3OPS(RBName, Size) \
146 do { \
147 CHECK_VALUEMAP_IMPL(RBName, Size, 0); \
148 CHECK_VALUEMAP_IMPL(RBName, Size, 1); \
149 CHECK_VALUEMAP_IMPL(RBName, Size, 2); \
150 } while (false)
151
152 CHECK_VALUEMAP_3OPS(GPR, 32);
153 CHECK_VALUEMAP_3OPS(GPR, 64);
154 CHECK_VALUEMAP_3OPS(GPR, 128);
160
161#define CHECK_VALUEMAP_CROSSREGCPY(RBNameDst, RBNameSrc, Size) \
162 do { \
163 unsigned PartialMapDstIdx = PMI_##RBNameDst##Size - PMI_Min; \
164 unsigned PartialMapSrcIdx = PMI_##RBNameSrc##Size - PMI_Min; \
165 (void)PartialMapDstIdx; \
166 (void)PartialMapSrcIdx; \
167 const ValueMapping *Map = getCopyMapping(AArch64::RBNameDst##RegBankID, \
168 AArch64::RBNameSrc##RegBankID, \
169 TypeSize::getFixed(Size)); \
170 (void)Map; \
171 assert(Map[0].BreakDown == \
172 &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] && \
173 Map[0].NumBreakDowns == 1 && \
174 #RBNameDst #Size " Dst is incorrectly initialized"); \
175 assert(Map[1].BreakDown == \
176 &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] && \
177 Map[1].NumBreakDowns == 1 && \
178 #RBNameSrc #Size " Src is incorrectly initialized"); \
179 \
180 } while (false)
181
182 CHECK_VALUEMAP_CROSSREGCPY(GPR, GPR, 32);
184 CHECK_VALUEMAP_CROSSREGCPY(GPR, GPR, 64);
190
191#define CHECK_VALUEMAP_FPEXT(DstSize, SrcSize) \
192 do { \
193 unsigned PartialMapDstIdx = PMI_FPR##DstSize - PMI_Min; \
194 unsigned PartialMapSrcIdx = PMI_FPR##SrcSize - PMI_Min; \
195 (void)PartialMapDstIdx; \
196 (void)PartialMapSrcIdx; \
197 const ValueMapping *Map = getFPExtMapping(DstSize, SrcSize); \
198 (void)Map; \
199 assert(Map[0].BreakDown == \
200 &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] && \
201 Map[0].NumBreakDowns == 1 && "FPR" #DstSize \
202 " Dst is incorrectly initialized"); \
203 assert(Map[1].BreakDown == \
204 &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] && \
205 Map[1].NumBreakDowns == 1 && "FPR" #SrcSize \
206 " Src is incorrectly initialized"); \
207 \
208 } while (false)
209
210 CHECK_VALUEMAP_FPEXT(32, 16);
211 CHECK_VALUEMAP_FPEXT(64, 16);
212 CHECK_VALUEMAP_FPEXT(64, 32);
213 CHECK_VALUEMAP_FPEXT(128, 64);
214
215 assert(verify(TRI) && "Invalid register bank information");
216 };
217
218 llvm::call_once(InitializeRegisterBankFlag, InitializeRegisterBankOnce);
219}
220
222 const RegisterBank &B,
223 const TypeSize Size) const {
224 // What do we do with different size?
225 // copy are same size.
226 // Will introduce other hooks for different size:
227 // * extract cost.
228 // * build_sequence cost.
229
230 // Copy from (resp. to) GPR to (resp. from) FPR involves FMOV.
231 // FIXME: This should be deduced from the scheduling model.
232 if (&A == &AArch64::GPRRegBank && &B == &AArch64::FPRRegBank)
233 // FMOVXDr or FMOVWSr.
234 return 5;
235 if (&A == &AArch64::FPRRegBank && &B == &AArch64::GPRRegBank)
236 // FMOVDXr or FMOVSWr.
237 return 4;
238
240}
241
242const RegisterBank &
244 LLT Ty) const {
245 switch (RC.getID()) {
246 case AArch64::GPR64sponlyRegClassID:
247 return getRegBank(AArch64::GPRRegBankID);
248 default:
250 }
251}
252
255 const MachineInstr &MI) const {
256 const MachineFunction &MF = *MI.getParent()->getParent();
257 const TargetSubtargetInfo &STI = MF.getSubtarget();
258 const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
259 const MachineRegisterInfo &MRI = MF.getRegInfo();
260
261 switch (MI.getOpcode()) {
262 case TargetOpcode::G_OR: {
263 // 32 and 64-bit or can be mapped on either FPR or
264 // GPR for the same cost.
265 TypeSize Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
266 if (Size != 32 && Size != 64)
267 break;
268
269 // If the instruction has any implicit-defs or uses,
270 // do not mess with it.
271 if (MI.getNumOperands() != 3)
272 break;
273 InstructionMappings AltMappings;
274 const InstructionMapping &GPRMapping = getInstructionMapping(
275 /*ID*/ 1, /*Cost*/ 1, getValueMapping(PMI_FirstGPR, Size),
276 /*NumOperands*/ 3);
277 const InstructionMapping &FPRMapping = getInstructionMapping(
278 /*ID*/ 2, /*Cost*/ 1, getValueMapping(PMI_FirstFPR, Size),
279 /*NumOperands*/ 3);
280
281 AltMappings.push_back(&GPRMapping);
282 AltMappings.push_back(&FPRMapping);
283 return AltMappings;
284 }
285 case TargetOpcode::G_BITCAST: {
286 TypeSize Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
287 if (Size != 32 && Size != 64)
288 break;
289
290 // If the instruction has any implicit-defs or uses,
291 // do not mess with it.
292 if (MI.getNumOperands() != 2)
293 break;
294
295 InstructionMappings AltMappings;
296 const InstructionMapping &GPRMapping = getInstructionMapping(
297 /*ID*/ 1, /*Cost*/ 1,
298 getCopyMapping(AArch64::GPRRegBankID, AArch64::GPRRegBankID, Size),
299 /*NumOperands*/ 2);
300 const InstructionMapping &FPRMapping = getInstructionMapping(
301 /*ID*/ 2, /*Cost*/ 1,
302 getCopyMapping(AArch64::FPRRegBankID, AArch64::FPRRegBankID, Size),
303 /*NumOperands*/ 2);
304 const InstructionMapping &GPRToFPRMapping = getInstructionMapping(
305 /*ID*/ 3,
306 /*Cost*/
307 copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank,
309 getCopyMapping(AArch64::FPRRegBankID, AArch64::GPRRegBankID, Size),
310 /*NumOperands*/ 2);
311 const InstructionMapping &FPRToGPRMapping = getInstructionMapping(
312 /*ID*/ 3,
313 /*Cost*/
314 copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank,
316 getCopyMapping(AArch64::GPRRegBankID, AArch64::FPRRegBankID, Size),
317 /*NumOperands*/ 2);
318
319 AltMappings.push_back(&GPRMapping);
320 AltMappings.push_back(&FPRMapping);
321 AltMappings.push_back(&GPRToFPRMapping);
322 AltMappings.push_back(&FPRToGPRMapping);
323 return AltMappings;
324 }
325 case TargetOpcode::G_LOAD: {
326 TypeSize Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
327 if (Size != 64)
328 break;
329
330 // If the instruction has any implicit-defs or uses,
331 // do not mess with it.
332 if (MI.getNumOperands() != 2)
333 break;
334
335 InstructionMappings AltMappings;
336 const InstructionMapping &GPRMapping = getInstructionMapping(
337 /*ID*/ 1, /*Cost*/ 1,
340 // Addresses are GPR 64-bit.
342 /*NumOperands*/ 2);
343 const InstructionMapping &FPRMapping = getInstructionMapping(
344 /*ID*/ 2, /*Cost*/ 1,
347 // Addresses are GPR 64-bit.
349 /*NumOperands*/ 2);
350
351 AltMappings.push_back(&GPRMapping);
352 AltMappings.push_back(&FPRMapping);
353 return AltMappings;
354 }
355 default:
356 break;
357 }
359}
360
361void AArch64RegisterBankInfo::applyMappingImpl(
362 MachineIRBuilder &Builder, const OperandsMapper &OpdMapper) const {
363 MachineInstr &MI = OpdMapper.getMI();
364 MachineRegisterInfo &MRI = OpdMapper.getMRI();
365
366 switch (MI.getOpcode()) {
367 case TargetOpcode::G_OR:
368 case TargetOpcode::G_BITCAST:
369 case TargetOpcode::G_LOAD:
370 // Those ID must match getInstrAlternativeMappings.
371 assert((OpdMapper.getInstrMapping().getID() >= 1 &&
372 OpdMapper.getInstrMapping().getID() <= 4) &&
373 "Don't know how to handle that ID");
374 return applyDefaultMapping(OpdMapper);
375 case TargetOpcode::G_INSERT_VECTOR_ELT: {
376 // Extend smaller gpr operands to 32 bit.
377 Builder.setInsertPt(*MI.getParent(), MI.getIterator());
378 auto Ext = Builder.buildAnyExt(LLT::scalar(32), MI.getOperand(2).getReg());
379 MRI.setRegBank(Ext.getReg(0), getRegBank(AArch64::GPRRegBankID));
380 MI.getOperand(2).setReg(Ext.getReg(0));
381 return applyDefaultMapping(OpdMapper);
382 }
383 case AArch64::G_DUP: {
384 // Extend smaller gpr to 32-bits
385 assert(MRI.getType(MI.getOperand(1).getReg()).getSizeInBits() < 32 &&
386 "Expected sources smaller than 32-bits");
387 Builder.setInsertPt(*MI.getParent(), MI.getIterator());
388
389 Register ConstReg;
390 auto ConstMI = MRI.getVRegDef(MI.getOperand(1).getReg());
391 if (ConstMI->getOpcode() == TargetOpcode::G_CONSTANT) {
392 auto CstVal = ConstMI->getOperand(1).getCImm()->getValue();
393 ConstReg =
394 Builder.buildConstant(LLT::scalar(32), CstVal.sext(32)).getReg(0);
395 } else {
396 ConstReg = Builder.buildAnyExt(LLT::scalar(32), MI.getOperand(1).getReg())
397 .getReg(0);
398 }
399 MRI.setRegBank(ConstReg, getRegBank(AArch64::GPRRegBankID));
400 MI.getOperand(1).setReg(ConstReg);
401 return applyDefaultMapping(OpdMapper);
402 }
403 default:
404 llvm_unreachable("Don't know how to handle that operation");
405 }
406}
407
409AArch64RegisterBankInfo::getSameKindOfOperandsMapping(
410 const MachineInstr &MI) const {
411 const unsigned Opc = MI.getOpcode();
412 const MachineFunction &MF = *MI.getParent()->getParent();
413 const MachineRegisterInfo &MRI = MF.getRegInfo();
414
415 unsigned NumOperands = MI.getNumOperands();
416 assert(NumOperands <= 3 &&
417 "This code is for instructions with 3 or less operands");
418
419 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
421 bool IsFPR = Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
422
424
425#ifndef NDEBUG
426 // Make sure all the operands are using similar size and type.
427 // Should probably be checked by the machine verifier.
428 // This code won't catch cases where the number of lanes is
429 // different between the operands.
430 // If we want to go to that level of details, it is probably
431 // best to check that the types are the same, period.
432 // Currently, we just check that the register banks are the same
433 // for each types.
434 for (unsigned Idx = 1; Idx != NumOperands; ++Idx) {
435 LLT OpTy = MRI.getType(MI.getOperand(Idx).getReg());
436 assert(
438 RBIdx, OpTy.getSizeInBits()) ==
440 "Operand has incompatible size");
441 bool OpIsFPR = OpTy.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
442 (void)OpIsFPR;
443 assert(IsFPR == OpIsFPR && "Operand has incompatible type");
444 }
445#endif // End NDEBUG.
446
448 getValueMapping(RBIdx, Size), NumOperands);
449}
450
451/// \returns true if a given intrinsic only uses and defines FPRs.
453 const MachineInstr &MI) {
454 // TODO: Add more intrinsics.
455 switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
456 default:
457 return false;
458 case Intrinsic::aarch64_neon_uaddlv:
459 case Intrinsic::aarch64_neon_uaddv:
460 case Intrinsic::aarch64_neon_saddv:
461 case Intrinsic::aarch64_neon_umaxv:
462 case Intrinsic::aarch64_neon_smaxv:
463 case Intrinsic::aarch64_neon_uminv:
464 case Intrinsic::aarch64_neon_sminv:
465 case Intrinsic::aarch64_neon_faddv:
466 case Intrinsic::aarch64_neon_fmaxv:
467 case Intrinsic::aarch64_neon_fminv:
468 case Intrinsic::aarch64_neon_fmaxnmv:
469 case Intrinsic::aarch64_neon_fminnmv:
470 return true;
471 case Intrinsic::aarch64_neon_saddlv: {
472 const LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
473 return SrcTy.getElementType().getSizeInBits() >= 16 &&
474 SrcTy.getElementCount().getFixedValue() >= 4;
475 }
476 }
477}
478
479bool AArch64RegisterBankInfo::isPHIWithFPContraints(
480 const MachineInstr &MI, const MachineRegisterInfo &MRI,
481 const TargetRegisterInfo &TRI, const unsigned Depth) const {
482 if (!MI.isPHI() || Depth > MaxFPRSearchDepth)
483 return false;
484
485 return any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
486 [&](const MachineInstr &UseMI) {
487 if (onlyUsesFP(UseMI, MRI, TRI, Depth + 1))
488 return true;
489 return isPHIWithFPContraints(UseMI, MRI, TRI, Depth + 1);
490 });
491}
492
493bool AArch64RegisterBankInfo::hasFPConstraints(const MachineInstr &MI,
495 const TargetRegisterInfo &TRI,
496 unsigned Depth) const {
497 unsigned Op = MI.getOpcode();
498 if (Op == TargetOpcode::G_INTRINSIC && isFPIntrinsic(MRI, MI))
499 return true;
500
501 // Do we have an explicit floating point instruction?
503 return true;
504
505 // No. Check if we have a copy-like instruction. If we do, then we could
506 // still be fed by floating point instructions.
507 if (Op != TargetOpcode::COPY && !MI.isPHI() &&
509 return false;
510
511 // Check if we already know the register bank.
512 auto *RB = getRegBank(MI.getOperand(0).getReg(), MRI, TRI);
513 if (RB == &AArch64::FPRRegBank)
514 return true;
515 if (RB == &AArch64::GPRRegBank)
516 return false;
517
518 // We don't know anything.
519 //
520 // If we have a phi, we may be able to infer that it will be assigned a FPR
521 // based off of its inputs.
522 if (!MI.isPHI() || Depth > MaxFPRSearchDepth)
523 return false;
524
525 return any_of(MI.explicit_uses(), [&](const MachineOperand &Op) {
526 return Op.isReg() &&
527 onlyDefinesFP(*MRI.getVRegDef(Op.getReg()), MRI, TRI, Depth + 1);
528 });
529}
530
531bool AArch64RegisterBankInfo::onlyUsesFP(const MachineInstr &MI,
533 const TargetRegisterInfo &TRI,
534 unsigned Depth) const {
535 switch (MI.getOpcode()) {
536 case TargetOpcode::G_FPTOSI:
537 case TargetOpcode::G_FPTOUI:
538 case TargetOpcode::G_FCMP:
539 case TargetOpcode::G_LROUND:
540 case TargetOpcode::G_LLROUND:
541 return true;
542 default:
543 break;
544 }
545 return hasFPConstraints(MI, MRI, TRI, Depth);
546}
547
548bool AArch64RegisterBankInfo::onlyDefinesFP(const MachineInstr &MI,
550 const TargetRegisterInfo &TRI,
551 unsigned Depth) const {
552 switch (MI.getOpcode()) {
553 case AArch64::G_DUP:
554 case TargetOpcode::G_SITOFP:
555 case TargetOpcode::G_UITOFP:
556 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
557 case TargetOpcode::G_INSERT_VECTOR_ELT:
558 case TargetOpcode::G_BUILD_VECTOR:
559 case TargetOpcode::G_BUILD_VECTOR_TRUNC:
560 return true;
561 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
562 switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
563 case Intrinsic::aarch64_neon_ld1x2:
564 case Intrinsic::aarch64_neon_ld1x3:
565 case Intrinsic::aarch64_neon_ld1x4:
566 case Intrinsic::aarch64_neon_ld2:
567 case Intrinsic::aarch64_neon_ld2lane:
568 case Intrinsic::aarch64_neon_ld2r:
569 case Intrinsic::aarch64_neon_ld3:
570 case Intrinsic::aarch64_neon_ld3lane:
571 case Intrinsic::aarch64_neon_ld3r:
572 case Intrinsic::aarch64_neon_ld4:
573 case Intrinsic::aarch64_neon_ld4lane:
574 case Intrinsic::aarch64_neon_ld4r:
575 return true;
576 default:
577 break;
578 }
579 break;
580 default:
581 break;
582 }
583 return hasFPConstraints(MI, MRI, TRI, Depth);
584}
585
586bool AArch64RegisterBankInfo::isLoadFromFPType(const MachineInstr &MI) const {
587 // GMemOperation because we also want to match indexed loads.
588 auto *MemOp = cast<GMemOperation>(&MI);
589 const Value *LdVal = MemOp->getMMO().getValue();
590 if (!LdVal)
591 return false;
592
593 Type *EltTy = nullptr;
594 if (const GlobalValue *GV = dyn_cast<GlobalValue>(LdVal)) {
595 EltTy = GV->getValueType();
596 // Look at the first element of the struct to determine the type we are
597 // loading
598 while (StructType *StructEltTy = dyn_cast<StructType>(EltTy)) {
599 if (StructEltTy->getNumElements() == 0)
600 break;
601 EltTy = StructEltTy->getTypeAtIndex(0U);
602 }
603 // Look at the first element of the array to determine its type
604 if (isa<ArrayType>(EltTy))
605 EltTy = EltTy->getArrayElementType();
606 } else {
607 // FIXME: grubbing around uses is pretty ugly, but with no more
608 // `getPointerElementType` there's not much else we can do.
609 for (const auto *LdUser : LdVal->users()) {
610 if (isa<LoadInst>(LdUser)) {
611 EltTy = LdUser->getType();
612 break;
613 }
614 if (isa<StoreInst>(LdUser) && LdUser->getOperand(1) == LdVal) {
615 EltTy = LdUser->getOperand(0)->getType();
616 break;
617 }
618 }
619 }
620 return EltTy && EltTy->isFPOrFPVectorTy();
621}
622
625 const unsigned Opc = MI.getOpcode();
626
627 // Try the default logic for non-generic instructions that are either copies
628 // or already have some operands assigned to banks.
629 if ((Opc != TargetOpcode::COPY && !isPreISelGenericOpcode(Opc)) ||
630 Opc == TargetOpcode::G_PHI) {
633 if (Mapping.isValid())
634 return Mapping;
635 }
636
637 const MachineFunction &MF = *MI.getParent()->getParent();
638 const MachineRegisterInfo &MRI = MF.getRegInfo();
639 const TargetSubtargetInfo &STI = MF.getSubtarget();
640 const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
641
642 switch (Opc) {
643 // G_{F|S|U}REM are not listed because they are not legal.
644 // Arithmetic ops.
645 case TargetOpcode::G_ADD:
646 case TargetOpcode::G_SUB:
647 case TargetOpcode::G_PTR_ADD:
648 case TargetOpcode::G_MUL:
649 case TargetOpcode::G_SDIV:
650 case TargetOpcode::G_UDIV:
651 // Bitwise ops.
652 case TargetOpcode::G_AND:
653 case TargetOpcode::G_OR:
654 case TargetOpcode::G_XOR:
655 // Floating point ops.
656 case TargetOpcode::G_FADD:
657 case TargetOpcode::G_FSUB:
658 case TargetOpcode::G_FMUL:
659 case TargetOpcode::G_FDIV:
660 case TargetOpcode::G_FMAXIMUM:
661 case TargetOpcode::G_FMINIMUM:
662 return getSameKindOfOperandsMapping(MI);
663 case TargetOpcode::G_FPEXT: {
664 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
665 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
667 DefaultMappingID, /*Cost*/ 1,
669 /*NumOperands*/ 2);
670 }
671 // Shifts.
672 case TargetOpcode::G_SHL:
673 case TargetOpcode::G_LSHR:
674 case TargetOpcode::G_ASHR: {
675 LLT ShiftAmtTy = MRI.getType(MI.getOperand(2).getReg());
676 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
677 if (ShiftAmtTy.getSizeInBits() == 64 && SrcTy.getSizeInBits() == 32)
680 return getSameKindOfOperandsMapping(MI);
681 }
682 case TargetOpcode::COPY: {
683 Register DstReg = MI.getOperand(0).getReg();
684 Register SrcReg = MI.getOperand(1).getReg();
685 // Check if one of the register is not a generic register.
686 if ((DstReg.isPhysical() || !MRI.getType(DstReg).isValid()) ||
687 (SrcReg.isPhysical() || !MRI.getType(SrcReg).isValid())) {
688 const RegisterBank *DstRB = getRegBank(DstReg, MRI, TRI);
689 const RegisterBank *SrcRB = getRegBank(SrcReg, MRI, TRI);
690 if (!DstRB)
691 DstRB = SrcRB;
692 else if (!SrcRB)
693 SrcRB = DstRB;
694 // If both RB are null that means both registers are generic.
695 // We shouldn't be here.
696 assert(DstRB && SrcRB && "Both RegBank were nullptr");
697 TypeSize Size = getSizeInBits(DstReg, MRI, TRI);
699 DefaultMappingID, copyCost(*DstRB, *SrcRB, Size),
700 getCopyMapping(DstRB->getID(), SrcRB->getID(), Size),
701 // We only care about the mapping of the destination.
702 /*NumOperands*/ 1);
703 }
704 // Both registers are generic, use G_BITCAST.
705 [[fallthrough]];
706 }
707 case TargetOpcode::G_BITCAST: {
708 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
709 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
710 TypeSize Size = DstTy.getSizeInBits();
711 bool DstIsGPR = !DstTy.isVector() && DstTy.getSizeInBits() <= 64;
712 bool SrcIsGPR = !SrcTy.isVector() && SrcTy.getSizeInBits() <= 64;
713 const RegisterBank &DstRB =
714 DstIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
715 const RegisterBank &SrcRB =
716 SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
718 DefaultMappingID, copyCost(DstRB, SrcRB, Size),
719 getCopyMapping(DstRB.getID(), SrcRB.getID(), Size),
720 // We only care about the mapping of the destination for COPY.
721 /*NumOperands*/ Opc == TargetOpcode::G_BITCAST ? 2 : 1);
722 }
723 default:
724 break;
725 }
726
727 unsigned NumOperands = MI.getNumOperands();
728 unsigned MappingID = DefaultMappingID;
729
730 // Track the size and bank of each register. We don't do partial mappings.
731 SmallVector<unsigned, 4> OpSize(NumOperands);
732 SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
733 for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
734 auto &MO = MI.getOperand(Idx);
735 if (!MO.isReg() || !MO.getReg())
736 continue;
737
738 LLT Ty = MRI.getType(MO.getReg());
739 if (!Ty.isValid())
740 continue;
741 OpSize[Idx] = Ty.getSizeInBits().getKnownMinValue();
742
743 // As a top-level guess, vectors including both scalable and non-scalable
744 // ones go in FPRs, scalars and pointers in GPRs.
745 // For floating-point instructions, scalars go in FPRs.
746 if (Ty.isVector())
747 OpRegBankIdx[Idx] = PMI_FirstFPR;
749 Ty.getSizeInBits() > 64)
750 OpRegBankIdx[Idx] = PMI_FirstFPR;
751 else
752 OpRegBankIdx[Idx] = PMI_FirstGPR;
753 }
754
755 unsigned Cost = 1;
756 // Some of the floating-point instructions have mixed GPR and FPR operands:
757 // fine-tune the computed mapping.
758 switch (Opc) {
759 case AArch64::G_DUP: {
760 Register ScalarReg = MI.getOperand(1).getReg();
761 LLT ScalarTy = MRI.getType(ScalarReg);
762 auto ScalarDef = MRI.getVRegDef(ScalarReg);
763 // We want to select dup(load) into LD1R.
764 if (ScalarDef->getOpcode() == TargetOpcode::G_LOAD)
765 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
766 // s8 is an exception for G_DUP, which we always want on gpr.
767 else if (ScalarTy.getSizeInBits() != 8 &&
768 (getRegBank(ScalarReg, MRI, TRI) == &AArch64::FPRRegBank ||
769 onlyDefinesFP(*ScalarDef, MRI, TRI)))
770 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
771 else {
772 if (ScalarTy.getSizeInBits() < 32 &&
773 getRegBank(ScalarReg, MRI, TRI) == &AArch64::GPRRegBank) {
774 // Calls applyMappingImpl()
775 MappingID = CustomMappingID;
776 }
777 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
778 }
779 break;
780 }
781 case TargetOpcode::G_TRUNC: {
782 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
783 if (!SrcTy.isVector() && SrcTy.getSizeInBits() == 128)
784 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
785 break;
786 }
787 case TargetOpcode::G_SITOFP:
788 case TargetOpcode::G_UITOFP: {
789 if (MRI.getType(MI.getOperand(0).getReg()).isVector())
790 break;
791 // Integer to FP conversions don't necessarily happen between GPR -> FPR
792 // regbanks. They can also be done within an FPR register.
793 Register SrcReg = MI.getOperand(1).getReg();
794 if (getRegBank(SrcReg, MRI, TRI) == &AArch64::FPRRegBank)
795 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
796 else
797 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
798 break;
799 }
800 case TargetOpcode::G_FPTOSI:
801 case TargetOpcode::G_FPTOUI:
802 case TargetOpcode::G_INTRINSIC_LRINT:
803 case TargetOpcode::G_INTRINSIC_LLRINT:
804 if (MRI.getType(MI.getOperand(0).getReg()).isVector())
805 break;
806 OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
807 break;
808 case TargetOpcode::G_FCMP: {
809 // If the result is a vector, it must use a FPR.
811 MRI.getType(MI.getOperand(0).getReg()).isVector() ? PMI_FirstFPR
812 : PMI_FirstGPR;
813 OpRegBankIdx = {Idx0,
814 /* Predicate */ PMI_None, PMI_FirstFPR, PMI_FirstFPR};
815 break;
816 }
817 case TargetOpcode::G_BITCAST:
818 // This is going to be a cross register bank copy and this is expensive.
819 if (OpRegBankIdx[0] != OpRegBankIdx[1])
820 Cost = copyCost(
821 *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[0]].RegBank,
822 *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[1]].RegBank,
823 TypeSize::getFixed(OpSize[0]));
824 break;
825 case TargetOpcode::G_LOAD: {
826 // Loading in vector unit is slightly more expensive.
827 // This is actually only true for the LD1R and co instructions,
828 // but anyway for the fast mode this number does not matter and
829 // for the greedy mode the cost of the cross bank copy will
830 // offset this number.
831 // FIXME: Should be derived from the scheduling model.
832 if (OpRegBankIdx[0] != PMI_FirstGPR) {
833 Cost = 2;
834 break;
835 }
836
837 if (cast<GLoad>(MI).isAtomic()) {
838 // Atomics always use GPR destinations. Don't refine any further.
839 OpRegBankIdx[0] = PMI_FirstGPR;
840 break;
841 }
842
843 // Try to guess the type of the load from the MMO.
844 if (isLoadFromFPType(MI)) {
845 OpRegBankIdx[0] = PMI_FirstFPR;
846 break;
847 }
848
849 // Check if that load feeds fp instructions.
850 // In that case, we want the default mapping to be on FPR
851 // instead of blind map every scalar to GPR.
852 if (any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
853 [&](const MachineInstr &UseMI) {
854 // If we have at least one direct or indirect use
855 // in a FP instruction,
856 // assume this was a floating point load in the IR. If it was
857 // not, we would have had a bitcast before reaching that
858 // instruction.
859 //
860 // Int->FP conversion operations are also captured in
861 // onlyDefinesFP().
862
863 if (isPHIWithFPContraints(UseMI, MRI, TRI))
864 return true;
865
866 return onlyUsesFP(UseMI, MRI, TRI) ||
867 onlyDefinesFP(UseMI, MRI, TRI);
868 }))
869 OpRegBankIdx[0] = PMI_FirstFPR;
870 break;
871 }
872 case TargetOpcode::G_STORE:
873 // Check if that store is fed by fp instructions.
874 if (OpRegBankIdx[0] == PMI_FirstGPR) {
875 Register VReg = MI.getOperand(0).getReg();
876 if (!VReg)
877 break;
878 MachineInstr *DefMI = MRI.getVRegDef(VReg);
879 if (onlyDefinesFP(*DefMI, MRI, TRI))
880 OpRegBankIdx[0] = PMI_FirstFPR;
881 break;
882 }
883 break;
884 case TargetOpcode::G_INDEXED_STORE:
885 if (OpRegBankIdx[1] == PMI_FirstGPR) {
886 Register VReg = MI.getOperand(1).getReg();
887 if (!VReg)
888 break;
889 MachineInstr *DefMI = MRI.getVRegDef(VReg);
890 if (onlyDefinesFP(*DefMI, MRI, TRI))
891 OpRegBankIdx[1] = PMI_FirstFPR;
892 break;
893 }
894 break;
895 case TargetOpcode::G_INDEXED_SEXTLOAD:
896 case TargetOpcode::G_INDEXED_ZEXTLOAD:
897 // These should always be GPR.
898 OpRegBankIdx[0] = PMI_FirstGPR;
899 break;
900 case TargetOpcode::G_INDEXED_LOAD: {
901 if (isLoadFromFPType(MI))
902 OpRegBankIdx[0] = PMI_FirstFPR;
903 break;
904 }
905 case TargetOpcode::G_SELECT: {
906 // If the destination is FPR, preserve that.
907 if (OpRegBankIdx[0] != PMI_FirstGPR)
908 break;
909
910 // If we're taking in vectors, we have no choice but to put everything on
911 // FPRs, except for the condition. The condition must always be on a GPR.
912 LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
913 if (SrcTy.isVector()) {
915 break;
916 }
917
918 // Try to minimize the number of copies. If we have more floating point
919 // constrained values than not, then we'll put everything on FPR. Otherwise,
920 // everything has to be on GPR.
921 unsigned NumFP = 0;
922
923 // Check if the uses of the result always produce floating point values.
924 //
925 // For example:
926 //
927 // %z = G_SELECT %cond %x %y
928 // fpr = G_FOO %z ...
929 if (any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
930 [&](MachineInstr &MI) { return onlyUsesFP(MI, MRI, TRI); }))
931 ++NumFP;
932
933 // Check if the defs of the source values always produce floating point
934 // values.
935 //
936 // For example:
937 //
938 // %x = G_SOMETHING_ALWAYS_FLOAT %a ...
939 // %z = G_SELECT %cond %x %y
940 //
941 // Also check whether or not the sources have already been decided to be
942 // FPR. Keep track of this.
943 //
944 // This doesn't check the condition, since it's just whatever is in NZCV.
945 // This isn't passed explicitly in a register to fcsel/csel.
946 for (unsigned Idx = 2; Idx < 4; ++Idx) {
947 Register VReg = MI.getOperand(Idx).getReg();
948 MachineInstr *DefMI = MRI.getVRegDef(VReg);
949 if (getRegBank(VReg, MRI, TRI) == &AArch64::FPRRegBank ||
950 onlyDefinesFP(*DefMI, MRI, TRI))
951 ++NumFP;
952 }
953
954 // If we have more FP constraints than not, then move everything over to
955 // FPR.
956 if (NumFP >= 2)
958
959 break;
960 }
961 case TargetOpcode::G_UNMERGE_VALUES: {
962 // If the first operand belongs to a FPR register bank, then make sure that
963 // we preserve that.
964 if (OpRegBankIdx[0] != PMI_FirstGPR)
965 break;
966
967 LLT SrcTy = MRI.getType(MI.getOperand(MI.getNumOperands()-1).getReg());
968 // UNMERGE into scalars from a vector should always use FPR.
969 // Likewise if any of the uses are FP instructions.
970 if (SrcTy.isVector() || SrcTy == LLT::scalar(128) ||
971 any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
972 [&](MachineInstr &MI) { return onlyUsesFP(MI, MRI, TRI); })) {
973 // Set the register bank of every operand to FPR.
974 for (unsigned Idx = 0, NumOperands = MI.getNumOperands();
975 Idx < NumOperands; ++Idx)
976 OpRegBankIdx[Idx] = PMI_FirstFPR;
977 }
978 break;
979 }
980 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
981 // Destination and source need to be FPRs.
982 OpRegBankIdx[0] = PMI_FirstFPR;
983 OpRegBankIdx[1] = PMI_FirstFPR;
984
985 // Index needs to be a GPR.
986 OpRegBankIdx[2] = PMI_FirstGPR;
987 break;
988 case TargetOpcode::G_INSERT_VECTOR_ELT:
989 OpRegBankIdx[0] = PMI_FirstFPR;
990 OpRegBankIdx[1] = PMI_FirstFPR;
991
992 // The element may be either a GPR or FPR. Preserve that behaviour.
993 if (getRegBank(MI.getOperand(2).getReg(), MRI, TRI) == &AArch64::FPRRegBank)
994 OpRegBankIdx[2] = PMI_FirstFPR;
995 else {
996 // If the type is i8/i16, and the regank will be GPR, then we change the
997 // type to i32 in applyMappingImpl.
998 LLT Ty = MRI.getType(MI.getOperand(2).getReg());
999 if (Ty.getSizeInBits() == 8 || Ty.getSizeInBits() == 16) {
1000 // Calls applyMappingImpl()
1001 MappingID = CustomMappingID;
1002 }
1003 OpRegBankIdx[2] = PMI_FirstGPR;
1004 }
1005
1006 // Index needs to be a GPR.
1007 OpRegBankIdx[3] = PMI_FirstGPR;
1008 break;
1009 case TargetOpcode::G_EXTRACT: {
1010 // For s128 sources we have to use fpr unless we know otherwise.
1011 auto Src = MI.getOperand(1).getReg();
1012 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
1013 if (SrcTy.getSizeInBits() != 128)
1014 break;
1015 auto Idx = MRI.getRegClassOrNull(Src) == &AArch64::XSeqPairsClassRegClass
1016 ? PMI_FirstGPR
1017 : PMI_FirstFPR;
1018 OpRegBankIdx[0] = Idx;
1019 OpRegBankIdx[1] = Idx;
1020 break;
1021 }
1022 case TargetOpcode::G_BUILD_VECTOR: {
1023 // If the first source operand belongs to a FPR register bank, then make
1024 // sure that we preserve that.
1025 if (OpRegBankIdx[1] != PMI_FirstGPR)
1026 break;
1027 Register VReg = MI.getOperand(1).getReg();
1028 if (!VReg)
1029 break;
1030
1031 // Get the instruction that defined the source operand reg, and check if
1032 // it's a floating point operation. Or, if it's a type like s16 which
1033 // doesn't have a exact size gpr register class. The exception is if the
1034 // build_vector has all constant operands, which may be better to leave as
1035 // gpr without copies, so it can be matched in imported patterns.
1036 MachineInstr *DefMI = MRI.getVRegDef(VReg);
1037 unsigned DefOpc = DefMI->getOpcode();
1038 const LLT SrcTy = MRI.getType(VReg);
1039 if (all_of(MI.operands(), [&](const MachineOperand &Op) {
1040 return Op.isDef() || MRI.getVRegDef(Op.getReg())->getOpcode() ==
1041 TargetOpcode::G_CONSTANT;
1042 }))
1043 break;
1045 SrcTy.getSizeInBits() < 32 ||
1046 getRegBank(VReg, MRI, TRI) == &AArch64::FPRRegBank) {
1047 // Have a floating point op.
1048 // Make sure every operand gets mapped to a FPR register class.
1049 unsigned NumOperands = MI.getNumOperands();
1050 for (unsigned Idx = 0; Idx < NumOperands; ++Idx)
1051 OpRegBankIdx[Idx] = PMI_FirstFPR;
1052 }
1053 break;
1054 }
1055 case TargetOpcode::G_VECREDUCE_FADD:
1056 case TargetOpcode::G_VECREDUCE_FMUL:
1057 case TargetOpcode::G_VECREDUCE_FMAX:
1058 case TargetOpcode::G_VECREDUCE_FMIN:
1059 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
1060 case TargetOpcode::G_VECREDUCE_FMINIMUM:
1061 case TargetOpcode::G_VECREDUCE_ADD:
1062 case TargetOpcode::G_VECREDUCE_MUL:
1063 case TargetOpcode::G_VECREDUCE_AND:
1064 case TargetOpcode::G_VECREDUCE_OR:
1065 case TargetOpcode::G_VECREDUCE_XOR:
1066 case TargetOpcode::G_VECREDUCE_SMAX:
1067 case TargetOpcode::G_VECREDUCE_SMIN:
1068 case TargetOpcode::G_VECREDUCE_UMAX:
1069 case TargetOpcode::G_VECREDUCE_UMIN:
1070 // Reductions produce a scalar value from a vector, the scalar should be on
1071 // FPR bank.
1072 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
1073 break;
1074 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
1075 case TargetOpcode::G_VECREDUCE_SEQ_FMUL:
1076 // These reductions also take a scalar accumulator input.
1077 // Assign them FPR for now.
1078 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR, PMI_FirstFPR};
1079 break;
1080 case TargetOpcode::G_INTRINSIC:
1081 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
1082 // Check if we know that the intrinsic has any constraints on its register
1083 // banks. If it does, then update the mapping accordingly.
1084 unsigned Idx = 0;
1085 if (onlyDefinesFP(MI, MRI, TRI))
1086 for (const auto &Op : MI.defs()) {
1087 if (Op.isReg())
1088 OpRegBankIdx[Idx] = PMI_FirstFPR;
1089 ++Idx;
1090 }
1091 else
1092 Idx += MI.getNumExplicitDefs();
1093
1094 if (onlyUsesFP(MI, MRI, TRI))
1095 for (const auto &Op : MI.explicit_uses()) {
1096 if (Op.isReg())
1097 OpRegBankIdx[Idx] = PMI_FirstFPR;
1098 ++Idx;
1099 }
1100 break;
1101 }
1102 case TargetOpcode::G_LROUND:
1103 case TargetOpcode::G_LLROUND: {
1104 // Source is always floating point and destination is always integer.
1105 OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
1106 break;
1107 }
1108 }
1109
1110 // Finally construct the computed mapping.
1111 SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
1112 for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
1113 if (MI.getOperand(Idx).isReg() && MI.getOperand(Idx).getReg()) {
1114 LLT Ty = MRI.getType(MI.getOperand(Idx).getReg());
1115 if (!Ty.isValid())
1116 continue;
1117 auto Mapping =
1118 getValueMapping(OpRegBankIdx[Idx], TypeSize::getFixed(OpSize[Idx]));
1119 if (!Mapping->isValid())
1121
1122 OpdsMapping[Idx] = Mapping;
1123 }
1124 }
1125
1126 return getInstructionMapping(MappingID, Cost, getOperandsMapping(OpdsMapping),
1127 NumOperands);
1128}
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
static unsigned getIntrinsicID(const SDNode *N)
#define CHECK_VALUEMAP(RBName, Size)
static bool isFPIntrinsic(const MachineRegisterInfo &MRI, const MachineInstr &MI)
#define CHECK_VALUEMAP_3OPS(RBName, Size)
static const unsigned CustomMappingID
#define CHECK_PARTIALMAP(Idx, ValStartIdx, ValLength, RB)
#define CHECK_VALUEMAP_CROSSREGCPY(RBNameDst, RBNameSrc, Size)
#define CHECK_VALUEMAP_FPEXT(DstSize, SrcSize)
This file declares the targeting of the RegisterBankInfo class for AArch64.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
uint64_t Size
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
IRTranslator LLVM IR MI
Implement a low-level type suitable for MachineInstr level instruction selection.
This file declares the MachineIRBuilder class.
unsigned const TargetRegisterInfo * TRI
ppc ctr loops verify
static const MCPhysReg FPR[]
FPR - The set of FP registers that should be allocated for arguments on Darwin and AIX.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
static unsigned getRegBankBaseIdxOffset(unsigned RBIdx, TypeSize Size)
static const RegisterBankInfo::ValueMapping * getCopyMapping(unsigned DstBankID, unsigned SrcBankID, TypeSize Size)
Get the pointer to the ValueMapping of the operands of a copy instruction from the SrcBankID register...
static bool checkPartialMappingIdx(PartialMappingIdx FirstAlias, PartialMappingIdx LastAlias, ArrayRef< PartialMappingIdx > Order)
static const RegisterBankInfo::PartialMapping PartMappings[]
static const RegisterBankInfo::ValueMapping * getFPExtMapping(unsigned DstSize, unsigned SrcSize)
Get the instruction mapping for G_FPEXT.
static const RegisterBankInfo::ValueMapping * getValueMapping(PartialMappingIdx RBIdx, TypeSize Size)
Get the pointer to the ValueMapping representing the RegisterBank at RBIdx with a size of Size.
static const RegisterBankInfo::ValueMapping ValMappings[]
InstructionMappings getInstrAlternativeMappings(const MachineInstr &MI) const override
Get the alternative mappings for MI.
unsigned copyCost(const RegisterBank &A, const RegisterBank &B, TypeSize Size) const override
Get the cost of a copy from B to A, or put differently, get the cost of A = COPY B.
const RegisterBank & getRegBankFromRegClass(const TargetRegisterClass &RC, LLT Ty) const override
Get a register bank that covers RC.
AArch64RegisterBankInfo(const TargetRegisterInfo &TRI)
const InstructionMapping & getInstrMapping(const MachineInstr &MI) const override
Get the mapping of the different operands of MI on the register bank.
This class represents an Operation in the Expression.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:193
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:290
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:184
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Helper class to build MachineInstr.
void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II)
Set the insertion point before the specified position.
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
Register getReg(unsigned Idx) const
Get the register for the operand index.
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:569
MachineOperand class - Representation of each machine instruction operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Helper class that represents how the value of an instruction may be mapped and what is the related co...
bool isValid() const
Check whether this object is valid.
virtual InstructionMappings getInstrAlternativeMappings(const MachineInstr &MI) const
Get the alternative mappings for MI.
const InstructionMapping & getInstructionMapping(unsigned ID, unsigned Cost, const ValueMapping *OperandsMapping, unsigned NumOperands) const
Method to get a uniquely generated InstructionMapping.
static void applyDefaultMapping(const OperandsMapper &OpdMapper)
Helper method to apply something that is like the default mapping.
const InstructionMapping & getInvalidInstructionMapping() const
Method to get a uniquely generated invalid InstructionMapping.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getMaximumSize(unsigned RegBankID) const
Get the maximum size in bits that fits in the given register bank.
TypeSize getSizeInBits(Register Reg, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) const
Get the size in bits of Reg.
virtual const RegisterBank & getRegBankFromRegClass(const TargetRegisterClass &RC, LLT Ty) const
Get a register bank that covers RC.
const ValueMapping * getOperandsMapping(Iterator Begin, Iterator End) const
Get the uniquely generated array of ValueMapping for the elements of between Begin and End.
static const unsigned DefaultMappingID
Identifier used when the related instruction mapping instance is generated by target independent code...
virtual unsigned copyCost(const RegisterBank &A, const RegisterBank &B, TypeSize Size) const
Get the cost of a copy from B to A, or put differently, get the cost of A = COPY B.
const InstructionMapping & getInstrMappingImpl(const MachineInstr &MI) const
Try to get the mapping of MI.
This class implements the register bank concept.
Definition: RegisterBank.h:28
bool covers(const TargetRegisterClass &RC) const
Check whether this register bank covers RC.
unsigned getID() const
Get the identifier of this register bank.
Definition: RegisterBank.h:45
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
void push_back(const T &Elt)
Definition: SmallVector.h:427
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1210
Class to represent struct types.
Definition: DerivedTypes.h:216
unsigned getID() const
Return the register class ID number.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition: TypeSize.h:345
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
Type * getArrayElementType() const
Definition: Type.h:399
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:212
LLVM Value Representation.
Definition: Value.h:74
iterator_range< user_iterator > users()
Definition: Value.h:421
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
void call_once(once_flag &flag, Function &&F, Args &&... ArgList)
Execute the function specified as a parameter once.
Definition: Threading.h:87
bool isPreISelGenericFloatingPointOpcode(unsigned Opc)
Returns whether opcode Opc is a pre-isel generic floating-point opcode, having only floating-point op...
Definition: Utils.cpp:1697
The llvm::once_flag structure.
Definition: Threading.h:68