LLVM 19.0.0git
AArch64RegisterBankInfo.cpp
Go to the documentation of this file.
1//===- AArch64RegisterBankInfo.cpp ----------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the RegisterBankInfo class for
10/// AArch64.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
15#include "AArch64RegisterInfo.h"
17#include "llvm/ADT/STLExtras.h"
32#include "llvm/IR/IntrinsicsAArch64.h"
35#include <algorithm>
36#include <cassert>
37
38#define GET_TARGET_REGBANK_IMPL
39#include "AArch64GenRegisterBank.inc"
40
41// This file will be TableGen'ed at some point.
42#include "AArch64GenRegisterBankInfo.def"
43
44using namespace llvm;
45
47 const TargetRegisterInfo &TRI) {
48 static llvm::once_flag InitializeRegisterBankFlag;
49
50 static auto InitializeRegisterBankOnce = [&]() {
51 // We have only one set of register banks, whatever the subtarget
52 // is. Therefore, the initialization of the RegBanks table should be
53 // done only once. Indeed the table of all register banks
54 // (AArch64::RegBanks) is unique in the compiler. At some point, it
55 // will get tablegen'ed and the whole constructor becomes empty.
56
57 const RegisterBank &RBGPR = getRegBank(AArch64::GPRRegBankID);
58 (void)RBGPR;
59 assert(&AArch64::GPRRegBank == &RBGPR &&
60 "The order in RegBanks is messed up");
61
62 const RegisterBank &RBFPR = getRegBank(AArch64::FPRRegBankID);
63 (void)RBFPR;
64 assert(&AArch64::FPRRegBank == &RBFPR &&
65 "The order in RegBanks is messed up");
66
67 const RegisterBank &RBCCR = getRegBank(AArch64::CCRegBankID);
68 (void)RBCCR;
69 assert(&AArch64::CCRegBank == &RBCCR &&
70 "The order in RegBanks is messed up");
71
72 // The GPR register bank is fully defined by all the registers in
73 // GR64all + its subclasses.
74 assert(RBGPR.covers(*TRI.getRegClass(AArch64::GPR32RegClassID)) &&
75 "Subclass not added?");
76 assert(getMaximumSize(RBGPR.getID()) == 128 &&
77 "GPRs should hold up to 128-bit");
78
79 // The FPR register bank is fully defined by all the registers in
80 // GR64all + its subclasses.
81 assert(RBFPR.covers(*TRI.getRegClass(AArch64::QQRegClassID)) &&
82 "Subclass not added?");
83 assert(RBFPR.covers(*TRI.getRegClass(AArch64::FPR64RegClassID)) &&
84 "Subclass not added?");
85 assert(getMaximumSize(RBFPR.getID()) == 512 &&
86 "FPRs should hold up to 512-bit via QQQQ sequence");
87
88 assert(RBCCR.covers(*TRI.getRegClass(AArch64::CCRRegClassID)) &&
89 "Class not added?");
90 assert(getMaximumSize(RBCCR.getID()) == 32 &&
91 "CCR should hold up to 32-bit");
92
93 // Check that the TableGen'ed like file is in sync we our expectations.
94 // First, the Idx.
97 "PartialMappingIdx's are incorrectly ordered");
101 "PartialMappingIdx's are incorrectly ordered");
102// Now, the content.
103// Check partial mapping.
104#define CHECK_PARTIALMAP(Idx, ValStartIdx, ValLength, RB) \
105 do { \
106 assert( \
107 checkPartialMap(PartialMappingIdx::Idx, ValStartIdx, ValLength, RB) && \
108 #Idx " is incorrectly initialized"); \
109 } while (false)
110
111 CHECK_PARTIALMAP(PMI_GPR32, 0, 32, RBGPR);
112 CHECK_PARTIALMAP(PMI_GPR64, 0, 64, RBGPR);
113 CHECK_PARTIALMAP(PMI_GPR128, 0, 128, RBGPR);
114 CHECK_PARTIALMAP(PMI_FPR16, 0, 16, RBFPR);
115 CHECK_PARTIALMAP(PMI_FPR32, 0, 32, RBFPR);
116 CHECK_PARTIALMAP(PMI_FPR64, 0, 64, RBFPR);
117 CHECK_PARTIALMAP(PMI_FPR128, 0, 128, RBFPR);
118 CHECK_PARTIALMAP(PMI_FPR256, 0, 256, RBFPR);
119 CHECK_PARTIALMAP(PMI_FPR512, 0, 512, RBFPR);
120
121// Check value mapping.
122#define CHECK_VALUEMAP_IMPL(RBName, Size, Offset) \
123 do { \
124 assert(checkValueMapImpl(PartialMappingIdx::PMI_##RBName##Size, \
125 PartialMappingIdx::PMI_First##RBName, Size, \
126 Offset) && \
127 #RBName #Size " " #Offset " is incorrectly initialized"); \
128 } while (false)
129
130#define CHECK_VALUEMAP(RBName, Size) CHECK_VALUEMAP_IMPL(RBName, Size, 0)
131
132 CHECK_VALUEMAP(GPR, 32);
133 CHECK_VALUEMAP(GPR, 64);
134 CHECK_VALUEMAP(GPR, 128);
135 CHECK_VALUEMAP(FPR, 16);
136 CHECK_VALUEMAP(FPR, 32);
137 CHECK_VALUEMAP(FPR, 64);
138 CHECK_VALUEMAP(FPR, 128);
139 CHECK_VALUEMAP(FPR, 256);
140 CHECK_VALUEMAP(FPR, 512);
141
142// Check the value mapping for 3-operands instructions where all the operands
143// map to the same value mapping.
144#define CHECK_VALUEMAP_3OPS(RBName, Size) \
145 do { \
146 CHECK_VALUEMAP_IMPL(RBName, Size, 0); \
147 CHECK_VALUEMAP_IMPL(RBName, Size, 1); \
148 CHECK_VALUEMAP_IMPL(RBName, Size, 2); \
149 } while (false)
150
151 CHECK_VALUEMAP_3OPS(GPR, 32);
152 CHECK_VALUEMAP_3OPS(GPR, 64);
153 CHECK_VALUEMAP_3OPS(GPR, 128);
159
160#define CHECK_VALUEMAP_CROSSREGCPY(RBNameDst, RBNameSrc, Size) \
161 do { \
162 unsigned PartialMapDstIdx = PMI_##RBNameDst##Size - PMI_Min; \
163 unsigned PartialMapSrcIdx = PMI_##RBNameSrc##Size - PMI_Min; \
164 (void)PartialMapDstIdx; \
165 (void)PartialMapSrcIdx; \
166 const ValueMapping *Map = getCopyMapping( \
167 AArch64::RBNameDst##RegBankID, AArch64::RBNameSrc##RegBankID, Size); \
168 (void)Map; \
169 assert(Map[0].BreakDown == \
170 &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] && \
171 Map[0].NumBreakDowns == 1 && #RBNameDst #Size \
172 " Dst is incorrectly initialized"); \
173 assert(Map[1].BreakDown == \
174 &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] && \
175 Map[1].NumBreakDowns == 1 && #RBNameSrc #Size \
176 " Src is incorrectly initialized"); \
177 \
178 } while (false)
179
180 CHECK_VALUEMAP_CROSSREGCPY(GPR, GPR, 32);
182 CHECK_VALUEMAP_CROSSREGCPY(GPR, GPR, 64);
188
189#define CHECK_VALUEMAP_FPEXT(DstSize, SrcSize) \
190 do { \
191 unsigned PartialMapDstIdx = PMI_FPR##DstSize - PMI_Min; \
192 unsigned PartialMapSrcIdx = PMI_FPR##SrcSize - PMI_Min; \
193 (void)PartialMapDstIdx; \
194 (void)PartialMapSrcIdx; \
195 const ValueMapping *Map = getFPExtMapping(DstSize, SrcSize); \
196 (void)Map; \
197 assert(Map[0].BreakDown == \
198 &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] && \
199 Map[0].NumBreakDowns == 1 && "FPR" #DstSize \
200 " Dst is incorrectly initialized"); \
201 assert(Map[1].BreakDown == \
202 &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] && \
203 Map[1].NumBreakDowns == 1 && "FPR" #SrcSize \
204 " Src is incorrectly initialized"); \
205 \
206 } while (false)
207
208 CHECK_VALUEMAP_FPEXT(32, 16);
209 CHECK_VALUEMAP_FPEXT(64, 16);
210 CHECK_VALUEMAP_FPEXT(64, 32);
211 CHECK_VALUEMAP_FPEXT(128, 64);
212
213 assert(verify(TRI) && "Invalid register bank information");
214 };
215
216 llvm::call_once(InitializeRegisterBankFlag, InitializeRegisterBankOnce);
217}
218
220 const RegisterBank &B,
221 TypeSize Size) const {
222 // What do we do with different size?
223 // copy are same size.
224 // Will introduce other hooks for different size:
225 // * extract cost.
226 // * build_sequence cost.
227
228 // Copy from (resp. to) GPR to (resp. from) FPR involves FMOV.
229 // FIXME: This should be deduced from the scheduling model.
230 if (&A == &AArch64::GPRRegBank && &B == &AArch64::FPRRegBank)
231 // FMOVXDr or FMOVWSr.
232 return 5;
233 if (&A == &AArch64::FPRRegBank && &B == &AArch64::GPRRegBank)
234 // FMOVDXr or FMOVSWr.
235 return 4;
236
238}
239
240const RegisterBank &
242 LLT) const {
243 switch (RC.getID()) {
244 case AArch64::FPR8RegClassID:
245 case AArch64::FPR16RegClassID:
246 case AArch64::FPR16_loRegClassID:
247 case AArch64::FPR32_with_hsub_in_FPR16_loRegClassID:
248 case AArch64::FPR32RegClassID:
249 case AArch64::FPR64RegClassID:
250 case AArch64::FPR128RegClassID:
251 case AArch64::FPR64_loRegClassID:
252 case AArch64::FPR128_loRegClassID:
253 case AArch64::FPR128_0to7RegClassID:
254 case AArch64::DDRegClassID:
255 case AArch64::DDDRegClassID:
256 case AArch64::DDDDRegClassID:
257 case AArch64::QQRegClassID:
258 case AArch64::QQQRegClassID:
259 case AArch64::QQQQRegClassID:
260 return getRegBank(AArch64::FPRRegBankID);
261 case AArch64::GPR32commonRegClassID:
262 case AArch64::GPR32RegClassID:
263 case AArch64::GPR32spRegClassID:
264 case AArch64::GPR32sponlyRegClassID:
265 case AArch64::GPR32argRegClassID:
266 case AArch64::GPR32allRegClassID:
267 case AArch64::GPR64commonRegClassID:
268 case AArch64::GPR64RegClassID:
269 case AArch64::GPR64spRegClassID:
270 case AArch64::GPR64sponlyRegClassID:
271 case AArch64::GPR64argRegClassID:
272 case AArch64::GPR64allRegClassID:
273 case AArch64::GPR64noipRegClassID:
274 case AArch64::GPR64common_and_GPR64noipRegClassID:
275 case AArch64::GPR64noip_and_tcGPR64RegClassID:
276 case AArch64::tcGPR64RegClassID:
277 case AArch64::tcGPRx16x17RegClassID:
278 case AArch64::tcGPRx17RegClassID:
279 case AArch64::tcGPRnotx16RegClassID:
280 case AArch64::WSeqPairsClassRegClassID:
281 case AArch64::XSeqPairsClassRegClassID:
282 case AArch64::MatrixIndexGPR32_8_11RegClassID:
283 case AArch64::MatrixIndexGPR32_12_15RegClassID:
284 case AArch64::GPR64_with_sub_32_in_MatrixIndexGPR32_8_11RegClassID:
285 case AArch64::GPR64_with_sub_32_in_MatrixIndexGPR32_12_15RegClassID:
286 return getRegBank(AArch64::GPRRegBankID);
287 case AArch64::CCRRegClassID:
288 return getRegBank(AArch64::CCRegBankID);
289 default:
290 llvm_unreachable("Register class not supported");
291 }
292}
293
296 const MachineInstr &MI) const {
297 const MachineFunction &MF = *MI.getParent()->getParent();
298 const TargetSubtargetInfo &STI = MF.getSubtarget();
299 const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
300 const MachineRegisterInfo &MRI = MF.getRegInfo();
301
302 switch (MI.getOpcode()) {
303 case TargetOpcode::G_OR: {
304 // 32 and 64-bit or can be mapped on either FPR or
305 // GPR for the same cost.
306 unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
307 if (Size != 32 && Size != 64)
308 break;
309
310 // If the instruction has any implicit-defs or uses,
311 // do not mess with it.
312 if (MI.getNumOperands() != 3)
313 break;
314 InstructionMappings AltMappings;
315 const InstructionMapping &GPRMapping = getInstructionMapping(
316 /*ID*/ 1, /*Cost*/ 1, getValueMapping(PMI_FirstGPR, Size),
317 /*NumOperands*/ 3);
318 const InstructionMapping &FPRMapping = getInstructionMapping(
319 /*ID*/ 2, /*Cost*/ 1, getValueMapping(PMI_FirstFPR, Size),
320 /*NumOperands*/ 3);
321
322 AltMappings.push_back(&GPRMapping);
323 AltMappings.push_back(&FPRMapping);
324 return AltMappings;
325 }
326 case TargetOpcode::G_BITCAST: {
327 unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
328 if (Size != 32 && Size != 64)
329 break;
330
331 // If the instruction has any implicit-defs or uses,
332 // do not mess with it.
333 if (MI.getNumOperands() != 2)
334 break;
335
336 InstructionMappings AltMappings;
337 const InstructionMapping &GPRMapping = getInstructionMapping(
338 /*ID*/ 1, /*Cost*/ 1,
339 getCopyMapping(AArch64::GPRRegBankID, AArch64::GPRRegBankID, Size),
340 /*NumOperands*/ 2);
341 const InstructionMapping &FPRMapping = getInstructionMapping(
342 /*ID*/ 2, /*Cost*/ 1,
343 getCopyMapping(AArch64::FPRRegBankID, AArch64::FPRRegBankID, Size),
344 /*NumOperands*/ 2);
345 const InstructionMapping &GPRToFPRMapping = getInstructionMapping(
346 /*ID*/ 3,
347 /*Cost*/
348 copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank,
350 getCopyMapping(AArch64::FPRRegBankID, AArch64::GPRRegBankID, Size),
351 /*NumOperands*/ 2);
352 const InstructionMapping &FPRToGPRMapping = getInstructionMapping(
353 /*ID*/ 3,
354 /*Cost*/
355 copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank,
357 getCopyMapping(AArch64::GPRRegBankID, AArch64::FPRRegBankID, Size),
358 /*NumOperands*/ 2);
359
360 AltMappings.push_back(&GPRMapping);
361 AltMappings.push_back(&FPRMapping);
362 AltMappings.push_back(&GPRToFPRMapping);
363 AltMappings.push_back(&FPRToGPRMapping);
364 return AltMappings;
365 }
366 case TargetOpcode::G_LOAD: {
367 unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
368 if (Size != 64)
369 break;
370
371 // If the instruction has any implicit-defs or uses,
372 // do not mess with it.
373 if (MI.getNumOperands() != 2)
374 break;
375
376 InstructionMappings AltMappings;
377 const InstructionMapping &GPRMapping = getInstructionMapping(
378 /*ID*/ 1, /*Cost*/ 1,
380 // Addresses are GPR 64-bit.
382 /*NumOperands*/ 2);
383 const InstructionMapping &FPRMapping = getInstructionMapping(
384 /*ID*/ 2, /*Cost*/ 1,
386 // Addresses are GPR 64-bit.
388 /*NumOperands*/ 2);
389
390 AltMappings.push_back(&GPRMapping);
391 AltMappings.push_back(&FPRMapping);
392 return AltMappings;
393 }
394 default:
395 break;
396 }
398}
399
400void AArch64RegisterBankInfo::applyMappingImpl(
401 MachineIRBuilder &Builder, const OperandsMapper &OpdMapper) const {
402 MachineInstr &MI = OpdMapper.getMI();
403 MachineRegisterInfo &MRI = OpdMapper.getMRI();
404
405 switch (MI.getOpcode()) {
406 case TargetOpcode::G_OR:
407 case TargetOpcode::G_BITCAST:
408 case TargetOpcode::G_LOAD:
409 // Those ID must match getInstrAlternativeMappings.
410 assert((OpdMapper.getInstrMapping().getID() >= 1 &&
411 OpdMapper.getInstrMapping().getID() <= 4) &&
412 "Don't know how to handle that ID");
413 return applyDefaultMapping(OpdMapper);
414 case TargetOpcode::G_INSERT_VECTOR_ELT: {
415 // Extend smaller gpr operands to 32 bit.
416 Builder.setInsertPt(*MI.getParent(), MI.getIterator());
417 auto Ext = Builder.buildAnyExt(LLT::scalar(32), MI.getOperand(2).getReg());
418 MRI.setRegBank(Ext.getReg(0), getRegBank(AArch64::GPRRegBankID));
419 MI.getOperand(2).setReg(Ext.getReg(0));
420 return applyDefaultMapping(OpdMapper);
421 }
422 default:
423 llvm_unreachable("Don't know how to handle that operation");
424 }
425}
426
428AArch64RegisterBankInfo::getSameKindOfOperandsMapping(
429 const MachineInstr &MI) const {
430 const unsigned Opc = MI.getOpcode();
431 const MachineFunction &MF = *MI.getParent()->getParent();
432 const MachineRegisterInfo &MRI = MF.getRegInfo();
433
434 unsigned NumOperands = MI.getNumOperands();
435 assert(NumOperands <= 3 &&
436 "This code is for instructions with 3 or less operands");
437
438 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
439 unsigned Size = Ty.getSizeInBits();
440 bool IsFPR = Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
441
443
444#ifndef NDEBUG
445 // Make sure all the operands are using similar size and type.
446 // Should probably be checked by the machine verifier.
447 // This code won't catch cases where the number of lanes is
448 // different between the operands.
449 // If we want to go to that level of details, it is probably
450 // best to check that the types are the same, period.
451 // Currently, we just check that the register banks are the same
452 // for each types.
453 for (unsigned Idx = 1; Idx != NumOperands; ++Idx) {
454 LLT OpTy = MRI.getType(MI.getOperand(Idx).getReg());
455 assert(
457 RBIdx, OpTy.getSizeInBits()) ==
459 "Operand has incompatible size");
460 bool OpIsFPR = OpTy.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
461 (void)OpIsFPR;
462 assert(IsFPR == OpIsFPR && "Operand has incompatible type");
463 }
464#endif // End NDEBUG.
465
467 getValueMapping(RBIdx, Size), NumOperands);
468}
469
470/// \returns true if a given intrinsic only uses and defines FPRs.
472 const MachineInstr &MI) {
473 // TODO: Add more intrinsics.
474 switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
475 default:
476 return false;
477 case Intrinsic::aarch64_neon_uaddlv:
478 case Intrinsic::aarch64_neon_uaddv:
479 case Intrinsic::aarch64_neon_saddv:
480 case Intrinsic::aarch64_neon_umaxv:
481 case Intrinsic::aarch64_neon_smaxv:
482 case Intrinsic::aarch64_neon_uminv:
483 case Intrinsic::aarch64_neon_sminv:
484 case Intrinsic::aarch64_neon_faddv:
485 case Intrinsic::aarch64_neon_fmaxv:
486 case Intrinsic::aarch64_neon_fminv:
487 case Intrinsic::aarch64_neon_fmaxnmv:
488 case Intrinsic::aarch64_neon_fminnmv:
489 return true;
490 case Intrinsic::aarch64_neon_saddlv: {
491 const LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
492 return SrcTy.getElementType().getSizeInBits() >= 16 &&
493 SrcTy.getElementCount().getFixedValue() >= 4;
494 }
495 }
496}
497
498bool AArch64RegisterBankInfo::hasFPConstraints(const MachineInstr &MI,
500 const TargetRegisterInfo &TRI,
501 unsigned Depth) const {
502 unsigned Op = MI.getOpcode();
503 if (Op == TargetOpcode::G_INTRINSIC && isFPIntrinsic(MRI, MI))
504 return true;
505
506 // Do we have an explicit floating point instruction?
508 return true;
509
510 // No. Check if we have a copy-like instruction. If we do, then we could
511 // still be fed by floating point instructions.
512 if (Op != TargetOpcode::COPY && !MI.isPHI() &&
514 return false;
515
516 // Check if we already know the register bank.
517 auto *RB = getRegBank(MI.getOperand(0).getReg(), MRI, TRI);
518 if (RB == &AArch64::FPRRegBank)
519 return true;
520 if (RB == &AArch64::GPRRegBank)
521 return false;
522
523 // We don't know anything.
524 //
525 // If we have a phi, we may be able to infer that it will be assigned a FPR
526 // based off of its inputs.
527 if (!MI.isPHI() || Depth > MaxFPRSearchDepth)
528 return false;
529
530 return any_of(MI.explicit_uses(), [&](const MachineOperand &Op) {
531 return Op.isReg() &&
532 onlyDefinesFP(*MRI.getVRegDef(Op.getReg()), MRI, TRI, Depth + 1);
533 });
534}
535
536bool AArch64RegisterBankInfo::onlyUsesFP(const MachineInstr &MI,
538 const TargetRegisterInfo &TRI,
539 unsigned Depth) const {
540 switch (MI.getOpcode()) {
541 case TargetOpcode::G_FPTOSI:
542 case TargetOpcode::G_FPTOUI:
543 case TargetOpcode::G_FCMP:
544 case TargetOpcode::G_LROUND:
545 case TargetOpcode::G_LLROUND:
546 return true;
547 default:
548 break;
549 }
550 return hasFPConstraints(MI, MRI, TRI, Depth);
551}
552
553bool AArch64RegisterBankInfo::onlyDefinesFP(const MachineInstr &MI,
555 const TargetRegisterInfo &TRI,
556 unsigned Depth) const {
557 switch (MI.getOpcode()) {
558 case AArch64::G_DUP:
559 case TargetOpcode::G_SITOFP:
560 case TargetOpcode::G_UITOFP:
561 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
562 case TargetOpcode::G_INSERT_VECTOR_ELT:
563 case TargetOpcode::G_BUILD_VECTOR:
564 case TargetOpcode::G_BUILD_VECTOR_TRUNC:
565 return true;
566 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
567 switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
568 case Intrinsic::aarch64_neon_ld1x2:
569 case Intrinsic::aarch64_neon_ld1x3:
570 case Intrinsic::aarch64_neon_ld1x4:
571 case Intrinsic::aarch64_neon_ld2:
572 case Intrinsic::aarch64_neon_ld2lane:
573 case Intrinsic::aarch64_neon_ld2r:
574 case Intrinsic::aarch64_neon_ld3:
575 case Intrinsic::aarch64_neon_ld3lane:
576 case Intrinsic::aarch64_neon_ld3r:
577 case Intrinsic::aarch64_neon_ld4:
578 case Intrinsic::aarch64_neon_ld4lane:
579 case Intrinsic::aarch64_neon_ld4r:
580 return true;
581 default:
582 break;
583 }
584 break;
585 default:
586 break;
587 }
588 return hasFPConstraints(MI, MRI, TRI, Depth);
589}
590
591bool AArch64RegisterBankInfo::isLoadFromFPType(const MachineInstr &MI) const {
592 // GMemOperation because we also want to match indexed loads.
593 auto *MemOp = cast<GMemOperation>(&MI);
594 const Value *LdVal = MemOp->getMMO().getValue();
595 if (!LdVal)
596 return false;
597
598 Type *EltTy = nullptr;
599 if (const GlobalValue *GV = dyn_cast<GlobalValue>(LdVal)) {
600 EltTy = GV->getValueType();
601 // Look at the first element of the struct to determine the type we are
602 // loading
603 while (StructType *StructEltTy = dyn_cast<StructType>(EltTy)) {
604 if (StructEltTy->getNumElements() == 0)
605 break;
606 EltTy = StructEltTy->getTypeAtIndex(0U);
607 }
608 // Look at the first element of the array to determine its type
609 if (isa<ArrayType>(EltTy))
610 EltTy = EltTy->getArrayElementType();
611 } else {
612 // FIXME: grubbing around uses is pretty ugly, but with no more
613 // `getPointerElementType` there's not much else we can do.
614 for (const auto *LdUser : LdVal->users()) {
615 if (isa<LoadInst>(LdUser)) {
616 EltTy = LdUser->getType();
617 break;
618 }
619 if (isa<StoreInst>(LdUser) && LdUser->getOperand(1) == LdVal) {
620 EltTy = LdUser->getOperand(0)->getType();
621 break;
622 }
623 }
624 }
625 return EltTy && EltTy->isFPOrFPVectorTy();
626}
627
630 const unsigned Opc = MI.getOpcode();
631
632 // Try the default logic for non-generic instructions that are either copies
633 // or already have some operands assigned to banks.
634 if ((Opc != TargetOpcode::COPY && !isPreISelGenericOpcode(Opc)) ||
635 Opc == TargetOpcode::G_PHI) {
638 if (Mapping.isValid())
639 return Mapping;
640 }
641
642 const MachineFunction &MF = *MI.getParent()->getParent();
643 const MachineRegisterInfo &MRI = MF.getRegInfo();
644 const TargetSubtargetInfo &STI = MF.getSubtarget();
645 const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
646
647 switch (Opc) {
648 // G_{F|S|U}REM are not listed because they are not legal.
649 // Arithmetic ops.
650 case TargetOpcode::G_ADD:
651 case TargetOpcode::G_SUB:
652 case TargetOpcode::G_PTR_ADD:
653 case TargetOpcode::G_MUL:
654 case TargetOpcode::G_SDIV:
655 case TargetOpcode::G_UDIV:
656 // Bitwise ops.
657 case TargetOpcode::G_AND:
658 case TargetOpcode::G_OR:
659 case TargetOpcode::G_XOR:
660 // Floating point ops.
661 case TargetOpcode::G_FADD:
662 case TargetOpcode::G_FSUB:
663 case TargetOpcode::G_FMUL:
664 case TargetOpcode::G_FDIV:
665 case TargetOpcode::G_FMAXIMUM:
666 case TargetOpcode::G_FMINIMUM:
667 return getSameKindOfOperandsMapping(MI);
668 case TargetOpcode::G_FPEXT: {
669 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
670 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
672 DefaultMappingID, /*Cost*/ 1,
674 /*NumOperands*/ 2);
675 }
676 // Shifts.
677 case TargetOpcode::G_SHL:
678 case TargetOpcode::G_LSHR:
679 case TargetOpcode::G_ASHR: {
680 LLT ShiftAmtTy = MRI.getType(MI.getOperand(2).getReg());
681 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
682 if (ShiftAmtTy.getSizeInBits() == 64 && SrcTy.getSizeInBits() == 32)
685 return getSameKindOfOperandsMapping(MI);
686 }
687 case TargetOpcode::COPY: {
688 Register DstReg = MI.getOperand(0).getReg();
689 Register SrcReg = MI.getOperand(1).getReg();
690 // Check if one of the register is not a generic register.
691 if ((DstReg.isPhysical() || !MRI.getType(DstReg).isValid()) ||
692 (SrcReg.isPhysical() || !MRI.getType(SrcReg).isValid())) {
693 const RegisterBank *DstRB = getRegBank(DstReg, MRI, TRI);
694 const RegisterBank *SrcRB = getRegBank(SrcReg, MRI, TRI);
695 if (!DstRB)
696 DstRB = SrcRB;
697 else if (!SrcRB)
698 SrcRB = DstRB;
699 // If both RB are null that means both registers are generic.
700 // We shouldn't be here.
701 assert(DstRB && SrcRB && "Both RegBank were nullptr");
702 unsigned Size = getSizeInBits(DstReg, MRI, TRI);
705 getCopyMapping(DstRB->getID(), SrcRB->getID(), Size),
706 // We only care about the mapping of the destination.
707 /*NumOperands*/ 1);
708 }
709 // Both registers are generic, use G_BITCAST.
710 [[fallthrough]];
711 }
712 case TargetOpcode::G_BITCAST: {
713 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
714 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
715 unsigned Size = DstTy.getSizeInBits();
716 bool DstIsGPR = !DstTy.isVector() && DstTy.getSizeInBits() <= 64;
717 bool SrcIsGPR = !SrcTy.isVector() && SrcTy.getSizeInBits() <= 64;
718 const RegisterBank &DstRB =
719 DstIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
720 const RegisterBank &SrcRB =
721 SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
724 getCopyMapping(DstRB.getID(), SrcRB.getID(), Size),
725 // We only care about the mapping of the destination for COPY.
726 /*NumOperands*/ Opc == TargetOpcode::G_BITCAST ? 2 : 1);
727 }
728 default:
729 break;
730 }
731
732 unsigned NumOperands = MI.getNumOperands();
733 unsigned MappingID = DefaultMappingID;
734
735 // Track the size and bank of each register. We don't do partial mappings.
736 SmallVector<unsigned, 4> OpSize(NumOperands);
737 SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
738 for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
739 auto &MO = MI.getOperand(Idx);
740 if (!MO.isReg() || !MO.getReg())
741 continue;
742
743 LLT Ty = MRI.getType(MO.getReg());
744 if (!Ty.isValid())
745 continue;
746 OpSize[Idx] = Ty.getSizeInBits();
747
748 // As a top-level guess, vectors go in FPRs, scalars and pointers in GPRs.
749 // For floating-point instructions, scalars go in FPRs.
751 Ty.getSizeInBits() > 64)
752 OpRegBankIdx[Idx] = PMI_FirstFPR;
753 else
754 OpRegBankIdx[Idx] = PMI_FirstGPR;
755 }
756
757 unsigned Cost = 1;
758 // Some of the floating-point instructions have mixed GPR and FPR operands:
759 // fine-tune the computed mapping.
760 switch (Opc) {
761 case AArch64::G_DUP: {
762 Register ScalarReg = MI.getOperand(1).getReg();
763 LLT ScalarTy = MRI.getType(ScalarReg);
764 auto ScalarDef = MRI.getVRegDef(ScalarReg);
765 // We want to select dup(load) into LD1R.
766 if (ScalarDef->getOpcode() == TargetOpcode::G_LOAD)
767 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
768 // s8 is an exception for G_DUP, which we always want on gpr.
769 else if (ScalarTy.getSizeInBits() != 8 &&
770 (getRegBank(ScalarReg, MRI, TRI) == &AArch64::FPRRegBank ||
771 onlyDefinesFP(*ScalarDef, MRI, TRI)))
772 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
773 else
774 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
775 break;
776 }
777 case TargetOpcode::G_TRUNC: {
778 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
779 if (!SrcTy.isVector() && SrcTy.getSizeInBits() == 128)
780 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
781 break;
782 }
783 case TargetOpcode::G_SITOFP:
784 case TargetOpcode::G_UITOFP: {
785 if (MRI.getType(MI.getOperand(0).getReg()).isVector())
786 break;
787 // Integer to FP conversions don't necessarily happen between GPR -> FPR
788 // regbanks. They can also be done within an FPR register.
789 Register SrcReg = MI.getOperand(1).getReg();
790 if (getRegBank(SrcReg, MRI, TRI) == &AArch64::FPRRegBank)
791 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
792 else
793 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
794 break;
795 }
796 case TargetOpcode::G_FPTOSI:
797 case TargetOpcode::G_FPTOUI:
798 case TargetOpcode::G_INTRINSIC_LRINT:
799 case TargetOpcode::G_INTRINSIC_LLRINT:
800 if (MRI.getType(MI.getOperand(0).getReg()).isVector())
801 break;
802 OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
803 break;
804 case TargetOpcode::G_FCMP: {
805 // If the result is a vector, it must use a FPR.
807 MRI.getType(MI.getOperand(0).getReg()).isVector() ? PMI_FirstFPR
808 : PMI_FirstGPR;
809 OpRegBankIdx = {Idx0,
810 /* Predicate */ PMI_None, PMI_FirstFPR, PMI_FirstFPR};
811 break;
812 }
813 case TargetOpcode::G_BITCAST:
814 // This is going to be a cross register bank copy and this is expensive.
815 if (OpRegBankIdx[0] != OpRegBankIdx[1])
816 Cost = copyCost(
817 *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[0]].RegBank,
818 *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[1]].RegBank,
819 TypeSize::getFixed(OpSize[0]));
820 break;
821 case TargetOpcode::G_LOAD: {
822 // Loading in vector unit is slightly more expensive.
823 // This is actually only true for the LD1R and co instructions,
824 // but anyway for the fast mode this number does not matter and
825 // for the greedy mode the cost of the cross bank copy will
826 // offset this number.
827 // FIXME: Should be derived from the scheduling model.
828 if (OpRegBankIdx[0] != PMI_FirstGPR) {
829 Cost = 2;
830 break;
831 }
832
833 if (cast<GLoad>(MI).isAtomic()) {
834 // Atomics always use GPR destinations. Don't refine any further.
835 OpRegBankIdx[0] = PMI_FirstGPR;
836 break;
837 }
838
839 // Try to guess the type of the load from the MMO.
840 if (isLoadFromFPType(MI)) {
841 OpRegBankIdx[0] = PMI_FirstFPR;
842 break;
843 }
844
845 // Check if that load feeds fp instructions.
846 // In that case, we want the default mapping to be on FPR
847 // instead of blind map every scalar to GPR.
848 if (any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
849 [&](const MachineInstr &UseMI) {
850 // If we have at least one direct use in a FP instruction,
851 // assume this was a floating point load in the IR. If it was
852 // not, we would have had a bitcast before reaching that
853 // instruction.
854 //
855 // Int->FP conversion operations are also captured in
856 // onlyDefinesFP().
857 return onlyUsesFP(UseMI, MRI, TRI) ||
858 onlyDefinesFP(UseMI, MRI, TRI);
859 }))
860 OpRegBankIdx[0] = PMI_FirstFPR;
861 break;
862 }
863 case TargetOpcode::G_STORE:
864 // Check if that store is fed by fp instructions.
865 if (OpRegBankIdx[0] == PMI_FirstGPR) {
866 Register VReg = MI.getOperand(0).getReg();
867 if (!VReg)
868 break;
869 MachineInstr *DefMI = MRI.getVRegDef(VReg);
870 if (onlyDefinesFP(*DefMI, MRI, TRI))
871 OpRegBankIdx[0] = PMI_FirstFPR;
872 break;
873 }
874 break;
875 case TargetOpcode::G_INDEXED_STORE:
876 if (OpRegBankIdx[1] == PMI_FirstGPR) {
877 Register VReg = MI.getOperand(1).getReg();
878 if (!VReg)
879 break;
880 MachineInstr *DefMI = MRI.getVRegDef(VReg);
881 if (onlyDefinesFP(*DefMI, MRI, TRI))
882 OpRegBankIdx[1] = PMI_FirstFPR;
883 break;
884 }
885 break;
886 case TargetOpcode::G_INDEXED_SEXTLOAD:
887 case TargetOpcode::G_INDEXED_ZEXTLOAD:
888 // These should always be GPR.
889 OpRegBankIdx[0] = PMI_FirstGPR;
890 break;
891 case TargetOpcode::G_INDEXED_LOAD: {
892 if (isLoadFromFPType(MI))
893 OpRegBankIdx[0] = PMI_FirstFPR;
894 break;
895 }
896 case TargetOpcode::G_SELECT: {
897 // If the destination is FPR, preserve that.
898 if (OpRegBankIdx[0] != PMI_FirstGPR)
899 break;
900
901 // If we're taking in vectors, we have no choice but to put everything on
902 // FPRs, except for the condition. The condition must always be on a GPR.
903 LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
904 if (SrcTy.isVector()) {
906 break;
907 }
908
909 // Try to minimize the number of copies. If we have more floating point
910 // constrained values than not, then we'll put everything on FPR. Otherwise,
911 // everything has to be on GPR.
912 unsigned NumFP = 0;
913
914 // Check if the uses of the result always produce floating point values.
915 //
916 // For example:
917 //
918 // %z = G_SELECT %cond %x %y
919 // fpr = G_FOO %z ...
920 if (any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
921 [&](MachineInstr &MI) { return onlyUsesFP(MI, MRI, TRI); }))
922 ++NumFP;
923
924 // Check if the defs of the source values always produce floating point
925 // values.
926 //
927 // For example:
928 //
929 // %x = G_SOMETHING_ALWAYS_FLOAT %a ...
930 // %z = G_SELECT %cond %x %y
931 //
932 // Also check whether or not the sources have already been decided to be
933 // FPR. Keep track of this.
934 //
935 // This doesn't check the condition, since it's just whatever is in NZCV.
936 // This isn't passed explicitly in a register to fcsel/csel.
937 for (unsigned Idx = 2; Idx < 4; ++Idx) {
938 Register VReg = MI.getOperand(Idx).getReg();
939 MachineInstr *DefMI = MRI.getVRegDef(VReg);
940 if (getRegBank(VReg, MRI, TRI) == &AArch64::FPRRegBank ||
941 onlyDefinesFP(*DefMI, MRI, TRI))
942 ++NumFP;
943 }
944
945 // If we have more FP constraints than not, then move everything over to
946 // FPR.
947 if (NumFP >= 2)
949
950 break;
951 }
952 case TargetOpcode::G_UNMERGE_VALUES: {
953 // If the first operand belongs to a FPR register bank, then make sure that
954 // we preserve that.
955 if (OpRegBankIdx[0] != PMI_FirstGPR)
956 break;
957
958 LLT SrcTy = MRI.getType(MI.getOperand(MI.getNumOperands()-1).getReg());
959 // UNMERGE into scalars from a vector should always use FPR.
960 // Likewise if any of the uses are FP instructions.
961 if (SrcTy.isVector() || SrcTy == LLT::scalar(128) ||
962 any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
963 [&](MachineInstr &MI) { return onlyUsesFP(MI, MRI, TRI); })) {
964 // Set the register bank of every operand to FPR.
965 for (unsigned Idx = 0, NumOperands = MI.getNumOperands();
966 Idx < NumOperands; ++Idx)
967 OpRegBankIdx[Idx] = PMI_FirstFPR;
968 }
969 break;
970 }
971 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
972 // Destination and source need to be FPRs.
973 OpRegBankIdx[0] = PMI_FirstFPR;
974 OpRegBankIdx[1] = PMI_FirstFPR;
975
976 // Index needs to be a GPR.
977 OpRegBankIdx[2] = PMI_FirstGPR;
978 break;
979 case TargetOpcode::G_INSERT_VECTOR_ELT:
980 OpRegBankIdx[0] = PMI_FirstFPR;
981 OpRegBankIdx[1] = PMI_FirstFPR;
982
983 // The element may be either a GPR or FPR. Preserve that behaviour.
984 if (getRegBank(MI.getOperand(2).getReg(), MRI, TRI) == &AArch64::FPRRegBank)
985 OpRegBankIdx[2] = PMI_FirstFPR;
986 else {
987 // If the type is i8/i16, and the regbank will be GPR, then we change the
988 // type to i32 in applyMappingImpl.
989 LLT Ty = MRI.getType(MI.getOperand(2).getReg());
990 if (Ty.getSizeInBits() == 8 || Ty.getSizeInBits() == 16)
991 MappingID = 1;
992 OpRegBankIdx[2] = PMI_FirstGPR;
993 }
994
995 // Index needs to be a GPR.
996 OpRegBankIdx[3] = PMI_FirstGPR;
997 break;
998 case TargetOpcode::G_EXTRACT: {
999 // For s128 sources we have to use fpr unless we know otherwise.
1000 auto Src = MI.getOperand(1).getReg();
1001 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
1002 if (SrcTy.getSizeInBits() != 128)
1003 break;
1004 auto Idx = MRI.getRegClassOrNull(Src) == &AArch64::XSeqPairsClassRegClass
1005 ? PMI_FirstGPR
1006 : PMI_FirstFPR;
1007 OpRegBankIdx[0] = Idx;
1008 OpRegBankIdx[1] = Idx;
1009 break;
1010 }
1011 case TargetOpcode::G_BUILD_VECTOR: {
1012 // If the first source operand belongs to a FPR register bank, then make
1013 // sure that we preserve that.
1014 if (OpRegBankIdx[1] != PMI_FirstGPR)
1015 break;
1016 Register VReg = MI.getOperand(1).getReg();
1017 if (!VReg)
1018 break;
1019
1020 // Get the instruction that defined the source operand reg, and check if
1021 // it's a floating point operation. Or, if it's a type like s16 which
1022 // doesn't have an exact size gpr register class. The exception is if the
1023 // build_vector has all constant operands, which may be better to leave as
1024 // gpr without copies, so it can be matched in imported patterns.
1025 MachineInstr *DefMI = MRI.getVRegDef(VReg);
1026 unsigned DefOpc = DefMI->getOpcode();
1027 const LLT SrcTy = MRI.getType(VReg);
1028 if (all_of(MI.operands(), [&](const MachineOperand &Op) {
1029 return Op.isDef() || MRI.getVRegDef(Op.getReg())->getOpcode() ==
1030 TargetOpcode::G_CONSTANT;
1031 }))
1032 break;
1034 SrcTy.getSizeInBits() < 32 ||
1035 getRegBank(VReg, MRI, TRI) == &AArch64::FPRRegBank) {
1036 // Have a floating point op.
1037 // Make sure every operand gets mapped to a FPR register class.
1038 unsigned NumOperands = MI.getNumOperands();
1039 for (unsigned Idx = 0; Idx < NumOperands; ++Idx)
1040 OpRegBankIdx[Idx] = PMI_FirstFPR;
1041 }
1042 break;
1043 }
1044 case TargetOpcode::G_VECREDUCE_FADD:
1045 case TargetOpcode::G_VECREDUCE_FMUL:
1046 case TargetOpcode::G_VECREDUCE_FMAX:
1047 case TargetOpcode::G_VECREDUCE_FMIN:
1048 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
1049 case TargetOpcode::G_VECREDUCE_FMINIMUM:
1050 case TargetOpcode::G_VECREDUCE_ADD:
1051 case TargetOpcode::G_VECREDUCE_MUL:
1052 case TargetOpcode::G_VECREDUCE_AND:
1053 case TargetOpcode::G_VECREDUCE_OR:
1054 case TargetOpcode::G_VECREDUCE_XOR:
1055 case TargetOpcode::G_VECREDUCE_SMAX:
1056 case TargetOpcode::G_VECREDUCE_SMIN:
1057 case TargetOpcode::G_VECREDUCE_UMAX:
1058 case TargetOpcode::G_VECREDUCE_UMIN:
1059 // Reductions produce a scalar value from a vector, the scalar should be on
1060 // FPR bank.
1061 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
1062 break;
1063 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
1064 case TargetOpcode::G_VECREDUCE_SEQ_FMUL:
1065 // These reductions also take a scalar accumulator input.
1066 // Assign them FPR for now.
1067 OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR, PMI_FirstFPR};
1068 break;
1069 case TargetOpcode::G_INTRINSIC:
1070 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
1071 // Check if we know that the intrinsic has any constraints on its register
1072 // banks. If it does, then update the mapping accordingly.
1073 unsigned Idx = 0;
1074 if (onlyDefinesFP(MI, MRI, TRI))
1075 for (const auto &Op : MI.defs()) {
1076 if (Op.isReg())
1077 OpRegBankIdx[Idx] = PMI_FirstFPR;
1078 ++Idx;
1079 }
1080 else
1081 Idx += MI.getNumExplicitDefs();
1082
1083 if (onlyUsesFP(MI, MRI, TRI))
1084 for (const auto &Op : MI.explicit_uses()) {
1085 if (Op.isReg())
1086 OpRegBankIdx[Idx] = PMI_FirstFPR;
1087 ++Idx;
1088 }
1089 break;
1090 }
1091 case TargetOpcode::G_LROUND:
1092 case TargetOpcode::G_LLROUND: {
1093 // Source is always floating point and destination is always integer.
1094 OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
1095 break;
1096 }
1097 }
1098
1099 // Finally construct the computed mapping.
1100 SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
1101 for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
1102 if (MI.getOperand(Idx).isReg() && MI.getOperand(Idx).getReg()) {
1103 LLT Ty = MRI.getType(MI.getOperand(Idx).getReg());
1104 if (!Ty.isValid())
1105 continue;
1106 auto Mapping = getValueMapping(OpRegBankIdx[Idx], OpSize[Idx]);
1107 if (!Mapping->isValid())
1109
1110 OpdsMapping[Idx] = Mapping;
1111 }
1112 }
1113
1114 return getInstructionMapping(MappingID, Cost, getOperandsMapping(OpdsMapping),
1115 NumOperands);
1116}
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
static unsigned getIntrinsicID(const SDNode *N)
#define CHECK_VALUEMAP(RBName, Size)
static bool isFPIntrinsic(const MachineRegisterInfo &MRI, const MachineInstr &MI)
#define CHECK_VALUEMAP_3OPS(RBName, Size)
#define CHECK_PARTIALMAP(Idx, ValStartIdx, ValLength, RB)
#define CHECK_VALUEMAP_CROSSREGCPY(RBNameDst, RBNameSrc, Size)
#define CHECK_VALUEMAP_FPEXT(DstSize, SrcSize)
This file declares the targeting of the RegisterBankInfo class for AArch64.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
uint64_t Size
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
IRTranslator LLVM IR MI
Implement a low-level type suitable for MachineInstr level instruction selection.
This file declares the MachineIRBuilder class.
unsigned const TargetRegisterInfo * TRI
ppc ctr loops verify
static const MCPhysReg FPR[]
FPR - The set of FP registers that should be allocated for arguments on Darwin and AIX.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
static const RegisterBankInfo::ValueMapping * getValueMapping(PartialMappingIdx RBIdx, unsigned Size)
Get the pointer to the ValueMapping representing the RegisterBank at RBIdx with a size of Size.
static bool checkPartialMappingIdx(PartialMappingIdx FirstAlias, PartialMappingIdx LastAlias, ArrayRef< PartialMappingIdx > Order)
static const RegisterBankInfo::PartialMapping PartMappings[]
static unsigned getRegBankBaseIdxOffset(unsigned RBIdx, unsigned Size)
static const RegisterBankInfo::ValueMapping * getFPExtMapping(unsigned DstSize, unsigned SrcSize)
Get the instruction mapping for G_FPEXT.
static const RegisterBankInfo::ValueMapping * getCopyMapping(unsigned DstBankID, unsigned SrcBankID, unsigned Size)
Get the pointer to the ValueMapping of the operands of a copy instruction from the SrcBankID register...
static const RegisterBankInfo::ValueMapping ValMappings[]
const RegisterBank & getRegBankFromRegClass(const TargetRegisterClass &RC, LLT) const override
Get a register bank that covers RC.
InstructionMappings getInstrAlternativeMappings(const MachineInstr &MI) const override
Get the alternative mappings for MI.
unsigned copyCost(const RegisterBank &A, const RegisterBank &B, TypeSize Size) const override
Get the cost of a copy from B to A, or put differently, get the cost of A = COPY B.
AArch64RegisterBankInfo(const TargetRegisterInfo &TRI)
const InstructionMapping & getInstrMapping(const MachineInstr &MI) const override
Get the mapping of the different operands of MI on the register bank.
This class represents an Operation in the Expression.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:193
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:290
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:184
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Helper class to build MachineInstr.
void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II)
Set the insertion point before the specified position.
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:558
MachineOperand class - Representation of each machine instruction operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Helper class that represents how the value of an instruction may be mapped and what is the related co...
bool isValid() const
Check whether this object is valid.
virtual InstructionMappings getInstrAlternativeMappings(const MachineInstr &MI) const
Get the alternative mappings for MI.
const InstructionMapping & getInstructionMapping(unsigned ID, unsigned Cost, const ValueMapping *OperandsMapping, unsigned NumOperands) const
Method to get a uniquely generated InstructionMapping.
static void applyDefaultMapping(const OperandsMapper &OpdMapper)
Helper method to apply something that is like the default mapping.
const InstructionMapping & getInvalidInstructionMapping() const
Method to get a uniquely generated invalid InstructionMapping.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getMaximumSize(unsigned RegBankID) const
Get the maximum size in bits that fits in the given register bank.
TypeSize getSizeInBits(Register Reg, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI) const
Get the size in bits of Reg.
const ValueMapping * getOperandsMapping(Iterator Begin, Iterator End) const
Get the uniquely generated array of ValueMapping for the elements of between Begin and End.
static const unsigned DefaultMappingID
Identifier used when the related instruction mapping instance is generated by target independent code...
virtual unsigned copyCost(const RegisterBank &A, const RegisterBank &B, TypeSize Size) const
Get the cost of a copy from B to A, or put differently, get the cost of A = COPY B.
const InstructionMapping & getInstrMappingImpl(const MachineInstr &MI) const
Try to get the mapping of MI.
This class implements the register bank concept.
Definition: RegisterBank.h:28
bool covers(const TargetRegisterClass &RC) const
Check whether this register bank covers RC.
unsigned getID() const
Get the identifier of this register bank.
Definition: RegisterBank.h:45
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
Class to represent struct types.
Definition: DerivedTypes.h:216
unsigned getID() const
Return the register class ID number.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition: TypeSize.h:342
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
Type * getArrayElementType() const
Definition: Type.h:404
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:216
LLVM Value Representation.
Definition: Value.h:74
iterator_range< user_iterator > users()
Definition: Value.h:421
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:199
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Definition: TargetOpcodes.h:30
bool isPreISelGenericOptimizationHint(unsigned Opcode)
Definition: TargetOpcodes.h:42
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
void call_once(once_flag &flag, Function &&F, Args &&... ArgList)
Execute the function specified as a parameter once.
Definition: Threading.h:87
bool isPreISelGenericFloatingPointOpcode(unsigned Opc)
Returns whether opcode Opc is a pre-isel generic floating-point opcode, having only floating-point op...
Definition: Utils.cpp:1683
The llvm::once_flag structure.
Definition: Threading.h:68