MipsLegalizerInfo.cpp
//===- MipsLegalizerInfo.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for Mips.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MipsLegalizerInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/IR/IntrinsicsMips.h"

using namespace llvm;

struct TypesAndMemOps {
  LLT ValTy;
  LLT PtrTy;
  unsigned MemSize;
  bool SystemSupportsUnalignedAccess;
};

// Assumes power of 2 memory size. Subtargets that have only naturally-aligned
// memory access need to perform additional legalization here.
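// For example, a 4-byte access (MemSize = 32 bits) aligned to only 2 bytes
// (AlignInBits = 16) is unaligned here: 32 > 16. The same access aligned to
// 4 or more bytes is naturally aligned.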
static bool isUnalignedMemoryAccess(uint64_t MemSize, uint64_t AlignInBits) {
  assert(isPowerOf2_64(MemSize) && "Expected power of 2 memory size");
  assert(isPowerOf2_64(AlignInBits) && "Expected power of 2 align");
  if (MemSize > AlignInBits)
    return true;
  return false;
}

static bool
CheckTy0Ty1MemSizeAlign(const LegalityQuery &Query,
                        std::initializer_list<TypesAndMemOps> SupportedValues) {
  unsigned QueryMemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits();

  // Non-power-of-2 memory access is never legal.
  if (!isPowerOf2_64(QueryMemSize))
    return false;

  for (auto &Val : SupportedValues) {
    if (Val.ValTy != Query.Types[0])
      continue;
    if (Val.PtrTy != Query.Types[1])
      continue;
    if (Val.MemSize != QueryMemSize)
      continue;
    if (!Val.SystemSupportsUnalignedAccess &&
        isUnalignedMemoryAccess(QueryMemSize, Query.MMODescrs[0].AlignInBits))
      return false;
    return true;
  }
  return false;
}

static bool CheckTyN(unsigned N, const LegalityQuery &Query,
                     std::initializer_list<LLT> SupportedValues) {
  return llvm::is_contained(SupportedValues, Query.Types[N]);
}

MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
  using namespace TargetOpcode;

  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT v16s8 = LLT::fixed_vector(16, 8);
  const LLT v8s16 = LLT::fixed_vector(8, 16);
  const LLT v4s32 = LLT::fixed_vector(4, 32);
  const LLT v2s64 = LLT::fixed_vector(2, 64);
  const LLT p0 = LLT::pointer(0, 32);

  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      })
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder({G_UADDO, G_UADDE, G_USUBO, G_USUBE, G_UMULO})
      .lowerFor({{s32, s1}});

  getActionDefinitionsBuilder(G_UMULH)
      .legalFor({s32})
      .maxScalar(0, s32);

  // MIPS32r6 does not have alignment restrictions for memory access.
  // For MIPS32r5 and older, memory access must be naturally aligned, i.e.
  // aligned to at least its own size. There is, however, a two-instruction
  // combination that performs 4-byte unaligned access (lwr/lwl and swl/swr),
  // so 4-byte loads and stores are legal and use NoAlignRequirements.
  bool NoAlignRequirements = true;
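  // For example, on MIPS32r5 (where systemSupportsUnalignedAccess() is false)
  // a 2-byte load is legal only when at least 2-byte aligned; a byte-aligned
  // 2-byte load falls through to the customIf rule below, while a 4-byte load
  // is legal at any alignment.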

  getActionDefinitionsBuilder({G_LOAD, G_STORE})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTy0Ty1MemSizeAlign(
                Query, {{s32, p0, 8, NoAlignRequirements},
                        {s32, p0, 16, ST.systemSupportsUnalignedAccess()},
                        {s32, p0, 32, NoAlignRequirements},
                        {p0, p0, 32, NoAlignRequirements},
                        {s64, p0, 64, ST.systemSupportsUnalignedAccess()}}))
          return true;
        if (ST.hasMSA() && CheckTy0Ty1MemSizeAlign(
                               Query, {{v16s8, p0, 128, NoAlignRequirements},
                                       {v8s16, p0, 128, NoAlignRequirements},
                                       {v4s32, p0, 128, NoAlignRequirements},
                                       {v2s64, p0, 128, NoAlignRequirements}}))
          return true;
        return false;
      })
      // Custom lower scalar memory access, up to 8 bytes, for:
      // - non-power-of-2 MemSizes
      // - unaligned 2- or 8-byte MemSizes for MIPS32r5 and older
      .customIf([=, &ST](const LegalityQuery &Query) {
        if (!Query.Types[0].isScalar() || Query.Types[1] != p0 ||
            Query.Types[0] == s1)
          return false;

        unsigned Size = Query.Types[0].getSizeInBits();
        unsigned QueryMemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits();
        assert(QueryMemSize <= Size && "Scalar can't hold MemSize");

        if (Size > 64 || QueryMemSize > 64)
          return false;

        if (!isPowerOf2_64(Query.MMODescrs[0].MemoryTy.getSizeInBits()))
          return true;

        if (!ST.systemSupportsUnalignedAccess() &&
            isUnalignedMemoryAccess(QueryMemSize,
                                    Query.MMODescrs[0].AlignInBits)) {
          assert(QueryMemSize != 32 && "4 byte load and store are legal");
          return true;
        }

        return false;
      })
      .minScalar(0, s32)
      .lower();

  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
      .legalFor({s32, s64});

  getActionDefinitionsBuilder(G_UNMERGE_VALUES)
      .legalFor({{s32, s64}});

  getActionDefinitionsBuilder(G_MERGE_VALUES)
      .legalFor({{s64, s32}});

  getActionDefinitionsBuilder({G_ZEXTLOAD, G_SEXTLOAD})
      .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                 {s32, p0, s16, 8}})
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
      .legalIf([](const LegalityQuery &Query) { return false; })
      .maxScalar(0, s32);

  getActionDefinitionsBuilder(G_TRUNC)
      .legalIf([](const LegalityQuery &Query) { return false; })
      .maxScalar(1, s32);

  getActionDefinitionsBuilder(G_SELECT)
      .legalForCartesianProduct({p0, s32, s64}, {s32})
      .minScalar(0, s32)
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_BRCOND)
      .legalFor({s32})
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_BRJT)
      .legalFor({{p0, s32}});

  getActionDefinitionsBuilder(G_BRINDIRECT)
      .legalFor({p0});

  getActionDefinitionsBuilder(G_PHI)
      .legalFor({p0, s32, s64})
      .minScalar(0, s32);

  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
      .legalFor({s32})
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder({G_SDIV, G_SREM, G_UDIV, G_UREM})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      })
      .minScalar(0, s32)
      .libcallFor({s64});

  getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
      .legalFor({{s32, s32}})
      .clampScalar(1, s32, s32)
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder(G_ICMP)
      .legalForCartesianProduct({s32}, {s32, p0})
      .clampScalar(1, s32, s32)
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({s32})
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder({G_PTR_ADD, G_INTTOPTR})
      .legalFor({{p0, s32}});

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{s32, p0}});

  getActionDefinitionsBuilder(G_FRAME_INDEX)
      .legalFor({p0});

  getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE})
      .legalFor({p0});

  getActionDefinitionsBuilder(G_DYN_STACKALLOC)
      .lowerFor({{p0, s32}});

  getActionDefinitionsBuilder(G_VASTART)
      .legalFor({p0});

  getActionDefinitionsBuilder(G_BSWAP)
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (ST.hasMips32r2() && CheckTyN(0, Query, {s32}))
          return true;
        return false;
      })
      .lowerIf([=, &ST](const LegalityQuery &Query) {
        if (!ST.hasMips32r2() && CheckTyN(0, Query, {s32}))
          return true;
        return false;
      })
      .maxScalar(0, s32);

  getActionDefinitionsBuilder(G_BITREVERSE)
      .lowerFor({s32})
      .maxScalar(0, s32);

  getActionDefinitionsBuilder(G_CTLZ)
      .legalFor({{s32, s32}})
      .maxScalar(0, s32)
      .maxScalar(1, s32);
  getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
      .lowerFor({{s32, s32}});

  getActionDefinitionsBuilder(G_CTTZ)
      .lowerFor({{s32, s32}})
      .maxScalar(0, s32)
      .maxScalar(1, s32);
  getActionDefinitionsBuilder(G_CTTZ_ZERO_UNDEF)
      .lowerFor({{s32, s32}, {s64, s64}});

  getActionDefinitionsBuilder(G_CTPOP)
      .lowerFor({{s32, s32}})
      .clampScalar(0, s32, s32)
      .clampScalar(1, s32, s32);

  // FP instructions
  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({s32, s64});

  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FABS, G_FSQRT})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32, s64}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      });

  getActionDefinitionsBuilder(G_FCMP)
      .legalFor({{s32, s32}, {s32, s64}})
      .minScalar(0, s32);

  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR})
      .libcallFor({s32, s64});

  getActionDefinitionsBuilder(G_FPEXT)
      .legalFor({{s64, s32}});

  getActionDefinitionsBuilder(G_FPTRUNC)
      .legalFor({{s32, s64}});

  // FP to int conversion instructions
  getActionDefinitionsBuilder(G_FPTOSI)
      .legalForCartesianProduct({s32}, {s64, s32})
      .libcallForCartesianProduct({s64}, {s64, s32})
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_FPTOUI)
      .libcallForCartesianProduct({s64}, {s64, s32})
      .lowerForCartesianProduct({s32}, {s64, s32})
      .minScalar(0, s32);

  // Int to FP conversion instructions
  getActionDefinitionsBuilder(G_SITOFP)
      .legalForCartesianProduct({s64, s32}, {s32})
      .libcallForCartesianProduct({s64, s32}, {s64})
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_UITOFP)
      .libcallForCartesianProduct({s64, s32}, {s64})
      .customForCartesianProduct({s64, s32}, {s32})
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_SEXT_INREG).lower();

  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();

  getLegacyLegalizerInfo().computeTables();
  verify(*ST.getInstrInfo());
}

bool MipsLegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
                                       MachineInstr &MI) const {
  using namespace TargetOpcode;

  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();

  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  switch (MI.getOpcode()) {
  case G_LOAD:
  case G_STORE: {
    unsigned MemSize = (**MI.memoperands_begin()).getSize();
    Register Val = MI.getOperand(0).getReg();
    unsigned Size = MRI.getType(Val).getSizeInBits();

    MachineMemOperand *MMOBase = *MI.memoperands_begin();

    assert(MemSize <= 8 && "MemSize is too large");
    assert(Size <= 64 && "Scalar size is too large");

    // Split MemSize into two: P2HalfMemSize is the largest power of two
    // smaller than MemSize, e.g. 8 = 4 + 4, 6 = 4 + 2, 3 = 2 + 1.
    unsigned P2HalfMemSize, RemMemSize;
    if (isPowerOf2_64(MemSize)) {
      P2HalfMemSize = RemMemSize = MemSize / 2;
    } else {
      P2HalfMemSize = 1 << Log2_32(MemSize);
      RemMemSize = MemSize - P2HalfMemSize;
    }

    Register BaseAddr = MI.getOperand(1).getReg();
    LLT PtrTy = MRI.getType(BaseAddr);
    MachineFunction &MF = MIRBuilder.getMF();

    auto P2HalfMemOp = MF.getMachineMemOperand(MMOBase, 0, P2HalfMemSize);
    auto RemMemOp = MF.getMachineMemOperand(MMOBase, P2HalfMemSize, RemMemSize);

    if (MI.getOpcode() == G_STORE) {
      // Widen Val to s32 or s64 in order to create legal G_LSHR or G_UNMERGE.
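      // For example, an s24 store with MemSize == 3 splits into 2 + 1 bytes:
      // Val is any-extended to s32, its low (least-significant) two bytes are
      // stored at BaseAddr, and Val >> 16 supplies the remaining byte stored
      // at BaseAddr + 2.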
      if (Size < 32)
        Val = MIRBuilder.buildAnyExt(s32, Val).getReg(0);
      if (Size > 32 && Size < 64)
        Val = MIRBuilder.buildAnyExt(s64, Val).getReg(0);

      auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize);
      auto Addr = MIRBuilder.buildPtrAdd(PtrTy, BaseAddr, C_P2HalfMemSize);

      if (MI.getOpcode() == G_STORE && MemSize <= 4) {
        MIRBuilder.buildStore(Val, BaseAddr, *P2HalfMemOp);
        auto C_P2Half_InBits = MIRBuilder.buildConstant(s32, P2HalfMemSize * 8);
        auto Shift = MIRBuilder.buildLShr(s32, Val, C_P2Half_InBits);
        MIRBuilder.buildStore(Shift, Addr, *RemMemOp);
      } else {
        auto Unmerge = MIRBuilder.buildUnmerge(s32, Val);
        MIRBuilder.buildStore(Unmerge.getReg(0), BaseAddr, *P2HalfMemOp);
        MIRBuilder.buildStore(Unmerge.getReg(1), Addr, *RemMemOp);
      }
    }

    if (MI.getOpcode() == G_LOAD) {

      if (MemSize <= 4) {
        // This is an any-extending load; use a 4-byte lwr/lwl sequence.
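        // For example, an s16 load that reaches this custom handler loads a
        // full 4-byte word (selected as lwr/lwl on older cores) and truncates
        // the result back to s16.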
        auto *Load4MMO = MF.getMachineMemOperand(MMOBase, 0, 4);

        if (Size == 32)
          MIRBuilder.buildLoad(Val, BaseAddr, *Load4MMO);
        else {
          auto Load = MIRBuilder.buildLoad(s32, BaseAddr, *Load4MMO);
          MIRBuilder.buildTrunc(Val, Load.getReg(0));
        }

      } else {
        auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize);
        auto Addr = MIRBuilder.buildPtrAdd(PtrTy, BaseAddr, C_P2HalfMemSize);

        auto Load_P2Half = MIRBuilder.buildLoad(s32, BaseAddr, *P2HalfMemOp);
        auto Load_Rem = MIRBuilder.buildLoad(s32, Addr, *RemMemOp);

        if (Size == 64)
          MIRBuilder.buildMergeLikeInstr(Val, {Load_P2Half, Load_Rem});
        else {
          auto Merge =
              MIRBuilder.buildMergeLikeInstr(s64, {Load_P2Half, Load_Rem});
          MIRBuilder.buildTrunc(Val, Merge);
        }
      }
    }
    MI.eraseFromParent();
    break;
  }
  case G_UITOFP: {
    Register Dst = MI.getOperand(0).getReg();
    Register Src = MI.getOperand(1).getReg();
    LLT DstTy = MRI.getType(Dst);
    LLT SrcTy = MRI.getType(Src);

    if (SrcTy != s32)
      return false;
    if (DstTy != s32 && DstTy != s64)
      return false;

    // Let 0xABCDEFGH be the unsigned value in MI.getOperand(1). First convert
    // it to double. The mantissa has 52 bits, so we use the following trick:
    // build the floating-point bit pattern 0x43300000ABCDEFGH, which
    // represents 2^52 * 0x1.00000ABCDEFGH, i.e. 0x100000ABCDEFGH.0.
    // Next, subtract 2^52 * 0x1.0000000000000, i.e. 0x10000000000000.0, from
    // it. Done. Truncate the double to float if needed.
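    // For example, Src = 0xFFFFFFFF yields the bit pattern 0x43300000FFFFFFFF,
    // i.e. the double 2^52 + 4294967295.0 (exact, since it needs only 53
    // significand bits); subtracting 2^52 leaves exactly 4294967295.0.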
442
443 auto C_HiMask = MIRBuilder.buildConstant(s32, UINT32_C(0x43300000));
444 auto Bitcast =
445 MIRBuilder.buildMergeLikeInstr(s64, {Src, C_HiMask.getReg(0)});
446
447 MachineInstrBuilder TwoP52FP = MIRBuilder.buildFConstant(
448 s64, llvm::bit_cast<double>(UINT64_C(0x4330000000000000)));
449
450 if (DstTy == s64)
451 MIRBuilder.buildFSub(Dst, Bitcast, TwoP52FP);
452 else {
453 MachineInstrBuilder ResF64 = MIRBuilder.buildFSub(s64, Bitcast, TwoP52FP);
454 MIRBuilder.buildFPTrunc(Dst, ResF64);
455 }
456
457 MI.eraseFromParent();
458 break;
459 }
460 default:
461 return false;
462 }
463
464 return true;
465}
466
static bool SelectMSA3OpIntrinsic(MachineInstr &MI, unsigned Opcode,
                                  MachineIRBuilder &MIRBuilder,
                                  const MipsSubtarget &ST) {
  assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
  if (!MIRBuilder.buildInstr(Opcode)
           .add(MI.getOperand(0))
           .add(MI.getOperand(2))
           .add(MI.getOperand(3))
           .constrainAllUses(MIRBuilder.getTII(), *ST.getRegisterInfo(),
                             *ST.getRegBankInfo()))
    return false;
  MI.eraseFromParent();
  return true;
}

static bool MSA3OpIntrinsicToGeneric(MachineInstr &MI, unsigned Opcode,
                                     MachineIRBuilder &MIRBuilder,
                                     const MipsSubtarget &ST) {
  assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
  MIRBuilder.buildInstr(Opcode)
      .add(MI.getOperand(0))
      .add(MI.getOperand(2))
      .add(MI.getOperand(3));
  MI.eraseFromParent();
  return true;
}

static bool MSA2OpIntrinsicToGeneric(MachineInstr &MI, unsigned Opcode,
                                     MachineIRBuilder &MIRBuilder,
                                     const MipsSubtarget &ST) {
  assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
  MIRBuilder.buildInstr(Opcode)
      .add(MI.getOperand(0))
      .add(MI.getOperand(2));
  MI.eraseFromParent();
  return true;
}

bool MipsLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                          MachineInstr &MI) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  const MipsSubtarget &ST = MI.getMF()->getSubtarget<MipsSubtarget>();
  const MipsInstrInfo &TII = *ST.getInstrInfo();
  const MipsRegisterInfo &TRI = *ST.getRegisterInfo();
  const RegisterBankInfo &RBI = *ST.getRegBankInfo();

  switch (MI.getIntrinsicID()) {
  case Intrinsic::trap: {
    MachineInstr *Trap = MIRBuilder.buildInstr(Mips::TRAP);
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Trap, TII, TRI, RBI);
  }
  case Intrinsic::vacopy: {
    MachinePointerInfo MPO;
    LLT PtrTy = LLT::pointer(0, 32);
    auto Tmp =
        MIRBuilder.buildLoad(PtrTy, MI.getOperand(2),
                             *MI.getMF()->getMachineMemOperand(
                                 MPO, MachineMemOperand::MOLoad, PtrTy,
                                 Align(4)));
    MIRBuilder.buildStore(Tmp, MI.getOperand(1),
                          *MI.getMF()->getMachineMemOperand(
                              MPO, MachineMemOperand::MOStore, PtrTy,
                              Align(4)));
    MI.eraseFromParent();
    return true;
  }
  case Intrinsic::mips_addv_b:
  case Intrinsic::mips_addv_h:
  case Intrinsic::mips_addv_w:
  case Intrinsic::mips_addv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_ADD, MIRBuilder, ST);
  case Intrinsic::mips_addvi_b:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_B, MIRBuilder, ST);
  case Intrinsic::mips_addvi_h:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_H, MIRBuilder, ST);
  case Intrinsic::mips_addvi_w:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_W, MIRBuilder, ST);
  case Intrinsic::mips_addvi_d:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_D, MIRBuilder, ST);
  case Intrinsic::mips_subv_b:
  case Intrinsic::mips_subv_h:
  case Intrinsic::mips_subv_w:
  case Intrinsic::mips_subv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SUB, MIRBuilder, ST);
  case Intrinsic::mips_subvi_b:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_B, MIRBuilder, ST);
  case Intrinsic::mips_subvi_h:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_H, MIRBuilder, ST);
  case Intrinsic::mips_subvi_w:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_W, MIRBuilder, ST);
  case Intrinsic::mips_subvi_d:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_D, MIRBuilder, ST);
  case Intrinsic::mips_mulv_b:
  case Intrinsic::mips_mulv_h:
  case Intrinsic::mips_mulv_w:
  case Intrinsic::mips_mulv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_MUL, MIRBuilder, ST);
  case Intrinsic::mips_div_s_b:
  case Intrinsic::mips_div_s_h:
  case Intrinsic::mips_div_s_w:
  case Intrinsic::mips_div_s_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SDIV, MIRBuilder, ST);
  case Intrinsic::mips_mod_s_b:
  case Intrinsic::mips_mod_s_h:
  case Intrinsic::mips_mod_s_w:
  case Intrinsic::mips_mod_s_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SREM, MIRBuilder, ST);
  case Intrinsic::mips_div_u_b:
  case Intrinsic::mips_div_u_h:
  case Intrinsic::mips_div_u_w:
  case Intrinsic::mips_div_u_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_UDIV, MIRBuilder, ST);
  case Intrinsic::mips_mod_u_b:
  case Intrinsic::mips_mod_u_h:
  case Intrinsic::mips_mod_u_w:
  case Intrinsic::mips_mod_u_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_UREM, MIRBuilder, ST);
  case Intrinsic::mips_fadd_w:
  case Intrinsic::mips_fadd_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FADD, MIRBuilder, ST);
  case Intrinsic::mips_fsub_w:
  case Intrinsic::mips_fsub_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FSUB, MIRBuilder, ST);
  case Intrinsic::mips_fmul_w:
  case Intrinsic::mips_fmul_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FMUL, MIRBuilder, ST);
  case Intrinsic::mips_fdiv_w:
  case Intrinsic::mips_fdiv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FDIV, MIRBuilder, ST);
  case Intrinsic::mips_fmax_a_w:
    return SelectMSA3OpIntrinsic(MI, Mips::FMAX_A_W, MIRBuilder, ST);
  case Intrinsic::mips_fmax_a_d:
    return SelectMSA3OpIntrinsic(MI, Mips::FMAX_A_D, MIRBuilder, ST);
  case Intrinsic::mips_fsqrt_w:
  case Intrinsic::mips_fsqrt_d:
    return MSA2OpIntrinsicToGeneric(MI, TargetOpcode::G_FSQRT, MIRBuilder, ST);
  default:
    break;
  }
  return true;
}