#include "llvm/IR/IntrinsicsMips.h"
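
// Legality helpers. TypesAndMemOps lists one supported combination of value
// type, pointer type and memory size, plus whether the subtarget tolerates
// an unaligned access of that size.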
using namespace llvm;

struct TypesAndMemOps {
  LLT ValTy;
  LLT PtrTy;
  unsigned MemSize;
  bool SystemSupportsUnalignedAccess;
};

static bool isUnalignedMemmoryAccess(uint64_t MemSize, uint64_t AlignInBits) {
  if (MemSize > AlignInBits)
    return true;
  return false;
}
static bool
CheckTy0Ty1MemSizeAlign(const LegalityQuery &Query,
                        std::initializer_list<TypesAndMemOps> SupportedValues) {
  unsigned QueryMemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits();

  // Non power of two memory access is never legal.
  if (!isPowerOf2_64(QueryMemSize))
    return false;
  for (auto &Val : SupportedValues) {
    if (Val.ValTy != Query.Types[0])
      continue;
    if (Val.PtrTy != Query.Types[1])
      continue;
    if (Val.MemSize != QueryMemSize)
      continue;
    if (!Val.SystemSupportsUnalignedAccess &&
        isUnalignedMemmoryAccess(QueryMemSize, Query.MMODescrs[0].AlignInBits))
      return false;
    return true;
  }
  return false;
}
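
// CheckTyN: is the type at index N one of the given LLTs?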
static bool CheckTyN(unsigned N, const LegalityQuery &Query,
                     std::initializer_list<LLT> SupportedValues) {
  return llvm::is_contained(SupportedValues, Query.Types[N]);
}

MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
  using namespace TargetOpcode;
  // ... LLT shorthands (s1, s32, s64, p0, v16s8, v8s16, v4s32, v2s64) ...

  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      })
      .clampScalar(0, s32, s32);
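
  // Unsigned add/sub/mul with a carry or overflow flag are not legal; lower
  // them to plain s32 arithmetic and compares producing the s1 flag.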
  getActionDefinitionsBuilder({G_UADDO, G_UADDE, G_USUBO, G_USUBE, G_UMULO})
      .lowerFor({{s32, s1}});

  // ...

  bool NoAlignRequirements = true;
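
  // Each row below reads {ValTy, PtrTy, MemSize in bits, unaligned OK}.
  // 16-bit and 64-bit scalar accesses are only legal unaligned when the
  // subtarget supports unaligned access (MIPS32r6).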
  getActionDefinitionsBuilder({G_LOAD, G_STORE})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTy0Ty1MemSizeAlign(
                Query, {{s32, p0, 8, NoAlignRequirements},
                        {s32, p0, 16, ST.systemSupportsUnalignedAccess()},
                        {s32, p0, 32, NoAlignRequirements},
                        {p0, p0, 32, NoAlignRequirements},
                        {s64, p0, 64, ST.systemSupportsUnalignedAccess()}}))
          return true;
        if (ST.hasMSA() &&
            CheckTy0Ty1MemSizeAlign(
                Query, {{v16s8, p0, 128, NoAlignRequirements},
                        {v8s16, p0, 128, NoAlignRequirements},
                        {v4s32, p0, 128, NoAlignRequirements},
                        {v2s64, p0, 128, NoAlignRequirements}}))
          return true;
        return false;
      })
      // Custom lower scalar memory access, up to 8 bytes, for:
      // - non-power-of-2 MemSizes
      // - unaligned 2 or 8 byte MemSizes for MIPS32r5 and older
      .customIf([=, &ST](const LegalityQuery &Query) {
        if (!Query.Types[0].isScalar() || Query.Types[1] != p0 ||
            Query.Types[0] == s1)
          return false;

        unsigned Size = Query.Types[0].getSizeInBits();
        unsigned QueryMemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits();
        assert(QueryMemSize <= Size && "Scalar can't hold MemSize");

        if (Size > 64 || QueryMemSize > 64)
          return false;

        if (!isPowerOf2_64(QueryMemSize))
          return true;

        if (!ST.systemSupportsUnalignedAccess() &&
            isUnalignedMemmoryAccess(QueryMemSize,
                                     Query.MMODescrs[0].AlignInBits)) {
          assert(QueryMemSize != 32 && "4 byte load and store are legal");
          return true;
        }
        return false;
      })
      .minScalar(0, s32)
      .lower();

  // ...
  getActionDefinitionsBuilder({G_ZEXTLOAD, G_SEXTLOAD})
      .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                 {s32, p0, s16, 8}})
      .clampScalar(0, s32, s32);

  // ...

  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
      .legalFor({s32})
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder({G_SDIV, G_SREM, G_UDIV, G_UREM})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      })
      .minScalar(0, s32)
      .libcallFor({s64});

  getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
      .legalFor({{s32, s32}})
      .clampScalar(1, s32, s32)
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder(G_ICMP)
      .legalForCartesianProduct({s32}, {s32, p0})
      .clampScalar(1, s32, s32)
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({s32})
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder({G_PTR_ADD, G_INTTOPTR})
      .legalFor({{p0, s32}});
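
  // ...

  // G_BSWAP is a native instruction only from MIPS32r2 on; older revisions
  // take the generic lowering.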
  getActionDefinitionsBuilder(G_BSWAP)
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (ST.hasMips32r2() && CheckTyN(0, Query, {s32}))
          return true;
        return false;
      })
      .lowerIf([=, &ST](const LegalityQuery &Query) {
        if (!ST.hasMips32r2() && CheckTyN(0, Query, {s32}))
          return true;
        return false;
      })
      .maxScalar(0, s32);

  // ...

  getActionDefinitionsBuilder(G_CTTZ_ZERO_UNDEF)
      .lowerFor({{s32, s32}, {s64, s64}});

  getActionDefinitionsBuilder(G_CTPOP)
      .lowerFor({{s32, s32}})
      .clampScalar(0, s32, s32)
      .clampScalar(1, s32, s32);

  // ...

  getActionDefinitionsBuilder(
      {G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FABS, G_FSQRT})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32, s64}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      });

  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR})
      .libcallFor({s32, s64});

  // ...

  getActionDefinitionsBuilder(G_FPTOSI)
      .legalForCartesianProduct({s32}, {s64, s32})
      .libcallForCartesianProduct({s64}, {s64, s32})
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_FPTOUI)
      .libcallForCartesianProduct({s64}, {s64, s32})
      .lowerForCartesianProduct({s32}, {s64, s32})
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_SITOFP)
      .legalForCartesianProduct({s64, s32}, {s32})
      .libcallForCartesianProduct({s64, s32}, {s64})
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_UITOFP)
      .libcallForCartesianProduct({s64, s32}, {s64})
      .customForCartesianProduct({s64, s32}, {s32})
      .minScalar(1, s32);

  // ...

  getLegacyLegalizerInfo().computeTables();
  verify(*ST.getInstrInfo());
}
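
// legalizeCustom implements the cases marked custom above: scalar load/store
// that must be split, and G_UITOFP from s32.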
bool MipsLegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
                                       MachineInstr &MI) const {
  using namespace TargetOpcode;

  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();

  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  switch (MI.getOpcode()) {
  case G_LOAD:
  case G_STORE: {
    unsigned MemSize = (**MI.memoperands_begin()).getSize();
    Register Val = MI.getOperand(0).getReg();
    unsigned Size = MRI.getType(Val).getSizeInBits();

    MachineMemOperand *MMOBase = *MI.memoperands_begin();

    assert(MemSize <= 8 && "MemSize is too large");
    assert(Size <= 64 && "Scalar size is too large");
    // Split MemSize into two: P2HalfMemSize is the largest power of 2
    // strictly smaller than MemSize, e.g. 8 = 4 + 4, 6 = 4 + 2, 3 = 2 + 1.
    unsigned P2HalfMemSize, RemMemSize;
    if (isPowerOf2_64(MemSize)) {
      P2HalfMemSize = RemMemSize = MemSize / 2;
    } else {
      P2HalfMemSize = 1 << Log2_32(MemSize);
      RemMemSize = MemSize - P2HalfMemSize;
    }
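
    // The two halves are accessed through separate memory operands covering
    // [0, P2HalfMemSize) and [P2HalfMemSize, MemSize) of the original access.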
    Register BaseAddr = MI.getOperand(1).getReg();
    LLT PtrTy = MRI.getType(BaseAddr);
    MachineFunction &MF = MIRBuilder.getMF();

    auto *P2HalfMemOp = MF.getMachineMemOperand(MMOBase, 0, P2HalfMemSize);
    auto *RemMemOp = MF.getMachineMemOperand(MMOBase, P2HalfMemSize, RemMemSize);

    if (MI.getOpcode() == G_STORE) {
      // Widen Val to s32 or s64 in order to create legal G_LSHR or G_UNMERGE.
      if (Size < 32)
        Val = MIRBuilder.buildAnyExt(s32, Val).getReg(0);
      if (Size > 32 && Size < 64)
        Val = MIRBuilder.buildAnyExt(s64, Val).getReg(0);

      auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize);
      auto Addr = MIRBuilder.buildPtrAdd(PtrTy, BaseAddr, C_P2HalfMemSize);

      if (MI.getOpcode() == G_STORE && MemSize <= 4) {
        MIRBuilder.buildStore(Val, BaseAddr, *P2HalfMemOp);
        auto C_P2Half_InBits = MIRBuilder.buildConstant(s32, P2HalfMemSize * 8);
        auto Shift = MIRBuilder.buildLShr(s32, Val, C_P2Half_InBits);
        MIRBuilder.buildStore(Shift, Addr, *RemMemOp);
      } else {
        auto Unmerge = MIRBuilder.buildUnmerge(s32, Val);
        MIRBuilder.buildStore(Unmerge.getReg(0), BaseAddr, *P2HalfMemOp);
        MIRBuilder.buildStore(Unmerge.getReg(1), Addr, *RemMemOp);
      }
    }
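
    // Loads mirror the stores: MemSize <= 4 uses one anyextending 4-byte
    // load, larger sizes load the two halves and merge them.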
    if (MI.getOpcode() == G_LOAD) {
      if (MemSize <= 4) {
        // This is an anyextending load; use a single 4 byte load.
        auto *Load4MMO = MF.getMachineMemOperand(MMOBase, 0, 4);

        if (Size == 32)
          MIRBuilder.buildLoad(Val, BaseAddr, *Load4MMO);
        else {
          auto Load = MIRBuilder.buildLoad(s32, BaseAddr, *Load4MMO);
          MIRBuilder.buildTrunc(Val, Load.getReg(0));
        }
      } else {
        auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize);
        auto Addr = MIRBuilder.buildPtrAdd(PtrTy, BaseAddr, C_P2HalfMemSize);

        auto Load_P2Half = MIRBuilder.buildLoad(s32, BaseAddr, *P2HalfMemOp);
        auto Load_Rem = MIRBuilder.buildLoad(s32, Addr, *RemMemOp);

        if (Size == 64)
          MIRBuilder.buildMerge(Val, {Load_P2Half, Load_Rem});
        else {
          auto Merge = MIRBuilder.buildMerge(s64, {Load_P2Half, Load_Rem});
          MIRBuilder.buildTrunc(Val, Merge);
        }
      }
    }
    MI.eraseFromParent();
    break;
  }
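
  // G_UITOFP is custom-lowered with the classic double trick: pack the u32
  // under exponent bits 0x43300000 and subtract 2^52.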
  case G_UITOFP: {
    Register Dst = MI.getOperand(0).getReg();
    Register Src = MI.getOperand(1).getReg();
    LLT DstTy = MRI.getType(Dst);
    LLT SrcTy = MRI.getType(Src);

    if (SrcTy != s32)
      return false;
    if (DstTy != s32 && DstTy != s64)
      return false;

    // Build 0x43300000'XXXXXXXX, i.e. 2^52 * 1.00000'XXXXXXXX as a double,
    // then subtract 2^52 to recover the integer value.
    auto C_HiMask = MIRBuilder.buildConstant(s32, UINT32_C(0x43300000));
    auto Bitcast = MIRBuilder.buildMerge(s64, {Src, C_HiMask.getReg(0)});

    auto TwoP52FP = MIRBuilder.buildFConstant(
        s64, BitsToDouble(UINT64_C(0x4330000000000000)));

    if (DstTy == s64)
      MIRBuilder.buildFSub(Dst, Bitcast, TwoP52FP);
    else {
      auto ResF64 = MIRBuilder.buildFSub(s64, Bitcast, TwoP52FP);
      MIRBuilder.buildFPTrunc(Dst, ResF64);
    }

    MI.eraseFromParent();
    break;
  }
  default:
    return false;
  }

  return true;
}
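
// MSA intrinsic helpers: the Select* form emits the target instruction
// directly and constrains its registers; the *ToGeneric forms rewrite the
// intrinsic into a generic opcode for later instruction selection.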
static bool SelectMSA3OpIntrinsic(MachineInstr &MI, unsigned Opcode,
                                  MachineIRBuilder &MIRBuilder,
                                  const MipsSubtarget &ST) {
  assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
  if (!MIRBuilder.buildInstr(Opcode)
           .add(MI.getOperand(0))
           .add(MI.getOperand(2))
           .add(MI.getOperand(3))
           .constrainAllUses(MIRBuilder.getTII(), *ST.getRegisterInfo(),
                             *ST.getRegBankInfo()))
    return false;
  MI.eraseFromParent();
  return true;
}

static bool MSA3OpIntrinsicToGeneric(MachineInstr &MI, unsigned Opcode,
                                     MachineIRBuilder &MIRBuilder,
                                     const MipsSubtarget &ST) {
  assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
  MIRBuilder.buildInstr(Opcode)
      .add(MI.getOperand(0))
      .add(MI.getOperand(2))
      .add(MI.getOperand(3));
  MI.eraseFromParent();
  return true;
}

static bool MSA2OpIntrinsicToGeneric(MachineInstr &MI, unsigned Opcode,
                                     MachineIRBuilder &MIRBuilder,
                                     const MipsSubtarget &ST) {
  assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
  MIRBuilder.buildInstr(Opcode)
      .add(MI.getOperand(0))
      .add(MI.getOperand(2));
  MI.eraseFromParent();
  return true;
}
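
// legalizeIntrinsic dispatches Mips intrinsics to the helpers above.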
bool MipsLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                          MachineInstr &MI) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  const MipsSubtarget &ST =
      static_cast<const MipsSubtarget &>(MI.getMF()->getSubtarget());
  const MipsInstrInfo &TII = *ST.getInstrInfo();
  const MipsRegisterInfo &TRI = *ST.getRegisterInfo();
  const RegisterBankInfo &RBI = *ST.getRegBankInfo();

  switch (MI.getIntrinsicID()) {
  case Intrinsic::trap: {
    MachineInstr *Trap = MIRBuilder.buildInstr(Mips::TRAP);
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Trap, TII, TRI, RBI);
  }
  case Intrinsic::vacopy: {
    MachinePointerInfo MPO;
    LLT PtrTy = LLT::pointer(0, 32);
    auto Tmp =
        MIRBuilder.buildLoad(PtrTy, MI.getOperand(2).getReg(),
                             *MI.getMF()->getMachineMemOperand(
                                 MPO, MachineMemOperand::MOLoad, PtrTy,
                                 Align(4)));
    MIRBuilder.buildStore(Tmp, MI.getOperand(1).getReg(),
                          *MI.getMF()->getMachineMemOperand(
                              MPO, MachineMemOperand::MOStore, PtrTy,
                              Align(4)));
    MI.eraseFromParent();
    return true;
  }
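
  // Each MSA arithmetic family maps to one generic opcode (addv -> G_ADD,
  // subv -> G_SUB, ...); the immediate forms (addvi, subvi) and fmax_a
  // select Mips instructions directly.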
  case Intrinsic::mips_addv_b:
  case Intrinsic::mips_addv_h:
  case Intrinsic::mips_addv_w:
  case Intrinsic::mips_addv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_ADD, MIRBuilder, ST);
  case Intrinsic::mips_addvi_b:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_B, MIRBuilder, ST);
  case Intrinsic::mips_addvi_h:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_H, MIRBuilder, ST);
  case Intrinsic::mips_addvi_w:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_W, MIRBuilder, ST);
  case Intrinsic::mips_addvi_d:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_D, MIRBuilder, ST);
  case Intrinsic::mips_subv_b:
  case Intrinsic::mips_subv_h:
  case Intrinsic::mips_subv_w:
  case Intrinsic::mips_subv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SUB, MIRBuilder, ST);
  case Intrinsic::mips_subvi_b:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_B, MIRBuilder, ST);
  case Intrinsic::mips_subvi_h:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_H, MIRBuilder, ST);
  case Intrinsic::mips_subvi_w:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_W, MIRBuilder, ST);
  case Intrinsic::mips_subvi_d:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_D, MIRBuilder, ST);
  case Intrinsic::mips_mulv_b:
  case Intrinsic::mips_mulv_h:
  case Intrinsic::mips_mulv_w:
  case Intrinsic::mips_mulv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_MUL, MIRBuilder, ST);
  case Intrinsic::mips_div_s_b:
  case Intrinsic::mips_div_s_h:
  case Intrinsic::mips_div_s_w:
  case Intrinsic::mips_div_s_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SDIV, MIRBuilder, ST);
  case Intrinsic::mips_mod_s_b:
  case Intrinsic::mips_mod_s_h:
  case Intrinsic::mips_mod_s_w:
  case Intrinsic::mips_mod_s_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SREM, MIRBuilder, ST);
  case Intrinsic::mips_div_u_b:
  case Intrinsic::mips_div_u_h:
  case Intrinsic::mips_div_u_w:
  case Intrinsic::mips_div_u_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_UDIV, MIRBuilder, ST);
  case Intrinsic::mips_mod_u_b:
  case Intrinsic::mips_mod_u_h:
  case Intrinsic::mips_mod_u_w:
  case Intrinsic::mips_mod_u_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_UREM, MIRBuilder, ST);
  case Intrinsic::mips_fadd_w:
  case Intrinsic::mips_fadd_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FADD, MIRBuilder, ST);
  case Intrinsic::mips_fsub_w:
  case Intrinsic::mips_fsub_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FSUB, MIRBuilder, ST);
  case Intrinsic::mips_fmul_w:
  case Intrinsic::mips_fmul_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FMUL, MIRBuilder, ST);
  case Intrinsic::mips_fdiv_w:
  case Intrinsic::mips_fdiv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FDIV, MIRBuilder, ST);
  case Intrinsic::mips_fmax_a_w:
    return SelectMSA3OpIntrinsic(MI, Mips::FMAX_A_W, MIRBuilder, ST);
  case Intrinsic::mips_fmax_a_d:
    return SelectMSA3OpIntrinsic(MI, Mips::FMAX_A_D, MIRBuilder, ST);
  case Intrinsic::mips_fsqrt_w:
    return MSA2OpIntrinsicToGeneric(MI, TargetOpcode::G_FSQRT, MIRBuilder, ST);
  case Intrinsic::mips_fsqrt_d:
    return MSA2OpIntrinsicToGeneric(MI, TargetOpcode::G_FSQRT, MIRBuilder, ST);