LCOV - code coverage report
Current view: top level - lib/Target/AArch64 - AArch64LegalizerInfo.cpp (source / functions)
Test: llvm-toolchain.info
Date: 2018-07-13 00:08:38

              Hit    Total    Coverage
  Lines:      212    228      93.0 %
  Functions:   12     16      75.0 %

          Line data    Source code
       1             : //===- AArch64LegalizerInfo.cpp ----------------------------------*- C++ -*-==//
       2             : //
       3             : //                     The LLVM Compiler Infrastructure
       4             : //
       5             : // This file is distributed under the University of Illinois Open Source
       6             : // License. See LICENSE.TXT for details.
       7             : //
       8             : //===----------------------------------------------------------------------===//
       9             : /// \file
      10             : /// This file implements the targeting of the MachineLegalizer class for
      11             : /// AArch64.
      12             : /// \todo This should be generated by TableGen.
      13             : //===----------------------------------------------------------------------===//
      14             : 
      15             : #include "AArch64LegalizerInfo.h"
      16             : #include "AArch64Subtarget.h"
      17             : #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
      18             : #include "llvm/CodeGen/MachineInstr.h"
      19             : #include "llvm/CodeGen/MachineRegisterInfo.h"
      20             : #include "llvm/CodeGen/TargetOpcodes.h"
      21             : #include "llvm/CodeGen/ValueTypes.h"
      22             : #include "llvm/IR/DerivedTypes.h"
      23             : #include "llvm/IR/Type.h"
      24             : 
      25             : using namespace llvm;
      26             : using namespace LegalizeActions;
      27             : using namespace LegalityPredicates;
      28             : 
      29        1438 : AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) {
      30             :   using namespace TargetOpcode;
      31             :   const LLT p0 = LLT::pointer(0, 64);
      32        1438 :   const LLT s1 = LLT::scalar(1);
      33        1438 :   const LLT s8 = LLT::scalar(8);
      34        1438 :   const LLT s16 = LLT::scalar(16);
      35        1438 :   const LLT s32 = LLT::scalar(32);
      36        1438 :   const LLT s64 = LLT::scalar(64);
      37             :   const LLT s128 = LLT::scalar(128);
      38        1438 :   const LLT s256 = LLT::scalar(256);
      39        1438 :   const LLT s512 = LLT::scalar(512);
      40             :   const LLT v16s8 = LLT::vector(16, 8);
      41             :   const LLT v8s8 = LLT::vector(8, 8);
      42             :   const LLT v4s8 = LLT::vector(4, 8);
      43             :   const LLT v8s16 = LLT::vector(8, 16);
      44             :   const LLT v4s16 = LLT::vector(4, 16);
      45             :   const LLT v2s16 = LLT::vector(2, 16);
      46        1438 :   const LLT v2s32 = LLT::vector(2, 32);
      47        1438 :   const LLT v4s32 = LLT::vector(4, 32);
      48        1438 :   const LLT v2s64 = LLT::vector(2, 64);
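                     :   // Shorthand above: p0 is a pointer in address space 0 (64 bits wide on
                     :   // AArch64), sN is an N-bit scalar, and vNsM is a vector of N elements of
                     :   // M bits each (e.g. v2s32 is <2 x s32>).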
      49             : 
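                     :   // The rules below are built with LegalizeRuleSet combinators: legalFor()
                     :   // lists types that are legal as-is, clampScalar(N, Min, Max) clamps the
                     :   // scalar at type index N into [Min, Max], and widenScalarToNextPow2(N,
                     :   // MinBits) rounds a non-power-of-two scalar up to the next power of two
                     :   // of at least MinBits.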
      50        1438 :   getActionDefinitionsBuilder(G_IMPLICIT_DEF)
      51             :       .legalFor({p0, s1, s8, s16, s32, s64})
      52        2876 :       .clampScalar(0, s1, s64)
      53        1438 :       .widenScalarToNextPow2(0, 8);
      54             : 
      55        1438 :   getActionDefinitionsBuilder(G_PHI)
      56             :       .legalFor({p0, s16, s32, s64})
      57        2876 :       .clampScalar(0, s16, s64)
      58        1438 :       .widenScalarToNextPow2(0);
      59             : 
      60        1438 :   getActionDefinitionsBuilder(G_BSWAP)
      61             :       .legalFor({s32, s64})
      62        2876 :       .clampScalar(0, s16, s64)
      63        1438 :       .widenScalarToNextPow2(0);
      64             : 
      65        1438 :   getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR, G_SHL})
      66        1438 :       .legalFor({s32, s64, v2s32, v4s32, v2s64})
      67        2876 :       .clampScalar(0, s32, s64)
      68        1438 :       .widenScalarToNextPow2(0)
      69        1438 :       .clampNumElements(0, v2s32, v4s32)
      70        1438 :       .clampNumElements(0, v2s64, v2s64)
      71        1438 :       .moreElementsToNextPow2(0);
      72             : 
      73        1438 :   getActionDefinitionsBuilder(G_GEP)
      74             :       .legalFor({{p0, s64}})
      75        1438 :       .clampScalar(1, s64, s64);
      76             : 
      77        2876 :   getActionDefinitionsBuilder(G_PTR_MASK).legalFor({p0});
      78             : 
      79        1438 :   getActionDefinitionsBuilder({G_LSHR, G_ASHR, G_SDIV, G_UDIV})
      80        1438 :       .legalFor({s32, s64})
      81        2876 :       .clampScalar(0, s32, s64)
      82        1438 :       .widenScalarToNextPow2(0);
      83             : 
      84        1438 :   getActionDefinitionsBuilder({G_SREM, G_UREM})
      85        2876 :       .lowerFor({s1, s8, s16, s32, s64});
      86             : 
      87        1438 :   getActionDefinitionsBuilder({G_SMULO, G_UMULO})
      88        2876 :       .lowerFor({{s64, s1}});
      89             : 
      90        2876 :   getActionDefinitionsBuilder({G_SMULH, G_UMULH}).legalFor({s32, s64});
      91             : 
      92        1438 :   getActionDefinitionsBuilder({G_UADDE, G_USUBE, G_SADDO, G_SSUBO})
      93        2876 :       .legalFor({{s32, s1}, {s64, s1}});
      94             : 
      95        1438 :   getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMA, G_FMUL, G_FDIV})
      96        4314 :       .legalFor({s32, s64});
      97             : 
      98        2876 :   getActionDefinitionsBuilder({G_FREM, G_FPOW}).libcallFor({s32, s64});
      99             : 
     100        1438 :   getActionDefinitionsBuilder(G_INSERT)
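                     :       // Type index 0 is the (larger) value being inserted into; type index 1
                     :       // is the value being inserted.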
     101          13 :       .unsupportedIf([=](const LegalityQuery &Query) {
     102          26 :         return Query.Types[0].getSizeInBits() <= Query.Types[1].getSizeInBits();
     103        2889 :       })
     104          13 :       .legalIf([=](const LegalityQuery &Query) {
     105          13 :         const LLT &Ty0 = Query.Types[0];
     106             :         const LLT &Ty1 = Query.Types[1];
     107             :         if (Ty0 != s32 && Ty0 != s64 && Ty0 != p0)
     108             :           return false;
     109          10 :         return isPowerOf2_32(Ty1.getSizeInBits()) &&
     110           4 :                (Ty1.getSizeInBits() == 1 || Ty1.getSizeInBits() >= 8);
     111        4314 :       })
     112             :       .clampScalar(0, s32, s64)
     113        1438 :       .widenScalarToNextPow2(0)
     114        2876 :       .maxScalarIf(typeInSet(0, {s32}), 1, s16)
     115        2876 :       .maxScalarIf(typeInSet(0, {s64}), 1, s32)
     116        1438 :       .widenScalarToNextPow2(1);
     117             : 
     118        1438 :   getActionDefinitionsBuilder(G_EXTRACT)
     119          13 :       .unsupportedIf([=](const LegalityQuery &Query) {
     120          26 :         return Query.Types[0].getSizeInBits() >= Query.Types[1].getSizeInBits();
     121        2889 :       })
     122          13 :       .legalIf([=](const LegalityQuery &Query) {
     123          13 :         const LLT &Ty0 = Query.Types[0];
     124             :         const LLT &Ty1 = Query.Types[1];
     125             :         if (Ty1 != s32 && Ty1 != s64)
     126             :           return false;
     127             :         if (Ty1 == p0)
     128             :           return true;
     129          14 :         return isPowerOf2_32(Ty0.getSizeInBits()) &&
     130           6 :                (Ty0.getSizeInBits() == 1 || Ty0.getSizeInBits() >= 8);
     131        4314 :       })
     132             :       .clampScalar(1, s32, s64)
     133        1438 :       .widenScalarToNextPow2(1)
     134        2876 :       .maxScalarIf(typeInSet(1, {s32}), 0, s16)
     135        2876 :       .maxScalarIf(typeInSet(1, {s64}), 0, s32)
     136        1438 :       .widenScalarToNextPow2(0);
     137             : 
     138        1438 :   getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
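                     :       // Each triple below is {loaded value type, pointer type, memory size
                     :       // in bits}.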
     139             :       .legalForTypesWithMemSize({{s32, p0, 8},
     140             :                                  {s32, p0, 16},
     141             :                                  {s32, p0, 32},
     142             :                                  {s64, p0, 64},
     143             :                                  {p0, p0, 64},
     144        2876 :                                  {v2s32, p0, 64}})
     145        2876 :       .clampScalar(0, s32, s64)
     146        1438 :       .widenScalarToNextPow2(0)
     147             :       // TODO: We could support sum-of-pow2's but the lowering code doesn't know
     148             :       //       how to do that yet.
     149        1438 :       .unsupportedIfMemSizeNotPow2()
     150             :       // Lower anything left over into G_*EXT and G_LOAD
     151        1438 :       .lower();
     152             : 
     153        1438 :   getActionDefinitionsBuilder(G_LOAD)
     154             :       .legalForTypesWithMemSize({{s8, p0, 8},
     155             :                                  {s16, p0, 16},
     156             :                                  {s32, p0, 32},
     157             :                                  {s64, p0, 64},
     158             :                                  {p0, p0, 64},
     159        1438 :                                  {v2s32, p0, 64}})
     160             :       // These extends are also legal
     161             :       .legalForTypesWithMemSize({{s32, p0, 8},
     162        4314 :                                  {s32, p0, 16}})
     163        2876 :       .clampScalar(0, s8, s64)
     164        1438 :       .widenScalarToNextPow2(0)
     165             :       // TODO: We could support sum-of-pow2's but the lowering code doesn't know
     166             :       //       how to do that yet.
     167        1438 :       .unsupportedIfMemSizeNotPow2()
     168             :       // Lower any remaining any-extending loads into G_ANYEXT and G_LOAD
     169             :       .lowerIf([=](const LegalityQuery &Query) {
     170           1 :         return Query.Types[0].getSizeInBits() != Query.MMODescrs[0].Size * 8;
     171        2877 :       })
     172        1438 :       .clampNumElements(0, v2s32, v2s32);
     173             : 
     174        1438 :   getActionDefinitionsBuilder(G_STORE)
     175             :       .legalForTypesWithMemSize({{s8, p0, 8},
     176             :                                  {s16, p0, 16},
     177             :                                  {s32, p0, 32},
     178             :                                  {s64, p0, 64},
     179             :                                  {p0, p0, 64},
     180        1438 :                                  {v2s32, p0, 64}})
     181        2876 :       .clampScalar(0, s8, s64)
     182        1438 :       .widenScalarToNextPow2(0)
     183             :       // TODO: We could support sum-of-pow2's but the lowering code doesn't know
     184             :       //       how to do that yet.
     185        1438 :       .unsupportedIfMemSizeNotPow2()
     186           2 :       .lowerIf([=](const LegalityQuery &Query) {
     187           2 :         return Query.Types[0].isScalar() &&
     188           0 :                Query.Types[0].getSizeInBits() != Query.MMODescrs[0].Size * 8;
     189        2878 :       })
     190        1438 :       .clampNumElements(0, v2s32, v2s32);
     191             : 
     192             :   // Constants
     193        1438 :   getActionDefinitionsBuilder(G_CONSTANT)
     194             :       .legalFor({p0, s32, s64})
     195        2876 :       .clampScalar(0, s32, s64)
     196        1438 :       .widenScalarToNextPow2(0);
     197        1438 :   getActionDefinitionsBuilder(G_FCONSTANT)
     198             :       .legalFor({s32, s64})
     199        2876 :       .clampScalar(0, s32, s64);
     200             : 
     201        1438 :   getActionDefinitionsBuilder(G_ICMP)
     202             :       .legalFor({{s32, s32}, {s32, s64}, {s32, p0}})
     203        1438 :       .clampScalar(0, s32, s32)
     204             :       .clampScalar(1, s32, s64)
     205        1438 :       .widenScalarToNextPow2(1);
     206             : 
     207        1438 :   getActionDefinitionsBuilder(G_FCMP)
     208             :       .legalFor({{s32, s32}, {s32, s64}})
     209        1438 :       .clampScalar(0, s32, s32)
     210             :       .clampScalar(1, s32, s64)
     211        1438 :       .widenScalarToNextPow2(1);
     212             : 
     213             :   // Extensions
     214        1438 :   getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
     215        4314 :       .legalForCartesianProduct({s8, s16, s32, s64}, {s1, s8, s16, s32});
     216             : 
     217             :   // FP conversions
     218        2876 :   getActionDefinitionsBuilder(G_FPTRUNC).legalFor(
     219             :       {{s16, s32}, {s16, s64}, {s32, s64}});
     220        2876 :   getActionDefinitionsBuilder(G_FPEXT).legalFor(
     221             :       {{s32, s16}, {s64, s16}, {s64, s32}});
     222             : 
     223             :   // Conversions
     224        1438 :   getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
     225        1438 :       .legalForCartesianProduct({s32, s64})
     226        2876 :       .clampScalar(0, s32, s64)
     227        1438 :       .widenScalarToNextPow2(0)
     228             :       .clampScalar(1, s32, s64)
     229        1438 :       .widenScalarToNextPow2(1);
     230             : 
     231        1438 :   getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
     232        1438 :       .legalForCartesianProduct({s32, s64})
     233        2876 :       .clampScalar(1, s32, s64)
     234        1438 :       .widenScalarToNextPow2(1)
     235             :       .clampScalar(0, s32, s64)
     236        1438 :       .widenScalarToNextPow2(0);
     237             : 
     238             :   // Control-flow
     239        2876 :   getActionDefinitionsBuilder(G_BRCOND).legalFor({s1, s8, s16, s32});
     240        2876 :   getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});
     241             : 
     242             :   // Select
     243        1438 :   getActionDefinitionsBuilder(G_SELECT)
     244             :       .legalFor({{s32, s1}, {s64, s1}, {p0, s1}})
     245        1438 :       .clampScalar(0, s32, s64)
     246        1438 :       .widenScalarToNextPow2(0);
     247             : 
     248             :   // Pointer-handling
     249        2876 :   getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});
     250        2876 :   getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0});
     251             : 
     252        1438 :   getActionDefinitionsBuilder(G_PTRTOINT)
     253             :       .legalForCartesianProduct({s1, s8, s16, s32, s64}, {p0})
     254        4314 :       .maxScalar(0, s64)
     255        1438 :       .widenScalarToNextPow2(0, /*Min*/ 8);
     256             : 
     257        1438 :   getActionDefinitionsBuilder(G_INTTOPTR)
     258           2 :       .unsupportedIf([&](const LegalityQuery &Query) {
     259           4 :         return Query.Types[0].getSizeInBits() != Query.Types[1].getSizeInBits();
     260        2878 :       })
     261        1438 :       .legalFor({{p0, s64}});
     262             : 
     263             :   // Casts between 32- and 64-bit-wide types are just copies.
     264             :   // The same holds for 128-bit-wide types, except they live on the FPR bank.
     265        1438 :   getActionDefinitionsBuilder(G_BITCAST)
     266             :       // FIXME: This is wrong since G_BITCAST is not allowed to change the
     267             :       // number of bits but it's what the previous code described and fixing
     268             :       // it breaks tests.
     269        2876 :       .legalForCartesianProduct({s1, s8, s16, s32, s64, s128, v16s8, v8s8, v4s8,
     270             :                                  v8s16, v4s16, v2s16, v4s32, v2s32, v2s64});
     271             : 
     272        2876 :   getActionDefinitionsBuilder(G_VASTART).legalFor({p0});
     273             : 
     274             :   // va_list must be a pointer, but most sized types are pretty easy to handle
     275             :   // as the destination.
     276        1438 :   getActionDefinitionsBuilder(G_VAARG)
     277             :       .customForCartesianProduct({s8, s16, s32, s64, p0}, {p0})
     278        2876 :       .clampScalar(0, s8, s64)
     279        1438 :       .widenScalarToNextPow2(0, /*Min*/ 8);
     280             : 
     281        1438 :   if (ST.hasLSE()) {
     282          31 :     getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG_WITH_SUCCESS)
     283          62 :         .lowerIf(all(
     284         124 :             typeInSet(0, {s8, s16, s32, s64}), typeIs(1, s1), typeIs(2, p0),
     285          62 :             atomicOrderingAtLeastOrStrongerThan(0, AtomicOrdering::Monotonic)));
     286             : 
     287             :     getActionDefinitionsBuilder(
     288             :         {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB, G_ATOMICRMW_AND,
     289             :          G_ATOMICRMW_OR, G_ATOMICRMW_XOR, G_ATOMICRMW_MIN, G_ATOMICRMW_MAX,
     290          31 :          G_ATOMICRMW_UMIN, G_ATOMICRMW_UMAX, G_ATOMIC_CMPXCHG})
     291          62 :         .legalIf(all(
     292          93 :             typeInSet(0, {s8, s16, s32, s64}), typeIs(1, p0),
     293          93 :             atomicOrderingAtLeastOrStrongerThan(0, AtomicOrdering::Monotonic)));
     294             :   }
     295             : 
     296             :   // Merge/Unmerge
     297        7190 :   for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
     298        2876 :     unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
     299        2876 :     unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
     300             : 
     301          22 :     auto notValidElt = [](const LegalityQuery &Query, unsigned TypeIdx) {
     302          22 :       const LLT &Ty = Query.Types[TypeIdx];
     303             :       if (Ty.isVector()) {
     304           1 :         const LLT &EltTy = Ty.getElementType();
     305           1 :         if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
     306             :           return true;
     307             :         if (!isPowerOf2_32(EltTy.getSizeInBits()))
     308             :           return true;
     309             :       }
     310             :       return false;
     311             :     };
     312             :     auto scalarize =
     313           0 :         [](const LegalityQuery &Query, unsigned TypeIdx) {
     314           0 :           const LLT &Ty = Query.Types[TypeIdx];
     315           0 :           return std::make_pair(TypeIdx, Ty.getElementType());
     316             :         };
     317             : 
     318             :     // FIXME: This rule is horrible, but specifies the same as what we had
     319             :     // before with the particularly strange definitions removed (e.g.
     320             :     // s8 = G_MERGE_VALUES s32, s32).
     321             :     // Part of the complexity comes from these ops being extremely flexible. For
     322             :     // example, you can build/decompose vectors with them, concatenate vectors,
     323             :     // etc., and on top of that you can also bitcast at the same time. We've
     324             :     // been considering breaking them up into multiple ops to make them more
     325             :     // manageable throughout the backend.
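                     :     // Illustrative MIR forms these opcodes cover (examples, not taken from
                     :     // this file):
                     :     //   %d:_(s64) = G_MERGE_VALUES %a:_(s32), %b:_(s32)
                     :     //   %w:_(<4 x s16>) = G_MERGE_VALUES %u:_(<2 x s16>), %v:_(<2 x s16>)
                     :     //   %a:_(s32), %b:_(s32) = G_UNMERGE_VALUES %d:_(s64)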
     326        2876 :     getActionDefinitionsBuilder(Op)
     327             :         // Break up vectors with weird elements into scalars
     328             :         .fewerElementsIf(
     329          11 :             [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
     330        8628 :             [=](const LegalityQuery &Query) { return scalarize(Query, 0); })
     331             :         .fewerElementsIf(
     332          11 :             [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
     333        8628 :             [=](const LegalityQuery &Query) { return scalarize(Query, 1); })
     334             :         // Clamp the big scalar to s8-s512 and make it either a power of 2, 192,
     335             :         // or 384.
     336             :         .clampScalar(BigTyIdx, s8, s512)
     337             :         .widenScalarIf(
     338          11 :             [=](const LegalityQuery &Query) {
     339          11 :               const LLT &Ty = Query.Types[BigTyIdx];
     340          12 :               return !isPowerOf2_32(Ty.getSizeInBits()) &&
     341          12 :                      Ty.getSizeInBits() % 64 != 0;
     342             :             },
     343           0 :             [=](const LegalityQuery &Query) {
     344             :               // Pick the next power of 2, or a multiple of 64 over 128.
     345             :               // Whichever is smaller.
     346           0 :               const LLT &Ty = Query.Types[BigTyIdx];
     347             :               unsigned NewSizeInBits = 1
     348           0 :                                        << Log2_32_Ceil(Ty.getSizeInBits() + 1);
     349           0 :               if (NewSizeInBits >= 256) {
     350           0 :                 unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
     351           0 :                 if (RoundedTo < NewSizeInBits)
     352             :                   NewSizeInBits = RoundedTo;
     353             :               }
     354           0 :               return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
     355        8628 :             })
     356             :         // Clamp the little scalar to s8-s256 and make it a power of 2. It's not
     357             :         // worth considering the multiples of 64 since 2*192 and 2*384 are not
     358             :         // valid.
     359             :         .clampScalar(LitTyIdx, s8, s256)
     360        2876 :         .widenScalarToNextPow2(LitTyIdx, /*Min*/ 8)
     361             :         // So at this point, we have s8, s16, s32, s64, s128, s192, s256, s384,
     362             :         // s512, <X x s8>, <X x s16>, <X x s32>, or <X x s64>.
     363             :         // At this point it's simple enough to accept the legal types.
     364           9 :         .legalIf([=](const LegalityQuery &Query) {
     365           9 :           const LLT &BigTy = Query.Types[BigTyIdx];
     366           9 :           const LLT &LitTy = Query.Types[LitTyIdx];
     367           1 :           if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
     368             :             return false;
     369           0 :           if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
     370             :             return false;
     371           9 :           return BigTy.getSizeInBits() % LitTy.getSizeInBits() == 0;
     372        5752 :         })
     373             :         // Any vectors left are the wrong size. Scalarize them.
     374             :         .fewerElementsIf([](const LegalityQuery &Query) { return true; },
     375           0 :                          [](const LegalityQuery &Query) {
     376             :                            return std::make_pair(
     377           0 :                                0, Query.Types[0].getElementType());
     378        8628 :                          })
     379        2876 :         .fewerElementsIf([](const LegalityQuery &Query) { return true; },
     380           0 :                          [](const LegalityQuery &Query) {
     381             :                            return std::make_pair(
     382           0 :                                1, Query.Types[1].getElementType());
     383        5752 :                          });
     384             :   }
     385             : 
     386        1438 :   computeTables();
     387        1438 :   verify(*ST.getInstrInfo());
     388        1438 : }
     389             : 
     390           3 : bool AArch64LegalizerInfo::legalizeCustom(MachineInstr &MI,
     391             :                                           MachineRegisterInfo &MRI,
     392             :                                           MachineIRBuilder &MIRBuilder) const {
     393           6 :   switch (MI.getOpcode()) {
     394             :   default:
     395             :     // No idea what to do.
     396             :     return false;
     397           3 :   case TargetOpcode::G_VAARG:
     398           3 :     return legalizeVaArg(MI, MRI, MIRBuilder);
     399             :   }
     400             : 
     401             :   llvm_unreachable("expected switch to return");
     402             : }
     403             : 
     404           3 : bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
     405             :                                          MachineRegisterInfo &MRI,
     406             :                                          MachineIRBuilder &MIRBuilder) const {
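                     :   // Hand-lower G_VAARG: load the current va_list pointer, realign it if the
                     :   // requested alignment exceeds the pointer size, load the value from that
                     :   // address, then advance the pointer by the pointer-size-aligned value size
                     :   // and store it back to the va_list.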
     407           3 :   MIRBuilder.setInstr(MI);
     408           3 :   MachineFunction &MF = MIRBuilder.getMF();
     409           3 :   unsigned Align = MI.getOperand(2).getImm();
     410           3 :   unsigned Dst = MI.getOperand(0).getReg();
     411           3 :   unsigned ListPtr = MI.getOperand(1).getReg();
     412             : 
     413           3 :   LLT PtrTy = MRI.getType(ListPtr);
     414           6 :   LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits());
     415             : 
     416           3 :   const unsigned PtrSize = PtrTy.getSizeInBits() / 8;
     417           3 :   unsigned List = MRI.createGenericVirtualRegister(PtrTy);
     418           3 :   MIRBuilder.buildLoad(
     419             :       List, ListPtr,
     420           9 :       *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
     421             :                                PtrSize, /* Align = */ PtrSize));
     422             : 
     423             :   unsigned DstPtr;
     424           3 :   if (Align > PtrSize) {
     425             :     // Realign the list to the actual required alignment.
     426           1 :     auto AlignMinus1 = MIRBuilder.buildConstant(IntPtrTy, Align - 1);
     427             : 
     428           1 :     unsigned ListTmp = MRI.createGenericVirtualRegister(PtrTy);
     429           1 :     MIRBuilder.buildGEP(ListTmp, List, AlignMinus1->getOperand(0).getReg());
     430             : 
     431           1 :     DstPtr = MRI.createGenericVirtualRegister(PtrTy);
     432           2 :     MIRBuilder.buildPtrMask(DstPtr, ListTmp, Log2_64(Align));
     433             :   } else
     434             :     DstPtr = List;
     435             : 
     436           3 :   uint64_t ValSize = MRI.getType(Dst).getSizeInBits() / 8;
     437           3 :   MIRBuilder.buildLoad(
     438             :       Dst, DstPtr,
     439           9 :       *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
     440             :                                ValSize, std::max(Align, PtrSize)));
     441             : 
     442           3 :   unsigned SizeReg = MRI.createGenericVirtualRegister(IntPtrTy);
     443           3 :   MIRBuilder.buildConstant(SizeReg, alignTo(ValSize, PtrSize));
     444             : 
     445           3 :   unsigned NewList = MRI.createGenericVirtualRegister(PtrTy);
     446           3 :   MIRBuilder.buildGEP(NewList, DstPtr, SizeReg);
     447             : 
     448           3 :   MIRBuilder.buildStore(
     449             :       NewList, ListPtr,
     450           6 :       *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOStore,
     451             :                                PtrSize, /* Align = */ PtrSize));
     452             : 
     453           3 :   MI.eraseFromParent();
     454           3 :   return true;
     455             : }

Generated by: LCOV version 1.13