LLVM  15.0.0git
AArch64LegalizerInfo.cpp
Go to the documentation of this file.
1 //===- AArch64LegalizerInfo.cpp ----------------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the MachineLegalizer class for
10 /// AArch64.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AArch64LegalizerInfo.h"
16 #include "AArch64Subtarget.h"
26 #include "llvm/IR/DerivedTypes.h"
27 #include "llvm/IR/Intrinsics.h"
28 #include "llvm/IR/IntrinsicsAArch64.h"
29 #include "llvm/IR/Type.h"
31 #include <initializer_list>
32 
33 #define DEBUG_TYPE "aarch64-legalinfo"
34 
35 using namespace llvm;
36 using namespace LegalizeActions;
37 using namespace LegalizeMutations;
38 using namespace LegalityPredicates;
39 using namespace MIPatternMatch;
40 
42  : ST(&ST) {
43  using namespace TargetOpcode;
44  const LLT p0 = LLT::pointer(0, 64);
45  const LLT s1 = LLT::scalar(1);
46  const LLT s8 = LLT::scalar(8);
47  const LLT s16 = LLT::scalar(16);
48  const LLT s32 = LLT::scalar(32);
49  const LLT s64 = LLT::scalar(64);
50  const LLT s128 = LLT::scalar(128);
51  const LLT v16s8 = LLT::fixed_vector(16, 8);
52  const LLT v8s8 = LLT::fixed_vector(8, 8);
53  const LLT v4s8 = LLT::fixed_vector(4, 8);
54  const LLT v8s16 = LLT::fixed_vector(8, 16);
55  const LLT v4s16 = LLT::fixed_vector(4, 16);
56  const LLT v2s16 = LLT::fixed_vector(2, 16);
57  const LLT v2s32 = LLT::fixed_vector(2, 32);
58  const LLT v4s32 = LLT::fixed_vector(4, 32);
59  const LLT v2s64 = LLT::fixed_vector(2, 64);
60  const LLT v2p0 = LLT::fixed_vector(2, p0);
61 
62  std::initializer_list<LLT> PackedVectorAllTypeList = {/* Begin 128bit types */
63  v16s8, v8s16, v4s32,
64  v2s64, v2p0,
65  /* End 128bit types */
66  /* Begin 64bit types */
67  v8s8, v4s16, v2s32};
68 
69  const TargetMachine &TM = ST.getTargetLowering()->getTargetMachine();
70 
71  // FIXME: support subtargets which have neon/fp-armv8 disabled.
72  if (!ST.hasNEON() || !ST.hasFPARMv8()) {
74  return;
75  }
76 
77  // Some instructions only support s16 if the subtarget has full 16-bit FP
78  // support.
79  const bool HasFP16 = ST.hasFullFP16();
80  const LLT &MinFPScalar = HasFP16 ? s16 : s32;
81 
82  getActionDefinitionsBuilder({G_IMPLICIT_DEF, G_FREEZE})
83  .legalFor({p0, s1, s8, s16, s32, s64})
84  .legalFor(PackedVectorAllTypeList)
86  .clampScalar(0, s8, s64)
88  [=](const LegalityQuery &Query) {
89  return Query.Types[0].isVector() &&
90  (Query.Types[0].getElementType() != s64 ||
91  Query.Types[0].getNumElements() != 2);
92  },
93  [=](const LegalityQuery &Query) {
94  LLT EltTy = Query.Types[0].getElementType();
95  if (EltTy == s64)
96  return std::make_pair(0, LLT::fixed_vector(2, 64));
97  return std::make_pair(0, EltTy);
98  });
99 
101  .legalFor({p0, s16, s32, s64})
102  .legalFor(PackedVectorAllTypeList)
104  .clampScalar(0, s16, s64)
105  // Maximum: sN * k = 128
106  .clampMaxNumElements(0, s8, 16)
107  .clampMaxNumElements(0, s16, 8)
108  .clampMaxNumElements(0, s32, 4)
109  .clampMaxNumElements(0, s64, 2)
110  .clampMaxNumElements(0, p0, 2);
111 
113  .legalFor({s32, s64, v4s32, v2s32, v2s64})
114  .widenScalarToNextPow2(0)
115  .clampScalar(0, s32, s64);
116 
117  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
118  .legalFor({s32, s64, v2s32, v4s32, v4s16, v8s16, v16s8, v8s8})
119  .scalarizeIf(
120  [=](const LegalityQuery &Query) {
121  return Query.Opcode == G_MUL && Query.Types[0] == v2s64;
122  },
123  0)
124  .legalFor({v2s64})
125  .widenScalarToNextPow2(0)
126  .clampScalar(0, s32, s64)
127  .clampNumElements(0, v2s32, v4s32)
128  .clampNumElements(0, v2s64, v2s64)
130 
131  getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
132  .customIf([=](const LegalityQuery &Query) {
133  const auto &SrcTy = Query.Types[0];
134  const auto &AmtTy = Query.Types[1];
135  return !SrcTy.isVector() && SrcTy.getSizeInBits() == 32 &&
136  AmtTy.getSizeInBits() == 32;
137  })
138  .legalFor({
139  {s32, s32},
140  {s32, s64},
141  {s64, s64},
142  {v8s8, v8s8},
143  {v16s8, v16s8},
144  {v4s16, v4s16},
145  {v8s16, v8s16},
146  {v2s32, v2s32},
147  {v4s32, v4s32},
148  {v2s64, v2s64},
149  })
150  .widenScalarToNextPow2(0)
151  .clampScalar(1, s32, s64)
152  .clampScalar(0, s32, s64)
153  .clampNumElements(0, v2s32, v4s32)
154  .clampNumElements(0, v2s64, v2s64)
156  .minScalarSameAs(1, 0);
157 
158  getActionDefinitionsBuilder(G_PTR_ADD)
159  .legalFor({{p0, s64}, {v2p0, v2s64}})
160  .clampScalar(1, s64, s64);
161 
162  getActionDefinitionsBuilder(G_PTRMASK).legalFor({{p0, s64}});
163 
164  getActionDefinitionsBuilder({G_SDIV, G_UDIV})
165  .legalFor({s32, s64})
166  .libcallFor({s128})
167  .clampScalar(0, s32, s64)
169  .scalarize(0);
170 
171  getActionDefinitionsBuilder({G_SREM, G_UREM, G_SDIVREM, G_UDIVREM})
172  .lowerFor({s8, s16, s32, s64, v2s64, v4s32, v2s32})
174  .clampScalarOrElt(0, s32, s64)
175  .clampNumElements(0, v2s32, v4s32)
176  .clampNumElements(0, v2s64, v2s64)
177  .moreElementsToNextPow2(0);
178 
179 
180  getActionDefinitionsBuilder({G_SMULO, G_UMULO})
181  .widenScalarToNextPow2(0, /*Min = */ 32)
182  .clampScalar(0, s32, s64)
183  .lower();
184 
185  getActionDefinitionsBuilder({G_SMULH, G_UMULH})
186  .legalFor({s64, v8s16, v16s8, v4s32})
187  .lower();
188 
189  getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
190  .legalFor({v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})
191  .clampNumElements(0, v8s8, v16s8)
192  .clampNumElements(0, v4s16, v8s16)
193  .clampNumElements(0, v2s32, v4s32)
194  // FIXME: This shouldn't be needed as v2s64 types are going to
195  // be expanded anyway, but G_ICMP doesn't support splitting vectors yet
196  .clampNumElements(0, v2s64, v2s64)
197  .lower();
198 
200  {G_SADDE, G_SSUBE, G_UADDE, G_USUBE, G_SADDO, G_SSUBO, G_UADDO, G_USUBO})
201  .legalFor({{s32, s1}, {s64, s1}})
202  .clampScalar(0, s32, s64)
204 
205  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FNEG})
206  .legalFor({MinFPScalar, s32, s64, v2s64, v4s32, v2s32})
207  .clampScalar(0, MinFPScalar, s64)
208  .clampNumElements(0, v2s32, v4s32)
209  .clampNumElements(0, v2s64, v2s64);
210 
211  getActionDefinitionsBuilder(G_FREM).libcallFor({s32, s64});
212 
213  getActionDefinitionsBuilder({G_FCEIL, G_FABS, G_FSQRT, G_FFLOOR, G_FRINT,
214  G_FMA, G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND,
215  G_FNEARBYINT, G_INTRINSIC_LRINT})
216  // If we don't have full FP16 support, then scalarize the elements of
217  // vectors containing fp16 types.
218  .fewerElementsIf(
219  [=, &ST](const LegalityQuery &Query) {
220  const auto &Ty = Query.Types[0];
221  return Ty.isVector() && Ty.getElementType() == s16 &&
222  !ST.hasFullFP16();
223  },
224  [=](const LegalityQuery &Query) { return std::make_pair(0, s16); })
225  // If we don't have full FP16 support, then widen s16 to s32 if we
226  // encounter it.
227  .widenScalarIf(
228  [=, &ST](const LegalityQuery &Query) {
229  return Query.Types[0] == s16 && !ST.hasFullFP16();
230  },
231  [=](const LegalityQuery &Query) { return std::make_pair(0, s32); })
232  .legalFor({s16, s32, s64, v2s32, v4s32, v2s64, v2s16, v4s16, v8s16});
233 
235  {G_FCOS, G_FSIN, G_FLOG10, G_FLOG, G_FLOG2, G_FEXP, G_FEXP2, G_FPOW})
236  // We need a call for these, so we always need to scalarize.
237  .scalarize(0)
238  // Regardless of FP16 support, widen 16-bit elements to 32-bits.
239  .minScalar(0, s32)
240  .libcallFor({s32, s64, v2s32, v4s32, v2s64});
241 
243  .legalIf(all(typeInSet(0, {s32, s64, p0}),
244  typeInSet(1, {s1, s8, s16, s32}), smallerThan(1, 0)))
246  .clampScalar(0, s32, s64)
248  .minScalar(1, s8)
249  .maxScalarIf(typeInSet(0, {s32}), 1, s16)
250  .maxScalarIf(typeInSet(0, {s64, p0}), 1, s32);
251 
252  getActionDefinitionsBuilder(G_EXTRACT)
253  .legalIf(all(typeInSet(0, {s16, s32, s64, p0}),
254  typeInSet(1, {s32, s64, s128, p0}), smallerThan(0, 1)))
256  .clampScalar(1, s32, s128)
258  .minScalar(0, s16)
259  .maxScalarIf(typeInSet(1, {s32}), 0, s16)
260  .maxScalarIf(typeInSet(1, {s64, p0}), 0, s32)
261  .maxScalarIf(typeInSet(1, {s128}), 0, s64);
262 
263  getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
265  .legalForTypesWithMemDesc({{s32, p0, s8, 8},
266  {s32, p0, s16, 8},
267  {s32, p0, s32, 8},
268  {s64, p0, s8, 2},
269  {s64, p0, s16, 2},
270  {s64, p0, s32, 4},
271  {s64, p0, s64, 8},
272  {p0, p0, s64, 8},
273  {v2s32, p0, s64, 8}})
274  .widenScalarToNextPow2(0)
275  .clampScalar(0, s32, s64)
276  // TODO: We could support sum-of-pow2's but the lowering code doesn't know
277  // how to do that yet.
279  // Lower anything left over into G_*EXT and G_LOAD
280  .lower();
281 
282  auto IsPtrVecPred = [=](const LegalityQuery &Query) {
283  const LLT &ValTy = Query.Types[0];
284  if (!ValTy.isVector())
285  return false;
286  const LLT EltTy = ValTy.getElementType();
287  return EltTy.isPointer() && EltTy.getAddressSpace() == 0;
288  };
289 
291  .customIf([=](const LegalityQuery &Query) {
292  return Query.Types[0] == s128 &&
293  Query.MMODescrs[0].Ordering != AtomicOrdering::NotAtomic;
294  })
295  .legalForTypesWithMemDesc({{s8, p0, s8, 8},
296  {s16, p0, s16, 8},
297  {s32, p0, s32, 8},
298  {s64, p0, s64, 8},
299  {p0, p0, s64, 8},
300  {s128, p0, s128, 8},
301  {v8s8, p0, s64, 8},
302  {v16s8, p0, s128, 8},
303  {v4s16, p0, s64, 8},
304  {v8s16, p0, s128, 8},
305  {v2s32, p0, s64, 8},
306  {v4s32, p0, s128, 8},
307  {v2s64, p0, s128, 8}})
308  // These extends are also legal
309  .legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 8}})
310  .widenScalarToNextPow2(0, /* MinSize = */8)
312  .clampScalar(0, s8, s64)
313  .narrowScalarIf([=](const LegalityQuery &Query) {
314  // Clamp extending load results to 32-bits.
315  return Query.Types[0].isScalar() &&
316  Query.Types[0] != Query.MMODescrs[0].MemoryTy &&
317  Query.Types[0].getSizeInBits() > 32;
318  },
319  changeTo(0, s32))
320  .clampMaxNumElements(0, s8, 16)
321  .clampMaxNumElements(0, s16, 8)
322  .clampMaxNumElements(0, s32, 4)
323  .clampMaxNumElements(0, s64, 2)
324  .clampMaxNumElements(0, p0, 2)
325  .customIf(IsPtrVecPred)
326  .scalarizeIf(typeIs(0, v2s16), 0);
327 
329  .customIf([=](const LegalityQuery &Query) {
330  return Query.Types[0] == s128 &&
331  Query.MMODescrs[0].Ordering != AtomicOrdering::NotAtomic;
332  })
333  .legalForTypesWithMemDesc({{s8, p0, s8, 8},
334  {s16, p0, s8, 8}, // truncstorei8 from s16
335  {s32, p0, s8, 8}, // truncstorei8 from s32
336  {s64, p0, s8, 8}, // truncstorei8 from s64
337  {s16, p0, s16, 8},
338  {s32, p0, s16, 8}, // truncstorei16 from s32
339  {s64, p0, s16, 8}, // truncstorei16 from s64
340  {s32, p0, s8, 8},
341  {s32, p0, s16, 8},
342  {s32, p0, s32, 8},
343  {s64, p0, s64, 8},
344  {s64, p0, s32, 8}, // truncstorei32 from s64
345  {p0, p0, s64, 8},
346  {s128, p0, s128, 8},
347  {v16s8, p0, s128, 8},
348  {v8s8, p0, s64, 8},
349  {v4s16, p0, s64, 8},
350  {v8s16, p0, s128, 8},
351  {v2s32, p0, s64, 8},
352  {v4s32, p0, s128, 8},
353  {v2s64, p0, s128, 8}})
354  .clampScalar(0, s8, s64)
355  .lowerIf([=](const LegalityQuery &Query) {
356  return Query.Types[0].isScalar() &&
357  Query.Types[0] != Query.MMODescrs[0].MemoryTy;
358  })
359  // Maximum: sN * k = 128
360  .clampMaxNumElements(0, s8, 16)
361  .clampMaxNumElements(0, s16, 8)
362  .clampMaxNumElements(0, s32, 4)
363  .clampMaxNumElements(0, s64, 2)
364  .clampMaxNumElements(0, p0, 2)
366  .customIf(IsPtrVecPred)
367  .scalarizeIf(typeIs(0, v2s16), 0);
368 
369  // Constants
370  getActionDefinitionsBuilder(G_CONSTANT)
371  .legalFor({p0, s8, s16, s32, s64})
372  .widenScalarToNextPow2(0)
373  .clampScalar(0, s8, s64);
374  getActionDefinitionsBuilder(G_FCONSTANT)
375  .legalIf([=](const LegalityQuery &Query) {
376  const auto &Ty = Query.Types[0];
377  if (HasFP16 && Ty == s16)
378  return true;
379  return Ty == s32 || Ty == s64 || Ty == s128;
380  })
381  .clampScalar(0, MinFPScalar, s128);
382 
383  getActionDefinitionsBuilder({G_ICMP, G_FCMP})
384  .legalFor({{s32, s32},
385  {s32, s64},
386  {s32, p0},
387  {v4s32, v4s32},
388  {v2s32, v2s32},
389  {v2s64, v2s64},
390  {v2s64, v2p0},
391  {v4s16, v4s16},
392  {v8s16, v8s16},
393  {v8s8, v8s8},
394  {v16s8, v16s8}})
396  .clampScalar(1, s32, s64)
397  .clampScalar(0, s32, s32)
398  .minScalarEltSameAsIf(
399  [=](const LegalityQuery &Query) {
400  const LLT &Ty = Query.Types[0];
401  const LLT &SrcTy = Query.Types[1];
402  return Ty.isVector() && !SrcTy.getElementType().isPointer() &&
403  Ty.getElementType() != SrcTy.getElementType();
404  },
405  0, 1)
406  .minScalarOrEltIf(
407  [=](const LegalityQuery &Query) { return Query.Types[1] == v2s16; },
408  1, s32)
409  .minScalarOrEltIf(
410  [=](const LegalityQuery &Query) { return Query.Types[1] == v2p0; }, 0,
411  s64)
412  .clampNumElements(0, v2s32, v4s32);
413 
414  // Extensions
415  auto ExtLegalFunc = [=](const LegalityQuery &Query) {
416  unsigned DstSize = Query.Types[0].getSizeInBits();
417 
418  if (DstSize == 128 && !Query.Types[0].isVector())
419  return false; // Extending to a scalar s128 needs narrowing.
420 
421  // Make sure that we have something that will fit in a register, and
422  // make sure it's a power of 2.
423  if (DstSize < 8 || DstSize > 128 || !isPowerOf2_32(DstSize))
424  return false;
425 
426  const LLT &SrcTy = Query.Types[1];
427 
428  // Special case for s1.
429  if (SrcTy == s1)
430  return true;
431 
432  // Make sure we fit in a register otherwise. Don't bother checking that
433  // the source type is below 128 bits. We shouldn't be allowing anything
434  // through which is wider than the destination in the first place.
435  unsigned SrcSize = SrcTy.getSizeInBits();
436  if (SrcSize < 8 || !isPowerOf2_32(SrcSize))
437  return false;
438 
439  return true;
440  };
441  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
442  .legalIf(ExtLegalFunc)
443  .clampScalar(0, s64, s64); // Just for s128, others are handled above.
444 
447  [=](const LegalityQuery &Query) { return Query.Types[0].isVector(); },
448  0, s8)
449  .customIf([=](const LegalityQuery &Query) {
450  LLT DstTy = Query.Types[0];
451  LLT SrcTy = Query.Types[1];
452  return DstTy == v8s8 && SrcTy.getSizeInBits() > 128;
453  })
454  .alwaysLegal();
455 
456  getActionDefinitionsBuilder(G_SEXT_INREG).legalFor({s32, s64}).lower();
457 
458  // FP conversions
459  getActionDefinitionsBuilder(G_FPTRUNC)
460  .legalFor(
461  {{s16, s32}, {s16, s64}, {s32, s64}, {v4s16, v4s32}, {v2s32, v2s64}})
462  .clampMaxNumElements(0, s32, 2);
464  .legalFor(
465  {{s32, s16}, {s64, s16}, {s64, s32}, {v4s32, v4s16}, {v2s64, v2s32}})
466  .clampMaxNumElements(0, s64, 2);
467 
468  // Conversions
469  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
470  .legalForCartesianProduct({s32, s64, v2s64, v4s32, v2s32})
471  .widenScalarToNextPow2(0)
472  .clampScalar(0, s32, s64)
474  .clampScalar(1, s32, s64);
475 
476  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
477  .legalForCartesianProduct({s32, s64, v2s64, v4s32, v2s32})
478  .clampScalar(1, s32, s64)
479  .minScalarSameAs(1, 0)
480  .clampScalar(0, s32, s64)
482 
483  // Control-flow
484  getActionDefinitionsBuilder(G_BRCOND).legalFor({s1, s8, s16, s32});
485  getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});
486 
488  .legalFor({{s32, s1}, {s64, s1}, {p0, s1}})
489  .widenScalarToNextPow2(0)
490  .clampScalar(0, s32, s64)
491  .minScalarEltSameAsIf(all(isVector(0), isVector(1)), 1, 0)
492  .lowerIf(isVector(0));
493 
494  // Pointer-handling
495  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});
496 
497  if (TM.getCodeModel() == CodeModel::Small)
498  getActionDefinitionsBuilder(G_GLOBAL_VALUE).custom();
499  else
500  getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0});
501 
502  getActionDefinitionsBuilder(G_PTRTOINT)
503  .legalForCartesianProduct({s1, s8, s16, s32, s64}, {p0})
504  .legalFor({{v2s64, v2p0}})
505  .maxScalar(0, s64)
506  .widenScalarToNextPow2(0, /*Min*/ 8);
507 
508  getActionDefinitionsBuilder(G_INTTOPTR)
509  .unsupportedIf([&](const LegalityQuery &Query) {
510  return Query.Types[0].getSizeInBits() != Query.Types[1].getSizeInBits();
511  })
512  .legalFor({{p0, s64}, {v2p0, v2s64}});
513 
514  // Casts for 32 and 64-bit width type are just copies.
515  // Same for 128-bit width type, except they are on the FPR bank.
516  getActionDefinitionsBuilder(G_BITCAST)
517  // FIXME: This is wrong since G_BITCAST is not allowed to change the
518  // number of bits but it's what the previous code described and fixing
519  // it breaks tests.
520  .legalForCartesianProduct({s1, s8, s16, s32, s64, s128, v16s8, v8s8, v4s8,
521  v8s16, v4s16, v2s16, v4s32, v2s32, v2s64,
522  v2p0});
523 
524  getActionDefinitionsBuilder(G_VASTART).legalFor({p0});
525 
526  // va_list must be a pointer, but most sized types are pretty easy to handle
527  // as the destination.
529  .customForCartesianProduct({s8, s16, s32, s64, p0}, {p0})
530  .clampScalar(0, s8, s64)
531  .widenScalarToNextPow2(0, /*Min*/ 8);
532 
533  getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG_WITH_SUCCESS)
534  .lowerIf(
535  all(typeInSet(0, {s8, s16, s32, s64, s128}), typeIs(2, p0)));
536 
537  getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG)
538  .customIf([](const LegalityQuery &Query) {
539  return Query.Types[0].getSizeInBits() == 128;
540  })
541  .clampScalar(0, s32, s64)
542  .legalIf(all(typeInSet(0, {s32, s64}), typeIs(1, p0)));
543 
545  {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB, G_ATOMICRMW_AND,
546  G_ATOMICRMW_OR, G_ATOMICRMW_XOR, G_ATOMICRMW_MIN, G_ATOMICRMW_MAX,
547  G_ATOMICRMW_UMIN, G_ATOMICRMW_UMAX})
548  .clampScalar(0, s32, s64)
549  .legalIf(all(typeInSet(0, {s32, s64}), typeIs(1, p0)));
550 
551  getActionDefinitionsBuilder(G_BLOCK_ADDR).legalFor({p0});
552 
553  // Merge/Unmerge
554  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
555  unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
556  unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
558  .widenScalarToNextPow2(LitTyIdx, 8)
559  .widenScalarToNextPow2(BigTyIdx, 32)
560  .clampScalar(LitTyIdx, s8, s64)
561  .clampScalar(BigTyIdx, s32, s128)
562  .legalIf([=](const LegalityQuery &Q) {
563  switch (Q.Types[BigTyIdx].getSizeInBits()) {
564  case 32:
565  case 64:
566  case 128:
567  break;
568  default:
569  return false;
570  }
571  switch (Q.Types[LitTyIdx].getSizeInBits()) {
572  case 8:
573  case 16:
574  case 32:
575  case 64:
576  return true;
577  default:
578  return false;
579  }
580  });
581  }
582 
583  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
584  .unsupportedIf([=](const LegalityQuery &Query) {
585  const LLT &EltTy = Query.Types[1].getElementType();
586  return Query.Types[0] != EltTy;
587  })
588  .minScalar(2, s64)
589  .legalIf([=](const LegalityQuery &Query) {
590  const LLT &VecTy = Query.Types[1];
591  return VecTy == v2s16 || VecTy == v4s16 || VecTy == v8s16 ||
592  VecTy == v4s32 || VecTy == v2s64 || VecTy == v2s32 ||
593  VecTy == v8s8 || VecTy == v16s8 || VecTy == v2s32 ||
594  VecTy == v2p0;
595  })
596  .minScalarOrEltIf(
597  [=](const LegalityQuery &Query) {
598  // We want to promote to <M x s1> to <M x s64> if that wouldn't
599  // cause the total vec size to be > 128b.
600  return Query.Types[1].getNumElements() <= 2;
601  },
602  0, s64)
603  .minScalarOrEltIf(
604  [=](const LegalityQuery &Query) {
605  return Query.Types[1].getNumElements() <= 4;
606  },
607  0, s32)
608  .minScalarOrEltIf(
609  [=](const LegalityQuery &Query) {
610  return Query.Types[1].getNumElements() <= 8;
611  },
612  0, s16)
613  .minScalarOrEltIf(
614  [=](const LegalityQuery &Query) {
615  return Query.Types[1].getNumElements() <= 16;
616  },
617  0, s8)
618  .minScalarOrElt(0, s8) // Worst case, we need at least s8.
619  .clampMaxNumElements(1, s64, 2)
620  .clampMaxNumElements(1, s32, 4)
621  .clampMaxNumElements(1, s16, 8)
622  .clampMaxNumElements(1, p0, 2);
623 
624  getActionDefinitionsBuilder(G_INSERT_VECTOR_ELT)
625  .legalIf(typeInSet(0, {v8s16, v2s32, v4s32, v2s64}));
626 
627  getActionDefinitionsBuilder(G_BUILD_VECTOR)
628  .legalFor({{v8s8, s8},
629  {v16s8, s8},
630  {v2s16, s16},
631  {v4s16, s16},
632  {v8s16, s16},
633  {v2s32, s32},
634  {v4s32, s32},
635  {v2p0, p0},
636  {v2s64, s64}})
637  .clampNumElements(0, v4s32, v4s32)
638  .clampNumElements(0, v2s64, v2s64)
639  .minScalarOrElt(0, s8)
640  .minScalarSameAs(1, 0);
641 
642  getActionDefinitionsBuilder(G_BUILD_VECTOR_TRUNC).lower();
643 
646  {s32, s64, v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})
647  .scalarize(1);
648  getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF).lower();
649 
650  // TODO: Custom lowering for v2s32, v4s32, v2s64.
651  getActionDefinitionsBuilder(G_BITREVERSE)
652  .legalFor({s32, s64, v8s8, v16s8})
653  .widenScalarToNextPow2(0, /*Min = */ 32)
654  .clampScalar(0, s32, s64);
655 
656  getActionDefinitionsBuilder(G_CTTZ_ZERO_UNDEF).lower();
657 
658  // TODO: Handle vector types.
660  .clampScalar(0, s32, s64)
661  .scalarSameSizeAs(1, 0)
662  .customFor({s32, s64});
663 
664  getActionDefinitionsBuilder(G_SHUFFLE_VECTOR)
665  .legalIf([=](const LegalityQuery &Query) {
666  const LLT &DstTy = Query.Types[0];
667  const LLT &SrcTy = Query.Types[1];
668  // For now just support the TBL2 variant which needs the source vectors
669  // to be the same size as the dest.
670  if (DstTy != SrcTy)
671  return false;
672  for (auto &Ty : {v2s32, v4s32, v2s64, v2p0, v16s8, v8s16}) {
673  if (DstTy == Ty)
674  return true;
675  }
676  return false;
677  })
678  // G_SHUFFLE_VECTOR can have scalar sources (from 1 x s vectors), we
679  // just want those lowered into G_BUILD_VECTOR
680  .lowerIf([=](const LegalityQuery &Query) {
681  return !Query.Types[1].isVector();
682  })
684  .clampNumElements(0, v4s32, v4s32)
685  .clampNumElements(0, v2s64, v2s64);
686 
687  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
688  .legalFor({{v4s32, v2s32}, {v8s16, v4s16}, {v16s8, v8s8}});
689 
690  getActionDefinitionsBuilder(G_JUMP_TABLE).legalFor({{p0}, {s64}});
691 
692  getActionDefinitionsBuilder(G_BRJT).legalIf([=](const LegalityQuery &Query) {
693  return Query.Types[0] == p0 && Query.Types[1] == s64;
694  });
695 
696  getActionDefinitionsBuilder(G_DYN_STACKALLOC).lower();
697 
698  if (ST.hasMOPS()) {
699  // G_BZERO is not supported. Currently it is only emitted by
700  // PreLegalizerCombiner for G_MEMSET with zero constant.
702 
704  .legalForCartesianProduct({p0}, {s64}, {s64})
705  .customForCartesianProduct({p0}, {s8}, {s64})
706  .immIdx(0); // Inform verifier imm idx 0 is handled.
707 
708  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE})
709  .legalForCartesianProduct({p0}, {p0}, {s64})
710  .immIdx(0); // Inform verifier imm idx 0 is handled.
711 
712  // G_MEMCPY_INLINE does not have a tailcall immediate
713  getActionDefinitionsBuilder(G_MEMCPY_INLINE)
714  .legalForCartesianProduct({p0}, {p0}, {s64});
715 
716  } else {
717  getActionDefinitionsBuilder({G_BZERO, G_MEMCPY, G_MEMMOVE, G_MEMSET})
718  .libcall();
719  }
720 
721  // FIXME: Legal types are only legal with NEON.
723  .lowerIf(isScalar(0))
724  .legalFor(PackedVectorAllTypeList);
725 
726  getActionDefinitionsBuilder(G_VECREDUCE_FADD)
727  // We only have FADDP to do reduction-like operations. Lower the rest.
728  .legalFor({{s32, v2s32}, {s64, v2s64}})
729  .clampMaxNumElements(1, s64, 2)
730  .clampMaxNumElements(1, s32, 2)
731  .lower();
732 
733  getActionDefinitionsBuilder(G_VECREDUCE_ADD)
734  .legalFor(
735  {{s8, v16s8}, {s16, v8s16}, {s32, v4s32}, {s32, v2s32}, {s64, v2s64}})
736  .clampMaxNumElements(1, s64, 2)
737  .clampMaxNumElements(1, s32, 4)
738  .lower();
739 
741  {G_VECREDUCE_OR, G_VECREDUCE_AND, G_VECREDUCE_XOR})
742  // Try to break down into smaller vectors as long as they're at least 64
743  // bits. This lets us use vector operations for some parts of the
744  // reduction.
745  .fewerElementsIf(
746  [=](const LegalityQuery &Q) {
747  LLT SrcTy = Q.Types[1];
748  if (SrcTy.isScalar())
749  return false;
750  if (!isPowerOf2_32(SrcTy.getNumElements()))
751  return false;
752  // We can usually perform 64b vector operations.
753  return SrcTy.getSizeInBits() > 64;
754  },
755  [=](const LegalityQuery &Q) {
756  LLT SrcTy = Q.Types[1];
757  return std::make_pair(1, SrcTy.divide(2));
758  })
759  .scalarize(1)
760  .lower();
761 
762  getActionDefinitionsBuilder({G_UADDSAT, G_USUBSAT})
763  .lowerIf([=](const LegalityQuery &Q) { return Q.Types[0].isScalar(); });
764 
765  getActionDefinitionsBuilder({G_FSHL, G_FSHR}).lower();
766 
768  .legalFor({{s32, s64}, {s64, s64}})
769  .customIf([=](const LegalityQuery &Q) {
770  return Q.Types[0].isScalar() && Q.Types[1].getScalarSizeInBits() < 64;
771  })
772  .lower();
774 
775  getActionDefinitionsBuilder({G_SBFX, G_UBFX})
776  .customFor({{s32, s32}, {s64, s64}});
777 
778  // TODO: Use generic lowering when custom lowering is not possible.
779  auto always = [=](const LegalityQuery &Q) { return true; };
781  .legalFor({{v8s8, v8s8}, {v16s8, v16s8}})
782  .clampScalar(0, s32, s128)
786  .customFor({{s32, s32},
787  {s64, s64},
788  {s128, s128},
789  {v2s64, v2s64},
790  {v2s32, v2s32},
791  {v4s32, v4s32},
792  {v4s16, v4s16},
793  {v8s16, v8s16}});
794 
795  // TODO: Vector types.
796  getActionDefinitionsBuilder({G_SADDSAT, G_SSUBSAT}).lowerIf(isScalar(0));
797 
798  // TODO: Vector types.
799  getActionDefinitionsBuilder({G_FMAXNUM, G_FMINNUM})
800  .legalFor({MinFPScalar, s32, s64})
801  .libcallFor({s128})
802  .minScalar(0, MinFPScalar);
803 
804  // TODO: Vector types.
805  getActionDefinitionsBuilder({G_FMAXIMUM, G_FMINIMUM})
806  .legalFor({MinFPScalar, s32, s64})
807  .minScalar(0, MinFPScalar);
808 
809  // TODO: Libcall support for s128.
810  // TODO: s16 should be legal with full FP16 support.
811  getActionDefinitionsBuilder({G_LROUND, G_LLROUND})
812  .legalFor({{s64, s32}, {s64, s64}});
813 
815  verify(*ST.getInstrInfo());
816 }
817 
819  MachineInstr &MI) const {
820  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
821  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
822  GISelChangeObserver &Observer = Helper.Observer;
823  switch (MI.getOpcode()) {
824  default:
825  // No idea what to do.
826  return false;
827  case TargetOpcode::G_VAARG:
828  return legalizeVaArg(MI, MRI, MIRBuilder);
829  case TargetOpcode::G_LOAD:
830  case TargetOpcode::G_STORE:
831  return legalizeLoadStore(MI, MRI, MIRBuilder, Observer);
832  case TargetOpcode::G_SHL:
833  case TargetOpcode::G_ASHR:
834  case TargetOpcode::G_LSHR:
835  return legalizeShlAshrLshr(MI, MRI, MIRBuilder, Observer);
836  case TargetOpcode::G_GLOBAL_VALUE:
837  return legalizeSmallCMGlobalValue(MI, MRI, MIRBuilder, Observer);
838  case TargetOpcode::G_TRUNC:
839  return legalizeVectorTrunc(MI, Helper);
840  case TargetOpcode::G_SBFX:
841  case TargetOpcode::G_UBFX:
842  return legalizeBitfieldExtract(MI, MRI, Helper);
843  case TargetOpcode::G_ROTR:
844  return legalizeRotate(MI, MRI, Helper);
845  case TargetOpcode::G_CTPOP:
846  return legalizeCTPOP(MI, MRI, Helper);
847  case TargetOpcode::G_ATOMIC_CMPXCHG:
848  return legalizeAtomicCmpxchg128(MI, MRI, Helper);
849  case TargetOpcode::G_CTTZ:
850  return legalizeCTTZ(MI, Helper);
851  case TargetOpcode::G_BZERO:
852  case TargetOpcode::G_MEMCPY:
853  case TargetOpcode::G_MEMMOVE:
854  case TargetOpcode::G_MEMSET:
855  return legalizeMemOps(MI, Helper);
856  }
857 
858  llvm_unreachable("expected switch to return");
859 }
860 
861 bool AArch64LegalizerInfo::legalizeRotate(MachineInstr &MI,
863  LegalizerHelper &Helper) const {
864  // To allow for imported patterns to match, we ensure that the rotate amount
865  // is 64b with an extension.
866  Register AmtReg = MI.getOperand(2).getReg();
867  LLT AmtTy = MRI.getType(AmtReg);
868  (void)AmtTy;
869  assert(AmtTy.isScalar() && "Expected a scalar rotate");
870  assert(AmtTy.getSizeInBits() < 64 && "Expected this rotate to be legal");
871  auto NewAmt = Helper.MIRBuilder.buildSExt(LLT::scalar(64), AmtReg);
872  Helper.Observer.changingInstr(MI);
873  MI.getOperand(2).setReg(NewAmt.getReg(0));
874  Helper.Observer.changedInstr(MI);
875  return true;
876 }
877 
879  MachineIRBuilder &MIRBuilder, LLT Ty, int NumParts,
880  SmallVectorImpl<Register> &VRegs) {
881  for (int I = 0; I < NumParts; ++I)
882  VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
883  MIRBuilder.buildUnmerge(VRegs, Reg);
884 }
885 
886 bool AArch64LegalizerInfo::legalizeVectorTrunc(
887  MachineInstr &MI, LegalizerHelper &Helper) const {
888  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
889  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
890  // Similar to how operand splitting is done in SelectiondDAG, we can handle
891  // %res(v8s8) = G_TRUNC %in(v8s32) by generating:
892  // %inlo(<4x s32>), %inhi(<4 x s32>) = G_UNMERGE %in(<8 x s32>)
893  // %lo16(<4 x s16>) = G_TRUNC %inlo
894  // %hi16(<4 x s16>) = G_TRUNC %inhi
895  // %in16(<8 x s16>) = G_CONCAT_VECTORS %lo16, %hi16
896  // %res(<8 x s8>) = G_TRUNC %in16
897 
898  Register DstReg = MI.getOperand(0).getReg();
899  Register SrcReg = MI.getOperand(1).getReg();
900  LLT DstTy = MRI.getType(DstReg);
901  LLT SrcTy = MRI.getType(SrcReg);
903  isPowerOf2_32(SrcTy.getSizeInBits()));
904 
905  // Split input type.
906  LLT SplitSrcTy =
908  // First, split the source into two smaller vectors.
909  SmallVector<Register, 2> SplitSrcs;
910  extractParts(SrcReg, MRI, MIRBuilder, SplitSrcTy, 2, SplitSrcs);
911 
912  // Truncate the splits into intermediate narrower elements.
913  LLT InterTy = SplitSrcTy.changeElementSize(DstTy.getScalarSizeInBits() * 2);
914  for (unsigned I = 0; I < SplitSrcs.size(); ++I)
915  SplitSrcs[I] = MIRBuilder.buildTrunc(InterTy, SplitSrcs[I]).getReg(0);
916 
917  auto Concat = MIRBuilder.buildConcatVectors(
918  DstTy.changeElementSize(DstTy.getScalarSizeInBits() * 2), SplitSrcs);
919 
920  Helper.Observer.changingInstr(MI);
921  MI.getOperand(1).setReg(Concat.getReg(0));
922  Helper.Observer.changedInstr(MI);
923  return true;
924 }
925 
// Custom-legalize G_GLOBAL_VALUE for the small code model by splitting it into
// an ADRP (page address) plus a G_ADD_LOW (page offset). TLS and GOT-based
// references are left untouched (returns true without modifying MI in those
// cases). Returns true on success, as required for custom legalization.
926 bool AArch64LegalizerInfo::legalizeSmallCMGlobalValue(
928  GISelChangeObserver &Observer) const {
929  assert(MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
930  // We do this custom legalization to convert G_GLOBAL_VALUE into target ADRP +
931  // G_ADD_LOW instructions.
932  // By splitting this here, we can optimize accesses in the small code model by
933  // folding in the G_ADD_LOW into the load/store offset.
934  auto &GlobalOp = MI.getOperand(1);
935  const auto* GV = GlobalOp.getGlobal();
936  if (GV->isThreadLocal())
937  return true; // Don't want to modify TLS vars.
938 
939  auto &TM = ST->getTargetLowering()->getTargetMachine();
940  unsigned OpFlags = ST->ClassifyGlobalReference(GV, TM);
941 
 // GOT-indirect references need a load through the GOT rather than
 // ADRP + ADD_LOW; leave those for later handling.
942  if (OpFlags & AArch64II::MO_GOT)
943  return true;
944 
945  auto Offset = GlobalOp.getOffset();
946  Register DstReg = MI.getOperand(0).getReg();
 // ADRP materializes the 4K-page base of the global into a GPR64.
947  auto ADRP = MIRBuilder.buildInstr(AArch64::ADRP, {LLT::pointer(0, 64)}, {})
948  .addGlobalAddress(GV, Offset, OpFlags | AArch64II::MO_PAGE);
949  // Set the regclass on the dest reg too.
950  MRI.setRegClass(ADRP.getReg(0), &AArch64::GPR64RegClass);
951 
952  // MO_TAGGED on the page indicates a tagged address. Set the tag now. We do so
953  // by creating a MOVK that sets bits 48-63 of the register to (global address
954  // + 0x100000000 - PC) >> 48. The additional 0x100000000 offset here is to
955  // prevent an incorrect tag being generated during relocation when the
956  // global appears before the code section. Without the offset, a global at
957  // `0x0f00'0000'0000'1000` (i.e. at `0x1000` with tag `0xf`) that's referenced
958  // by code at `0x2000` would result in `0x0f00'0000'0000'1000 - 0x2000 =
959  // 0x0eff'ffff'ffff'f000`, meaning the tag would be incorrectly set to `0xe`
960  // instead of `0xf`.
961  // This assumes that we're in the small code model so we can assume a binary
962  // size of <= 4GB, which makes the untagged PC relative offset positive. The
963  // binary must also be loaded into address range [0, 2^48). Both of these
964  // properties need to be ensured at runtime when using tagged addresses.
965  if (OpFlags & AArch64II::MO_TAGGED) {
966  assert(!Offset &&
967  "Should not have folded in an offset for a tagged global!");
968  ADRP = MIRBuilder.buildInstr(AArch64::MOVKXi, {LLT::pointer(0, 64)}, {ADRP})
969  .addGlobalAddress(GV, 0x100000000,
971  .addImm(48);
972  MRI.setRegClass(ADRP.getReg(0), &AArch64::GPR64RegClass);
973  }
974 
 // G_ADD_LOW adds the low 12 bits of the address; combines can later fold
 // this into load/store addressing modes.
975  MIRBuilder.buildInstr(AArch64::G_ADD_LOW, {DstReg}, {ADRP})
976  .addGlobalAddress(GV, Offset,
978  MI.eraseFromParent();
979  return true;
980 }
981 
983  MachineInstr &MI) const {
984  switch (MI.getIntrinsicID()) {
985  case Intrinsic::vacopy: {
 // The va_list layout is ABI-dependent: a single pointer on Darwin and
 // Windows, otherwise a 20-byte (ILP32) or 32-byte struct. Copy it as one
 // wide scalar load followed by a store.
986  unsigned PtrSize = ST->isTargetILP32() ? 4 : 8;
987  unsigned VaListSize =
988  (ST->isTargetDarwin() || ST->isTargetWindows())
989  ? PtrSize
990  : ST->isTargetILP32() ? 20 : 32;
991 
992  MachineFunction &MF = *MI.getMF();
994  LLT::scalar(VaListSize * 8));
995  MachineIRBuilder MIB(MI);
996  MIB.buildLoad(Val, MI.getOperand(2),
999  VaListSize, Align(PtrSize)));
1000  MIB.buildStore(Val, MI.getOperand(1),
1003  VaListSize, Align(PtrSize)));
1004  MI.eraseFromParent();
1005  return true;
1006  }
1007  case Intrinsic::get_dynamic_area_offset: {
 // This target always reports a zero offset for the dynamic stack area.
1008  MachineIRBuilder &MIB = Helper.MIRBuilder;
1009  MIB.buildConstant(MI.getOperand(0).getReg(), 0);
1010  MI.eraseFromParent();
1011  return true;
1012  }
1013  case Intrinsic::aarch64_mops_memset_tag: {
1014  assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
1015  // Widen (any-extend) the value operand to 64 bits in place.
1016  MachineIRBuilder MIB(MI);
1017  auto &Value = MI.getOperand(3);
1018  Register ZExtValueReg = MIB.buildAnyExt(LLT::scalar(64), Value).getReg(0);
1019  Value.setReg(ZExtValueReg);
1020  return true;
1021  }
1022  }
1023 
 // Any intrinsic not handled above is already legal as-is.
1024  return true;
1025 }
1026 
// Promote an in-range constant shift amount to a 64-bit G_CONSTANT so the
// imported SelectionDAG patterns can select the immediate-shift forms.
// Always returns true: a non-constant or out-of-range amount simply keeps
// the (also legal) register variant.
1027 bool AArch64LegalizerInfo::legalizeShlAshrLshr(
1029  GISelChangeObserver &Observer) const {
1030  assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
1031  MI.getOpcode() == TargetOpcode::G_LSHR ||
1032  MI.getOpcode() == TargetOpcode::G_SHL);
1033  // If the shift amount is a G_CONSTANT, promote it to a 64 bit type so the
1034  // imported patterns can select it later. Either way, it will be legal.
1035  Register AmtReg = MI.getOperand(2).getReg();
1036  auto VRegAndVal = getIConstantVRegValWithLookThrough(AmtReg, MRI);
1037  if (!VRegAndVal)
1038  return true;
1039  // Check the shift amount is in range for an immediate form.
 // NOTE(review): 31 is the largest immediate for 32-bit shifts; 64-bit
 // shifts with amounts 32-63 are deliberately left as register variants
 // here — confirm this conservatism is intended.
1040  int64_t Amount = VRegAndVal->Value.getSExtValue();
1041  if (Amount > 31)
1042  return true; // This will have to remain a register variant.
1043  auto ExtCst = MIRBuilder.buildConstant(LLT::scalar(64), Amount);
1044  Observer.changingInstr(MI);
1045  MI.getOperand(2).setReg(ExtCst.getReg(0));
1046  Observer.changedInstr(MI);
1047  return true;
1048 }
1049 
// Match Root against the LDP/STP addressing mode: if Root is a G_PTR_ADD of
// a base plus a constant that is a multiple of 8 within the signed 7-bit
// scaled range [-512, 504], return that base and offset; otherwise fall back
// to (Root, 0). Base and Offset are pure out-parameters.
1050 static void matchLDPSTPAddrMode(Register Root, Register &Base, int &Offset,
1052  Base = Root;
1053  Offset = 0;
1054 
1055  Register NewBase;
1056  int64_t NewOffset;
1057  if (mi_match(Root, MRI, m_GPtrAdd(m_Reg(NewBase), m_ICst(NewOffset))) &&
1058  isShiftedInt<7, 3>(NewOffset)) {
1059  Base = NewBase;
1060  Offset = NewOffset;
1061  }
1062 }
1063 
1064 // FIXME: This should be removed and replaced with the generic bitcast legalize
1065 // action.
1066 bool AArch64LegalizerInfo::legalizeLoadStore(
1068  GISelChangeObserver &Observer) const {
1069  assert(MI.getOpcode() == TargetOpcode::G_STORE ||
1070  MI.getOpcode() == TargetOpcode::G_LOAD);
1071  // Here we just try to handle vector loads/stores where our value type might
1072  // have pointer elements, which the SelectionDAG importer can't handle. To
1073  // allow the existing patterns for s64 to fire for p0, we just try to bitcast
1074  // the value to use s64 types.
1075 
1076  // Custom legalization requires the instruction, if not deleted, must be fully
1077  // legalized. In order to allow further legalization of the inst, we create
1078  // a new instruction and erase the existing one.
1079 
1080  Register ValReg = MI.getOperand(0).getReg();
1081  const LLT ValTy = MRI.getType(ValReg);
1082 
 // s128 atomic load/store: lower directly to an LDP/STP of two s64 halves.
 // Per the assert below this is only single-copy atomic with +lse2.
1083  if (ValTy == LLT::scalar(128)) {
1084  assert((*MI.memoperands_begin())->getSuccessOrdering() ==
1086  (*MI.memoperands_begin())->getSuccessOrdering() ==
1088  assert(ST->hasLSE2() && "ldp/stp not single copy atomic without +lse2");
1089  LLT s64 = LLT::scalar(64);
1090  MachineInstrBuilder NewI;
1091  if (MI.getOpcode() == TargetOpcode::G_LOAD) {
1092  NewI = MIRBuilder.buildInstr(AArch64::LDPXi, {s64, s64}, {});
1093  MIRBuilder.buildMerge(ValReg, {NewI->getOperand(0), NewI->getOperand(1)});
1094  } else {
1095  auto Split = MIRBuilder.buildUnmerge(s64, MI.getOperand(0));
1096  NewI = MIRBuilder.buildInstr(
1097  AArch64::STPXi, {}, {Split->getOperand(0), Split->getOperand(1)});
1098  }
 // Try to fold a constant pointer offset into the LDP/STP immediate,
 // which is scaled by 8 (hence Offset / 8 below).
1099  Register Base;
1100  int Offset;
1101  matchLDPSTPAddrMode(MI.getOperand(1).getReg(), Base, Offset, MRI);
1102  NewI.addUse(Base);
1103  NewI.addImm(Offset / 8);
1104 
1105  NewI.cloneMemRefs(MI);
1108  *ST->getRegBankInfo());
1109  MI.eraseFromParent();
1110  return true;
1111  }
1112 
1113  if (!ValTy.isVector() || !ValTy.getElementType().isPointer() ||
1114  ValTy.getElementType().getAddressSpace() != 0) {
1115  LLVM_DEBUG(dbgs() << "Tried to do custom legalization on wrong load/store");
1116  return false;
1117  }
1118 
 // Vector-of-pointer case: rewrite as the same-width vector of scalars and
 // bitcast the value on either side of the memory operation.
1119  unsigned PtrSize = ValTy.getElementType().getSizeInBits();
1120  const LLT NewTy = LLT::vector(ValTy.getElementCount(), PtrSize);
1121  auto &MMO = **MI.memoperands_begin();
1122  MMO.setType(NewTy);
1123 
1124  if (MI.getOpcode() == TargetOpcode::G_STORE) {
1125  auto Bitcast = MIRBuilder.buildBitcast(NewTy, ValReg);
1126  MIRBuilder.buildStore(Bitcast.getReg(0), MI.getOperand(1), MMO);
1127  } else {
1128  auto NewLoad = MIRBuilder.buildLoad(NewTy, MI.getOperand(1), MMO);
1129  MIRBuilder.buildBitcast(ValReg, NewLoad);
1130  }
1131  MI.eraseFromParent();
1132  return true;
1133 }
1134 
// Lower G_VAARG: load the current argument pointer from the va_list,
// realign it if the requested alignment exceeds pointer alignment, load the
// value, then store the advanced pointer back into the va_list.
1135 bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
1137  MachineIRBuilder &MIRBuilder) const {
1138  MachineFunction &MF = MIRBuilder.getMF();
1139  Align Alignment(MI.getOperand(2).getImm());
1140  Register Dst = MI.getOperand(0).getReg();
1141  Register ListPtr = MI.getOperand(1).getReg();
1142 
1143  LLT PtrTy = MRI.getType(ListPtr);
1144  LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits());
1145 
1146  const unsigned PtrSize = PtrTy.getSizeInBits() / 8;
1147  const Align PtrAlign = Align(PtrSize);
 // Load the current argument pointer out of the va_list slot.
1148  auto List = MIRBuilder.buildLoad(
1149  PtrTy, ListPtr,
1151  PtrTy, PtrAlign));
1152 
1153  MachineInstrBuilder DstPtr;
1154  if (Alignment > PtrAlign) {
1155  // Realign the list to the actual required alignment.
 // Round up: add (align - 1) then mask off the low bits.
1156  auto AlignMinus1 =
1157  MIRBuilder.buildConstant(IntPtrTy, Alignment.value() - 1);
1158  auto ListTmp = MIRBuilder.buildPtrAdd(PtrTy, List, AlignMinus1.getReg(0));
1159  DstPtr = MIRBuilder.buildMaskLowPtrBits(PtrTy, ListTmp, Log2(Alignment));
1160  } else
1161  DstPtr = List;
1162 
1163  LLT ValTy = MRI.getType(Dst);
1164  uint64_t ValSize = ValTy.getSizeInBits() / 8;
1165  MIRBuilder.buildLoad(
1166  Dst, DstPtr,
1168  ValTy, std::max(Alignment, PtrAlign)));
1169 
 // Advance past the value, keeping the pointer aligned for the next arg.
1170  auto Size = MIRBuilder.buildConstant(IntPtrTy, alignTo(ValSize, PtrAlign));
1171 
1172  auto NewList = MIRBuilder.buildPtrAdd(PtrTy, DstPtr, Size.getReg(0));
1173 
1174  MIRBuilder.buildStore(NewList, ListPtr,
1177  PtrTy, PtrAlign));
1178 
1179  MI.eraseFromParent();
1180  return true;
1181 }
1182 
// Bitfield extracts are legal only when operands 2 and 3 (presumably the
// lsb/width immediates of the generic bitfield-extract opcodes — confirm
// against the opcode definition) fold to constants, so the immediate
// instruction forms can be selected.
1183 bool AArch64LegalizerInfo::legalizeBitfieldExtract(
1185  // Only legal if we can select immediate forms.
1186  // TODO: Lower this otherwise.
1187  return getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI) &&
1188  getIConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI);
1189 }
1190 
1191 bool AArch64LegalizerInfo::legalizeCTPOP(MachineInstr &MI,
1193  LegalizerHelper &Helper) const {
1194  // While there is no integer popcount instruction, it can
1195  // be more efficiently lowered to the following sequence that uses
1196  // AdvSIMD registers/instructions as long as the copies to/from
1197  // the AdvSIMD registers are cheap.
1198  // FMOV D0, X0 // copy 64-bit int to vector, high bits zero'd
1199  // CNT V0.8B, V0.8B // 8xbyte pop-counts
1200  // ADDV B0, V0.8B // sum 8xbyte pop-counts
1201  // UMOV X0, V0.B[0] // copy byte result back to integer reg
1202  //
1203  // For 128 bit vector popcounts, we lower to the following sequence:
1204  // cnt.16b v0, v0 // v8s16, v4s32, v2s64
1205  // uaddlp.8h v0, v0 // v8s16, v4s32, v2s64
1206  // uaddlp.4s v0, v0 // v4s32, v2s64
1207  // uaddlp.2d v0, v0 // v2s64
1208  //
1209  // For 64 bit vector popcounts, we lower to the following sequence:
1210  // cnt.8b v0, v0 // v4s16, v2s32
1211  // uaddlp.4h v0, v0 // v4s16, v2s32
1212  // uaddlp.2s v0, v0 // v2s32
1213 
1214  if (!ST->hasNEON() ||
1215  MI.getMF()->getFunction().hasFnAttribute(Attribute::NoImplicitFloat))
1216  return false;
1217  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
1218  Register Dst = MI.getOperand(0).getReg();
1219  Register Val = MI.getOperand(1).getReg();
1220  LLT Ty = MRI.getType(Val);
1221 
1222  assert(Ty == MRI.getType(Dst) &&
1223  "Expected src and dst to have the same type!");
1224  unsigned Size = Ty.getSizeInBits();
1225 
1226  // Pre-conditioning: widen Val up to the nearest vector type.
1227  // s32,s64,v4s16,v2s32 -> v8i8
1228  // v8s16,v4s32,v2s64 -> v16i8
1229  LLT VTy = Size == 128 ? LLT::fixed_vector(16, 8) : LLT::fixed_vector(8, 8);
1230  if (Ty.isScalar()) {
1231  assert((Size == 32 || Size == 64 || Size == 128) && "Expected only 32, 64, or 128 bit scalars!");
1232  if (Size == 32) {
 // Widen s32 to s64 first so the bitcast to the 64-bit v8s8 below is
 // size-correct.
1233  Val = MIRBuilder.buildZExt(LLT::scalar(64), Val).getReg(0);
1234  }
1235  }
1236  Val = MIRBuilder.buildBitcast(VTy, Val).getReg(0);
1237 
1238  // Count bits in each byte-sized lane.
1239  auto CTPOP = MIRBuilder.buildCTPOP(VTy, Val);
1240 
1241  // Sum across lanes.
 // Scalars use a single uaddlv (across-vector sum); vectors chain uaddlp
 // (pairwise widening adds) through the HAddTys sequence built below.
1242  Register HSum = CTPOP.getReg(0);
1243  unsigned Opc;
1244  SmallVector<LLT> HAddTys;
1245  if (Ty.isScalar()) {
1246  Opc = Intrinsic::aarch64_neon_uaddlv;
1247  HAddTys.push_back(LLT::scalar(32));
1248  } else if (Ty == LLT::fixed_vector(8, 16)) {
1249  Opc = Intrinsic::aarch64_neon_uaddlp;
1250  HAddTys.push_back(LLT::fixed_vector(8, 16));
1251  } else if (Ty == LLT::fixed_vector(4, 32)) {
1252  Opc = Intrinsic::aarch64_neon_uaddlp;
1253  HAddTys.push_back(LLT::fixed_vector(8, 16));
1254  HAddTys.push_back(LLT::fixed_vector(4, 32));
1255  } else if (Ty == LLT::fixed_vector(2, 64)) {
1256  Opc = Intrinsic::aarch64_neon_uaddlp;
1257  HAddTys.push_back(LLT::fixed_vector(8, 16));
1258  HAddTys.push_back(LLT::fixed_vector(4, 32));
1259  HAddTys.push_back(LLT::fixed_vector(2, 64));
1260  } else if (Ty == LLT::fixed_vector(4, 16)) {
1261  Opc = Intrinsic::aarch64_neon_uaddlp;
1262  HAddTys.push_back(LLT::fixed_vector(4, 16));
1263  } else if (Ty == LLT::fixed_vector(2, 32)) {
1264  Opc = Intrinsic::aarch64_neon_uaddlp;
1265  HAddTys.push_back(LLT::fixed_vector(4, 16));
1266  HAddTys.push_back(LLT::fixed_vector(2, 32));
1267  } else
1268  llvm_unreachable("unexpected vector shape");
1269  MachineInstrBuilder UADD;
1270  for (LLT HTy : HAddTys) {
1271  UADD = MIRBuilder.buildIntrinsic(Opc, {HTy}, /*HasSideEffects =*/false)
1272  .addUse(HSum);
1273  HSum = UADD.getReg(0);
1274  }
1275 
1276  // Post-conditioning.
 // The scalar uaddlv result is s32; zero-extend it to reach a wider Dst.
1277  if (Ty.isScalar() && (Size == 64 || Size == 128))
1278  MIRBuilder.buildZExt(Dst, UADD);
1279  else
1280  UADD->getOperand(0).setReg(Dst);
1281  MI.eraseFromParent();
1282  return true;
1283 }
1284 
// Lower a 128-bit atomic compare-and-swap: split the desired/new values into
// s64 halves, then either use the CASP family (when +lse is available) via
// REG_SEQUENCE / G_EXTRACT bracketing, or fall back to a CMP_SWAP_128 pseudo
// that expands to LDXP/STXP. The two result halves are re-merged at the end.
1285 bool AArch64LegalizerInfo::legalizeAtomicCmpxchg128(
1287  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
1288  LLT s64 = LLT::scalar(64);
1289  auto Addr = MI.getOperand(1).getReg();
1290  auto DesiredI = MIRBuilder.buildUnmerge({s64, s64}, MI.getOperand(2));
1291  auto NewI = MIRBuilder.buildUnmerge({s64, s64}, MI.getOperand(3));
1292  auto DstLo = MRI.createGenericVirtualRegister(s64);
1293  auto DstHi = MRI.createGenericVirtualRegister(s64);
1294 
1295  MachineInstrBuilder CAS;
1296  if (ST->hasLSE()) {
1297  // We have 128-bit CASP instructions taking XSeqPair registers, which are
1298  // s128. We need the merge/unmerge to bracket the expansion and pair up with
1299  // the rest of the MIR so we must reassemble the extracted registers into a
1300  // 128-bit known-regclass one with code like this:
1301  //
1302  // %in1 = REG_SEQUENCE Lo, Hi ; One for each input
1303  // %out = CASP %in1, ...
1304  // %OldLo = G_EXTRACT %out, 0
1305  // %OldHi = G_EXTRACT %out, 64
 // Pick the CASP variant matching the instruction's memory ordering.
1306  auto Ordering = (*MI.memoperands_begin())->getMergedOrdering();
1307  unsigned Opcode;
1308  switch (Ordering) {
1310  Opcode = AArch64::CASPAX;
1311  break;
1313  Opcode = AArch64::CASPLX;
1314  break;
1317  Opcode = AArch64::CASPALX;
1318  break;
1319  default:
1320  Opcode = AArch64::CASPX;
1321  break;
1322  }
1323 
1324  LLT s128 = LLT::scalar(128);
1325  auto CASDst = MRI.createGenericVirtualRegister(s128);
1326  auto CASDesired = MRI.createGenericVirtualRegister(s128);
1327  auto CASNew = MRI.createGenericVirtualRegister(s128);
1328  MIRBuilder.buildInstr(TargetOpcode::REG_SEQUENCE, {CASDesired}, {})
1329  .addUse(DesiredI->getOperand(0).getReg())
1330  .addImm(AArch64::sube64)
1331  .addUse(DesiredI->getOperand(1).getReg())
1332  .addImm(AArch64::subo64)
1333  MIRBuilder.buildInstr(TargetOpcode::REG_SEQUENCE, {CASNew}, {})
1334  .addUse(NewI->getOperand(0).getReg())
1335  .addImm(AArch64::sube64)
1336  .addUse(NewI->getOperand(1).getReg())
1337  .addImm(AArch64::subo64);
1338 
1339  CAS = MIRBuilder.buildInstr(Opcode, {CASDst}, {CASDesired, CASNew, Addr});
1340 
1341  MIRBuilder.buildExtract({DstLo}, {CASDst}, 0);
1342  MIRBuilder.buildExtract({DstHi}, {CASDst}, 64);
1343  } else {
1344  // The -O0 CMP_SWAP_128 is friendlier to generate code for because LDXP/STXP
1345  // can take arbitrary registers so it just has the normal GPR64 operands the
1346  // rest of AArch64 is expecting.
 // Again, select the ordering-specific variant of the pseudo.
1347  auto Ordering = (*MI.memoperands_begin())->getMergedOrdering();
1348  unsigned Opcode;
1349  switch (Ordering) {
1351  Opcode = AArch64::CMP_SWAP_128_ACQUIRE;
1352  break;
1354  Opcode = AArch64::CMP_SWAP_128_RELEASE;
1355  break;
1358  Opcode = AArch64::CMP_SWAP_128;
1359  break;
1360  default:
1361  Opcode = AArch64::CMP_SWAP_128_MONOTONIC;
1362  break;
1363  }
1364 
1365  auto Scratch = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
1366  CAS = MIRBuilder.buildInstr(Opcode, {DstLo, DstHi, Scratch},
1367  {Addr, DesiredI->getOperand(0),
1368  DesiredI->getOperand(1), NewI->getOperand(0),
1369  NewI->getOperand(1)});
1370  }
1371 
1372  CAS.cloneMemRefs(MI);
1375  *ST->getRegBankInfo());
1376 
1377  MIRBuilder.buildMerge(MI.getOperand(0), {DstLo, DstHi});
1378  MI.eraseFromParent();
1379  return true;
1380 }
1381 
1382 bool AArch64LegalizerInfo::legalizeCTTZ(MachineInstr &MI,
1383  LegalizerHelper &Helper) const {
1384  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
1385  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
1386  LLT Ty = MRI.getType(MI.getOperand(1).getReg());
1387  auto BitReverse = MIRBuilder.buildBitReverse(Ty, MI.getOperand(1));
1388  MIRBuilder.buildCTLZ(MI.getOperand(0).getReg(), BitReverse);
1389  MI.eraseFromParent();
1390  return true;
1391 }
1392 
1393 bool AArch64LegalizerInfo::legalizeMemOps(MachineInstr &MI,
1394  LegalizerHelper &Helper) const {
1395  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
1396 
1397  // Tagged version MOPSMemorySetTagged is legalised in legalizeIntrinsic
1398  if (MI.getOpcode() == TargetOpcode::G_MEMSET) {
1399  // Zext the value operand to 64 bit
1400  auto &Value = MI.getOperand(1);
1401  Register ZExtValueReg =
1402  MIRBuilder.buildAnyExt(LLT::scalar(64), Value).getReg(0);
1403  Value.setReg(ZExtValueReg);
1404  return true;
1405  }
1406 
1407  return false;
1408 }
AArch64LegalizerInfo.h
llvm::Check::Size
@ Size
Definition: FileCheck.h:77
MIPatternMatch.h
llvm::alignTo
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:156
llvm::LegalizeRuleSet::unsupported
LegalizeRuleSet & unsupported()
The instruction is unsupported.
Definition: LegalizerInfo.h:801
llvm::AArch64II::MO_G3
@ MO_G3
MO_G3 - A symbol operand with this flag (granule 3) represents the high 16-bits of a 64-bit address,...
Definition: AArch64BaseInfo.h:690
ValueTypes.h
llvm::AArch64Subtarget::isTargetWindows
bool isTargetWindows() const
Definition: AArch64Subtarget.h:245
llvm::LegalizeRuleSet::widenScalarToNextPow2
LegalizeRuleSet & widenScalarToNextPow2(unsigned TypeIdx, unsigned MinSize=0)
Widen the scalar to the next power of two that is at least MinSize.
Definition: LegalizerInfo.h:877
llvm::AtomicOrdering::AcquireRelease
@ AcquireRelease
matchLDPSTPAddrMode
static void matchLDPSTPAddrMode(Register Root, Register &Base, int &Offset, MachineRegisterInfo &MRI)
Definition: AArch64LegalizerInfo.cpp:1050
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:104
MachineInstr.h
MathExtras.h
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:131
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
llvm::LLT::getScalarSizeInBits
unsigned getScalarSizeInBits() const
Definition: LowLevelTypeImpl.h:224
llvm::MachineRegisterInfo::createVirtualRegister
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Definition: MachineRegisterInfo.cpp:156
AArch64RegisterBankInfo.h
llvm::LegalizeRuleSet::unsupportedIfMemSizeNotPow2
LegalizeRuleSet & unsupportedIfMemSizeNotPow2()
Definition: LegalizerInfo.h:813
llvm::MIPatternMatch::m_Reg
operand_type_match m_Reg()
Definition: MIPatternMatch.h:252
llvm::LegalizerInfo::getActionDefinitionsBuilder
LegalizeRuleSet & getActionDefinitionsBuilder(unsigned Opcode)
Get the action definition builder for the given opcode.
Definition: LegalizerInfo.cpp:288
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:50
llvm::LegalizeRuleSet::minScalarEltSameAsIf
LegalizeRuleSet & minScalarEltSameAsIf(LegalityPredicate Predicate, unsigned TypeIdx, unsigned LargeTypeIdx)
Conditionally widen the scalar or elt to match the size of another.
Definition: LegalizerInfo.h:1032
llvm::LegalizeRuleSet::maxScalarIf
LegalizeRuleSet & maxScalarIf(LegalityPredicate Predicate, unsigned TypeIdx, const LLT Ty)
Conditionally limit the maximum size of the scalar.
Definition: LegalizerInfo.h:974
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1185
llvm::MachineIRBuilder::getMRI
MachineRegisterInfo * getMRI()
Getter for MRI.
Definition: MachineIRBuilder.h:287
llvm::LegalizeRuleSet::customFor
LegalizeRuleSet & customFor(std::initializer_list< LLT > Types)
Definition: LegalizerInfo.h:840
llvm::MachineFunction::getMachineMemOperand
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
Definition: MachineFunction.cpp:454
llvm::LegacyLegalizeActions::Bitcast
@ Bitcast
Perform the operation on a different, but equivalently sized type.
Definition: LegacyLegalizerInfo.h:54
llvm::X86Disassembler::Reg
Reg
All possible values of the reg field in the ModR/M byte.
Definition: X86DisassemblerDecoder.h:462
llvm::LegalizeRuleSet::clampNumElements
LegalizeRuleSet & clampNumElements(unsigned TypeIdx, const LLT MinTy, const LLT MaxTy)
Limit the number of elements for the given vectors to at least MinTy's number of elements and at most...
Definition: LegalizerInfo.h:1137
llvm::MachineRegisterInfo::getTargetRegisterInfo
const TargetRegisterInfo * getTargetRegisterInfo() const
Definition: MachineRegisterInfo.h:151
llvm::LegacyLegalizerInfo::computeTables
void computeTables()
Compute any ancillary tables needed to quickly decide how an operation should be handled.
Definition: LegacyLegalizerInfo.cpp:102
llvm::AArch64Subtarget::isTargetDarwin
bool isTargetDarwin() const
Definition: AArch64Subtarget.h:242
llvm::AtomicOrdering::SequentiallyConsistent
@ SequentiallyConsistent
llvm::LegalizeRuleSet::minScalarOrEltIf
LegalizeRuleSet & minScalarOrEltIf(LegalityPredicate Predicate, unsigned TypeIdx, const LLT Ty)
Ensure the scalar or element is at least as wide as Ty.
Definition: LegalizerInfo.h:934
llvm::AArch64II::MO_PREL
@ MO_PREL
MO_PREL - Indicates that the bits of the symbol operand represented by MO_G0 etc are PC relative.
Definition: AArch64BaseInfo.h:741
llvm::AArch64Subtarget::getInstrInfo
const AArch64InstrInfo * getInstrInfo() const override
Definition: AArch64Subtarget.h:171
llvm::LLT::changeElementCount
LLT changeElementCount(ElementCount EC) const
Return a vector or scalar with the same element type and the new element count.
Definition: LowLevelTypeImpl.h:189
llvm::LLT::vector
static LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
Definition: LowLevelTypeImpl.h:56
llvm::max
Expected< ExpressionValue > max(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:337
llvm::LegalizeRuleSet::scalarizeIf
LegalizeRuleSet & scalarizeIf(LegalityPredicate Predicate, unsigned TypeIdx)
Definition: LegalizerInfo.h:917
MachineIRBuilder.h
llvm::LegalizeRuleSet::minScalarOrElt
LegalizeRuleSet & minScalarOrElt(unsigned TypeIdx, const LLT Ty)
Ensure the scalar or element is at least as wide as Ty.
Definition: LegalizerInfo.h:925
llvm::LegalizeRuleSet::scalarize
LegalizeRuleSet & scalarize(unsigned TypeIdx)
Definition: LegalizerInfo.h:911
llvm::isPowerOf2_32
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:491
llvm::LegalizeRuleSet::lower
LegalizeRuleSet & lower()
The instruction is lowered.
Definition: LegalizerInfo.h:659
llvm::LegalizerHelper
Definition: LegalizerHelper.h:46
llvm::LegalizeMutations::changeTo
LegalizeMutation changeTo(unsigned TypeIdx, LLT Ty)
Select this specific type for the given type index.
Definition: LegalizeMutations.cpp:17
LegalizerInfo.h
llvm::MIPatternMatch::m_GPtrAdd
BinaryOp_match< LHS, RHS, TargetOpcode::G_PTR_ADD, false > m_GPtrAdd(const LHS &L, const RHS &R)
Definition: MIPatternMatch.h:428
llvm::LegalityPredicates::atomicOrderingAtLeastOrStrongerThan
LegalityPredicate atomicOrderingAtLeastOrStrongerThan(unsigned MMOIdx, AtomicOrdering Ordering)
True iff the specified MMO index has at an atomic ordering of at Ordering or stronger.
Definition: LegalityPredicates.cpp:205
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::AArch64Subtarget::getTargetLowering
const AArch64TargetLowering * getTargetLowering() const override
Definition: AArch64Subtarget.h:168
llvm::constrainSelectedInstRegOperands
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition: Utils.cpp:150
llvm::MachineIRBuilder::buildConstant
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
Definition: MachineIRBuilder.cpp:283
llvm::LegalizeRuleSet::legalIf
LegalizeRuleSet & legalIf(LegalityPredicate Predicate)
The instruction is legal if predicate is true.
Definition: LegalizerInfo.h:586
MachineRegisterInfo.h
llvm::AtomicOrdering::Monotonic
@ Monotonic
llvm::LegalizeRuleSet::minScalar
LegalizeRuleSet & minScalar(unsigned TypeIdx, const LLT Ty)
Ensure the scalar is at least as wide as Ty.
Definition: LegalizerInfo.h:945
always
bar al al movzbl eax ret Missed when stored in a memory are stored as single byte objects the value of which is always(false) or 1(true). We are not using this fact
Definition: README.txt:1412
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::LLT::fixed_vector
static LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
Definition: LowLevelTypeImpl.h:74
llvm::MachineIRBuilder::buildBitReverse
MachineInstrBuilder buildBitReverse(const DstOp &Dst, const SrcOp &Src)
Build and insert Dst = G_BITREVERSE Src.
Definition: MachineIRBuilder.h:1917
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:666
llvm::LegalizeRuleSet::customIf
LegalizeRuleSet & customIf(LegalityPredicate Predicate)
Definition: LegalizerInfo.h:834
llvm::LegalityPredicates::typeIs
LegalityPredicate typeIs(unsigned TypeIdx, LLT TypesInit)
True iff the given type index is the specified type.
Definition: LegalityPredicates.cpp:28
llvm::MachineIRBuilder::buildUnmerge
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ...
Definition: MachineIRBuilder.cpp:590
llvm::MachineIRBuilder::buildZExt
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op.
Definition: MachineIRBuilder.cpp:452
llvm::MachineIRBuilder::buildLoad
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
Definition: MachineIRBuilder.h:888
llvm::LegalityPredicates::smallerThan
LegalityPredicate smallerThan(unsigned TypeIdx0, unsigned TypeIdx1)
True iff the first type index has a smaller total bit size than second type index.
Definition: LegalityPredicates.cpp:117
llvm::AArch64Subtarget::isTargetILP32
bool isTargetILP32() const
Definition: AArch64Subtarget.h:253
Intrinsics.h
llvm::MachineInstr::getOperand
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:501
llvm::Log2
unsigned Log2(Align A)
Returns the log2 of the alignment.
Definition: Alignment.h:209
llvm::LLT::getSizeInBits
TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelTypeImpl.h:152
llvm::AArch64LegalizerInfo::legalizeIntrinsic
bool legalizeIntrinsic(LegalizerHelper &Helper, MachineInstr &MI) const override
Definition: AArch64LegalizerInfo.cpp:982
llvm::LegalizeRuleSet::lowerIfMemSizeNotPow2
LegalizeRuleSet & lowerIfMemSizeNotPow2()
Lower a memory operation if the memory size, rounded to bytes, is not a power of 2.
Definition: LegalizerInfo.h:821
Utils.h
llvm::MachineIRBuilder::buildCTLZ
MachineInstrBuilder buildCTLZ(const DstOp &Dst, const SrcOp &Src0)
Build and insert Res = G_CTLZ Op0, Src0.
Definition: MachineIRBuilder.h:1599
llvm::LegalizeRuleSet::fewerElementsIf
LegalizeRuleSet & fewerElementsIf(LegalityPredicate Predicate, LegalizeMutation Mutation)
Remove elements to reach the type selected by the mutation if the predicate is true.
Definition: LegalizerInfo.h:792
TargetOpcodes.h
llvm::MachineIRBuilder::buildConcatVectors
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
Definition: MachineIRBuilder.cpp:685
llvm::CodeModel::Small
@ Small
Definition: CodeGen.h:28
Concat
static constexpr int Concat[]
Definition: X86InterleavedAccess.cpp:239
llvm::MachineIRBuilder::getMF
MachineFunction & getMF()
Getter for the function we currently build.
Definition: MachineIRBuilder.h:269
llvm::AtomicOrdering::Acquire
@ Acquire
llvm::AArch64LegalizerInfo::AArch64LegalizerInfo
AArch64LegalizerInfo(const AArch64Subtarget &ST)
Definition: AArch64LegalizerInfo.cpp:41
llvm::GISelChangeObserver::changingInstr
virtual void changingInstr(MachineInstr &MI)=0
This instruction is about to be mutated in some way.
llvm::LegalizeRuleSet::clampMaxNumElements
LegalizeRuleSet & clampMaxNumElements(unsigned TypeIdx, const LLT EltTy, unsigned MaxElements)
Limit the number of elements in EltTy vectors to at most MaxElements.
Definition: LegalizerInfo.h:1113
Align
uint64_t Align
Definition: ELFObjHandler.cpp:81
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::LegalizeRuleSet::maxScalarEltSameAsIf
LegalizeRuleSet & maxScalarEltSameAsIf(LegalityPredicate Predicate, unsigned TypeIdx, unsigned SmallTypeIdx)
Conditionally narrow the scalar or elt to match the size of another.
Definition: LegalizerInfo.h:1048
Type.h
llvm::MachineInstrBuilder::getReg
Register getReg(unsigned Idx) const
Get the register for the operand index.
Definition: MachineInstrBuilder.h:94
llvm::MachineInstrBuilder::cloneMemRefs
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
Definition: MachineInstrBuilder.h:213
llvm::AArch64LegalizerInfo::legalizeCustom
bool legalizeCustom(LegalizerHelper &Helper, MachineInstr &MI) const override
Called for instructions with the Custom LegalizationAction.
Definition: AArch64LegalizerInfo.cpp:818
llvm::MachineIRBuilder::buildCTPOP
MachineInstrBuilder buildCTPOP(const DstOp &Dst, const SrcOp &Src0)
Build and insert Res = G_CTPOP Op0, Src0.
Definition: MachineIRBuilder.h:1594
llvm::LLT::pointer
static LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
Definition: LowLevelTypeImpl.h:49
llvm::LLT::getAddressSpace
unsigned getAddressSpace() const
Definition: LowLevelTypeImpl.h:238
llvm::GISelChangeObserver::changedInstr
virtual void changedInstr(MachineInstr &MI)=0
This instruction was mutated in some way.
llvm::MachineIRBuilder::buildMaskLowPtrBits
MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0, uint32_t NumBits)
Build and insert Res = G_PTRMASK Op0, G_CONSTANT (1 << NumBits) - 1.
Definition: MachineIRBuilder.cpp:206
llvm::SPIRV::Decoration::Alignment
@ Alignment
llvm::LegalizeMutations::scalarize
LegalizeMutation scalarize(unsigned TypeIdx)
Break up the vector type for the given type index into the element type.
Definition: LegalizeMutations.cpp:108
llvm::LegalizeRuleSet::lowerIf
LegalizeRuleSet & lowerIf(LegalityPredicate Predicate)
The instruction is lowered if predicate is true.
Definition: LegalizerInfo.h:668
llvm::LLT::divide
LLT divide(int Factor) const
Return a type that is Factor times smaller.
Definition: LowLevelTypeImpl.h:196
llvm::MachineIRBuilder
Helper class to build MachineInstr.
Definition: MachineIRBuilder.h:219
llvm::LegalizeRuleSet::legalFor
LegalizeRuleSet & legalFor(std::initializer_list< LLT > Types)
The instruction is legal when type index 0 is any type in the given list.
Definition: LegalizerInfo.h:593
llvm::AMDGPU::Hwreg::Offset
Offset
Definition: SIDefines.h:416
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:66
llvm::MachineInstrBuilder
Definition: MachineInstrBuilder.h:69
uint64_t
llvm::ARM_MB::ST
@ ST
Definition: ARMBaseInfo.h:73
Addr
uint64_t Addr
Definition: ELFObjHandler.cpp:78
llvm::MachineIRBuilder::buildBitcast
MachineInstrBuilder buildBitcast(const DstOp &Dst, const SrcOp &Src)
Build and insert Dst = G_BITCAST Src.
Definition: MachineIRBuilder.h:668
llvm::LegalityPredicates::all
Predicate all(Predicate P0, Predicate P1)
True iff P0 and P1 are true.
Definition: LegalizerInfo.h:228
llvm::AArch64II::MO_NC
@ MO_NC
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow.
Definition: AArch64BaseInfo.h:722
llvm::MachinePointerInfo
This class contains a discriminated union of information about pointers in memory operands,...
Definition: MachineMemOperand.h:39
llvm::AArch64II::MO_PAGEOFF
@ MO_PAGEOFF
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page.
Definition: AArch64BaseInfo.h:686
llvm::MachineIRBuilder::buildPtrAdd
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_PTR_ADD Op0, Op1.
Definition: MachineIRBuilder.cpp:180
llvm::AtomicOrdering::Unordered
@ Unordered
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::LLT::isVector
bool isVector() const
Definition: LowLevelTypeImpl.h:122
llvm::LegalizeRuleSet::clampScalar
LegalizeRuleSet & clampScalar(unsigned TypeIdx, const LLT MinTy, const LLT MaxTy)
Limit the range of scalar sizes to MinTy and MaxTy.
Definition: LegalizerInfo.h:990
llvm::LLT::getNumElements
uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelTypeImpl.h:126
llvm::LegalizeRuleSet::legalForCartesianProduct
LegalizeRuleSet & legalForCartesianProduct(std::initializer_list< LLT > Types)
The instruction is legal when type indexes 0 and 1 are both in the given list.
Definition: LegalizerInfo.h:625
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::TargetMachine
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
llvm::LLT::isPointer
bool isPointer() const
Definition: LowLevelTypeImpl.h:120
llvm::LegalityPredicates::typeInSet
LegalityPredicate typeInSet(unsigned TypeIdx, std::initializer_list< LLT > TypesInit)
True iff the given type index is one of the specified types.
Definition: LegalityPredicates.cpp:34
llvm::LegalizeRuleSet::lowerIfMemSizeNotByteSizePow2
LegalizeRuleSet & lowerIfMemSizeNotByteSizePow2()
Lower a memory operation if the memory access size is not a round power of 2 byte size.
Definition: LegalizerInfo.h:829
llvm::MachineRegisterInfo::createGenericVirtualRegister
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
Definition: MachineRegisterInfo.cpp:186
llvm::MachineInstrBuilder::addUse
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
Definition: MachineInstrBuilder.h:123
llvm::MachineOperand::getReg
Register getReg() const
getReg - Returns the register number.
Definition: MachineOperand.h:359
llvm::LLT::isScalar
bool isScalar() const
Definition: LowLevelTypeImpl.h:118
llvm::MachineFunction
Definition: MachineFunction.h:257
llvm::LegalityQuery::Opcode
unsigned Opcode
Definition: LegalizerInfo.h:109
llvm::MachineIRBuilder::buildInstr
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
Definition: MachineIRBuilder.h:374
llvm::LegalityQuery
The LegalityQuery object bundles together all the information that's needed to decide whether a given...
Definition: LegalizerInfo.h:108
llvm::LegalizeRuleSet::customForCartesianProduct
LegalizeRuleSet & customForCartesianProduct(std::initializer_list< LLT > Types)
Definition: LegalizerInfo.h:850
llvm::getIConstantVRegValWithLookThrough
Optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
Definition: Utils.cpp:407
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
if
if(llvm_vc STREQUAL "") set(fake_version_inc "$
Definition: CMakeLists.txt:14
llvm::LegalizerHelper::Observer
GISelChangeObserver & Observer
To keep track of changes made by the LegalizerHelper.
Definition: LegalizerHelper.h:53
llvm::MachineIRBuilder::buildExtract
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ...
Definition: MachineIRBuilder.cpp:544
llvm::GISelChangeObserver
Abstract class that contains various methods for clients to notify about changes.
Definition: GISelChangeObserver.h:29
llvm::LegalizeMutations::widenScalarOrEltToNextPow2
LegalizeMutation widenScalarOrEltToNextPow2(unsigned TypeIdx, unsigned Min=0)
Widen the scalar type or vector element type for the given type index to the next power of 2.
Definition: LegalizeMutations.cpp:77
llvm::MachineMemOperand::MOLoad
@ MOLoad
The memory access reads data.
Definition: MachineMemOperand.h:134
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::MachineIRBuilder::buildAnyExt
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
Definition: MachineIRBuilder.cpp:442
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
llvm::LegalityPredicates::isScalar
LegalityPredicate isScalar(unsigned TypeIdx)
True iff the specified type index is a scalar.
Definition: LegalityPredicates.cpp:67
llvm::AtomicOrdering::Release
@ Release
llvm::LegalizerInfo::getLegacyLegalizerInfo
const LegacyLegalizerInfo & getLegacyLegalizerInfo() const
Definition: LegalizerInfo.h:1184
llvm::TargetLoweringBase::getTargetMachine
const TargetMachine & getTargetMachine() const
Definition: TargetLowering.h:347
llvm::LegalizeRuleSet::narrowScalarIf
LegalizeRuleSet & narrowScalarIf(LegalityPredicate Predicate, LegalizeMutation Mutation)
Narrow the scalar to the one selected by the mutation if the predicate is true.
Definition: LegalizerInfo.h:766
llvm::MachineIRBuilder::buildTrunc
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
Definition: MachineIRBuilder.cpp:733
llvm::AArch64Subtarget::getRegBankInfo
const RegisterBankInfo * getRegBankInfo() const override
Definition: AArch64Subtarget.cpp:308
llvm::MIPatternMatch::m_ICst
ConstantMatch< APInt > m_ICst(APInt &Cst)
Definition: MIPatternMatch.h:90
llvm::AArch64Subtarget::ClassifyGlobalReference
unsigned ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const
ClassifyGlobalReference - Find the target operand flags that describe how a global value should be re...
Definition: AArch64Subtarget.cpp:315
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:348
llvm::LegalityQuery::MMODescrs
ArrayRef< MemDesc > MMODescrs
Operations which require memory can use this to place requirements on the memory type for each MMO.
Definition: LegalizerInfo.h:128
llvm::LegalizeRuleSet::unsupportedIf
LegalizeRuleSet & unsupportedIf(LegalityPredicate Predicate)
Definition: LegalizerInfo.h:805
LegalizerHelper.h
llvm::AArch64ISD::ADRP
@ ADRP
Definition: AArch64ISelLowering.h:63
llvm::LegalizeRuleSet::minScalarSameAs
LegalizeRuleSet & minScalarSameAs(unsigned TypeIdx, unsigned LargeTypeIdx)
Widen the scalar to match the size of another.
Definition: LegalizerInfo.h:1003
llvm::MachineMemOperand::MOStore
@ MOStore
The memory access writes data.
Definition: MachineMemOperand.h:136
llvm::LegalizerInfo::verify
void verify(const MCInstrInfo &MII) const
Perform simple self-diagnostic and assert if there is anything obviously wrong with the actions set u...
Definition: LegalizerInfo.cpp:376
llvm::LegalityPredicates::isVector
LegalityPredicate isVector(unsigned TypeIdx)
True iff the specified type index is a vector.
Definition: LegalityPredicates.cpp:73
AArch64Subtarget.h
llvm::MachineIRBuilder::buildIntrinsic
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects)
Build and insert either a G_INTRINSIC (if HasSideEffects is false) or G_INTRINSIC_W_SIDE_EFFECTS inst...
Definition: MachineIRBuilder.cpp:709
s1
int s1
Definition: README.txt:182
llvm::MachineRegisterInfo::getType
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
Definition: MachineRegisterInfo.h:740
llvm::LegalityQuery::Types
ArrayRef< LLT > Types
Definition: LegalizerInfo.h:110
llvm::LegalizeRuleSet::moreElementsToNextPow2
LegalizeRuleSet & moreElementsToNextPow2(unsigned TypeIdx)
Add more elements to the vector to reach the next power of two.
Definition: LegalizerInfo.h:1067
llvm::MachineIRBuilder::buildSExt
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
Definition: MachineIRBuilder.cpp:447
List
const NodeList & List
Definition: RDFGraph.cpp:199
llvm::LLT::getElementCount
ElementCount getElementCount() const
Definition: LowLevelTypeImpl.h:143
llvm::MachineOperand::setReg
void setReg(Register Reg)
Change the register this operand corresponds to.
Definition: MachineOperand.cpp:53
llvm::LegalizerHelper::MIRBuilder
MachineIRBuilder & MIRBuilder
Expose MIRBuilder so clients can set their own RecordInsertInstruction functions.
Definition: LegalizerHelper.h:50
llvm::MachineIRBuilder::buildMerge
MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
Definition: MachineIRBuilder.cpp:573
llvm::LegalizeRuleSet::custom
LegalizeRuleSet & custom()
Unconditionally custom lower.
Definition: LegalizerInfo.h:871
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
llvm::LegalizeMutations::moreElementsToNextPow2
LegalizeMutation moreElementsToNextPow2(unsigned TypeIdx, unsigned Min=0)
Add more elements to the type for the given type index to the next power of.
Definition: LegalizeMutations.cpp:97
extractParts
static void extractParts(Register Reg, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder, LLT Ty, int NumParts, SmallVectorImpl< Register > &VRegs)
Definition: AArch64LegalizerInfo.cpp:878
DerivedTypes.h
TM
const char LLVMTargetMachineRef TM
Definition: PassBuilderBindings.cpp:47
llvm::LegalizeRuleSet::scalarSameSizeAs
LegalizeRuleSet & scalarSameSizeAs(unsigned TypeIdx, unsigned SameSizeIdx)
Change the type TypeIdx to have the same scalar size as type SameSizeIdx.
Definition: LegalizerInfo.h:1026
llvm::MIPatternMatch::mi_match
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
Definition: MIPatternMatch.h:25
llvm::AArch64II::MO_TAGGED
@ MO_TAGGED
MO_TAGGED - With MO_PAGE, indicates that the page includes a memory tag in bits 56-63.
Definition: AArch64BaseInfo.h:749
llvm::MachineIRBuilder::buildStore
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
Definition: MachineIRBuilder.cpp:415
llvm::LLT::getElementType
LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelTypeImpl.h:248
llvm::AArch64Subtarget
Definition: AArch64Subtarget.h:38
llvm::LegalizeRuleSet::libcallFor
LegalizeRuleSet & libcallFor(std::initializer_list< LLT > Types)
Definition: LegalizerInfo.h:738
llvm::LinearPolySize::divideCoefficientBy
LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition: TypeSize.h:360
llvm::LLT::scalar
static LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelTypeImpl.h:42
llvm::LLT::changeElementSize
LLT changeElementSize(unsigned NewEltSize) const
If this type is a vector, return a vector with the same number of elements but the new element size.
Definition: LowLevelTypeImpl.h:180
llvm::AArch64II::MO_GOT
@ MO_GOT
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
Definition: AArch64BaseInfo.h:717
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
libcall
Common register allocation spilling lr str ldr sxth r3 ldr mla r4 can lr mov lr str ldr sxth r3 mla r4 and then merge mul and lr str ldr sxth r3 mla r4 It also increase the likelihood the store may become dead bb27 Successors according to LLVM ID Predecessors according to mbb< bb27, 0x8b0a7c0 > Note ADDri is not a two address instruction its result reg1037 is an operand of the PHI node in bb76 and its operand reg1039 is the result of the PHI node We should treat it as a two address code and make sure the ADDri is scheduled after any node that reads reg1039 Use info(i.e. register scavenger) to assign it a free register to allow reuse the collector could move the objects and invalidate the derived pointer This is bad enough in the first but safe points can crop up unpredictably **array_addr i32 n y store obj obj **nth_el If the i64 division is lowered to a libcall
Definition: README.txt:127
llvm::LegalizeRuleSet::legalForTypesWithMemDesc
LegalizeRuleSet & legalForTypesWithMemDesc(std::initializer_list< LegalityPredicates::TypePairAndMemDesc > TypesAndMemDesc)
The instruction is legal when type indexes 0 and 1 along with the memory size and minimum alignment i...
Definition: LegalizerInfo.h:616
llvm::AtomicOrdering::NotAtomic
@ NotAtomic
llvm::ISD::CTPOP
@ CTPOP
Definition: ISDOpcodes.h:703
llvm::MachineRegisterInfo::setRegClass
void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
Definition: MachineRegisterInfo.cpp:56
llvm::sampleprof::Base
@ Base
Definition: Discriminator.h:58
llvm::AArch64II::MO_PAGE
@ MO_PAGE
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
Definition: AArch64BaseInfo.h:681
llvm::LLT
Definition: LowLevelTypeImpl.h:39