LLVM  9.0.0svn
AArch64LegalizerInfo.cpp
Go to the documentation of this file.
1 //===- AArch64LegalizerInfo.cpp ----------------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the MachineLegalizer class for
10 /// AArch64.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AArch64LegalizerInfo.h"
15 #include "AArch64Subtarget.h"
21 #include "llvm/IR/DerivedTypes.h"
22 #include "llvm/IR/Type.h"
23 
24 using namespace llvm;
25 using namespace LegalizeActions;
26 using namespace LegalizeMutations;
27 using namespace LegalityPredicates;
28 
// Constructor body: builds the AArch64 legalization rule table. Each
// getActionDefinitionsBuilder() call declares, per generic opcode, which
// type combinations are directly selectable and how every other
// combination is mutated (widened, clamped, scalarized, lowered, or
// turned into a libcall).
30  using namespace TargetOpcode;
// Shorthand LLTs used throughout the rules: p0 = 64-bit pointer in
// address space 0, sN = N-bit scalar, vMsN = M x N-bit vector.
31  const LLT p0 = LLT::pointer(0, 64);
32  const LLT s1 = LLT::scalar(1);
33  const LLT s8 = LLT::scalar(8);
34  const LLT s16 = LLT::scalar(16);
35  const LLT s32 = LLT::scalar(32);
36  const LLT s64 = LLT::scalar(64);
37  const LLT s128 = LLT::scalar(128);
38  const LLT s256 = LLT::scalar(256);
39  const LLT s512 = LLT::scalar(512);
40  const LLT v16s8 = LLT::vector(16, 8);
41  const LLT v8s8 = LLT::vector(8, 8);
42  const LLT v4s8 = LLT::vector(4, 8);
43  const LLT v8s16 = LLT::vector(8, 16);
44  const LLT v4s16 = LLT::vector(4, 16);
45  const LLT v2s16 = LLT::vector(2, 16);
46  const LLT v2s32 = LLT::vector(2, 32);
47  const LLT v4s32 = LLT::vector(4, 32);
48  const LLT v2s64 = LLT::vector(2, 64);
49 
// G_IMPLICIT_DEF: any vector other than v2s64 is broken down -- to v2s64
// when its elements are s64, otherwise all the way to scalar elements.
50  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
51  .legalFor({p0, s1, s8, s16, s32, s64, v2s64})
52  .clampScalar(0, s1, s64)
53  .widenScalarToNextPow2(0, 8)
54  .fewerElementsIf(
55  [=](const LegalityQuery &Query) {
56  return Query.Types[0].isVector() &&
57  (Query.Types[0].getElementType() != s64 ||
58  Query.Types[0].getNumElements() != 2);
59  },
60  [=](const LegalityQuery &Query) {
61  LLT EltTy = Query.Types[0].getElementType();
62  if (EltTy == s64)
63  return std::make_pair(0, LLT::vector(2, 64));
64  return std::make_pair(0, EltTy);
65  });
66 
67  getActionDefinitionsBuilder(G_PHI)
68  .legalFor({p0, s16, s32, s64})
69  .clampScalar(0, s16, s64)
70  .widenScalarToNextPow2(0);
71 
72  getActionDefinitionsBuilder(G_BSWAP)
73  .legalFor({s32, s64})
74  .clampScalar(0, s16, s64)
75  .widenScalarToNextPow2(0);
76 
// Basic integer/bitwise arithmetic: scalar s32/s64 plus the 64- and
// 128-bit vector forms.
77  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
78  .legalFor({s32, s64, v2s32, v4s32, v2s64})
79  .clampScalar(0, s32, s64)
80  .widenScalarToNextPow2(0)
81  .clampNumElements(0, v2s32, v4s32)
82  .clampNumElements(0, v2s64, v2s64)
83  .moreElementsToNextPow2(0);
84 
// Shift amount (type index 1) is clamped first, then forced to at least
// the width of the shifted value via minScalarSameAs.
85  getActionDefinitionsBuilder(G_SHL)
86  .legalFor({{s32, s32}, {s64, s64},
87  {v2s32, v2s32}, {v4s32, v4s32}, {v2s64, v2s64}})
88  .clampScalar(1, s32, s64)
89  .clampScalar(0, s32, s64)
90  .widenScalarToNextPow2(0)
91  .clampNumElements(0, v2s32, v4s32)
92  .clampNumElements(0, v2s64, v2s64)
93  .moreElementsToNextPow2(0)
94  .minScalarSameAs(1, 0);
95 
// Pointer arithmetic always uses a 64-bit offset.
96  getActionDefinitionsBuilder(G_GEP)
97  .legalFor({{p0, s64}})
98  .clampScalar(1, s64, s64);
99 
100  getActionDefinitionsBuilder(G_PTR_MASK).legalFor({p0});
101 
102  getActionDefinitionsBuilder({G_SDIV, G_UDIV})
103  .legalFor({s32, s64})
104  .clampScalar(0, s32, s64)
105  .widenScalarToNextPow2(0);
106 
107  getActionDefinitionsBuilder({G_LSHR, G_ASHR})
108  .legalFor({{s32, s32}, {s64, s64}})
109  .clampScalar(1, s32, s64)
110  .clampScalar(0, s32, s64)
111  .minScalarSameAs(1, 0);
112 
// AArch64 has no integer remainder instruction; expand via div/mul/sub.
113  getActionDefinitionsBuilder({G_SREM, G_UREM})
114  .lowerFor({s1, s8, s16, s32, s64});
115 
116  getActionDefinitionsBuilder({G_SMULO, G_UMULO})
117  .lowerFor({{s64, s1}});
118 
119  getActionDefinitionsBuilder({G_SMULH, G_UMULH}).legalFor({s32, s64});
120 
121  getActionDefinitionsBuilder({G_UADDE, G_USUBE, G_SADDO, G_SSUBO})
122  .legalFor({{s32, s1}, {s64, s1}});
123 
// Floating-point arithmetic maps directly to scalar/NEON instructions.
124  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMA, G_FMUL, G_FDIV, G_FNEG})
125  .legalFor({s32, s64, v2s64, v4s32, v2s32});
126 
127  getActionDefinitionsBuilder({G_FREM, G_FPOW}).libcallFor({s32, s64});
128 
129  getActionDefinitionsBuilder({G_FCEIL, G_FABS, G_FSQRT, G_FFLOOR})
130  // If we don't have full FP16 support, then scalarize the elements of
131  // vectors containing fp16 types.
132  .fewerElementsIf(
133  [=, &ST](const LegalityQuery &Query) {
134  const auto &Ty = Query.Types[0];
135  return Ty.isVector() && Ty.getElementType() == s16 &&
136  !ST.hasFullFP16();
137  },
138  [=](const LegalityQuery &Query) { return std::make_pair(0, s16); })
139  // If we don't have full FP16 support, then widen s16 to s32 if we
140  // encounter it.
141  .widenScalarIf(
142  [=, &ST](const LegalityQuery &Query) {
143  return Query.Types[0] == s16 && !ST.hasFullFP16();
144  },
145  [=](const LegalityQuery &Query) { return std::make_pair(0, s32); })
146  .legalFor({s16, s32, s64, v2s32, v4s32, v2s64, v2s16, v4s16, v8s16});
147 
148  getActionDefinitionsBuilder(
149  {G_FCOS, G_FSIN, G_FLOG10, G_FLOG, G_FLOG2, G_FEXP})
150  // We need a call for these, so we always need to scalarize.
151  .scalarize(0)
152  // Regardless of FP16 support, widen 16-bit elements to 32-bits.
153  .minScalar(0, s32)
154  .libcallFor({s32, s64, v2s32, v4s32, v2s64});
155 
// G_INSERT: type 0 is the container, type 1 the inserted value; the
// container must be strictly wider than the value.
156  getActionDefinitionsBuilder(G_INSERT)
157  .unsupportedIf([=](const LegalityQuery &Query) {
158  return Query.Types[0].getSizeInBits() <= Query.Types[1].getSizeInBits();
159  })
160  .legalIf([=](const LegalityQuery &Query) {
161  const LLT &Ty0 = Query.Types[0];
162  const LLT &Ty1 = Query.Types[1];
163  if (Ty0 != s32 && Ty0 != s64 && Ty0 != p0)
164  return false;
165  return isPowerOf2_32(Ty1.getSizeInBits()) &&
166  (Ty1.getSizeInBits() == 1 || Ty1.getSizeInBits() >= 8);
167  })
168  .clampScalar(0, s32, s64)
169  .widenScalarToNextPow2(0)
170  .maxScalarIf(typeInSet(0, {s32}), 1, s16)
171  .maxScalarIf(typeInSet(0, {s64}), 1, s32)
172  .widenScalarToNextPow2(1);
173 
// G_EXTRACT mirrors G_INSERT with the roles of type indices 0/1 swapped.
174  getActionDefinitionsBuilder(G_EXTRACT)
175  .unsupportedIf([=](const LegalityQuery &Query) {
176  return Query.Types[0].getSizeInBits() >= Query.Types[1].getSizeInBits();
177  })
178  .legalIf([=](const LegalityQuery &Query) {
179  const LLT &Ty0 = Query.Types[0];
180  const LLT &Ty1 = Query.Types[1];
181  if (Ty1 != s32 && Ty1 != s64)
182  return false;
183  if (Ty1 == p0)
184  return true;
185  return isPowerOf2_32(Ty0.getSizeInBits()) &&
186  (Ty0.getSizeInBits() == 1 || Ty0.getSizeInBits() >= 8);
187  })
188  .clampScalar(1, s32, s64)
189  .widenScalarToNextPow2(1)
190  .maxScalarIf(typeInSet(1, {s32}), 0, s16)
191  .maxScalarIf(typeInSet(1, {s64}), 0, s32)
192  .widenScalarToNextPow2(0);
193 
// Mem-desc tuples below are {result type, pointer type, mem size in
// bits, alignment in bits} as accepted by legalForTypesWithMemDesc.
194  getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
195  .legalForTypesWithMemDesc({{s32, p0, 8, 8},
196  {s32, p0, 16, 8},
197  {s32, p0, 32, 8},
198  {s64, p0, 64, 8},
199  {p0, p0, 64, 8},
200  {v2s32, p0, 64, 8}})
201  .clampScalar(0, s32, s64)
202  .widenScalarToNextPow2(0)
203  // TODO: We could support sum-of-pow2's but the lowering code doesn't know
204  // how to do that yet.
205  .unsupportedIfMemSizeNotPow2()
206  // Lower anything left over into G_*EXT and G_LOAD
207  .lower();
208 
209  getActionDefinitionsBuilder(G_LOAD)
210  .legalForTypesWithMemDesc({{s8, p0, 8, 8},
211  {s16, p0, 16, 8},
212  {s32, p0, 32, 8},
213  {s64, p0, 64, 8},
214  {p0, p0, 64, 8},
215  {v2s32, p0, 64, 8}})
216  // These extends are also legal
217  .legalForTypesWithMemDesc({{s32, p0, 8, 8},
218  {s32, p0, 16, 8}})
219  .clampScalar(0, s8, s64)
220  .widenScalarToNextPow2(0)
221  // TODO: We could support sum-of-pow2's but the lowering code doesn't know
222  // how to do that yet.
223  .unsupportedIfMemSizeNotPow2()
224  // Lower any any-extending loads left into G_ANYEXT and G_LOAD
225  .lowerIf([=](const LegalityQuery &Query) {
226  return Query.Types[0].getSizeInBits() != Query.MMODescrs[0].SizeInBits;
227  })
228  .clampMaxNumElements(0, s32, 2)
229  .clampMaxNumElements(0, s64, 1);
230 
231  getActionDefinitionsBuilder(G_STORE)
232  .legalForTypesWithMemDesc({{s8, p0, 8, 8},
233  {s16, p0, 16, 8},
234  {s32, p0, 32, 8},
235  {s64, p0, 64, 8},
236  {p0, p0, 64, 8},
237  {v2s32, p0, 64, 8}})
238  .clampScalar(0, s8, s64)
239  .widenScalarToNextPow2(0)
240  // TODO: We could support sum-of-pow2's but the lowering code doesn't know
241  // how to do that yet.
242  .unsupportedIfMemSizeNotPow2()
// Truncating scalar stores are lowered (into G_TRUNC + G_STORE).
243  .lowerIf([=](const LegalityQuery &Query) {
244  return Query.Types[0].isScalar() &&
245  Query.Types[0].getSizeInBits() != Query.MMODescrs[0].SizeInBits;
246  })
247  .clampMaxNumElements(0, s32, 2)
248  .clampMaxNumElements(0, s64, 1);
249 
250  // Constants
251  getActionDefinitionsBuilder(G_CONSTANT)
252  .legalFor({p0, s32, s64})
253  .clampScalar(0, s32, s64)
254  .widenScalarToNextPow2(0);
255  getActionDefinitionsBuilder(G_FCONSTANT)
256  .legalFor({s32, s64})
257  .clampScalar(0, s32, s64);
258 
// Comparison results (type index 0) are pinned to s32; only the compared
// operand type (index 1) may vary.
259  getActionDefinitionsBuilder(G_ICMP)
260  .legalFor({{s32, s32}, {s32, s64}, {s32, p0}})
261  .clampScalar(0, s32, s32)
262  .clampScalar(1, s32, s64)
263  .widenScalarToNextPow2(1);
264 
265  getActionDefinitionsBuilder(G_FCMP)
266  .legalFor({{s32, s32}, {s32, s64}})
267  .clampScalar(0, s32, s32)
268  .clampScalar(1, s32, s64)
269  .widenScalarToNextPow2(1);
270 
271  // Extensions
272  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
273  .legalForCartesianProduct({s8, s16, s32, s64}, {s1, s8, s16, s32});
274 
275  // FP conversions
276  getActionDefinitionsBuilder(G_FPTRUNC).legalFor(
277  {{s16, s32}, {s16, s64}, {s32, s64}, {v4s16, v4s32}, {v2s32, v2s64}});
278  getActionDefinitionsBuilder(G_FPEXT).legalFor(
279  {{s32, s16}, {s64, s16}, {s64, s32}, {v4s32, v4s16}, {v2s64, v2s32}});
280 
281  // Conversions
282  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
283  .legalForCartesianProduct({s32, s64, v2s64, v4s32, v2s32})
284  .clampScalar(0, s32, s64)
285  .widenScalarToNextPow2(0)
286  .clampScalar(1, s32, s64)
287  .widenScalarToNextPow2(1);
288 
289  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
290  .legalForCartesianProduct({s32, s64, v2s64, v4s32, v2s32})
291  .clampScalar(1, s32, s64)
292  .widenScalarToNextPow2(1)
293  .clampScalar(0, s32, s64)
294  .widenScalarToNextPow2(0);
295 
296  // Control-flow
297  getActionDefinitionsBuilder(G_BRCOND).legalFor({s1, s8, s16, s32});
298  getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});
299 
300  // Select
301  getActionDefinitionsBuilder(G_SELECT)
302  .legalFor({{s32, s1}, {s64, s1}, {p0, s1}})
303  .clampScalar(0, s32, s64)
304  .widenScalarToNextPow2(0);
305 
306  // Pointer-handling
307  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});
308  getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0});
309 
310  getActionDefinitionsBuilder(G_PTRTOINT)
311  .legalForCartesianProduct({s1, s8, s16, s32, s64}, {p0})
312  .maxScalar(0, s64)
313  .widenScalarToNextPow2(0, /*Min*/ 8);
314 
// G_INTTOPTR must not change the bit width; only s64 -> p0 is legal.
315  getActionDefinitionsBuilder(G_INTTOPTR)
316  .unsupportedIf([&](const LegalityQuery &Query) {
317  return Query.Types[0].getSizeInBits() != Query.Types[1].getSizeInBits();
318  })
319  .legalFor({{p0, s64}});
320 
321  // Casts for 32 and 64-bit width type are just copies.
322  // Same for 128-bit width type, except they are on the FPR bank.
323  getActionDefinitionsBuilder(G_BITCAST)
324  // FIXME: This is wrong since G_BITCAST is not allowed to change the
325  // number of bits but it's what the previous code described and fixing
326  // it breaks tests.
327  .legalForCartesianProduct({s1, s8, s16, s32, s64, s128, v16s8, v8s8, v4s8,
328  v8s16, v4s16, v2s16, v4s32, v2s32, v2s64});
329 
330  getActionDefinitionsBuilder(G_VASTART).legalFor({p0});
331 
332  // va_list must be a pointer, but most sized types are pretty easy to handle
333  // as the destination.
334  getActionDefinitionsBuilder(G_VAARG)
335  .customForCartesianProduct({s8, s16, s32, s64, p0}, {p0})
336  .clampScalar(0, s8, s64)
337  .widenScalarToNextPow2(0, /*Min*/ 8);
338 
// Atomic rules are only registered when the subtarget has LSE
// (Large System Extension) atomics.
339  if (ST.hasLSE()) {
340  getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG_WITH_SUCCESS)
341  .lowerIf(all(
342  typeInSet(0, {s8, s16, s32, s64}), typeIs(1, s1), typeIs(2, p0),
344 
345  getActionDefinitionsBuilder(
346  {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB, G_ATOMICRMW_AND,
347  G_ATOMICRMW_OR, G_ATOMICRMW_XOR, G_ATOMICRMW_MIN, G_ATOMICRMW_MAX,
348  G_ATOMICRMW_UMIN, G_ATOMICRMW_UMAX, G_ATOMIC_CMPXCHG})
349  .legalIf(all(
350  typeInSet(0, {s8, s16, s32, s64}), typeIs(1, p0),
352  }
353 
354  getActionDefinitionsBuilder(G_BLOCK_ADDR).legalFor({p0});
355 
356  // Merge/Unmerge
// For G_MERGE_VALUES the wide type is operand 0 and the narrow pieces are
// operand 1; G_UNMERGE_VALUES is the inverse, so the indices swap.
357  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
358  unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
359  unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
360 
// Rejects vectors whose element size is outside [8, 64] bits or not a
// power of two; scalars always pass.
361  auto notValidElt = [](const LegalityQuery &Query, unsigned TypeIdx) {
362  const LLT &Ty = Query.Types[TypeIdx];
363  if (Ty.isVector()) {
364  const LLT &EltTy = Ty.getElementType();
365  if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
366  return true;
367  if (!isPowerOf2_32(EltTy.getSizeInBits()))
368  return true;
369  }
370  return false;
371  };
372 
373  // FIXME: This rule is horrible, but specifies the same as what we had
374  // before with the particularly strange definitions removed (e.g.
375  // s8 = G_MERGE_VALUES s32, s32).
376  // Part of the complexity comes from these ops being extremely flexible. For
377  // example, you can build/decompose vectors with it, concatenate vectors,
378  // etc. and in addition to this you can also bitcast with it at the same
379  // time. We've been considering breaking it up into multiple ops to make it
380  // more manageable throughout the backend.
381  getActionDefinitionsBuilder(Op)
382  // Break up vectors with weird elements into scalars
383  .fewerElementsIf(
384  [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
385  scalarize(0))
386  .fewerElementsIf(
387  [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
388  scalarize(1))
389  // Clamp the big scalar to s8-s512 and make it either a power of 2, 192,
390  // or 384.
391  .clampScalar(BigTyIdx, s8, s512)
392  .widenScalarIf(
393  [=](const LegalityQuery &Query) {
394  const LLT &Ty = Query.Types[BigTyIdx];
395  return !isPowerOf2_32(Ty.getSizeInBits()) &&
396  Ty.getSizeInBits() % 64 != 0;
397  },
398  [=](const LegalityQuery &Query) {
399  // Pick the next power of 2, or a multiple of 64 over 128.
400  // Whichever is smaller.
401  const LLT &Ty = Query.Types[BigTyIdx];
402  unsigned NewSizeInBits = 1
403  << Log2_32_Ceil(Ty.getSizeInBits() + 1);
404  if (NewSizeInBits >= 256) {
405  unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
406  if (RoundedTo < NewSizeInBits)
407  NewSizeInBits = RoundedTo;
408  }
409  return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
410  })
411  // Clamp the little scalar to s8-s256 and make it a power of 2. It's not
412  // worth considering the multiples of 64 since 2*192 and 2*384 are not
413  // valid.
414  .clampScalar(LitTyIdx, s8, s256)
415  .widenScalarToNextPow2(LitTyIdx, /*Min*/ 8)
416  // So at this point, we have s8, s16, s32, s64, s128, s192, s256, s384,
417  // s512, <X x s8>, <X x s16>, <X x s32>, or <X x s64>.
418  // At this point it's simple enough to accept the legal types.
419  .legalIf([=](const LegalityQuery &Query) {
420  const LLT &BigTy = Query.Types[BigTyIdx];
421  const LLT &LitTy = Query.Types[LitTyIdx];
422  if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
423  return false;
424  if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
425  return false;
426  return BigTy.getSizeInBits() % LitTy.getSizeInBits() == 0;
427  })
428  // Any vectors left are the wrong size. Scalarize them.
429  .scalarize(0)
430  .scalarize(1);
431  }
432 
// G_EXTRACT_VECTOR_ELT: result must match the vector's element type;
// the index (type index 2) is widened to s64.
433  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
434  .unsupportedIf([=](const LegalityQuery &Query) {
435  const LLT &EltTy = Query.Types[1].getElementType();
436  return Query.Types[0] != EltTy;
437  })
438  .minScalar(2, s64)
439  .legalIf([=](const LegalityQuery &Query) {
440  const LLT &VecTy = Query.Types[1];
441  return VecTy == v4s32 || VecTy == v2s64;
442  });
443 
444  getActionDefinitionsBuilder(G_BUILD_VECTOR)
445  .legalFor({{v4s16, s16},
446  {v8s16, s16},
447  {v2s32, s32},
448  {v4s32, s32},
449  {v2s64, s64}})
450  .clampNumElements(0, v4s32, v4s32)
451  .clampNumElements(0, v2s64, v2s64)
452 
453  // Deal with larger scalar types, which will be implicitly truncated.
454  .legalIf([=](const LegalityQuery &Query) {
455  return Query.Types[0].getScalarSizeInBits() <
456  Query.Types[1].getSizeInBits();
457  })
458  .minScalarSameAs(1, 0);
459 
// Freeze the rule tables and cross-check them against the target's
// instruction info (asserts on inconsistent rules in debug builds).
460  computeTables();
461  verify(*ST.getInstrInfo());
462 }
463 
466  MachineIRBuilder &MIRBuilder,
467  GISelChangeObserver &Observer) const {
// Dispatch on the opcode of the instruction the legalizer marked as
// Custom. G_VAARG is currently the only custom-legalized opcode; any
// other opcode reports failure back to the legalizer.
468  switch (MI.getOpcode()) {
469  default:
470  // No idea what to do.
471  return false;
472  case TargetOpcode::G_VAARG:
473  return legalizeVaArg(MI, MRI, MIRBuilder);
474  }
475 
// Every case above returns, so falling out of the switch is a bug.
476  llvm_unreachable("expected switch to return");
477 }
478 
// Custom lowering for G_VAARG: expands the pseudo into explicit va_list
// pointer manipulation -- load the current cursor, optionally realign it,
// load the argument value, then store the advanced cursor back.
// Operands of MI: (0) destination vreg, (1) va_list pointer, (2) required
// alignment as an immediate. Always succeeds and erases MI.
479 bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
481  MachineIRBuilder &MIRBuilder) const {
482  MIRBuilder.setInstr(MI);
483  MachineFunction &MF = MIRBuilder.getMF();
484  unsigned Align = MI.getOperand(2).getImm();
485  unsigned Dst = MI.getOperand(0).getReg();
486  unsigned ListPtr = MI.getOperand(1).getReg();
487 
// IntPtrTy is a scalar of pointer width, used for offset/size constants.
488  LLT PtrTy = MRI.getType(ListPtr);
489  LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits());
490 
// Load the current cursor (the next-argument address) out of the va_list.
491  const unsigned PtrSize = PtrTy.getSizeInBits() / 8;
492  unsigned List = MRI.createGenericVirtualRegister(PtrTy);
493  MIRBuilder.buildLoad(
494  List, ListPtr,
496  PtrSize, /* Align = */ PtrSize));
497 
498  unsigned DstPtr;
499  if (Align > PtrSize) {
500  // Realign the list to the actual required alignment.
// Round up: add (Align - 1) then clear the low log2(Align) bits.
501  auto AlignMinus1 = MIRBuilder.buildConstant(IntPtrTy, Align - 1);
502 
503  unsigned ListTmp = MRI.createGenericVirtualRegister(PtrTy);
504  MIRBuilder.buildGEP(ListTmp, List, AlignMinus1.getReg(0));
505 
506  DstPtr = MRI.createGenericVirtualRegister(PtrTy);
507  MIRBuilder.buildPtrMask(DstPtr, ListTmp, Log2_64(Align));
508  } else
509  DstPtr = List;
510 
// Load the actual argument value from the (possibly realigned) cursor.
511  uint64_t ValSize = MRI.getType(Dst).getSizeInBits() / 8;
512  MIRBuilder.buildLoad(
513  Dst, DstPtr,
515  ValSize, std::max(Align, PtrSize)));
516 
// Advance the cursor past the argument, keeping it pointer-aligned.
517  unsigned SizeReg = MRI.createGenericVirtualRegister(IntPtrTy);
518  MIRBuilder.buildConstant(SizeReg, alignTo(ValSize, PtrSize));
519 
520  unsigned NewList = MRI.createGenericVirtualRegister(PtrTy);
521  MIRBuilder.buildGEP(NewList, DstPtr, SizeReg);
522 
// Store the advanced cursor back into the va_list.
523  MIRBuilder.buildStore(
524  NewList, ListPtr,
526  PtrSize, /* Align = */ PtrSize));
527 
528  MI.eraseFromParent();
529  return true;
530 }
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition: MathExtras.h:551
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
static LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
MachineInstrBuilder buildGEP(unsigned Res, unsigned Op0, unsigned Op1)
Build and insert Res = G_GEP Op0, Op1.
This class represents lattice values for constants.
Definition: AllocatorList.h:23
The LegalityQuery object bundles together all the information that's needed to decide whether a given...
unsigned getReg() const
getReg - Returns the register number.
LLT getType(unsigned Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register...
uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the next integer (mod 2**64) that is greater than or equal to Value and is a multiple of Alig...
Definition: MathExtras.h:684
LegalityPredicate typeIs(unsigned TypeIdx, LLT TypesInit)
True iff the given type index is the specified types.
MachineInstrBuilder buildStore(unsigned Val, unsigned Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
bool isVector() const
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:408
LegalityPredicate atomicOrderingAtLeastOrStrongerThan(unsigned MMOIdx, AtomicOrdering Ordering)
True iff the specified MMO index has at an atomic ordering of at Ordering or stronger.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
This file declares the targeting of the MachineLegalizer class for AArch64.
Predicate all(Predicate P0, Predicate P1)
True iff P0 and P1 are true.
MachineFunction & getMF()
Getter for the function we currently build.
static LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
LegalizeMutation scalarize(unsigned TypeIdx)
Break up the vector type for the given type index into the element type.
Abstract class that contains various methods for clients to notify about changes. ...
unsigned const MachineRegisterInfo * MRI
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:428
AArch64LegalizerInfo(const AArch64Subtarget &ST)
MachineInstrBuilder buildPtrMask(unsigned Res, unsigned Op0, uint32_t NumBits)
Build and insert Res = G_PTR_MASK Op0, NumBits.
Helper class to build MachineInstr.
void setInstr(MachineInstr &MI)
Set the insertion point to before MI.
bool legalizeCustom(MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder, GISelChangeObserver &Observer) const override
This class contains a discriminated union of information about pointers in memory operands...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool verify(const TargetRegisterInfo &TRI) const
Check that information hold by this instance make sense for the given TRI.
The memory access writes data.
unsigned createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
unsigned getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
int64_t getImm() const
This file declares the MachineIRBuilder class.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
Representation of each machine instruction.
Definition: MachineInstr.h:63
ArrayRef< LLT > Types
const NodeList & List
Definition: RDFGraph.cpp:209
LegalityPredicate typeInSet(unsigned TypeIdx, std::initializer_list< LLT > TypesInit)
True iff the given type index is one of the specified types.
const AArch64InstrInfo * getInstrInfo() const override
MachineInstrBuilder buildLoad(unsigned Res, unsigned Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
IRTranslator LLVM IR MI
static LLT vector(uint16_t NumElements, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
ArrayRef< MemDesc > MMODescrs
Operations which require memory can use this to place requirements on the memory type for each MMO...
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:413
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:544