//===- AArch64LegalizerInfo.cpp ----------------------------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AArch64LegalizerInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"

using namespace llvm;
using namespace LegalizeActions;
using namespace LegalityPredicates;

AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) {
  using namespace TargetOpcode;
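  // Shorthand LLTs used throughout: pN is a pointer in address space N, sN an
  // N-bit scalar, and vMsN a vector of M elements that are each N bits wide.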
  const LLT p0 = LLT::pointer(0, 64);
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT s128 = LLT::scalar(128);
  const LLT s256 = LLT::scalar(256);
  const LLT s512 = LLT::scalar(512);
  const LLT v16s8 = LLT::vector(16, 8);
  const LLT v8s8 = LLT::vector(8, 8);
  const LLT v4s8 = LLT::vector(4, 8);
  const LLT v8s16 = LLT::vector(8, 16);
  const LLT v4s16 = LLT::vector(4, 16);
  const LLT v2s16 = LLT::vector(2, 16);
  const LLT v2s32 = LLT::vector(2, 32);
  const LLT v4s32 = LLT::vector(4, 32);
  const LLT v2s64 = LLT::vector(2, 64);

  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
      .legalFor({p0, s1, s8, s16, s32, s64})
      .clampScalar(0, s1, s64)
      .widenScalarToNextPow2(0, 8);
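  // The rules compose in order: e.g. an odd-sized s3 G_IMPLICIT_DEF is within
  // the clamp range, then widenScalarToNextPow2 widens it to at least 8 bits,
  // giving the legal s8.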

  getActionDefinitionsBuilder(G_PHI)
      .legalFor({p0, s16, s32, s64})
      .clampScalar(0, s16, s64)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder(G_BSWAP)
      .legalFor({s32, s64})
      .clampScalar(0, s16, s64)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR, G_SHL})
      .legalFor({s32, s64, v2s32, v4s32, v2s64})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      .clampNumElements(0, v2s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)
      .moreElementsToNextPow2(0);
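  // For vectors of s32, clampNumElements keeps the element count in [2, 4],
  // and moreElementsToNextPow2 pads e.g. <3 x s32> out to the legal <4 x s32>.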

  getActionDefinitionsBuilder(G_GEP)
      .legalFor({{p0, s64}})
      .clampScalar(1, s64, s64);

  getActionDefinitionsBuilder(G_PTR_MASK).legalFor({p0});

  getActionDefinitionsBuilder({G_LSHR, G_ASHR, G_SDIV, G_UDIV})
      .legalFor({s32, s64})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder({G_SREM, G_UREM})
      .lowerFor({s1, s8, s16, s32, s64});
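  // AArch64 has divide instructions but no integer remainder instruction, so
  // remainders are lowered to a div/mul/sub sequence.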

  getActionDefinitionsBuilder({G_SMULO, G_UMULO})
      .lowerFor({{s64, s1}});

  getActionDefinitionsBuilder({G_SMULH, G_UMULH}).legalFor({s32, s64});

  getActionDefinitionsBuilder({G_UADDE, G_USUBE, G_SADDO, G_SSUBO})
      .legalFor({{s32, s1}, {s64, s1}});

  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMA, G_FMUL, G_FDIV})
      .legalFor({s32, s64});

  getActionDefinitionsBuilder({G_FREM, G_FPOW}).libcallFor({s32, s64});
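  // Neither op has an AArch64 instruction; the legalizer emits calls to the
  // fmodf/fmod and powf/pow libm routines instead.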

  getActionDefinitionsBuilder(G_INSERT)
      .unsupportedIf([=](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() <= Query.Types[1].getSizeInBits();
      })
      .legalIf([=](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];
        const LLT &Ty1 = Query.Types[1];
        if (Ty0 != s32 && Ty0 != s64 && Ty0 != p0)
          return false;
        return isPowerOf2_32(Ty1.getSizeInBits()) &&
               (Ty1.getSizeInBits() == 1 || Ty1.getSizeInBits() >= 8);
      })
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      .maxScalarIf(typeInSet(0, {s32}), 1, s16)
      .maxScalarIf(typeInSet(0, {s64}), 1, s32)
      .widenScalarToNextPow2(1);

  getActionDefinitionsBuilder(G_EXTRACT)
      .unsupportedIf([=](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() >= Query.Types[1].getSizeInBits();
      })
      .legalIf([=](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];
        const LLT &Ty1 = Query.Types[1];
        if (Ty1 != s32 && Ty1 != s64 && Ty1 != p0)
          return false;
        if (Ty1 == p0)
          return true;
        return isPowerOf2_32(Ty0.getSizeInBits()) &&
               (Ty0.getSizeInBits() == 1 || Ty0.getSizeInBits() >= 8);
      })
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1)
      .maxScalarIf(typeInSet(1, {s32}), 0, s16)
      .maxScalarIf(typeInSet(1, {s64}), 0, s32)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
      .legalForTypesWithMemSize({{s32, p0, 8},
                                 {s32, p0, 16},
                                 {s32, p0, 32},
                                 {s64, p0, 64},
                                 {p0, p0, 64},
                                 {v2s32, p0, 64}})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      // TODO: We could support sum-of-pow2's but the lowering code doesn't
      // know how to do that yet.
      .unsupportedIfMemSizeNotPow2()
      // Lower anything left over into G_*EXT and G_LOAD.
      .lower();

  getActionDefinitionsBuilder(G_LOAD)
      .legalForTypesWithMemSize({{s8, p0, 8},
                                 {s16, p0, 16},
                                 {s32, p0, 32},
                                 {s64, p0, 64},
                                 {p0, p0, 64},
                                 {v2s32, p0, 64}})
      // These extending loads are also legal.
      .legalForTypesWithMemSize({{s32, p0, 8},
                                 {s32, p0, 16}})
      .clampScalar(0, s8, s64)
      .widenScalarToNextPow2(0)
      // TODO: We could support sum-of-pow2's but the lowering code doesn't
      // know how to do that yet.
      .unsupportedIfMemSizeNotPow2()
      // Lower any remaining any-extending loads into G_ANYEXT and G_LOAD.
      .lowerIf([=](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() != Query.MMODescrs[0].SizeInBits;
      })
      .clampNumElements(0, v2s32, v2s32)
      .clampMaxNumElements(0, s64, 1);

  getActionDefinitionsBuilder(G_STORE)
      .legalForTypesWithMemSize({{s8, p0, 8},
                                 {s16, p0, 16},
                                 {s32, p0, 32},
                                 {s64, p0, 64},
                                 {p0, p0, 64},
                                 {v2s32, p0, 64}})
      .clampScalar(0, s8, s64)
      .widenScalarToNextPow2(0)
      // TODO: We could support sum-of-pow2's but the lowering code doesn't
      // know how to do that yet.
      .unsupportedIfMemSizeNotPow2()
      .lowerIf([=](const LegalityQuery &Query) {
        return Query.Types[0].isScalar() &&
               Query.Types[0].getSizeInBits() != Query.MMODescrs[0].SizeInBits;
      })
      .clampNumElements(0, v2s32, v2s32)
      .clampMaxNumElements(0, s64, 1);
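  // The lowerIf above catches truncating stores (scalar value wider than the
  // memory size); lowering effectively truncates the value to the memory type
  // before storing it.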

  // Constants
  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({p0, s32, s64})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0);
  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({s32, s64})
      .clampScalar(0, s32, s64);

  getActionDefinitionsBuilder(G_ICMP)
      .legalFor({{s32, s32}, {s32, s64}, {s32, p0}})
      .clampScalar(0, s32, s32)
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1);

  getActionDefinitionsBuilder(G_FCMP)
      .legalFor({{s32, s32}, {s32, s64}})
      .clampScalar(0, s32, s32)
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1);
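  // Both compares produce their boolean result as s32 (hence type index 0 is
  // clamped to exactly s32); only the compared operands vary in width.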
214 
215  // Extensions
216  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
217  .legalForCartesianProduct({s8, s16, s32, s64}, {s1, s8, s16, s32});
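  // legalForCartesianProduct marks every (dst, src) pairing of the two sets
  // legal, e.g. both s64 = G_ZEXT s1 and s16 = G_ANYEXT s8.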

  // FP conversions
  getActionDefinitionsBuilder(G_FPTRUNC).legalFor(
      {{s16, s32}, {s16, s64}, {s32, s64}});
  getActionDefinitionsBuilder(G_FPEXT).legalFor(
      {{s32, s16}, {s64, s16}, {s64, s32}});

  // Conversions
  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
      .legalForCartesianProduct({s32, s64})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
      .legalForCartesianProduct({s32, s64})
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1)
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0);
239 
240  // Control-flow
241  getActionDefinitionsBuilder(G_BRCOND).legalFor({s1, s8, s16, s32});
242  getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});
243 
244  // Select
245  getActionDefinitionsBuilder(G_SELECT)
246  .legalFor({{s32, s1}, {s64, s1}, {p0, s1}})
247  .clampScalar(0, s32, s64)
248  .widenScalarToNextPow2(0);
249 
250  // Pointer-handling
251  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});
252  getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0});
253 
254  getActionDefinitionsBuilder(G_PTRTOINT)
255  .legalForCartesianProduct({s1, s8, s16, s32, s64}, {p0})
256  .maxScalar(0, s64)
257  .widenScalarToNextPow2(0, /*Min*/ 8);
258 
259  getActionDefinitionsBuilder(G_INTTOPTR)
260  .unsupportedIf([&](const LegalityQuery &Query) {
261  return Query.Types[0].getSizeInBits() != Query.Types[1].getSizeInBits();
262  })
263  .legalFor({{p0, s64}});
264 
265  // Casts for 32 and 64-bit width type are just copies.
266  // Same for 128-bit width type, except they are on the FPR bank.
267  getActionDefinitionsBuilder(G_BITCAST)
268  // FIXME: This is wrong since G_BITCAST is not allowed to change the
269  // number of bits but it's what the previous code described and fixing
270  // it breaks tests.
271  .legalForCartesianProduct({s1, s8, s16, s32, s64, s128, v16s8, v8s8, v4s8,
272  v8s16, v4s16, v2s16, v4s32, v2s32, v2s64});
273 
274  getActionDefinitionsBuilder(G_VASTART).legalFor({p0});
275 
276  // va_list must be a pointer, but most sized types are pretty easy to handle
277  // as the destination.
278  getActionDefinitionsBuilder(G_VAARG)
279  .customForCartesianProduct({s8, s16, s32, s64, p0}, {p0})
280  .clampScalar(0, s8, s64)
281  .widenScalarToNextPow2(0, /*Min*/ 8);
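  // The custom action for G_VAARG is handled by legalizeVaArg() below.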

  if (ST.hasLSE()) {
    getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG_WITH_SUCCESS)
        .lowerIf(all(
            typeInSet(0, {s8, s16, s32, s64}), typeIs(1, s1), typeIs(2, p0),
            atomicOrderingAtLeastOrStrongerThan(0, AtomicOrdering::Monotonic)));

    getActionDefinitionsBuilder(
        {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB, G_ATOMICRMW_AND,
         G_ATOMICRMW_OR, G_ATOMICRMW_XOR, G_ATOMICRMW_MIN, G_ATOMICRMW_MAX,
         G_ATOMICRMW_UMIN, G_ATOMICRMW_UMAX, G_ATOMIC_CMPXCHG})
        .legalIf(all(
            typeInSet(0, {s8, s16, s32, s64}), typeIs(1, p0),
            atomicOrderingAtLeastOrStrongerThan(0, AtomicOrdering::Monotonic)));
  }
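  // With LSE the atomic RMW operations above map to single instructions,
  // while G_ATOMIC_CMPXCHG_WITH_SUCCESS is lowered to G_ATOMIC_CMPXCHG plus a
  // compare of the loaded value against the expected one.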

  getActionDefinitionsBuilder(G_BLOCK_ADDR).legalFor({p0});

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

    auto notValidElt = [](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      if (Ty.isVector()) {
        const LLT &EltTy = Ty.getElementType();
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
          return true;
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
          return true;
      }
      return false;
    };
    auto scalarize = [](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      return std::make_pair(TypeIdx, Ty.getElementType());
    };

    // FIXME: This rule is horrible, but specifies the same as what we had
    // before with the particularly strange definitions removed (e.g.
    // s8 = G_MERGE_VALUES s32, s32).
    // Part of the complexity comes from these ops being extremely flexible.
    // For example, you can build/decompose vectors with them, concatenate
    // vectors, etc., and on top of that you can also bitcast at the same
    // time. We've been considering breaking them up into multiple ops to
    // make them more manageable throughout the backend.
    getActionDefinitionsBuilder(Op)
        // Break up vectors with weird elements into scalars.
        .fewerElementsIf(
            [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
            [=](const LegalityQuery &Query) { return scalarize(Query, 0); })
        .fewerElementsIf(
            [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
            [=](const LegalityQuery &Query) { return scalarize(Query, 1); })
        // Clamp the big scalar to s8-s512 and make it either a power of 2,
        // 192, or 384.
        .clampScalar(BigTyIdx, s8, s512)
        .widenScalarIf(
            [=](const LegalityQuery &Query) {
              const LLT &Ty = Query.Types[BigTyIdx];
              return !isPowerOf2_32(Ty.getSizeInBits()) &&
                     Ty.getSizeInBits() % 64 != 0;
            },
            [=](const LegalityQuery &Query) {
              // Pick the next power of 2, or a multiple of 64 over 128,
              // whichever is smaller.
              const LLT &Ty = Query.Types[BigTyIdx];
              unsigned NewSizeInBits =
                  1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
              if (NewSizeInBits >= 256) {
                unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
                if (RoundedTo < NewSizeInBits)
                  NewSizeInBits = RoundedTo;
              }
              return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
            })
        // Clamp the little scalar to s8-s256 and make it a power of 2. It's
        // not worth considering the multiples of 64 since 2*192 and 2*384
        // are not valid.
        .clampScalar(LitTyIdx, s8, s256)
        .widenScalarToNextPow2(LitTyIdx, /*Min*/ 8)
        // So at this point, we have s8, s16, s32, s64, s128, s192, s256,
        // s384, s512, <X x s8>, <X x s16>, <X x s32>, or <X x s64>.
        // At this point it's simple enough to accept the legal types.
        .legalIf([=](const LegalityQuery &Query) {
          const LLT &BigTy = Query.Types[BigTyIdx];
          const LLT &LitTy = Query.Types[LitTyIdx];
          if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
            return false;
          if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
            return false;
          return BigTy.getSizeInBits() % LitTy.getSizeInBits() == 0;
        })
        // Any vectors left are the wrong size. Scalarize them.
        .fewerElementsIf([](const LegalityQuery &Query) { return true; },
                         [](const LegalityQuery &Query) {
                           return std::make_pair(
                               0, Query.Types[0].getElementType());
                         })
        .fewerElementsIf([](const LegalityQuery &Query) { return true; },
                         [](const LegalityQuery &Query) {
                           return std::make_pair(
                               1, Query.Types[1].getElementType());
                         });
  }

  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
      .unsupportedIf([=](const LegalityQuery &Query) {
        const LLT &EltTy = Query.Types[1].getElementType();
        return Query.Types[0] != EltTy;
      })
      .minScalar(2, s64)
      .legalIf([=](const LegalityQuery &Query) {
        const LLT &VecTy = Query.Types[1];
        return VecTy == v4s32 || VecTy == v2s64;
      });

  computeTables();
  verify(*ST.getInstrInfo());
}

bool AArch64LegalizerInfo::legalizeCustom(MachineInstr &MI,
                                          MachineRegisterInfo &MRI,
                                          MachineIRBuilder &MIRBuilder) const {
  switch (MI.getOpcode()) {
  default:
    // No idea what to do.
    return false;
  case TargetOpcode::G_VAARG:
    return legalizeVaArg(MI, MRI, MIRBuilder);
  }

  llvm_unreachable("expected switch to return");
}

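// G_VAARG is lowered here assuming a va_list that is a plain pointer into the
// argument save area (as in Darwin's AArch64 ABI): load the current pointer,
// realign it if the value needs more than pointer alignment, load the value,
// then store back the pointer advanced past the slot.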
bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &MIRBuilder) const {
  MIRBuilder.setInstr(MI);
  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Align = MI.getOperand(2).getImm();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned ListPtr = MI.getOperand(1).getReg();

  LLT PtrTy = MRI.getType(ListPtr);
  LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits());

  const unsigned PtrSize = PtrTy.getSizeInBits() / 8;
  unsigned List = MRI.createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildLoad(
      List, ListPtr,
      *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
                               PtrSize, /* Align = */ PtrSize));

  unsigned DstPtr;
  if (Align > PtrSize) {
    // Realign the list to the actual required alignment.
    auto AlignMinus1 = MIRBuilder.buildConstant(IntPtrTy, Align - 1);

    unsigned ListTmp = MRI.createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildGEP(ListTmp, List, AlignMinus1->getOperand(0).getReg());
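
    // Adding Align - 1 and then clearing the low log2(Align) bits rounds the
    // pointer up to the next Align boundary.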
445 
446  DstPtr = MRI.createGenericVirtualRegister(PtrTy);
447  MIRBuilder.buildPtrMask(DstPtr, ListTmp, Log2_64(Align));
448  } else
449  DstPtr = List;
450 
451  uint64_t ValSize = MRI.getType(Dst).getSizeInBits() / 8;
452  MIRBuilder.buildLoad(
453  Dst, DstPtr,
455  ValSize, std::max(Align, PtrSize)));
456 
457  unsigned SizeReg = MRI.createGenericVirtualRegister(IntPtrTy);
458  MIRBuilder.buildConstant(SizeReg, alignTo(ValSize, PtrSize));
459 
460  unsigned NewList = MRI.createGenericVirtualRegister(PtrTy);
461  MIRBuilder.buildGEP(NewList, DstPtr, SizeReg);
462 
463  MIRBuilder.buildStore(
464  NewList, ListPtr,
466  PtrSize, /* Align = */ PtrSize));
467 
468  MI.eraseFromParent();
469  return true;
470 }