//===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file declares the IRTranslator pass.
/// This pass is responsible for translating LLVM IR into MachineInstrs.
/// It uses target hooks to lower the ABI, but aside from that the code
/// generated by this pass is generic. This is the default translator used for
/// GlobalISel.
///
/// \todo Replace the comments with actual doxygen comments.
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
#define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CodeGen.h"
#include <memory>
#include <utility>

namespace llvm {

class AllocaInst;
class AssumptionCache;
class BasicBlock;
class CallInst;
class CallLowering;
class Constant;
class ConstrainedFPIntrinsic;
class DataLayout;
class DbgDeclareInst;
class DbgValueInst;
class Instruction;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineRegisterInfo;
class OptimizationRemarkEmitter;
class PHINode;
class TargetLibraryInfo;
class TargetPassConfig;
class User;
class Value;

// Technically the pass should run on a hypothetical MachineModule,
// since it should translate a Global into some sort of MachineGlobal.
// The MachineGlobal should ultimately just be a transfer of ownership of
// the interesting bits that are relevant to represent a global value.
// That being said, we could investigate what it would cost to just duplicate
// the information from the LLVM IR.
// The idea is that ultimately we would be able to free up the memory used
// by the LLVM IR as soon as the translation is over.
class IRTranslator : public MachineFunctionPass {
public:
  static char ID;

private:
  /// Interface used to lower everything related to calls.
  const CallLowering *CLI = nullptr;

  /// This class contains the mapping between Values and their vreg-related
  /// data.
  class ValueToVRegInfo {
  public:
    ValueToVRegInfo() = default;

    using VRegListT = SmallVector<Register, 1>;
    using OffsetListT = SmallVector<uint64_t, 1>;

    using const_vreg_iterator =
        DenseMap<const Value *, VRegListT *>::const_iterator;
    using const_offset_iterator =
        DenseMap<const Value *, OffsetListT *>::const_iterator;

    inline const_vreg_iterator vregs_end() const { return ValToVRegs.end(); }

    VRegListT *getVRegs(const Value &V) {
      auto It = ValToVRegs.find(&V);
      if (It != ValToVRegs.end())
        return It->second;

      return insertVRegs(V);
    }

    OffsetListT *getOffsets(const Value &V) {
      auto It = TypeToOffsets.find(V.getType());
      if (It != TypeToOffsets.end())
        return It->second;

      return insertOffsets(V);
    }

    const_vreg_iterator findVRegs(const Value &V) const {
      return ValToVRegs.find(&V);
    }

    bool contains(const Value &V) const { return ValToVRegs.contains(&V); }

    void reset() {
      ValToVRegs.clear();
      TypeToOffsets.clear();
      VRegAlloc.DestroyAll();
      OffsetAlloc.DestroyAll();
    }

  private:
    VRegListT *insertVRegs(const Value &V) {
      assert(!ValToVRegs.contains(&V) && "Value already exists");

      // We placement new using our fast allocator since we never try to free
      // the vectors until translation is finished.
      auto *VRegList = new (VRegAlloc.Allocate()) VRegListT();
      ValToVRegs[&V] = VRegList;
      return VRegList;
    }

    OffsetListT *insertOffsets(const Value &V) {
      assert(!TypeToOffsets.contains(V.getType()) && "Type already exists");

      auto *OffsetList = new (OffsetAlloc.Allocate()) OffsetListT();
      TypeToOffsets[V.getType()] = OffsetList;
      return OffsetList;
    }

    SpecificBumpPtrAllocator<VRegListT> VRegAlloc;
    SpecificBumpPtrAllocator<OffsetListT> OffsetAlloc;

    // We store pointers to vectors here since references may be invalidated
    // while we hold them if we stored the vectors directly.
    DenseMap<const Value *, VRegListT *> ValToVRegs;
    DenseMap<const Type *, OffsetListT *> TypeToOffsets;
  };

  /// Mapping of the values of the current LLVM IR function to the related
  /// virtual registers and offsets.
  ValueToVRegInfo VMap;
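
  // Note (informal sketch, not the exact implementation): getOrCreateVRegs
  // populates VMap lazily. On a miss it grabs the (empty) list for the value
  // and creates one generic vreg per flattened member of the value's type,
  // roughly:
  //
  //   ValueToVRegInfo::VRegListT &Regs = *VMap.getVRegs(V);
  //   if (Regs.empty())
  //     Regs.push_back(MRI->createGenericVirtualRegister(
  //         getLLTForType(*V.getType(), *DL)));
  //
  // The lists live in the bump allocators above, so the pointers handed out
  // stay valid until the maps are reset at the end of translation.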

  // N.b. it's not completely obvious that this will be sufficient for every
  // LLVM IR construct (with "invoke" being the obvious candidate to mess up
  // our lives).
  DenseMap<const BasicBlock *, MachineBasicBlock *> BBToMBB;

  // One BasicBlock can be translated to multiple MachineBasicBlocks. For such
  // BasicBlocks translated to multiple MachineBasicBlocks, MachinePreds retains
  // a mapping between the edges arriving at the BasicBlock and the
  // corresponding created MachineBasicBlocks. Some BasicBlocks that get
  // translated to a single MachineBasicBlock may also end up in this map.
  using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>;
  DenseMap<CFGEdge, SmallVector<MachineBasicBlock *, 1>> MachinePreds;

  // List of stubbed PHI instructions, for values and basic blocks to be filled
  // in once all MachineBasicBlocks have been created.
  SmallVector<std::pair<const PHINode *, SmallVector<MachineInstr *, 1>>, 4>
      PendingPHIs;

  /// Record of what frame index has been allocated to specified allocas for
  /// this function.
  DenseMap<const AllocaInst *, int> FrameIndices;

  SwiftErrorValueTracking SwiftError;

  /// \name Methods for translating from LLVM IR to MachineInstr.
  /// \see ::translate for general information on the translate methods.
  /// @{

  /// Translate \p Inst into its corresponding MachineInstr instruction(s).
  /// Insert the newly translated instruction(s) right where the CurBuilder
  /// is set.
  ///
  /// The general algorithm is:
  /// 1. Look for a virtual register for each operand or
  ///    create one.
  /// 2. Update the VMap accordingly.
  /// 2.alt. For constant arguments, if they are compile time constants,
  ///   produce an immediate in the right operand and do not touch
  ///   ValToReg. Actually we will go with a virtual register for each
  ///   constant because it may be expensive to actually materialize the
  ///   constant. Moreover, if the constant spans several instructions,
  ///   CSE may not catch them.
  ///   => Update ValToVReg and remember that we saw a constant in Constants.
  ///   We will materialize all the constants in finalize.
  /// Note: we would need to do something so that we can recognize such
  /// operands as constants.
  /// 3. Create the generic instruction.
  ///
  /// \return true if the translation succeeded.
  bool translate(const Instruction &Inst);
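
  // For example (an illustrative sketch, not the exact implementation),
  // translating an IR `add` amounts to looking up or creating vregs for the
  // result and the two operands and emitting one generic instruction at
  // CurBuilder's insertion point:
  //
  //   Register Res = getOrCreateVReg(U);
  //   Register Op0 = getOrCreateVReg(*U.getOperand(0));
  //   Register Op1 = getOrCreateVReg(*U.getOperand(1));
  //   MIRBuilder.buildInstr(TargetOpcode::G_ADD, {Res}, {Op0, Op1});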

  /// Materialize \p C into virtual-register \p Reg. The generic instructions
  /// performing this materialization will be inserted into the entry block of
  /// the function.
  ///
  /// \return true if the materialization succeeded.
  bool translate(const Constant &C, Register Reg);

  /// Examine any debug-info attached to the instruction (in the form of
  /// DbgRecords) and translate it.
  void translateDbgInfo(const Instruction &Inst,
                        MachineIRBuilder &MIRBuilder);

  /// Translate a debug-info record of a dbg.value into a DBG_* instruction.
  /// Pass in all the contents of the record, rather than relying on how it's
  /// stored.
  void translateDbgValueRecord(Value *V, bool HasArgList,
                               const DILocalVariable *Variable,
                               const DIExpression *Expression,
                               const DebugLoc &DL,
                               MachineIRBuilder &MIRBuilder);

  /// Translate a debug-info record of a dbg.declare into an indirect DBG_*
  /// instruction. Pass in all the contents of the record, rather than relying
  /// on how it's stored.
  void translateDbgDeclareRecord(Value *Address, bool HasArgList,
                                 const DILocalVariable *Variable,
                                 const DIExpression *Expression,
                                 const DebugLoc &DL,
                                 MachineIRBuilder &MIRBuilder);

  // Translate U as a copy of V.
  bool translateCopy(const User &U, const Value &V,
                     MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST
  /// is emitted.
  bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM load instruction into generic IR.
  bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM store instruction into generic IR.
  bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM string intrinsic (memcpy, memset, ...).
  bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
                        unsigned Opcode);
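
  // E.g. (sketch of the mapping) @llvm.memcpy.* is handled by calling this
  // with TargetOpcode::G_MEMCPY, @llvm.memmove.* with G_MEMMOVE and
  // @llvm.memset.* with G_MEMSET; the pointer and length arguments become
  // vreg operands of that single generic instruction.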

  /// Translate an LLVM trap intrinsic (trap, debugtrap, ubsantrap).
  bool translateTrap(const CallInst &U, MachineIRBuilder &MIRBuilder,
                     unsigned Opcode);

  // Translate @llvm.experimental.vector.interleave2 and
  // @llvm.experimental.vector.deinterleave2 intrinsics for fixed-width vector
  // types into vector shuffles.
  bool translateVectorInterleave2Intrinsic(const CallInst &CI,
                                           MachineIRBuilder &MIRBuilder);
  bool translateVectorDeinterleave2Intrinsic(const CallInst &CI,
                                             MachineIRBuilder &MIRBuilder);
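
  // Illustration (hand-written IR, not output of this pass): for fixed-width
  // vectors, interleaving two <2 x i64> values
  //
  //   %v = call <4 x i64> @llvm.experimental.vector.interleave2.v4i64(
  //            <2 x i64> %a, <2 x i64> %b)
  //
  // is equivalent to the shuffle
  //
  //   %v = shufflevector <2 x i64> %a, <2 x i64> %b,
  //                      <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  //
  // which is the form these helpers emit.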

  void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);

  bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                  MachineIRBuilder &MIRBuilder);
  bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder);

  /// Helper function for translateSimpleIntrinsic.
  /// \return The generic opcode for \p IntrinsicID if \p IntrinsicID is a
  /// simple intrinsic (ceil, fabs, etc.). Otherwise, returns
  /// Intrinsic::not_intrinsic.
  unsigned getSimpleIntrinsicOpcode(Intrinsic::ID ID);
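
  // Illustrative entries of that mapping (see the definition for the full,
  // authoritative list):
  //
  //   case Intrinsic::fabs: return TargetOpcode::G_FABS;
  //   case Intrinsic::ceil: return TargetOpcode::G_FCEIL;
  //   case Intrinsic::sqrt: return TargetOpcode::G_FSQRT;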

  /// Translates the intrinsics defined in getSimpleIntrinsicOpcode.
  /// \return true if the translation succeeded.
  bool translateSimpleIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                MachineIRBuilder &MIRBuilder);

  bool translateConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI,
                                       MachineIRBuilder &MIRBuilder);

  bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                               MachineIRBuilder &MIRBuilder);

  /// Returns the single livein physical register Arg was lowered to, if
  /// possible.
  std::optional<MCRegister> getArgPhysReg(Argument &Arg);

  /// If debug-info targets an Argument and its expression is an EntryValue,
  /// lower it as either an entry in the MF debug table (dbg.declare), or a
  /// DBG_VALUE targeting the corresponding livein register for that Argument
  /// (dbg.value).
  bool translateIfEntryValueArgument(bool isDeclare, Value *Arg,
                                     const DILocalVariable *Var,
                                     const DIExpression *Expr,
                                     const DebugLoc &DL,
                                     MachineIRBuilder &MIRBuilder);

  bool translateInlineAsm(const CallBase &CB, MachineIRBuilder &MIRBuilder);

  /// Common code for translating normal calls or invokes.
  bool translateCallBase(const CallBase &CB, MachineIRBuilder &MIRBuilder);

  /// Translate call instruction.
  /// \pre \p U is a call instruction.
  bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);

  /// When an invoke or a cleanupret unwinds to the next EH pad, there are
  /// many places it could ultimately go. In the IR, we have a single unwind
  /// destination, but in the machine CFG, we enumerate all the possible blocks.
  /// This function skips over imaginary basic blocks that hold catchswitch
  /// instructions, and finds all the "real" machine
  /// basic block destinations. As those destinations may not be successors of
  /// EHPadBB, here we also calculate the edge probability to those
  /// destinations. The passed-in Prob is the edge probability to EHPadBB.
  bool findUnwindDestinations(
      const BasicBlock *EHPadBB, BranchProbability Prob,
      SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
          &UnwindDests);

  bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate one of LLVM's cast instructions into MachineInstrs, with the
  /// given generic Opcode.
  bool translateCast(unsigned Opcode, const User &U,
                     MachineIRBuilder &MIRBuilder);

  /// Translate a phi instruction.
  bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate a comparison (icmp or fcmp) instruction or constant.
  bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an integer compare instruction (or constant).
  bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCompare(U, MIRBuilder);
  }

  /// Translate a floating-point compare instruction (or constant).
  bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCompare(U, MIRBuilder);
  }

  /// Add remaining operands onto phis we've translated. Executed after all
  /// MachineBasicBlocks for the function have been created.
  void finishPendingPhis();

  /// Translate \p Inst into a unary operation \p Opcode.
  /// \pre \p U is a unary operation.
  bool translateUnaryOp(unsigned Opcode, const User &U,
                        MachineIRBuilder &MIRBuilder);

  /// Translate \p Inst into a binary operation \p Opcode.
  /// \pre \p U is a binary operation.
  bool translateBinaryOp(unsigned Opcode, const User &U,
                         MachineIRBuilder &MIRBuilder);

  /// If the set of cases should be emitted as a series of branches, return
  /// true. If we should emit this as a bunch of and/or'd together conditions,
  /// return false.
  bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
  /// Helper method for findMergedConditions.
  /// This function emits a branch and is used at the leaves of an OR or an
  /// AND operator tree.
  void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    MachineBasicBlock *CurBB,
                                    MachineBasicBlock *SwitchBB,
                                    BranchProbability TProb,
                                    BranchProbability FProb, bool InvertCond);
  /// Used during condbr translation to find trees of conditions that can be
  /// optimized.
  void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            MachineBasicBlock *SwitchBB,
                            Instruction::BinaryOps Opc, BranchProbability TProb,
                            BranchProbability FProb, bool InvertCond);
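
  // For instance (an informal sketch): for
  //
  //   %c = and i1 %a, %b
  //   br i1 %c, label %then, label %else
  //
  // findMergedConditions walks the `and` tree and, rather than materializing
  // %c, emits one conditional branch on %a whose false edge goes directly to
  // %else, followed by a second branch on %b, splitting the original branch
  // probabilities between the two emitted branches.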

  /// Translate branch (br) instruction.
  /// \pre \p U is a branch instruction.
  bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);

  // Begin switch lowering functions.
  bool emitJumpTableHeader(SwitchCG::JumpTable &JT,
                           SwitchCG::JumpTableHeader &JTH,
                           MachineBasicBlock *HeaderBB);
  void emitJumpTable(SwitchCG::JumpTable &JT, MachineBasicBlock *MBB);

  void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
                      MachineIRBuilder &MIB);

  /// Generate code for the BitTest header block, which precedes each sequence
  /// of BitTestCases.
  void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
                         MachineBasicBlock *SwitchMBB);
  /// Generate code to produce one "bit test" for a given BitTestCase \p B.
  void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
                       BranchProbability BranchProbToNext, Register Reg,
                       SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
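
  // Worked example (informal): a switch whose cases 1, 3 and 5 all branch to
  // the same destination can be lowered to one bit test against the mask
  // 0b101010 (bits 1, 3 and 5 set, i.e. 42):
  //
  //   %bit = shl i64 1, %x      ; conceptually, 1 << (switch value - low)
  //   %hit = and i64 %bit, 42
  //   ; branch to the common destination if %hit is non-zero
  //
  // emitBitTestHeader emits the preceding range check, and each
  // emitBitTestCase emits one such mask test.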

  void splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
                     const SwitchCG::SwitchWorkListItem &W, Value *Cond,
                     MachineBasicBlock *SwitchMBB, MachineIRBuilder &MIB);

  bool lowerJumpTableWorkItem(
      SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
      MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
      MachineIRBuilder &MIB, MachineFunction::iterator BBI,
      BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
      MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);

  bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
                                MachineBasicBlock *Fallthrough,
                                bool FallthroughUnreachable,
                                BranchProbability UnhandledProbs,
                                MachineBasicBlock *CurMBB,
                                MachineIRBuilder &MIB,
                                MachineBasicBlock *SwitchMBB);

  bool lowerBitTestWorkItem(
      SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
      MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
      MachineIRBuilder &MIB, MachineFunction::iterator BBI,
      BranchProbability DefaultProb, BranchProbability UnhandledProbs,
      SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
      bool FallthroughUnreachable);

  bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
                           MachineBasicBlock *SwitchMBB,
                           MachineBasicBlock *DefaultMBB,
                           MachineIRBuilder &MIB);

  bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
  // End switch lowering section.

  bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate return (ret) instruction.
  /// The target needs to implement CallLowering::lowerReturn for
  /// this to succeed.
  /// \pre \p U is a return instruction.
  bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateFNeg(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
  }
  bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
  }
  bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
  }
  bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
  }
  bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
  }
  bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
  }

  bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
  }
  bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
  }
  bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
  }
  bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
  }
  bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
  }
  bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
  }
  bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
  }
  bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
  }
  bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
  }
  bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
  }
  bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
  }
  bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
  }
  bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
  }
  bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
  }

  bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
  }

  bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
  }
  bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
  }
  bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
  }

  bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
  }
  bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
  }
  bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
  }
  bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
  }
  bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
  }

  bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateFence(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateFreeze(const User &U, MachineIRBuilder &MIRBuilder);

  // Stubs to keep the compiler happy while we implement the rest of the
  // translation.
  bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder);
  }
  bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }

  bool translateConvergenceControlIntrinsic(const CallInst &CI,
                                            Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder);

  /// @}

  // Builder for machine instructions, a la IRBuilder.
  // I.e., compared to regular MIBuilder, this one also inserts the instruction
  // in the current block, it can create blocks, etc., basically a kind of
  // IRBuilder, but for Machine IR.
  // CSEMIRBuilder CurBuilder;
  std::unique_ptr<MachineIRBuilder> CurBuilder;

  // Builder set to the entry block (just after ABI lowering instructions). Used
  // as a convenient location for Constants.
  // CSEMIRBuilder EntryBuilder;
  std::unique_ptr<MachineIRBuilder> EntryBuilder;

  // The MachineFunction currently being translated.
  MachineFunction *MF = nullptr;

  /// MachineRegisterInfo used to create virtual registers.
  MachineRegisterInfo *MRI = nullptr;

  const DataLayout *DL = nullptr;

  /// Current target configuration. Controls how the pass handles errors.
  const TargetPassConfig *TPC = nullptr;

  CodeGenOptLevel OptLevel;

  /// Current optimization remark emitter. Used to report failures.
  std::unique_ptr<OptimizationRemarkEmitter> ORE;

  AAResults *AA = nullptr;
  AssumptionCache *AC = nullptr;
  const TargetLibraryInfo *LibInfo = nullptr;
  const TargetLowering *TLI = nullptr;
  FunctionLoweringInfo FuncInfo;

  // True when optimizations are enabled, i.e. neither the TargetMachine
  // requests no optimization nor the function has the optnone attribute.
  bool EnableOpts = false;

  /// True when the block contains a tail call. This allows the IRTranslator to
  /// stop translating such blocks early.
  bool HasTailCall = false;

  StackProtectorDescriptor SPDescriptor;

  /// Switch analysis and optimization.
  class GISelSwitchLowering : public SwitchCG::SwitchLowering {
  public:
    GISelSwitchLowering(IRTranslator *irt, FunctionLoweringInfo &funcinfo)
        : SwitchLowering(funcinfo), IRT(irt) {
      assert(irt && "irt is null!");
    }

    void addSuccessorWithProb(
        MachineBasicBlock *Src, MachineBasicBlock *Dst,
        BranchProbability Prob = BranchProbability::getUnknown()) override {
      IRT->addSuccessorWithProb(Src, Dst, Prob);
    }

    virtual ~GISelSwitchLowering() = default;

  private:
    IRTranslator *IRT;
  };

  std::unique_ptr<GISelSwitchLowering> SL;

  // * Insert all the code needed to materialize the constants
  //   at the proper place. E.g., Entry block or dominator block
  //   of each constant depending on how fancy we want to be.
  // * Clear the different maps.
  void finalizeFunction();

  // Processing steps done per block. E.g. emitting jump tables, stack
  // protectors etc. Returns true if no errors, false if there was a problem
  // that caused an abort.
  bool finalizeBasicBlock(const BasicBlock &BB, MachineBasicBlock &MBB);

  /// Codegen a new tail for a stack protector check ParentMBB which has had its
  /// tail spliced into a stack protector check success bb.
  ///
  /// For a high level explanation of how this fits into the stack protector
  /// generation see the comment on the declaration of class
  /// StackProtectorDescriptor.
  ///
  /// \return true if there were no problems.
  bool emitSPDescriptorParent(StackProtectorDescriptor &SPD,
                              MachineBasicBlock *ParentBB);

  /// Codegen the failure basic block for a stack protector check.
  ///
  /// A failure stack protector machine basic block consists simply of a call to
  /// __stack_chk_fail().
  ///
  /// For a high level explanation of how this fits into the stack protector
  /// generation see the comment on the declaration of class
  /// StackProtectorDescriptor.
  ///
  /// \return true if there were no problems.
  bool emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
                               MachineBasicBlock *FailureBB);

  /// Get the VRegs that represent \p Val.
  /// Non-aggregate types have just one corresponding VReg and the list can be
  /// used as a single "unsigned". Aggregates get flattened. If such VRegs do
  /// not exist, they are created.
  ArrayRef<Register> getOrCreateVRegs(const Value &Val);
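
  // For example (a sketch of the flattening): for a value of IR type
  // { i64, i32 }, the returned list holds two vregs, an s64 and an s32,
  // whereas a scalar or pointer value always maps to exactly one vreg.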

  Register getOrCreateVReg(const Value &Val) {
    auto Regs = getOrCreateVRegs(Val);
    if (Regs.empty())
      return 0;
    assert(Regs.size() == 1 &&
           "attempt to get single VReg for aggregate or void");
    return Regs[0];
  }

  Register getOrCreateConvergenceTokenVReg(const Value &Token) {
    assert(Token.getType()->isTokenTy());
    auto &Regs = *VMap.getVRegs(Token);
    if (!Regs.empty()) {
      assert(Regs.size() == 1 &&
             "Expected a single register for convergence tokens.");
      return Regs[0];
    }

    auto Reg = MRI->createGenericVirtualRegister(LLT::token());
    Regs.push_back(Reg);
    auto &Offsets = *VMap.getOffsets(Token);
    if (Offsets.empty())
      Offsets.push_back(0);
    return Reg;
  }

  /// Allocate some vregs and offsets in the VMap. Then populate just the
  /// offsets while leaving the vregs empty.
  ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);

  /// Get the frame index that represents \p AI.
  /// If no such frame index exists, it is created.
  int getOrCreateFrameIndex(const AllocaInst &AI);

  /// Get the alignment of the given memory operation instruction. This will
  /// either be the explicitly specified value or the ABI-required alignment for
  /// the type being accessed (according to the Module's DataLayout).
  Align getMemOpAlign(const Instruction &I);

  /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
  /// returned will be the head of the translated block (suitable for branch
  /// destinations).
  MachineBasicBlock &getMBB(const BasicBlock &BB);

  /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding
  /// to `Edge.first` at the IR level. This is used when IRTranslation creates
  /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer
  /// represented simply by the IR-level CFG.
  void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred);

  /// Returns the Machine IR predecessors for the given IR CFG edge. Usually
  /// this is just the single MachineBasicBlock corresponding to the predecessor
  /// in the IR. More complex lowering can result in multiple MachineBasicBlocks
  /// preceding the original though (e.g. switch instructions).
  SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) {
    auto RemappedEdge = MachinePreds.find(Edge);
    if (RemappedEdge != MachinePreds.end())
      return RemappedEdge->second;
    return SmallVector<MachineBasicBlock *, 4>(1, &getMBB(*Edge.first));
  }

  /// Return branch probability calculated by BranchProbabilityInfo for IR
  /// blocks.
  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
                                       const MachineBasicBlock *Dst) const;

  void addSuccessorWithProb(
      MachineBasicBlock *Src, MachineBasicBlock *Dst,
      BranchProbability Prob = BranchProbability::getUnknown());

public:
  IRTranslator(CodeGenOptLevel OptLevel = CodeGenOptLevel::None);

  StringRef getPassName() const override { return "IRTranslator"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  // Algo:
  //   CallLowering = MF.subtarget.getCallLowering()
  //   F = MF.getParent()
  //   MIRBuilder.reset(MF)
  //   getMBB(F.getEntryBB())
  //   CallLowering->translateArguments(MIRBuilder, F, ValToVReg)
  //   for each bb in F
  //     getMBB(bb)
  //     for each inst in bb
  //       if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence))
  //         report_fatal_error("Don't know how to translate input");
  //   finalize()
  bool runOnMachineFunction(MachineFunction &MF) override;
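
  // How a target typically schedules this pass (an illustrative sketch of the
  // common TargetPassConfig pattern; "MyTargetPassConfig" is a placeholder):
  //
  //   bool MyTargetPassConfig::addIRTranslator() {
  //     addPass(new IRTranslator(getOptLevel()));
  //     return false;
  //   }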
};

} // end namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H