//===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file declares the IRTranslator pass.
/// This pass is responsible for translating LLVM IR into MachineInstrs.
/// It uses target hooks to lower the ABI, but aside from that, the code
/// generated by the pass is generic. This is the default translator used for
/// GlobalISel.
///
/// \todo Replace the comments with actual doxygen comments.
//===----------------------------------------------------------------------===//
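//
// Illustrative example (not part of the original header): for a function such
// as
//
//   define i32 @sum(i32 %a, i32 %b) {
//     %r = add i32 %a, %b
//     ret i32 %r
//   }
//
// the IRTranslator produces generic MachineInstrs along these lines (register
// numbering and the ABI lowering of arguments and returns are target
// dependent; an AArch64-like calling convention is assumed here):
//
//   %0:_(s32) = COPY $w0
//   %1:_(s32) = COPY $w1
//   %2:_(s32) = G_ADD %0, %1
//   $w0 = COPY %2(s32)
//
// Later GlobalISel passes (Legalizer, RegBankSelect, InstructionSelect) turn
// these generic instructions into target instructions.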

#ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
#define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H

#include "llvm/ADT/DenseMap.h"
#include <memory>
#include <utility>

namespace llvm {

class AllocaInst;
class AssumptionCache;
class BasicBlock;
class CallInst;
class CallLowering;
class Constant;
class ConstrainedFPIntrinsic;
class DataLayout;
class DbgDeclareInst;
class DbgValueInst;
class Instruction;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineRegisterInfo;
class OptimizationRemarkEmitter;
class PHINode;
class TargetLibraryInfo;
class TargetPassConfig;
class User;
class Value;

// Technically the pass should run on a hypothetical MachineModule,
// since it should translate Global into some sort of MachineGlobal.
// The MachineGlobal should ultimately just be a transfer of ownership of
// the interesting bits that are relevant to represent a global value.
// That being said, we could investigate what it would cost to just duplicate
// the information from the LLVM IR.
// The idea is that ultimately we would be able to free up the memory used
// by the LLVM IR as soon as the translation is over.
class IRTranslator : public MachineFunctionPass {
public:
  static char ID;

private:
  /// Interface used to lower everything related to calls.
  const CallLowering *CLI = nullptr;

  /// This class contains the mapping from Values to vreg-related data.
  class ValueToVRegInfo {
  public:
    ValueToVRegInfo() = default;

    using VRegListT = SmallVector<Register, 1>;
    using OffsetListT = SmallVector<uint64_t, 1>;

    using const_vreg_iterator =
        DenseMap<const Value *, VRegListT *>::const_iterator;
    using const_offset_iterator =
        DenseMap<const Type *, OffsetListT *>::const_iterator;

    inline const_vreg_iterator vregs_end() const { return ValToVRegs.end(); }

    VRegListT *getVRegs(const Value &V) {
      auto It = ValToVRegs.find(&V);
      if (It != ValToVRegs.end())
        return It->second;

      return insertVRegs(V);
    }

    OffsetListT *getOffsets(const Value &V) {
      auto It = TypeToOffsets.find(V.getType());
      if (It != TypeToOffsets.end())
        return It->second;

      return insertOffsets(V);
    }

    const_vreg_iterator findVRegs(const Value &V) const {
      return ValToVRegs.find(&V);
    }

    bool contains(const Value &V) const { return ValToVRegs.contains(&V); }

    void reset() {
      ValToVRegs.clear();
      TypeToOffsets.clear();
      VRegAlloc.DestroyAll();
      OffsetAlloc.DestroyAll();
    }

  private:
    VRegListT *insertVRegs(const Value &V) {
      assert(!ValToVRegs.contains(&V) && "Value already exists");

      // We placement new using our fast allocator since we never try to free
      // the vectors until translation is finished.
      auto *VRegList = new (VRegAlloc.Allocate()) VRegListT();
      ValToVRegs[&V] = VRegList;
      return VRegList;
    }

    OffsetListT *insertOffsets(const Value &V) {
      assert(!TypeToOffsets.contains(V.getType()) && "Type already exists");

      auto *OffsetList = new (OffsetAlloc.Allocate()) OffsetListT();
      TypeToOffsets[V.getType()] = OffsetList;
      return OffsetList;
    }
    SpecificBumpPtrAllocator<VRegListT> VRegAlloc;
    SpecificBumpPtrAllocator<OffsetListT> OffsetAlloc;

    // We store pointers to vectors here since references may be invalidated
    // while we hold them if we stored the vectors directly.
    DenseMap<const Value *, VRegListT *> ValToVRegs;
    DenseMap<const Type *, OffsetListT *> TypeToOffsets;
  };

  /// Mapping of the values of the current LLVM IR function to the related
  /// virtual registers and offsets.
  ValueToVRegInfo VMap;

  // N.b. it's not completely obvious that this will be sufficient for every
  // LLVM IR construct (with "invoke" being the obvious candidate to mess up
  // our lives).
  DenseMap<const BasicBlock *, MachineBasicBlock *> BBToMBB;

  // One BasicBlock can be translated to multiple MachineBasicBlocks. For such
  // BasicBlocks translated to multiple MachineBasicBlocks, MachinePreds retains
  // a mapping from the edges arriving at the BasicBlock to the corresponding
  // created MachineBasicBlocks. Some BasicBlocks that get translated to a
  // single MachineBasicBlock may also end up in this Map.
  using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>;
  DenseMap<CFGEdge, SmallVector<MachineBasicBlock *, 1>> MachinePreds;

  // List of stubbed PHI instructions, for values and basic blocks to be filled
  // in once all MachineBasicBlocks have been created.
  SmallVector<std::pair<const PHINode *, SmallVector<MachineInstr *, 1>>, 4>
      PendingPHIs;

  /// Record of what frame index has been allocated to specified allocas for
  /// this function.
  DenseMap<const AllocaInst *, int> FrameIndices;

  SwiftErrorValueTracking SwiftError;

  /// \name Methods for translating from LLVM IR to MachineInstr.
  /// \see ::translate for general information on the translate methods.
  /// @{

  /// Translate \p Inst into its corresponding MachineInstr instruction(s).
  /// Insert the newly translated instruction(s) right where the CurBuilder
  /// is set.
  ///
  /// The general algorithm is:
  /// 1. Look for a virtual register for each operand or
  ///    create one.
  /// 2. Update the VMap accordingly.
  /// 2.alt. For constant arguments, if they are compile time constants,
  ///   produce an immediate in the right operand and do not touch
  ///   ValToReg. Actually we will go with a virtual register for each
  ///   constant because it may be expensive to actually materialize the
  ///   constant. Moreover, if the constant spans several instructions,
  ///   CSE may not catch them.
  ///   => Update ValToVReg and remember that we saw a constant in Constants.
  ///   We will materialize all the constants in finalize.
  /// Note: we would need to do something so that we can recognize such operands
  /// as constants.
  /// 3. Create the generic instruction.
  ///
  /// \return true if the translation succeeded.
  bool translate(const Instruction &Inst);
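  // For example (illustrative): translating "%x = add i32 %a, 1" looks up (or
  // creates) the vreg for %a in the VMap, creates a vreg for the constant 1
  // whose G_CONSTANT is emitted in the entry block via EntryBuilder, and then
  // emits "%x:_(s32) = G_ADD %a, %one" at the CurBuilder insertion point.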

  /// Materialize \p C into virtual-register \p Reg. The generic instructions
  /// performing this materialization will be inserted into the entry block of
  /// the function.
  ///
  /// \return true if the materialization succeeded.
  bool translate(const Constant &C, Register Reg);

  // Translate U as a copy of V.
  bool translateCopy(const User &U, const Value &V,
                     MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
  /// emitted.
  bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM load instruction into generic IR.
  bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM store instruction into generic IR.
  bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM string intrinsic (memcpy, memset, ...).
  bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
                        unsigned Opcode);
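  // For example (illustrative): a call to @llvm.memcpy is translated into a
  // generic G_MEMCPY whose main operands are the destination pointer, source
  // pointer, and length vregs, with memory operands describing the accesses;
  // @llvm.memset and @llvm.memmove map to G_MEMSET and G_MEMMOVE similarly.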

  void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);

  bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                  MachineIRBuilder &MIRBuilder);
  bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder);

  /// Helper function for translateSimpleIntrinsic.
  /// \return The generic opcode for \p ID if \p ID is a simple intrinsic
  /// (ceil, fabs, etc.). Otherwise, returns Intrinsic::not_intrinsic.
  unsigned getSimpleIntrinsicOpcode(Intrinsic::ID ID);

  /// Translates the intrinsics defined in getSimpleIntrinsicOpcode.
  /// \return true if the translation succeeded.
  bool translateSimpleIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                MachineIRBuilder &MIRBuilder);

  bool translateConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI,
                                       MachineIRBuilder &MIRBuilder);

  bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                               MachineIRBuilder &MIRBuilder);

  /// Returns the single livein physical register Arg was lowered to, if
  /// possible.
  std::optional<MCRegister> getArgPhysReg(Argument &Arg);

  /// If DebugInst targets an Argument and its expression is an EntryValue,
  /// lower it as an entry in the MF debug table.
  bool translateIfEntryValueArgument(const DbgDeclareInst &DebugInst);

  /// If DebugInst targets an Argument and its expression is an EntryValue,
  /// lower as a DBG_VALUE targeting the corresponding livein register for that
  /// Argument.
  bool translateIfEntryValueArgument(const DbgValueInst &DebugInst,
                                     MachineIRBuilder &MIRBuilder);

  bool translateInlineAsm(const CallBase &CB, MachineIRBuilder &MIRBuilder);

  /// Common code for translating normal calls or invokes.
  bool translateCallBase(const CallBase &CB, MachineIRBuilder &MIRBuilder);

  /// Translate call instruction.
  /// \pre \p U is a call instruction.
  bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);

  /// When an invoke or a cleanupret unwinds to the next EH pad, there are
  /// many places it could ultimately go. In the IR, we have a single unwind
  /// destination, but in the machine CFG, we enumerate all the possible blocks.
  /// This function skips over imaginary basic blocks that hold catchswitch
  /// instructions, and finds all the "real" machine
  /// basic block destinations. As those destinations may not be successors of
  /// EHPadBB, here we also calculate the edge probability to those
  /// destinations. The passed-in Prob is the edge probability to EHPadBB.
  bool findUnwindDestinations(
      const BasicBlock *EHPadBB, BranchProbability Prob,
      SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
          &UnwindDests);

  bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate one of LLVM's cast instructions into MachineInstrs, with the
  /// given generic Opcode.
  bool translateCast(unsigned Opcode, const User &U,
                     MachineIRBuilder &MIRBuilder);

  /// Translate a phi instruction.
  bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate a comparison (icmp or fcmp) instruction or constant.
  bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an integer compare instruction (or constant).
  bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCompare(U, MIRBuilder);
  }

  /// Translate a floating-point compare instruction (or constant).
  bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCompare(U, MIRBuilder);
  }
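  // For example (illustrative): "%c = icmp slt i32 %a, %b" becomes
  // "%c:_(s1) = G_ICMP intpred(slt), %a(s32), %b", and an fcmp becomes a
  // G_FCMP with a floatpred(...) predicate operand.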

  /// Add remaining operands onto phis we've translated. Executed after all
  /// MachineBasicBlocks for the function have been created.
  void finishPendingPhis();

  /// Translate \p U into a unary operation \p Opcode.
  /// \pre \p U is a unary operation.
  bool translateUnaryOp(unsigned Opcode, const User &U,
                        MachineIRBuilder &MIRBuilder);

  /// Translate \p U into a binary operation \p Opcode.
  /// \pre \p U is a binary operation.
  bool translateBinaryOp(unsigned Opcode, const User &U,
                         MachineIRBuilder &MIRBuilder);

  /// If the set of cases should be emitted as a series of branches, return
  /// true. If we should emit this as a bunch of and/or'd together conditions,
  /// return false.
  bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
  /// Helper method for findMergedConditions.
  /// This function emits a branch and is used at the leaves of an OR or an
  /// AND operator tree.
  void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    MachineBasicBlock *CurBB,
                                    MachineBasicBlock *SwitchBB,
                                    BranchProbability TProb,
                                    BranchProbability FProb, bool InvertCond);
  /// Used during condbr translation to find trees of conditions that can be
  /// optimized.
  void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            MachineBasicBlock *SwitchBB,
                            Instruction::BinaryOps Opc, BranchProbability TProb,
                            BranchProbability FProb, bool InvertCond);
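  // For example (illustrative): for "br i1 %c, ..." where %c = and i1 %a, %b,
  // findMergedConditions can emit a chain of branches instead of materializing
  // the 'and':
  //
  //   bb.cur:     G_BRCOND %a, %bb.check_b; G_BR %bb.false
  //   bb.check_b: G_BRCOND %b, %bb.true;    G_BR %bb.false
  //
  // with the branch probabilities (TProb/FProb) distributed over the new edges.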

  /// Translate branch (br) instruction.
  /// \pre \p U is a branch instruction.
  bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);

  // Begin switch lowering functions.
  bool emitJumpTableHeader(SwitchCG::JumpTable &JT,
                           SwitchCG::JumpTableHeader &JTH,
                           MachineBasicBlock *HeaderBB);
  void emitJumpTable(SwitchCG::JumpTable &JT, MachineBasicBlock *MBB);
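  // For example (illustrative): for a dense switch, the jump table header
  // subtracts the low bound from the switch value and performs an unsigned
  // range check, and emitJumpTable then materializes the table address with
  // G_JUMP_TABLE and branches through it with G_BRJT.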

  void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
                      MachineIRBuilder &MIB);

  /// Generate code for the bit test header block, which precedes each sequence
  /// of BitTestCases.
  void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
                         MachineBasicBlock *SwitchMBB);
  /// Generate code to produce one "bit test" for a given BitTestCase \p B.
  void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
                       BranchProbability BranchProbToNext, Register Reg,
                       SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
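  // For example (illustrative): a switch whose cases 0, 3 and 5 share a
  // destination can be lowered as a bit test: the header range-checks the
  // rebased switch value, and the bit test block tests the corresponding bit
  // of a per-destination case mask (1<<0 | 1<<3 | 1<<5) and branches on the
  // result.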

  bool lowerJumpTableWorkItem(
      SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
      MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
      MachineIRBuilder &MIB, MachineFunction::iterator BBI,
      BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
      MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);

  bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
                                MachineBasicBlock *Fallthrough,
                                bool FallthroughUnreachable,
                                BranchProbability UnhandledProbs,
                                MachineBasicBlock *CurMBB,
                                MachineIRBuilder &MIB,
                                MachineBasicBlock *SwitchMBB);

  bool lowerBitTestWorkItem(
      SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
      MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
      MachineIRBuilder &MIB, MachineFunction::iterator BBI,
      BranchProbability DefaultProb, BranchProbability UnhandledProbs,
      SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
      bool FallthroughUnreachable);

  bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
                           MachineBasicBlock *SwitchMBB,
                           MachineBasicBlock *DefaultMBB,
                           MachineIRBuilder &MIB);

  bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
  // End switch lowering section.

  bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate return (ret) instruction.
  /// The target needs to implement CallLowering::lowerReturn for
  /// this to succeed.
  /// \pre \p U is a return instruction.
  bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateFNeg(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
  }
  bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
  }
  bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
  }
  bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
  }
  bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
  }
  bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
  }

  bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
  }
  bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
  }
  bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
  }
  bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
  }
  bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
  }
  bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
  }
  bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
  }
  bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
  }
  bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
  }
  bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
  }
  bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
  }
  bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
  }
  bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
  }
  bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
  }

  bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
  }

  bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
  }
  bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
  }
  bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
  }

  bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
  }
  bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
  }
  bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
  }
  bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
  }
  bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
  }

  bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateFence(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateFreeze(const User &U, MachineIRBuilder &MIRBuilder);

  // Stubs to keep the compiler happy while we implement the rest of the
  // translation.
  bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder);
  }
  bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }

  /// @}

  // Builder for machine instructions, a la IRBuilder.
  // I.e., compared to regular MIBuilder, this one also inserts the instruction
  // in the current block, it can create blocks, etc., basically a kind of
  // IRBuilder, but for Machine IR.
  // CSEMIRBuilder CurBuilder;
  std::unique_ptr<MachineIRBuilder> CurBuilder;
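  // For example (illustrative): translation code typically does something like
  //   CurBuilder->setDebugLoc(Inst.getDebugLoc());
  //   CurBuilder->buildAdd(DstReg, Op0Reg, Op1Reg);
  // and the builder both creates the G_ADD and inserts it at the current
  // insertion point.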

  // Builder set to the entry block (just after ABI lowering instructions). Used
  // as a convenient location for Constants.
  // CSEMIRBuilder EntryBuilder;
  std::unique_ptr<MachineIRBuilder> EntryBuilder;

  // The MachineFunction currently being translated.
  MachineFunction *MF = nullptr;

  /// MachineRegisterInfo used to create virtual registers.
  MachineRegisterInfo *MRI = nullptr;

  const DataLayout *DL = nullptr;

  /// Current target configuration. Controls how the pass handles errors.
  const TargetPassConfig *TPC = nullptr;

  CodeGenOpt::Level OptLevel;

  /// Current optimization remark emitter. Used to report failures.
  std::unique_ptr<OptimizationRemarkEmitter> ORE;

  AAResults *AA = nullptr;
  AssumptionCache *AC = nullptr;
  const TargetLibraryInfo *LibInfo = nullptr;
  FunctionLoweringInfo FuncInfo;

  // True when optimizations are enabled: the TargetMachine's optimization
  // level is not CodeGenOpt::None and the function does not have the optnone
  // attribute.
  bool EnableOpts = false;

  /// True when the block contains a tail call. This allows the IRTranslator to
  /// stop translating such blocks early.
  bool HasTailCall = false;

  StackProtectorDescriptor SPDescriptor;

  /// Switch analysis and optimization.
  class GISelSwitchLowering : public SwitchCG::SwitchLowering {
  public:
    GISelSwitchLowering(IRTranslator *irt, FunctionLoweringInfo &funcinfo)
        : SwitchLowering(funcinfo), IRT(irt) {
      assert(irt && "irt is null!");
    }

    void addSuccessorWithProb(
        MachineBasicBlock *Src, MachineBasicBlock *Dst,
        BranchProbability Prob = BranchProbability::getUnknown()) override {
      IRT->addSuccessorWithProb(Src, Dst, Prob);
    }

    virtual ~GISelSwitchLowering() = default;

  private:
    IRTranslator *IRT;
  };

  std::unique_ptr<GISelSwitchLowering> SL;

  // * Insert all the code needed to materialize the constants
  // at the proper place. E.g., Entry block or dominator block
  // of each constant depending on how fancy we want to be.
  // * Clear the different maps.
  void finalizeFunction();

  // Processing steps done per block. E.g. emitting jump tables, stack
  // protectors etc. Returns true if no errors, false if there was a problem
  // that caused an abort.
  bool finalizeBasicBlock(const BasicBlock &BB, MachineBasicBlock &MBB);

  /// Codegen a new tail for a stack protector check ParentMBB which has had its
  /// tail spliced into a stack protector check success bb.
  ///
  /// For a high level explanation of how this fits into the stack protector
  /// generation see the comment on the declaration of class
  /// StackProtectorDescriptor.
  ///
  /// \return true if there were no problems.
  bool emitSPDescriptorParent(StackProtectorDescriptor &SPD,
                              MachineBasicBlock *ParentBB);

  /// Codegen the failure basic block for a stack protector check.
  ///
  /// A failure stack protector machine basic block consists simply of a call to
  /// __stack_chk_fail().
  ///
  /// For a high level explanation of how this fits into the stack protector
  /// generation see the comment on the declaration of class
  /// StackProtectorDescriptor.
  ///
  /// \return true if there were no problems.
  bool emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
                               MachineBasicBlock *FailureBB);

  /// Get the VRegs that represent \p Val.
  /// Non-aggregate types have just one corresponding VReg and the list can be
  /// used as a single "unsigned". Aggregates get flattened. If such VRegs do
  /// not exist, they are created.
  ArrayRef<Register> getOrCreateVRegs(const Value &Val);
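  // For example (illustrative): a value of type {i64, i32} is represented by
  // two vregs, one s64 and one s32, with the offset of each piece inside the
  // aggregate recorded in the VMap alongside the register list.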

  Register getOrCreateVReg(const Value &Val) {
    auto Regs = getOrCreateVRegs(Val);
    if (Regs.empty())
      return 0;
    assert(Regs.size() == 1 &&
           "attempt to get single VReg for aggregate or void");
    return Regs[0];
  }

  /// Allocate some vregs and offsets in the VMap. Then populate just the
  /// offsets while leaving the vregs empty.
  ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);

  /// Get the frame index that represents \p AI.
  /// If no such frame index exists, it is created.
  int getOrCreateFrameIndex(const AllocaInst &AI);

  /// Get the alignment of the given memory operation instruction. This will
  /// either be the explicitly specified value or the ABI-required alignment for
  /// the type being accessed (according to the Module's DataLayout).
  Align getMemOpAlign(const Instruction &I);
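  // For example (illustrative): "load i32, ptr %p, align 2" yields Align(2),
  // while a load of i32 with no explicit alignment yields the ABI alignment
  // of i32 from the DataLayout (typically Align(4)).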

  /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
  /// returned will be the head of the translated block (suitable for branch
  /// destinations).
  MachineBasicBlock &getMBB(const BasicBlock &BB);

  /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding
  /// to `Edge.first` at the IR level. This is used when IRTranslation creates
  /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer
  /// represented simply by the IR-level CFG.
  void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred);

  /// Returns the Machine IR predecessors for the given IR CFG edge. Usually
  /// this is just the single MachineBasicBlock corresponding to the predecessor
  /// in the IR. More complex lowering can result in multiple MachineBasicBlocks
  /// preceding the original though (e.g. switch instructions).
  SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) {
    auto RemappedEdge = MachinePreds.find(Edge);
    if (RemappedEdge != MachinePreds.end())
      return RemappedEdge->second;
    return SmallVector<MachineBasicBlock *, 4>(1, &getMBB(*Edge.first));
  }

  /// Return branch probability calculated by BranchProbabilityInfo for IR
  /// blocks.
  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
                                       const MachineBasicBlock *Dst) const;

  void addSuccessorWithProb(
      MachineBasicBlock *Src, MachineBasicBlock *Dst,
      BranchProbability Prob = BranchProbability::getUnknown());

public:
  IRTranslator(CodeGenOpt::Level OptLevel = CodeGenOpt::None);

  StringRef getPassName() const override { return "IRTranslator"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  // Algo:
  //   CallLowering = MF.subtarget.getCallLowering()
  //   F = MF.getParent()
  //   MIRBuilder.reset(MF)
  //   getMBB(F.getEntryBB())
  //   CallLowering->translateArguments(MIRBuilder, F, ValToVReg)
  //   for each bb in F
  //     getMBB(bb)
  //     for each inst in bb
  //       if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence))
  //         report_fatal_error("Don't know how to translate input");
  //   finalize()
  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H