//===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file declares the IRTranslator pass.
/// This pass is responsible for translating LLVM IR into MachineInstr. It uses
/// target hooks to lower the ABI, but aside from that the generated code is
/// generic. This is the default translator used for GlobalISel.
///
/// \todo Replace the comments with actual doxygen comments.
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
#define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H

#include "llvm/ADT/DenseMap.h"
#include <memory>
#include <utility>

namespace llvm {

class AllocaInst;
class AssumptionCache;
class BasicBlock;
class CallInst;
class CallLowering;
class Constant;
class ConstrainedFPIntrinsic;
class DataLayout;
class DbgDeclareInst;
class DbgValueInst;
class Instruction;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineRegisterInfo;
class OptimizationRemarkEmitter;
class PHINode;
class TargetLibraryInfo;
class TargetPassConfig;
class User;
class Value;

// Technically the pass should run on a hypothetical MachineModule,
// since it should translate each Global into some sort of MachineGlobal.
// The MachineGlobal should ultimately just be a transfer of ownership of
// the interesting bits that are relevant to represent a global value.
// That being said, we could investigate what it would cost to just duplicate
// the information from the LLVM IR.
// The idea is that ultimately we would be able to free up the memory used
// by the LLVM IR as soon as the translation is over.
class IRTranslator : public MachineFunctionPass {
public:
  static char ID;

private:
  /// Interface used to lower everything related to calls.
  const CallLowering *CLI = nullptr;

  /// This class contains the mapping from Values to vreg-related data.
  class ValueToVRegInfo {
  public:
    ValueToVRegInfo() = default;

    using VRegListT = SmallVector<Register, 1>;
    using OffsetListT = SmallVector<uint64_t, 1>;

    using const_vreg_iterator =
        DenseMap<const Value *, VRegListT *>::const_iterator;
    using const_offset_iterator =
        DenseMap<Type *, OffsetListT *>::const_iterator;

    inline const_vreg_iterator vregs_end() const { return ValToVRegs.end(); }

    VRegListT *getVRegs(const Value &V) {
      auto It = ValToVRegs.find(&V);
      if (It != ValToVRegs.end())
        return It->second;

      return insertVRegs(V);
    }

    OffsetListT *getOffsets(const Value &V) {
      auto It = TypeToOffsets.find(V.getType());
      if (It != TypeToOffsets.end())
        return It->second;

      return insertOffsets(V);
    }

    const_vreg_iterator findVRegs(const Value &V) const {
      return ValToVRegs.find(&V);
    }

    bool contains(const Value &V) const { return ValToVRegs.contains(&V); }

    void reset() {
      ValToVRegs.clear();
      TypeToOffsets.clear();
      VRegAlloc.DestroyAll();
      OffsetAlloc.DestroyAll();
    }

  private:
    VRegListT *insertVRegs(const Value &V) {
      assert(!ValToVRegs.contains(&V) && "Value already exists");

      // We placement new using our fast allocator since we never try to free
      // the vectors until translation is finished.
      auto *VRegList = new (VRegAlloc.Allocate()) VRegListT();
      ValToVRegs[&V] = VRegList;
      return VRegList;
    }

    OffsetListT *insertOffsets(const Value &V) {
      assert(!TypeToOffsets.contains(V.getType()) && "Type already exists");

      auto *OffsetList = new (OffsetAlloc.Allocate()) OffsetListT();
      TypeToOffsets[V.getType()] = OffsetList;
      return OffsetList;
    }

    SpecificBumpPtrAllocator<VRegListT> VRegAlloc;
    SpecificBumpPtrAllocator<OffsetListT> OffsetAlloc;

    // We store pointers to vectors here since references may be invalidated
    // while we hold them if we stored the vectors directly.
    DenseMap<const Value *, VRegListT *> ValToVRegs;
    DenseMap<Type *, OffsetListT *> TypeToOffsets;
  };

  /// Mapping of the values of the current LLVM IR function to the related
  /// virtual registers and offsets.
  ValueToVRegInfo VMap;

  // One BasicBlock can be translated to multiple MachineBasicBlocks. For such
  // BasicBlocks, MachinePreds retains a mapping from the edges arriving at the
  // BasicBlock to the corresponding created MachineBasicBlocks. Some
  // BasicBlocks that get translated to a single MachineBasicBlock may also end
  // up in this map.
  using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>;
  DenseMap<CFGEdge, SmallVector<MachineBasicBlock *, 1>> MachinePreds;
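  // For illustration, given IR such as the following sketch:
  //   bb0:
  //     switch i32 %x, label %def [ i32 0, label %dst
  //                                 i32 9, label %dst ]
  // lowering the switch may create several compare-and-branch
  // MachineBasicBlocks that branch to getMBB(dst). Each of those blocks is
  // recorded here as a machine predecessor for the IR edge (bb0, dst), so PHIs
  // in %dst can later be wired with one operand per machine predecessor (see
  // addMachineCFGPred and getMachinePredBBs below).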
  // List of stubbed PHI instructions, for values and basic blocks to be filled
  // in once all MachineBasicBlocks have been created.
  SmallVector<std::pair<const PHINode *, SmallVector<MachineInstr *, 1>>, 4>
      PendingPHIs;

  /// Record of what frame index has been allocated to specified allocas for
  /// this function.
  DenseMap<const AllocaInst *, int> FrameIndices;

  SwiftErrorValueTracking SwiftError;

  /// \name Methods for translating from LLVM IR to MachineInstr.
  /// \see ::translate for general information on the translate methods.
  /// @{

  /// Translate \p Inst into its corresponding MachineInstr instruction(s).
  /// Insert the newly translated instruction(s) right where the CurBuilder
  /// is set.
  ///
  /// The general algorithm is:
  /// 1. Look for a virtual register for each operand or
  ///    create one.
  /// 2. Update the VMap accordingly.
  /// 2.alt. For constant arguments, if they are compile time constants,
  ///   produce an immediate in the right operand and do not touch
  ///   ValToReg. Actually we will go with a virtual register for each
  ///   constant because it may be expensive to actually materialize the
  ///   constant. Moreover, if the constant spans several instructions,
  ///   CSE may not catch them.
  ///   => Update ValToVReg and remember that we saw a constant in Constants.
  ///   We will materialize all the constants in finalize.
  /// Note: we would need to do something so that we can recognize such operands
  /// as constants.
  /// 3. Create the generic instruction.
  ///
  /// \return true if the translation succeeded.
  bool translate(const Instruction &Inst);
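  // As a rough sketch of steps 1-3 for a simple instruction such as
  // `%sum = add i32 %a, %b` (illustrative only, not the exact code path):
  //   Register Op0 = getOrCreateVReg(*Inst.getOperand(0));
  //   Register Op1 = getOrCreateVReg(*Inst.getOperand(1));
  //   Register Res = getOrCreateVReg(Inst);
  //   CurBuilder->buildInstr(TargetOpcode::G_ADD, {Res}, {Op0, Op1});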

  /// Materialize \p C into virtual-register \p Reg. The generic instructions
  /// performing this materialization will be inserted into the entry block of
  /// the function.
  ///
  /// \return true if the materialization succeeded.
  bool translate(const Constant &C, Register Reg);

  /// Examine any debug-info attached to the instruction (in the form of
  /// DbgRecords) and translate it.
  void translateDbgInfo(const Instruction &Inst,
                        MachineIRBuilder &MIRBuilder);

  /// Translate a debug-info record of a dbg.value into a DBG_* instruction.
  /// Pass in all the contents of the record, rather than relying on how it's
  /// stored.
  void translateDbgValueRecord(Value *V, bool HasArgList,
                               const DILocalVariable *Variable,
                               const DIExpression *Expression,
                               const DebugLoc &DL,
                               MachineIRBuilder &MIRBuilder);

  /// Translate a debug-info record of a dbg.declare into an indirect DBG_*
  /// instruction. Pass in all the contents of the record, rather than relying
  /// on how it's stored.
  void translateDbgDeclareRecord(Value *Address, bool HasArgList,
                                 const DILocalVariable *Variable,
                                 const DIExpression *Expression,
                                 const DebugLoc &DL,
                                 MachineIRBuilder &MIRBuilder);

  // Translate U as a copy of V.
  bool translateCopy(const User &U, const Value &V,
                     MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST
  /// is emitted.
  bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM load instruction into generic IR.
  bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM store instruction into generic IR.
  bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM string intrinsic (memcpy, memset, ...).
  bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
                        unsigned Opcode);

  /// Translate an LLVM trap intrinsic (trap, debugtrap, ubsantrap).
  bool translateTrap(const CallInst &U, MachineIRBuilder &MIRBuilder,
                     unsigned Opcode);

  // Translate @llvm.vector.interleave2 and
  // @llvm.vector.deinterleave2 intrinsics for fixed-width vector
  // types into vector shuffles.
  bool translateVectorInterleave2Intrinsic(const CallInst &CI,
                                           MachineIRBuilder &MIRBuilder);
  bool translateVectorDeinterleave2Intrinsic(const CallInst &CI,
                                             MachineIRBuilder &MIRBuilder);

  void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);

  bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                  MachineIRBuilder &MIRBuilder);
  bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder);

  /// Helper function for translateSimpleIntrinsic.
  /// \return The generic opcode for \p ID if \p ID is a
  /// simple intrinsic (ceil, fabs, etc.). Otherwise, returns
  /// Intrinsic::not_intrinsic.
  unsigned getSimpleIntrinsicOpcode(Intrinsic::ID ID);

  /// Translates the intrinsics defined in getSimpleIntrinsicOpcode.
  /// \return true if the translation succeeded.
  bool translateSimpleIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                MachineIRBuilder &MIRBuilder);
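  // For example, @llvm.fabs.* maps to TargetOpcode::G_FABS and @llvm.ceil.*
  // maps to TargetOpcode::G_FCEIL; the call's arguments and return value are
  // then translated like those of any other instruction.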

  bool translateConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI,
                                       MachineIRBuilder &MIRBuilder);

  bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                               MachineIRBuilder &MIRBuilder);

  /// Returns the single livein physical register Arg was lowered to, if
  /// possible.
  std::optional<MCRegister> getArgPhysReg(Argument &Arg);

  /// If debug-info targets an Argument and its expression is an EntryValue,
  /// lower it as either an entry in the MF debug table (dbg.declare), or a
  /// DBG_VALUE targeting the corresponding livein register for that Argument
  /// (dbg.value).
  bool translateIfEntryValueArgument(bool isDeclare, Value *Arg,
                                     const DILocalVariable *Var,
                                     const DIExpression *Expr,
                                     const DebugLoc &DL,
                                     MachineIRBuilder &MIRBuilder);

  bool translateInlineAsm(const CallBase &CB, MachineIRBuilder &MIRBuilder);

  /// Common code for translating normal calls or invokes.
  bool translateCallBase(const CallBase &CB, MachineIRBuilder &MIRBuilder);

  /// Translate call instruction.
  /// \pre \p U is a call instruction.
  bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);

  /// When an invoke or a cleanupret unwinds to the next EH pad, there are
  /// many places it could ultimately go. In the IR, we have a single unwind
  /// destination, but in the machine CFG, we enumerate all the possible blocks.
  /// This function skips over imaginary basic blocks that hold catchswitch
  /// instructions, and finds all the "real" machine
  /// basic block destinations. As those destinations may not be successors of
  /// EHPadBB, here we also calculate the edge probability to those
  /// destinations. The passed-in Prob is the edge probability to EHPadBB.
  bool findUnwindDestinations(
      const BasicBlock *EHPadBB, BranchProbability Prob,
      SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
          &UnwindDests);
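  // For illustration (a sketch of the funclet-based EH case):
  //   invoke void @f() to label %cont unwind label %dispatch
  //   dispatch:
  //     %cs = catchswitch within none [label %h0, label %h1] unwind to caller
  // Here the IR unwind destination is %dispatch, but the "real" machine
  // destinations collected are the blocks for %h0 and %h1; the catchswitch
  // block itself does not become a MachineBasicBlock.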

  bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate one of LLVM's cast instructions into MachineInstrs, with the
  /// given generic Opcode.
  bool translateCast(unsigned Opcode, const User &U,
                     MachineIRBuilder &MIRBuilder);

  /// Translate a phi instruction.
  bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate a comparison (icmp or fcmp) instruction or constant.
  bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an integer compare instruction (or constant).
  bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCompare(U, MIRBuilder);
  }

  /// Translate a floating-point compare instruction (or constant).
  bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCompare(U, MIRBuilder);
  }

  /// Add remaining operands onto phis we've translated. Executed after all
  /// MachineBasicBlocks for the function have been created.
  void finishPendingPhis();

  /// Translate \p U into a unary operation \p Opcode.
  /// \pre \p U is a unary operation.
  bool translateUnaryOp(unsigned Opcode, const User &U,
                        MachineIRBuilder &MIRBuilder);

  /// Translate \p U into a binary operation \p Opcode.
  /// \pre \p U is a binary operation.
  bool translateBinaryOp(unsigned Opcode, const User &U,
                         MachineIRBuilder &MIRBuilder);

  /// If the set of cases should be emitted as a series of branches, return
  /// true. If we should emit this as a bunch of and/or'd together conditions,
  /// return false.
  bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
  /// Helper method for findMergedConditions.
  /// This function emits a branch and is used at the leaves of an OR or an
  /// AND operator tree.
  void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    MachineBasicBlock *CurBB,
                                    MachineBasicBlock *SwitchBB,
                                    BranchProbability TProb,
                                    BranchProbability FProb, bool InvertCond);
  /// Used during condbr translation to find trees of conditions that can be
  /// optimized.
  void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            MachineBasicBlock *SwitchBB,
                            Instruction::BinaryOps Opc, BranchProbability TProb,
                            BranchProbability FProb, bool InvertCond);

  /// Translate branch (br) instruction.
  /// \pre \p U is a branch instruction.
  bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);

  // Begin switch lowering functions.
  bool emitJumpTableHeader(SwitchCG::JumpTable &JT,
                           SwitchCG::JumpTableHeader &JTH,
                           MachineBasicBlock *HeaderBB);
  void emitJumpTable(SwitchCG::JumpTable &JT, MachineBasicBlock *MBB);

  void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
                      MachineIRBuilder &MIB);

  /// Generate code for the BitTest header block, which precedes each sequence
  /// of BitTestCases.
  void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
                         MachineBasicBlock *SwitchMBB);
  /// Generate code to produce one "bit test" for a given BitTestCase \p B.
  void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
                       BranchProbability BranchProbToNext, Register Reg,
                       SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);

  void splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
                     const SwitchCG::SwitchWorkListItem &W, Value *Cond,
                     MachineBasicBlock *SwitchMBB, MachineIRBuilder &MIB);

  bool lowerJumpTableWorkItem(
      SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
      MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
      MachineIRBuilder &MIB, MachineFunction::iterator BBI,
      BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
      MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);

  bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
                                MachineBasicBlock *Fallthrough,
                                bool FallthroughUnreachable,
                                BranchProbability UnhandledProbs,
                                MachineBasicBlock *CurMBB,
                                MachineIRBuilder &MIB,
                                MachineBasicBlock *SwitchMBB);

  bool lowerBitTestWorkItem(
      SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
      MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
      MachineIRBuilder &MIB, MachineFunction::iterator BBI,
      BranchProbability DefaultProb, BranchProbability UnhandledProbs,
      SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
      bool FallthroughUnreachable);

  bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
                           MachineBasicBlock *SwitchMBB,
                           MachineBasicBlock *DefaultMBB,
                           MachineIRBuilder &MIB);

  bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
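  // For illustration, a dense switch such as
  //   switch i32 %x, label %def [ i32 0, label %a
  //                               i32 1, label %b
  //                               i32 2, label %c ]
  // will typically go through emitJumpTableHeader/emitJumpTable (a range check
  // followed by an indirect branch through a jump table), whereas a sparse set
  // of cases within a small range may instead use emitBitTestHeader/
  // emitBitTestCase, testing bits of a shifted value against per-destination
  // masks. The lower*WorkItem helpers above pick between these strategies per
  // case cluster.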
  // End switch lowering section.

  bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate return (ret) instruction.
  /// The target needs to implement CallLowering::lowerReturn for
  /// this to succeed.
  /// \pre \p U is a return instruction.
  bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateFNeg(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
  }
  bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
  }
  bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
  }
  bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
  }
  bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
  }
  bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
  }

  bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
  }
  bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
  }
  bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
  }
  bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
  }
  bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
  }
  bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
  }
  bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
  }
  bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
  }
  bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
  }
  bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
  }
  bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
  }
  bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
  }
  bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
  }
  bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
  }

  bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
  }

  bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
  }
  bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
  }
  bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
  }

  bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
  }
  bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
  }
  bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
  }
  bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
  }
  bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
  }

  bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateFence(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateFreeze(const User &U, MachineIRBuilder &MIRBuilder);

  // Stubs to keep the compiler happy while we implement the rest of the
  // translation.
  bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder);
  }
  bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }

  bool translateConvergenceControlIntrinsic(const CallInst &CI,
                                            Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder);

  /// @}

  // Builder for machine instructions, a la IRBuilder.
  // I.e., compared to a regular MIBuilder, this one also inserts the
  // instruction in the current block, it can create blocks, etc., basically a
  // kind of IRBuilder, but for Machine IR.
  // CSEMIRBuilder CurBuilder;
  std::unique_ptr<MachineIRBuilder> CurBuilder;

  // Builder set to the entry block (just after ABI lowering instructions). Used
  // as a convenient location for Constants.
  // CSEMIRBuilder EntryBuilder;
  std::unique_ptr<MachineIRBuilder> EntryBuilder;
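  // For example, when a translated instruction needs the value of a Constant,
  // the G_CONSTANT (or similar) materializing it is emitted via EntryBuilder
  // so it lands once in the entry block, while the instruction itself is
  // emitted via CurBuilder at the current insertion point.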

  // The MachineFunction currently being translated.
  MachineFunction *MF = nullptr;

  /// MachineRegisterInfo used to create virtual registers.
  MachineRegisterInfo *MRI = nullptr;

  const DataLayout *DL = nullptr;

  /// Current target configuration. Controls how the pass handles errors.
  const TargetPassConfig *TPC = nullptr;

  CodeGenOptLevel OptLevel;

  /// Current optimization remark emitter. Used to report failures.
  std::unique_ptr<OptimizationRemarkEmitter> ORE;

  AAResults *AA = nullptr;
  AssumptionCache *AC = nullptr;
  const TargetLibraryInfo *LibInfo = nullptr;
  const TargetLowering *TLI = nullptr;
  FunctionLoweringInfo FuncInfo;

  // False when either the Target Machine specifies no optimizations or the
  // function has the optnone attribute; true when optimizations are enabled.
  bool EnableOpts = false;

  /// True when the block contains a tail call. This allows the IRTranslator to
  /// stop translating such blocks early.
  bool HasTailCall = false;

  StackProtectorDescriptor SPDescriptor;

  /// Switch analysis and optimization.
  class GISelSwitchLowering : public SwitchCG::SwitchLowering {
  public:
    GISelSwitchLowering(IRTranslator *irt, FunctionLoweringInfo &funcinfo)
        : SwitchLowering(funcinfo), IRT(irt) {
      assert(irt && "irt is null!");
    }

    void addSuccessorWithProb(
        MachineBasicBlock *Src, MachineBasicBlock *Dst,
        BranchProbability Prob = BranchProbability::getUnknown()) override {
      IRT->addSuccessorWithProb(Src, Dst, Prob);
    }

    virtual ~GISelSwitchLowering() = default;

  private:
    IRTranslator *IRT;
  };

  std::unique_ptr<GISelSwitchLowering> SL;

  // * Insert all the code needed to materialize the constants
  // at the proper place. E.g., Entry block or dominator block
  // of each constant depending on how fancy we want to be.
  // * Clear the different maps.
  void finalizeFunction();

  // Processing steps done per block. E.g. emitting jump tables, stack
  // protectors etc. Returns true if no errors, false if there was a problem
  // that caused an abort.
  bool finalizeBasicBlock(const BasicBlock &BB, MachineBasicBlock &MBB);

  /// Codegen a new tail for a stack protector check ParentMBB which has had
  /// its tail spliced into a stack protector check success bb.
  ///
  /// For a high level explanation of how this fits into the stack protector
  /// generation see the comment on the declaration of class
  /// StackProtectorDescriptor.
  ///
  /// \return true if there were no problems.
  bool emitSPDescriptorParent(StackProtectorDescriptor &SPD,
                              MachineBasicBlock *ParentBB);

  /// Codegen the failure basic block for a stack protector check.
  ///
  /// A failure stack protector machine basic block consists simply of a call
  /// to __stack_chk_fail().
  ///
  /// For a high level explanation of how this fits into the stack protector
  /// generation see the comment on the declaration of class
  /// StackProtectorDescriptor.
  ///
  /// \return true if there were no problems.
  bool emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
                               MachineBasicBlock *FailureBB);

  /// Get the VRegs that represent \p Val.
  /// Non-aggregate types have just one corresponding VReg and the list can be
  /// used as a single "unsigned". Aggregates get flattened. If such VRegs do
  /// not exist, they are created.
  ArrayRef<Register> getOrCreateVRegs(const Value &Val);

  Register getOrCreateVReg(const Value &Val) {
    auto Regs = getOrCreateVRegs(Val);
    if (Regs.empty())
      return 0;
    assert(Regs.size() == 1 &&
           "attempt to get single VReg for aggregate or void");
    return Regs[0];
  }
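  // For example (assuming a typical 64-bit DataLayout), a Value of type
  // {i32, i64} is flattened to two virtual registers, an s32 at offset 0 and
  // an s64 at offset 8, while a plain i32 Value gets a single s32 register
  // that can be fetched directly with getOrCreateVReg.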

  Register getOrCreateConvergenceTokenVReg(const Value &Token) {
    assert(Token.getType()->isTokenTy());
    auto &Regs = *VMap.getVRegs(Token);
    if (!Regs.empty()) {
      assert(Regs.size() == 1 &&
             "Expected a single register for convergence tokens.");
      return Regs[0];
    }

    auto Reg = MRI->createGenericVirtualRegister(LLT::token());
    Regs.push_back(Reg);
    auto &Offsets = *VMap.getOffsets(Token);
    if (Offsets.empty())
      Offsets.push_back(0);
    return Reg;
  }

  /// Allocate some vregs and offsets in the VMap. Then populate just the
  /// offsets while leaving the vregs empty.
  ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);

  /// Get the frame index that represents \p AI.
  /// If no such frame index exists, it is created.
  int getOrCreateFrameIndex(const AllocaInst &AI);

  /// Get the alignment of the given memory operation instruction. This will
  /// either be the explicitly specified value or the ABI-required alignment
  /// for the type being accessed (according to the Module's DataLayout).
  Align getMemOpAlign(const Instruction &I);

  /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
  /// returned will be the head of the translated block (suitable for branch
  /// destinations).
  MachineBasicBlock &getMBB(const BasicBlock &BB);

  /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding
  /// to `Edge.first` at the IR level. This is used when IRTranslation creates
  /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer
  /// represented simply by the IR-level CFG.
  void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred);

  /// Returns the Machine IR predecessors for the given IR CFG edge. Usually
  /// this is just the single MachineBasicBlock corresponding to the predecessor
  /// in the IR. More complex lowering can result in multiple MachineBasicBlocks
  /// preceding the original though (e.g. switch instructions).
  SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) {
    auto RemappedEdge = MachinePreds.find(Edge);
    if (RemappedEdge != MachinePreds.end())
      return RemappedEdge->second;
    return SmallVector<MachineBasicBlock *, 4>(1, &getMBB(*Edge.first));
  }

  /// Return branch probability calculated by BranchProbabilityInfo for IR
  /// blocks.
  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
                                       const MachineBasicBlock *Dst) const;

  void addSuccessorWithProb(
      MachineBasicBlock *Src, MachineBasicBlock *Dst,
      BranchProbability Prob = BranchProbability::getUnknown());

public:
  IRTranslator(CodeGenOptLevel OptLevel = CodeGenOptLevel::None);

  StringRef getPassName() const override { return "IRTranslator"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  // Algo:
  //   CallLowering = MF.subtarget.getCallLowering()
  //   F = MF.getParent()
  //   MIRBuilder.reset(MF)
  //   getMBB(F.getEntryBB())
  //   CallLowering->translateArguments(MIRBuilder, F, ValToVReg)
  //   for each bb in F
  //     getMBB(bb)
  //     for each inst in bb
  //       if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence))
  //         report_fatal_error("Don't know how to translate input");
  //   finalize()
  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H