LLVM 22.0.0git
IRTranslator.h
Go to the documentation of this file.
1//===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file declares the IRTranslator pass.
10/// This pass is responsible for translating LLVM IR into MachineInstr.
11/// It uses target hooks to lower the ABI but aside from that, the pass
12/// generated code is generic. This is the default translator used for
13/// GlobalISel.
14///
15/// \todo Replace the comments with actual doxygen comments.
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
19#define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
20
21#include "llvm/ADT/DenseMap.h"
31#include <memory>
32#include <utility>
33
34namespace llvm {
35
36class AllocaInst;
37class AssumptionCache;
38class BasicBlock;
39class CallInst;
40class CallLowering;
41class Constant;
43class DataLayout;
44class DbgDeclareInst;
45class DbgValueInst;
46class Instruction;
48class MachineFunction;
49class MachineInstr;
52class PHINode;
55class User;
56class Value;
57
58// Technically the pass should run on a hypothetical MachineModule,
59// since it should translate Global into some sort of MachineGlobal.
60// The MachineGlobal should ultimately just be a transfer of ownership of
61// the interesting bits that are relevant to represent a global value.
62// That being said, we could investigate what would it cost to just duplicate
63// the information from the LLVM IR.
64// The idea is that ultimately we would be able to free up the memory used
65// by the LLVM IR as soon as the translation is over.
67public:
68 static char ID;
69
70private:
71 /// Interface used to lower everything related to calls.
72 const CallLowering *CLI = nullptr;
73
74 /// This class contains the mapping between the Values to vreg related data.
75 class ValueToVRegInfo {
76 public:
77 ValueToVRegInfo() = default;
78
79 using VRegListT = SmallVector<Register, 1>;
80 using OffsetListT = SmallVector<uint64_t, 1>;
81
82 using const_vreg_iterator =
84 using const_offset_iterator =
86
87 inline const_vreg_iterator vregs_end() const { return ValToVRegs.end(); }
88
89 VRegListT *getVRegs(const Value &V) {
90 auto It = ValToVRegs.find(&V);
91 if (It != ValToVRegs.end())
92 return It->second;
93
94 return insertVRegs(V);
95 }
96
97 OffsetListT *getOffsets(const Value &V) {
98 auto It = TypeToOffsets.find(V.getType());
99 if (It != TypeToOffsets.end())
100 return It->second;
101
102 return insertOffsets(V);
103 }
104
105 const_vreg_iterator findVRegs(const Value &V) const {
106 return ValToVRegs.find(&V);
107 }
108
109 bool contains(const Value &V) const { return ValToVRegs.contains(&V); }
110
111 void reset() {
112 ValToVRegs.clear();
113 TypeToOffsets.clear();
114 VRegAlloc.DestroyAll();
115 OffsetAlloc.DestroyAll();
116 }
117
118 private:
119 VRegListT *insertVRegs(const Value &V) {
120 assert(!ValToVRegs.contains(&V) && "Value already exists");
121
122 // We placement new using our fast allocator since we never try to free
123 // the vectors until translation is finished.
124 auto *VRegList = new (VRegAlloc.Allocate()) VRegListT();
125 ValToVRegs[&V] = VRegList;
126 return VRegList;
127 }
128
129 OffsetListT *insertOffsets(const Value &V) {
130 assert(!TypeToOffsets.contains(V.getType()) && "Type already exists");
131
132 auto *OffsetList = new (OffsetAlloc.Allocate()) OffsetListT();
133 TypeToOffsets[V.getType()] = OffsetList;
134 return OffsetList;
135 }
138
139 // We store pointers to vectors here since references may be invalidated
140 // while we hold them if we stored the vectors directly.
143 };
144
145 /// Mapping of the values of the current LLVM IR function to the related
146 /// virtual registers and offsets.
147 ValueToVRegInfo VMap;
148
149 // One BasicBlock can be translated to multiple MachineBasicBlocks. For such
150 // BasicBlocks translated to multiple MachineBasicBlocks, MachinePreds retains
151 // a mapping between the edges arriving at the BasicBlock to the corresponding
152 // created MachineBasicBlocks. Some BasicBlocks that get translated to a
153 // single MachineBasicBlock may also end up in this Map.
154 using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>;
156
157 // List of stubbed PHI instructions, for values and basic blocks to be filled
158 // in once all MachineBasicBlocks have been created.
160 PendingPHIs;
161
162 /// Record of what frame index has been allocated to specified allocas for
163 /// this function.
165
166 SwiftErrorValueTracking SwiftError;
167
168 /// \name Methods for translating from LLVM IR to MachineInstr.
169 /// \see ::translate for general information on the translate methods.
170 /// @{
171
172 /// Translate \p Inst into its corresponding MachineInstr instruction(s).
173 /// Insert the newly translated instruction(s) right where the CurBuilder
174 /// is set.
175 ///
176 /// The general algorithm is:
177 /// 1. Look for a virtual register for each operand or
178 /// create one.
179 /// 2 Update the VMap accordingly.
180 /// 2.alt. For constant arguments, if they are compile time constants,
181 /// produce an immediate in the right operand and do not touch
182 /// ValToReg. Actually we will go with a virtual register for each
183 /// constants because it may be expensive to actually materialize the
184 /// constant. Moreover, if the constant spans on several instructions,
185 /// CSE may not catch them.
186 /// => Update ValToVReg and remember that we saw a constant in Constants.
187 /// We will materialize all the constants in finalize.
188 /// Note: we would need to do something so that we can recognize such operand
189 /// as constants.
190 /// 3. Create the generic instruction.
191 ///
192 /// \return true if the translation succeeded.
193 bool translate(const Instruction &Inst);
194
195 /// Materialize \p C into virtual-register \p Reg. The generic instructions
196 /// performing this materialization will be inserted into the entry block of
197 /// the function.
198 ///
199 /// \return true if the materialization succeeded.
200 bool translate(const Constant &C, Register Reg);
201
202 /// Examine any debug-info attached to the instruction (in the form of
203 /// DbgRecords) and translate it.
204 void translateDbgInfo(const Instruction &Inst,
205 MachineIRBuilder &MIRBuilder);
206
207 /// Translate a debug-info record of a dbg.value into a DBG_* instruction.
208 /// Pass in all the contents of the record, rather than relying on how it's
209 /// stored.
210 void translateDbgValueRecord(Value *V, bool HasArgList,
211 const DILocalVariable *Variable,
212 const DIExpression *Expression, const DebugLoc &DL,
213 MachineIRBuilder &MIRBuilder);
214
215 /// Translate a debug-info record of a dbg.declare into an indirect DBG_*
216 /// instruction. Pass in all the contents of the record, rather than relying
217 /// on how it's stored.
218 void translateDbgDeclareRecord(Value *Address, bool HasArgList,
219 const DILocalVariable *Variable,
220 const DIExpression *Expression, const DebugLoc &DL,
221 MachineIRBuilder &MIRBuilder);
222
223 // Translate U as a copy of V.
224 bool translateCopy(const User &U, const Value &V,
225 MachineIRBuilder &MIRBuilder);
226
227 /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
228 /// emitted.
229 bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);
230
231 /// Translate an LLVM load instruction into generic IR.
232 bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);
233
234 /// Translate an LLVM store instruction into generic IR.
235 bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);
236
237 /// Translate an LLVM string intrinsic (memcpy, memset, ...).
238 bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
239 unsigned Opcode);
240
241 /// Translate an LLVM trap intrinsic (trap, debugtrap, ubsantrap).
242 bool translateTrap(const CallInst &U, MachineIRBuilder &MIRBuilder,
243 unsigned Opcode);
244
245 // Translate @llvm.vector.interleave2 and
246 // @llvm.vector.deinterleave2 intrinsics for fixed-width vector
247 // types into vector shuffles.
248 bool translateVectorInterleave2Intrinsic(const CallInst &CI,
249 MachineIRBuilder &MIRBuilder);
250 bool translateVectorDeinterleave2Intrinsic(const CallInst &CI,
251 MachineIRBuilder &MIRBuilder);
252
253 void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);
254
255 bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
256 MachineIRBuilder &MIRBuilder);
257 bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
258 MachineIRBuilder &MIRBuilder);
259
260 /// Helper function for translateSimpleIntrinsic.
261 /// \return The generic opcode for \p IntrinsicID if \p IntrinsicID is a
262 /// simple intrinsic (ceil, fabs, etc.). Otherwise, returns
263 /// Intrinsic::not_intrinsic.
264 unsigned getSimpleIntrinsicOpcode(Intrinsic::ID ID);
265
266 /// Translates the intrinsics defined in getSimpleIntrinsicOpcode.
267 /// \return true if the translation succeeded.
268 bool translateSimpleIntrinsic(const CallInst &CI, Intrinsic::ID ID,
269 MachineIRBuilder &MIRBuilder);
270
271 bool translateConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI,
272 MachineIRBuilder &MIRBuilder);
273
274 bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
275 MachineIRBuilder &MIRBuilder);
276
277 /// Returns the single livein physical register Arg was lowered to, if
278 /// possible.
279 std::optional<MCRegister> getArgPhysReg(Argument &Arg);
280
281 /// If debug-info targets an Argument and its expression is an EntryValue,
282 /// lower it as either an entry in the MF debug table (dbg.declare), or a
283 /// DBG_VALUE targeting the corresponding livein register for that Argument
284 /// (dbg.value).
285 bool translateIfEntryValueArgument(bool isDeclare, Value *Arg,
286 const DILocalVariable *Var,
287 const DIExpression *Expr,
288 const DebugLoc &DL,
289 MachineIRBuilder &MIRBuilder);
290
291 bool translateInlineAsm(const CallBase &CB, MachineIRBuilder &MIRBuilder);
292
293 /// Common code for translating normal calls or invokes.
294 bool translateCallBase(const CallBase &CB, MachineIRBuilder &MIRBuilder);
295
296 /// Translate call instruction.
297 /// \pre \p U is a call instruction.
298 bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);
299
300 bool translateIntrinsic(
301 const CallBase &CB, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder,
302 const TargetLowering::IntrinsicInfo *TgtMemIntrinsicInfo = nullptr);
303
304 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
305 /// many places it could ultimately go. In the IR, we have a single unwind
306 /// destination, but in the machine CFG, we enumerate all the possible blocks.
307 /// This function skips over imaginary basic blocks that hold catchswitch
308 /// instructions, and finds all the "real" machine
309 /// basic block destinations. As those destinations may not be successors of
310 /// EHPadBB, here we also calculate the edge probability to those
311 /// destinations. The passed-in Prob is the edge probability to EHPadBB.
313 const BasicBlock *EHPadBB, BranchProbability Prob,
314 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
315 &UnwindDests);
316
317 bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);
318
319 bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);
320 bool translateCallBrIntrinsic(const CallBrInst &I,
321 MachineIRBuilder &MIRBuilder);
322
323 bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);
324
325 /// Translate one of LLVM's cast instructions into MachineInstrs, with the
326 /// given generic Opcode.
327 bool translateCast(unsigned Opcode, const User &U,
328 MachineIRBuilder &MIRBuilder);
329
330 /// Translate a phi instruction.
331 bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);
332
333 /// Translate a comparison (icmp or fcmp) instruction or constant.
334 bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);
335
336 /// Translate an integer compare instruction (or constant).
337 bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
338 return translateCompare(U, MIRBuilder);
339 }
340
341 /// Translate a floating-point compare instruction (or constant).
342 bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
343 return translateCompare(U, MIRBuilder);
344 }
345
346 /// Add remaining operands onto phis we've translated. Executed after all
347 /// MachineBasicBlocks for the function have been created.
348 void finishPendingPhis();
349
350 /// Translate \p Inst into a unary operation \p Opcode.
351 /// \pre \p U is a unary operation.
352 bool translateUnaryOp(unsigned Opcode, const User &U,
353 MachineIRBuilder &MIRBuilder);
354
355 /// Translate \p Inst into a binary operation \p Opcode.
356 /// \pre \p U is a binary operation.
357 bool translateBinaryOp(unsigned Opcode, const User &U,
358 MachineIRBuilder &MIRBuilder);
359
360 /// If the set of cases should be emitted as a series of branches, return
361 /// true. If we should emit this as a bunch of and/or'd together conditions,
362 /// return false.
363 bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
364 /// Helper method for findMergedConditions.
365 /// This function emits a branch and is used at the leaves of an OR or an
366 /// AND operator tree.
367 void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
368 MachineBasicBlock *FBB,
369 MachineBasicBlock *CurBB,
370 MachineBasicBlock *SwitchBB,
371 BranchProbability TProb,
372 BranchProbability FProb, bool InvertCond);
373 /// Used during condbr translation to find trees of conditions that can be
374 /// optimized.
375 void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
376 MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
377 MachineBasicBlock *SwitchBB,
378 Instruction::BinaryOps Opc, BranchProbability TProb,
379 BranchProbability FProb, bool InvertCond);
380
381 /// Translate branch (br) instruction.
382 /// \pre \p U is a branch instruction.
383 bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);
384
385 // Begin switch lowering functions.
386 bool emitJumpTableHeader(SwitchCG::JumpTable &JT,
387 SwitchCG::JumpTableHeader &JTH,
388 MachineBasicBlock *HeaderBB);
389 void emitJumpTable(SwitchCG::JumpTable &JT, MachineBasicBlock *MBB);
390
391 void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
392 MachineIRBuilder &MIB);
393
394 /// Generate code for the BitTest header block, which precedes each sequence
395 /// of BitTestCases.
396 void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
397 MachineBasicBlock *SwitchMBB);
398 /// Generate code to produce one "bit test" for a given BitTestCase \p B.
399 void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
400 BranchProbability BranchProbToNext, Register Reg,
401 SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
402
403 void splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
404 const SwitchCG::SwitchWorkListItem &W, Value *Cond,
405 MachineBasicBlock *SwitchMBB, MachineIRBuilder &MIB);
406
407 bool lowerJumpTableWorkItem(
408 SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
409 MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
410 MachineIRBuilder &MIB, MachineFunction::iterator BBI,
411 BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
412 MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);
413
414 bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
415 MachineBasicBlock *Fallthrough,
416 bool FallthroughUnreachable,
417 BranchProbability UnhandledProbs,
418 MachineBasicBlock *CurMBB,
419 MachineIRBuilder &MIB,
420 MachineBasicBlock *SwitchMBB);
421
422 bool lowerBitTestWorkItem(
423 SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
424 MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
425 MachineIRBuilder &MIB, MachineFunction::iterator BBI,
426 BranchProbability DefaultProb, BranchProbability UnhandledProbs,
427 SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
428 bool FallthroughUnreachable);
429
430 bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
431 MachineBasicBlock *SwitchMBB,
432 MachineBasicBlock *DefaultMBB,
433 MachineIRBuilder &MIB);
434
435 bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
436 // End switch lowering section.
437
438 bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);
439
440 bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);
441
442 bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);
443
444 bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);
445
446 bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);
447
448 bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder);
449
450 /// Translate return (ret) instruction.
451 /// The target needs to implement CallLowering::lowerReturn for
452 /// this to succeed.
453 /// \pre \p U is a return instruction.
454 bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);
455
456 bool translateFNeg(const User &U, MachineIRBuilder &MIRBuilder);
457
458 bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
459 return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
460 }
461 bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
462 return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
463 }
464 bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
465 return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
466 }
467 bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
468 return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
469 }
470 bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
471 return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
472 }
473 bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
474 return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
475 }
476
477 bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
478 return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
479 }
480 bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
481 return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
482 }
483 bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
484 return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
485 }
486 bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
487 return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
488 }
489 bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
490 return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
491 }
492 bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
493 return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
494 }
495 bool translatePtrToAddr(const User &U, MachineIRBuilder &MIRBuilder) {
496 // FIXME: this is not correct for pointers with addr width != pointer width
497 return translatePtrToInt(U, MIRBuilder);
498 }
499 bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
500 return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
501 }
502 bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
503 return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
504 }
505 bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
506 return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
507 }
508 bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
509 return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
510 }
511 bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
512 return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
513 }
514 bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
515 return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
516 }
517 bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
518 return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
519 }
520 bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder);
521
522 bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
523 return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
524 }
525
526 bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
527 return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
528 }
529
530 bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
531 return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
532 }
533 bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
534 return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
535 }
536 bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
537 return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
538 }
539
540 bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
541 return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
542 }
543 bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
544 return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
545 }
546 bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
547 return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
548 }
549 bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
550 return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
551 }
552 bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
553 return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
554 }
555
556 bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder);
557
558 bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder);
559 bool translateInsertVector(const User &U, MachineIRBuilder &MIRBuilder);
560
561 bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder);
562 bool translateExtractVector(const User &U, MachineIRBuilder &MIRBuilder);
563
564 bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);
565
566 bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
567 bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
568 bool translateFence(const User &U, MachineIRBuilder &MIRBuilder);
569 bool translateFreeze(const User &U, MachineIRBuilder &MIRBuilder);
570
571 // Stubs to keep the compiler happy while we implement the rest of the
572 // translation.
573 bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
574 return false;
575 }
576 bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
577 return false;
578 }
579 bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
580 return false;
581 }
582 bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
583 return false;
584 }
585 bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
586 return translateCast(TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder);
587 }
588 bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
589 return false;
590 }
591 bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
592 return false;
593 }
594 bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
595 return false;
596 }
597 bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
598 return false;
599 }
600
601 bool translateConvergenceControlIntrinsic(const CallInst &CI,
603 MachineIRBuilder &MIRBuilder);
604
605 /// @}
606
607 // Builder for machine instructions, a la IRBuilder.
608 // I.e., compared to a regular MIBuilder, this one also inserts the
609 // instruction in the current block, it can create blocks, etc., basically a
610 // kind of IRBuilder, but for Machine IR.
611 // CSEMIRBuilder CurBuilder;
612 std::unique_ptr<MachineIRBuilder> CurBuilder;
613
614 // Builder set to the entry block (just after ABI lowering instructions). Used
615 // as a convenient location for Constants.
616 // CSEMIRBuilder EntryBuilder;
617 std::unique_ptr<MachineIRBuilder> EntryBuilder;
618
619 // The MachineFunction currently being translated.
620 MachineFunction *MF = nullptr;
621
622 /// MachineRegisterInfo used to create virtual registers.
623 MachineRegisterInfo *MRI = nullptr;
624
625 const DataLayout *DL = nullptr;
626
627 /// Current target configuration. Controls how the pass handles errors.
628 const TargetPassConfig *TPC = nullptr;
629
630 CodeGenOptLevel OptLevel;
631
632 /// Current optimization remark emitter. Used to report failures.
633 std::unique_ptr<OptimizationRemarkEmitter> ORE;
634
635 AAResults *AA = nullptr;
636 AssumptionCache *AC = nullptr;
637 const TargetLibraryInfo *LibInfo = nullptr;
638 const TargetLowering *TLI = nullptr;
639 FunctionLoweringInfo FuncInfo;
640
641 // False when either the Target Machine specifies no optimizations or the
642 // function has the optnone attribute (i.e. true when optimizations apply).
643 bool EnableOpts = false;
644
645 /// True when the block contains a tail call. This allows the IRTranslator to
646 /// stop translating such blocks early.
647 bool HasTailCall = false;
648
649 StackProtectorDescriptor SPDescriptor;
650
651 /// Switch analysis and optimization.
652 class GISelSwitchLowering : public SwitchCG::SwitchLowering {
653 public:
654 GISelSwitchLowering(IRTranslator *irt, FunctionLoweringInfo &funcinfo)
655 : SwitchLowering(funcinfo), IRT(irt) {
656 assert(irt && "irt is null!");
657 }
658
659 void addSuccessorWithProb(
660 MachineBasicBlock *Src, MachineBasicBlock *Dst,
661 BranchProbability Prob = BranchProbability::getUnknown()) override {
662 IRT->addSuccessorWithProb(Src, Dst, Prob);
663 }
664
665 ~GISelSwitchLowering() override = default;
666
667 private:
668 IRTranslator *IRT;
669 };
670
671 std::unique_ptr<GISelSwitchLowering> SL;
672
673 // * Insert all the code needed to materialize the constants
674 // at the proper place. E.g., Entry block or dominator block
675 // of each constant depending on how fancy we want to be.
676 // * Clear the different maps.
677 void finalizeFunction();
678
679 // Processing steps done per block. E.g. emitting jump tables, stack
680 // protectors etc. Returns true if no errors, false if there was a problem
681 // that caused an abort.
682 bool finalizeBasicBlock(const BasicBlock &BB, MachineBasicBlock &MBB);
683
684 /// Codegen a new tail for a stack protector check ParentMBB which has had its
685 /// tail spliced into a stack protector check success bb.
686 ///
687 /// For a high level explanation of how this fits into the stack protector
688 /// generation see the comment on the declaration of class
689 /// StackProtectorDescriptor.
690 ///
691 /// \return true if there were no problems.
692 bool emitSPDescriptorParent(StackProtectorDescriptor &SPD,
693 MachineBasicBlock *ParentBB);
694
695 /// Codegen the failure basic block for a stack protector check.
696 ///
697 /// A failure stack protector machine basic block consists simply of a call to
698 /// __stack_chk_fail().
699 ///
700 /// For a high level explanation of how this fits into the stack protector
701 /// generation see the comment on the declaration of class
702 /// StackProtectorDescriptor.
703 ///
704 /// \return true if there were no problems.
705 bool emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
706 MachineBasicBlock *FailureBB);
707
708 /// Get the VRegs that represent \p Val.
709 /// Non-aggregate types have just one corresponding VReg and the list can be
710 /// used as a single "unsigned". Aggregates get flattened. If such VRegs do
711 /// not exist, they are created.
712 ArrayRef<Register> getOrCreateVRegs(const Value &Val);
713
714 Register getOrCreateVReg(const Value &Val) {
715 auto Regs = getOrCreateVRegs(Val);
716 if (Regs.empty())
717 return 0;
718 assert(Regs.size() == 1 &&
719 "attempt to get single VReg for aggregate or void");
720 return Regs[0];
721 }
722
723 Register getOrCreateConvergenceTokenVReg(const Value &Token) {
724 assert(Token.getType()->isTokenTy());
725 auto &Regs = *VMap.getVRegs(Token);
726 if (!Regs.empty()) {
727 assert(Regs.size() == 1 &&
728 "Expected a single register for convergence tokens.");
729 return Regs[0];
730 }
731
732 auto Reg = MRI->createGenericVirtualRegister(LLT::token());
733 Regs.push_back(Reg);
734 auto &Offsets = *VMap.getOffsets(Token);
735 if (Offsets.empty())
736 Offsets.push_back(0);
737 return Reg;
738 }
739
740 /// Allocate some vregs and offsets in the VMap. Then populate just the
741 /// offsets while leaving the vregs empty.
742 ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);
743
744 /// Get the frame index that represents \p Val.
745 /// If such VReg does not exist, it is created.
746 int getOrCreateFrameIndex(const AllocaInst &AI);
747
748 /// Get the alignment of the given memory operation instruction. This will
749 /// either be the explicitly specified value or the ABI-required alignment for
750 /// the type being accessed (according to the Module's DataLayout).
751 Align getMemOpAlign(const Instruction &I);
752
753 /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
754 /// returned will be the head of the translated block (suitable for branch
755 /// destinations).
756 MachineBasicBlock &getMBB(const BasicBlock &BB);
757
758 /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding
759 /// to `Edge.first` at the IR level. This is used when IRTranslation creates
760 /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer
761 /// represented simply by the IR-level CFG.
762 void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred);
763
764 /// Returns the Machine IR predecessors for the given IR CFG edge. Usually
765 /// this is just the single MachineBasicBlock corresponding to the predecessor
766 /// in the IR. More complex lowering can result in multiple MachineBasicBlocks
767 /// preceding the original though (e.g. switch instructions).
768 SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) {
769 auto RemappedEdge = MachinePreds.find(Edge);
770 if (RemappedEdge != MachinePreds.end())
771 return RemappedEdge->second;
772 return SmallVector<MachineBasicBlock *, 4>(1, &getMBB(*Edge.first));
773 }
774
775 /// Return branch probability calculated by BranchProbabilityInfo for IR
776 /// blocks.
777 BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
778 const MachineBasicBlock *Dst) const;
779
780 void addSuccessorWithProb(
781 MachineBasicBlock *Src, MachineBasicBlock *Dst,
782 BranchProbability Prob = BranchProbability::getUnknown());
783
784public:
786
787 StringRef getPassName() const override { return "IRTranslator"; }
788
789 void getAnalysisUsage(AnalysisUsage &AU) const override;
790
791 // Algo:
792 // CallLowering = MF.subtarget.getCallLowering()
793 // F = MF.getParent()
794 // MIRBuilder.reset(MF)
795 // getMBB(F.getEntryBB())
796 // CallLowering->translateArguments(MIRBuilder, F, ValToVReg)
797 // for each bb in F
798 // getMBB(bb)
799 // for each inst in bb
800 // if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence))
801 // report_fatal_error("Don't know how to translate input");
802 // finalize()
803 bool runOnMachineFunction(MachineFunction &MF) override;
804};
805
806} // end namespace llvm
807
808#endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file defines the BumpPtrAllocator interface.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file defines the DenseMap class.
#define I(x, y, z)
Definition MD5.cpp:57
This file declares the MachineIRBuilder class.
Register Reg
Promote Memory to Register
Definition Mem2Reg.cpp:110
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
std::pair< BasicBlock *, BasicBlock * > Edge
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:480
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
This file defines the SmallVector class.
static Value * getStackGuard(const TargetLoweringBase *TLI, Module *M, IRBuilder<> &B, bool *SupportsSelectionDAGSP=nullptr)
Create a stack guard loading and populate whether SelectionDAG SSP is supported.
an instruction to allocate memory on the stack
Represent the analysis usage information of a pass.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
static BranchProbability getUnknown()
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This is an important base class in LLVM.
Definition Constant.h:43
This is the common base class for constrained floating point intrinsics.
DWARF expression.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
This represents the llvm.dbg.declare instruction.
This represents the llvm.dbg.value instruction.
A debug info location.
Definition DebugLoc.h:123
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
Definition DenseMap.h:75
Class representing an expression and its matching format.
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
IRTranslator(CodeGenOptLevel OptLevel=CodeGenOptLevel::None)
StringRef getPassName() const override
getPassName - Return a nice clean name for a pass.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
static constexpr LLT token()
Get a low-level token; just a scalar with zero bits (or no size).
BasicBlockListType::iterator iterator
Helper class to build MachineInstr.
Representation of each machine instruction.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
The optimization diagnostic interface.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
A BumpPtrAllocator that allows only elements of a specific type to be allocated.
Definition Allocator.h:390
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
SwitchLowering(FunctionLoweringInfo &funcinfo)
Provides information about what library functions are available for the current target.
Target-Independent Code Generator Pass Configuration Options.
LLVM Value Representation.
Definition Value.h:75
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
Offsets
Offsets in bytes from the start of the input buffer.
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
CaseClusterVector::iterator CaseClusterIt
This is an optimization pass for GlobalISel generic memory operations.
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >