1//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the IRTranslator class.
10//===----------------------------------------------------------------------===//
11
14#include "llvm/ADT/STLExtras.h"
15#include "llvm/ADT/ScopeExit.h"
16#include "llvm/ADT/SmallSet.h"
21#include "llvm/Analysis/Loads.h"
52#include "llvm/IR/BasicBlock.h"
53#include "llvm/IR/CFG.h"
54#include "llvm/IR/Constant.h"
55#include "llvm/IR/Constants.h"
56#include "llvm/IR/DataLayout.h"
59#include "llvm/IR/Function.h"
61#include "llvm/IR/InlineAsm.h"
62#include "llvm/IR/InstrTypes.h"
65#include "llvm/IR/Intrinsics.h"
66#include "llvm/IR/IntrinsicsAMDGPU.h"
67#include "llvm/IR/LLVMContext.h"
68#include "llvm/IR/Metadata.h"
70#include "llvm/IR/Statepoint.h"
71#include "llvm/IR/Type.h"
72#include "llvm/IR/User.h"
73#include "llvm/IR/Value.h"
75#include "llvm/MC/MCContext.h"
76#include "llvm/Pass.h"
79#include "llvm/Support/Debug.h"
87#include <algorithm>
88#include <cassert>
89#include <cstdint>
90#include <iterator>
91#include <optional>
92#include <string>
93#include <utility>
94#include <vector>
95
96#define DEBUG_TYPE "irtranslator"
97
98using namespace llvm;
99
100static cl::opt<bool>
101 EnableCSEInIRTranslator("enable-cse-in-irtranslator",
102 cl::desc("Should enable CSE in irtranslator"),
103 cl::Optional, cl::init(false));
104char IRTranslator::ID = 0;
105
106INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
107 false, false)
115
116static void reportTranslationError(MachineFunction &MF,
117 const TargetPassConfig &TPC,
118 OptimizationRemarkEmitter &ORE,
119 OptimizationRemarkMissed &R) {
120 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
121
122 // Print the function name explicitly if we don't have a debug location (which
123 // makes the diagnostic less useful) or if we're going to emit a raw error.
124 if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
125 R << (" (in function: " + MF.getName() + ")").str();
126
127 if (TPC.isGlobalISelAbortEnabled())
128 report_fatal_error(Twine(R.getMsg()));
129 else
130 ORE.emit(R);
131}
132
133IRTranslator::IRTranslator(CodeGenOptLevel optlevel)
134 : MachineFunctionPass(ID), OptLevel(optlevel) {}
135
136#ifndef NDEBUG
137namespace {
138/// Verify that every instruction created has the same DILocation as the
139/// instruction being translated.
140class DILocationVerifier : public GISelChangeObserver {
141 const Instruction *CurrInst = nullptr;
142
143public:
144 DILocationVerifier() = default;
145 ~DILocationVerifier() = default;
146
147 const Instruction *getCurrentInst() const { return CurrInst; }
148 void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }
149
150 void erasingInstr(MachineInstr &MI) override {}
151 void changingInstr(MachineInstr &MI) override {}
152 void changedInstr(MachineInstr &MI) override {}
153
154 void createdInstr(MachineInstr &MI) override {
155 assert(getCurrentInst() && "Inserted instruction without a current MI");
156
157 // Only print the check message if we're actually checking it.
158#ifndef NDEBUG
159 LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
160 << " was copied to " << MI);
161#endif
162 // We allow insts in the entry block to have no debug loc because
163 // they could have originated from constants, and we don't want a jumpy
164 // debug experience.
165 assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
166 (MI.getParent()->isEntryBlock() && !MI.getDebugLoc()) ||
167 (MI.isDebugInstr())) &&
168 "Line info was not transferred to all instructions");
169 }
170};
171} // namespace
172#endif // ifndef NDEBUG
173
174
175void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
176 AU.addRequired<StackProtector>();
177 AU.addRequired<TargetPassConfig>();
178 AU.addRequired<GISelCSEAnalysisWrapperPass>();
179 AU.addRequired<AssumptionCacheTracker>();
180 if (OptLevel != CodeGenOptLevel::None) {
181 AU.addRequired<BranchProbabilityInfoWrapperPass>();
182 AU.addRequired<AAResultsWrapperPass>();
183 }
184 AU.addRequired<TargetLibraryInfoWrapperPass>();
185 AU.addPreserved<TargetLibraryInfoWrapperPass>();
186 getSelectionDAGFallbackAnalysisUsage(AU);
187 MachineFunctionPass::getAnalysisUsage(AU);
188}
189
190SmallVectorImpl<Register> &
191IRTranslator::allocateVRegs(const Value &Val) {
192 auto VRegsIt = VMap.findVRegs(Val);
193 if (VRegsIt != VMap.vregs_end())
194 return *VRegsIt->second;
195 auto *Regs = VMap.getVRegs(Val);
196 auto *Offsets = VMap.getOffsets(Val);
197 SmallVector<LLT, 4> SplitTys;
198 computeValueLLTs(*DL, *Val.getType(), SplitTys,
199 Offsets->empty() ? Offsets : nullptr);
200 for (unsigned i = 0; i < SplitTys.size(); ++i)
201 Regs->push_back(0);
202 return *Regs;
203}
204
205ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
206 auto VRegsIt = VMap.findVRegs(Val);
207 if (VRegsIt != VMap.vregs_end())
208 return *VRegsIt->second;
209
210 if (Val.getType()->isVoidTy())
211 return *VMap.getVRegs(Val);
212
213 // Create entry for this type.
214 auto *VRegs = VMap.getVRegs(Val);
215 auto *Offsets = VMap.getOffsets(Val);
216
217 if (!Val.getType()->isTokenTy())
218 assert(Val.getType()->isSized() &&
219 "Don't know how to create an empty vreg");
220
221 SmallVector<LLT, 4> SplitTys;
222 computeValueLLTs(*DL, *Val.getType(), SplitTys,
223 Offsets->empty() ? Offsets : nullptr);
224
225 if (!isa<Constant>(Val)) {
226 for (auto Ty : SplitTys)
227 VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
228 return *VRegs;
229 }
230
231 if (Val.getType()->isAggregateType()) {
232 // UndefValue, ConstantAggregateZero
233 auto &C = cast<Constant>(Val);
234 unsigned Idx = 0;
235 while (auto Elt = C.getAggregateElement(Idx++)) {
236 auto EltRegs = getOrCreateVRegs(*Elt);
237 llvm::copy(EltRegs, std::back_inserter(*VRegs));
238 }
239 } else {
240 assert(SplitTys.size() == 1 && "unexpectedly split LLT");
241 VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
242 bool Success = translate(cast<Constant>(Val), VRegs->front());
243 if (!Success) {
244 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
245 MF->getFunction().getSubprogram(),
246 &MF->getFunction().getEntryBlock());
247 R << "unable to translate constant: " << ore::NV("Type", Val.getType());
248 reportTranslationError(*MF, *TPC, *ORE, R);
249 return *VRegs;
250 }
251 }
252
253 return *VRegs;
254}
255
256int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
257 auto MapEntry = FrameIndices.find(&AI);
258 if (MapEntry != FrameIndices.end())
259 return MapEntry->second;
260
261 uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
262 uint64_t Size =
263 ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
264
265 // Always allocate at least one byte.
266 Size = std::max<uint64_t>(Size, 1u);
267
268 int &FI = FrameIndices[&AI];
269 FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
270 return FI;
271}
272
273Align IRTranslator::getMemOpAlign(const Instruction &I) {
274 if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
275 return SI->getAlign();
276 if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
277 return LI->getAlign();
278 if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
279 return AI->getAlign();
280 if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
281 return AI->getAlign();
282
283 OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
284 R << "unable to translate memop: " << ore::NV("Opcode", &I);
285 reportTranslationError(*MF, *TPC, *ORE, R);
286 return Align(1);
287}
288
289MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
290 MachineBasicBlock *&MBB = BBToMBB[&BB];
291 assert(MBB && "BasicBlock was not encountered before");
292 return *MBB;
293}
294
295void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
296 assert(NewPred && "new predecessor must be a real MachineBasicBlock");
297 MachinePreds[Edge].push_back(NewPred);
298}
299
300bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
301 MachineIRBuilder &MIRBuilder) {
302 // Get or create a virtual register for each value.
303 // Unless the value is a Constant => loadimm cst?
304 // or inline constant each time?
305 // Creation of a virtual register needs to have a size.
306 Register Op0 = getOrCreateVReg(*U.getOperand(0));
307 Register Op1 = getOrCreateVReg(*U.getOperand(1));
308 Register Res = getOrCreateVReg(U);
309 uint32_t Flags = 0;
310 if (isa<Instruction>(U)) {
311 const Instruction &I = cast<Instruction>(U);
312 Flags = MachineInstr::copyFlagsFromInstruction(I);
313 }
314
315 MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
316 return true;
317}
318
319bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
320 MachineIRBuilder &MIRBuilder) {
321 Register Op0 = getOrCreateVReg(*U.getOperand(0));
322 Register Res = getOrCreateVReg(U);
323 uint32_t Flags = 0;
324 if (isa<Instruction>(U)) {
325 const Instruction &I = cast<Instruction>(U);
326 Flags = MachineInstr::copyFlagsFromInstruction(I);
327 }
328 MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
329 return true;
330}
331
332bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
333 return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
334}
335
336bool IRTranslator::translateCompare(const User &U,
337 MachineIRBuilder &MIRBuilder) {
338 auto *CI = cast<CmpInst>(&U);
339 Register Op0 = getOrCreateVReg(*U.getOperand(0));
340 Register Op1 = getOrCreateVReg(*U.getOperand(1));
341 Register Res = getOrCreateVReg(U);
342 CmpInst::Predicate Pred = CI->getPredicate();
343 if (CmpInst::isIntPredicate(Pred))
344 MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
345 else if (Pred == CmpInst::FCMP_FALSE)
346 MIRBuilder.buildCopy(
347 Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
348 else if (Pred == CmpInst::FCMP_TRUE)
349 MIRBuilder.buildCopy(
350 Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
351 else {
352 uint32_t Flags = 0;
353 if (CI)
354 Flags = MachineInstr::copyFlagsFromInstruction(*CI);
355 MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);
356 }
357
358 return true;
359}
360
361bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
362 const ReturnInst &RI = cast<ReturnInst>(U);
363 const Value *Ret = RI.getReturnValue();
364 if (Ret && DL->getTypeStoreSize(Ret->getType()).isZero())
365 Ret = nullptr;
366
367 ArrayRef<Register> VRegs;
368 if (Ret)
369 VRegs = getOrCreateVRegs(*Ret);
370
371 Register SwiftErrorVReg = 0;
372 if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
373 SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
374 &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
375 }
376
377 // The target may mess with the insertion point, but
378 // this is not important as a return is the last instruction
379 // of the block anyway.
380 return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
381}
382
383void IRTranslator::emitBranchForMergedCondition(
384 const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
385 MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
386 BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
387 // If the leaf of the tree is a comparison, merge the condition into
388 // the caseblock.
389 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
390 CmpInst::Predicate Condition;
391 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
392 Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
393 } else {
394 const FCmpInst *FC = cast<FCmpInst>(Cond);
395 Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
396 }
397
398 SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
399 BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
400 CurBuilder->getDebugLoc(), TProb, FProb);
401 SL->SwitchCases.push_back(CB);
402 return;
403 }
404
405 // Create a CaseBlock record representing this branch.
406 CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
407 SwitchCG::CaseBlock CB(
408 Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
409 nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
410 SL->SwitchCases.push_back(CB);
411}
412
413static bool isValInBlock(const Value *V, const BasicBlock *BB) {
414 if (const Instruction *I = dyn_cast<Instruction>(V))
415 return I->getParent() == BB;
416 return true;
417}
418
419void IRTranslator::findMergedConditions(
420 const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
421 MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
422 Instruction::BinaryOps Opc, BranchProbability TProb,
423 BranchProbability FProb, bool InvertCond) {
424 using namespace PatternMatch;
425 assert((Opc == Instruction::And || Opc == Instruction::Or) &&
426 "Expected Opc to be AND/OR");
427 // Skip over a NOT that wraps the condition, and remember to invert the op
428 // and operands at the next level.
429 Value *NotCond;
430 if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
431 isValInBlock(NotCond, CurBB->getBasicBlock())) {
432 findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
433 !InvertCond);
434 return;
435 }
436
437 const Instruction *BOp = dyn_cast<Instruction>(Cond);
438 const Value *BOpOp0, *BOpOp1;
439 // Compute the effective opcode for Cond, taking into account whether it needs
440 // to be inverted, e.g.
441 // and (not (or A, B)), C
442 // gets lowered as
443 // and (and (not A, not B), C)
444 Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
445 if (BOp) {
446 BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
447 ? Instruction::And
448 : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
449 ? Instruction::Or
450 : (Instruction::BinaryOps)0);
451 if (InvertCond) {
452 if (BOpc == Instruction::And)
453 BOpc = Instruction::Or;
454 else if (BOpc == Instruction::Or)
455 BOpc = Instruction::And;
456 }
457 }
458
459 // If this node is not part of the or/and tree, emit it as a branch.
460 // Note that all nodes in the tree should have same opcode.
461 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
462 if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
463 !isValInBlock(BOpOp0, CurBB->getBasicBlock()) ||
464 !isValInBlock(BOpOp1, CurBB->getBasicBlock())) {
465 emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
466 InvertCond);
467 return;
468 }
469
470 // Create TmpBB after CurBB.
471 MachineFunction::iterator BBI(CurBB);
472 MachineBasicBlock *TmpBB =
473 MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
474 CurBB->getParent()->insert(++BBI, TmpBB);
475
476 if (Opc == Instruction::Or) {
477 // Codegen X | Y as:
478 // BB1:
479 // jmp_if_X TBB
480 // jmp TmpBB
481 // TmpBB:
482 // jmp_if_Y TBB
483 // jmp FBB
484 //
485
486 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
487 // The requirement is that
488 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
489 // = TrueProb for original BB.
490 // Assuming the original probabilities are A and B, one choice is to set
491 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
492 // A/(1+B) and 2B/(1+B). This choice assumes that
493 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
494 // Another choice is to assume TrueProb for BB1 equals to TrueProb for
495 // TmpBB, but the math is more complicated.
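// Worked example (derived from the choice above): with A = 0.6 and B = 0.4,
// BB1 branches to TBB with A/2 = 0.3 and to TmpBB with A/2 + B = 0.7, while
// TmpBB branches to TBB with A/(1+B) = 0.6/1.4 ~= 0.43 and to FBB with
// 2B/(1+B) = 0.8/1.4 ~= 0.57. Then 0.3 + 0.7 * 0.43 ~= 0.6 = A, as required.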
496
497 auto NewTrueProb = TProb / 2;
498 auto NewFalseProb = TProb / 2 + FProb;
499 // Emit the LHS condition.
500 findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
501 NewFalseProb, InvertCond);
502
503 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
504 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
505 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
506 // Emit the RHS condition into TmpBB.
507 findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
508 Probs[1], InvertCond);
509 } else {
510 assert(Opc == Instruction::And && "Unknown merge op!");
511 // Codegen X & Y as:
512 // BB1:
513 // jmp_if_X TmpBB
514 // jmp FBB
515 // TmpBB:
516 // jmp_if_Y TBB
517 // jmp FBB
518 //
519 // This requires creation of TmpBB after CurBB.
520
521 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
522 // The requirement is that
523 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
524 // = FalseProb for original BB.
525 // Assuming the original probabilities are A and B, one choice is to set
526 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
527 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
528 // TrueProb for BB1 * FalseProb for TmpBB.
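// Worked example (derived from the choice above): with A = 0.6 and B = 0.4,
// BB1 branches to TmpBB with A + B/2 = 0.8 and to FBB with B/2 = 0.2, while
// TmpBB branches to TBB with 2A/(1+A) = 1.2/1.6 = 0.75 and to FBB with
// B/(1+A) = 0.4/1.6 = 0.25. Then 0.2 + 0.8 * 0.25 = 0.4 = B, as required.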
529
530 auto NewTrueProb = TProb + FProb / 2;
531 auto NewFalseProb = FProb / 2;
532 // Emit the LHS condition.
533 findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
534 NewFalseProb, InvertCond);
535
536 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
537 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
538 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
539 // Emit the RHS condition into TmpBB.
540 findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
541 Probs[1], InvertCond);
542 }
543}
544
545bool IRTranslator::shouldEmitAsBranches(
546 const std::vector<SwitchCG::CaseBlock> &Cases) {
547 // For multiple cases, it's better to emit as branches.
548 if (Cases.size() != 2)
549 return true;
550
551 // If this is two comparisons of the same values or'd or and'd together, they
552 // will get folded into a single comparison, so don't emit two blocks.
553 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
554 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
555 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
556 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
557 return false;
558 }
559
560 // Handle: (X != null) | (Y != null) --> (X|Y) != 0
561 // Handle: (X == null) & (Y == null) --> (X|Y) == 0
562 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
563 Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
564 isa<Constant>(Cases[0].CmpRHS) &&
565 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
566 if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
567 Cases[0].TrueBB == Cases[1].ThisBB)
568 return false;
569 if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
570 Cases[0].FalseBB == Cases[1].ThisBB)
571 return false;
572 }
573
574 return true;
575}
576
577bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
578 const BranchInst &BrInst = cast<BranchInst>(U);
579 auto &CurMBB = MIRBuilder.getMBB();
580 auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));
581
582 if (BrInst.isUnconditional()) {
583 // If the unconditional target is the layout successor, fallthrough.
584 if (OptLevel == CodeGenOptLevel::None ||
585 !CurMBB.isLayoutSuccessor(Succ0MBB))
586 MIRBuilder.buildBr(*Succ0MBB);
587
588 // Link successors.
589 for (const BasicBlock *Succ : successors(&BrInst))
590 CurMBB.addSuccessor(&getMBB(*Succ));
591 return true;
592 }
593
594 // If this condition is one of the special cases we handle, do special stuff
595 // now.
596 const Value *CondVal = BrInst.getCondition();
597 MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));
598
599 // If this is a series of conditions that are or'd or and'd together, emit
600 // this as a sequence of branches instead of setcc's with and/or operations.
601 // As long as jumps are not expensive (exceptions for multi-use logic ops,
602 // unpredictable branches, and vector extracts because those jumps are likely
603 // expensive for any target), this should improve performance.
604 // For example, instead of something like:
605 // cmp A, B
606 // C = seteq
607 // cmp D, E
608 // F = setle
609 // or C, F
610 // jnz foo
611 // Emit:
612 // cmp A, B
613 // je foo
614 // cmp D, E
615 // jle foo
616 using namespace PatternMatch;
617 const Instruction *CondI = dyn_cast<Instruction>(CondVal);
618 if (!TLI->isJumpExpensive() && CondI && CondI->hasOneUse() &&
619 !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
620 Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
621 Value *Vec;
622 const Value *BOp0, *BOp1;
623 if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
624 Opcode = Instruction::And;
625 else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
626 Opcode = Instruction::Or;
627
628 if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
629 match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
630 findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
631 getEdgeProbability(&CurMBB, Succ0MBB),
632 getEdgeProbability(&CurMBB, Succ1MBB),
633 /*InvertCond=*/false);
634 assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");
635
636 // Allow some cases to be rejected.
637 if (shouldEmitAsBranches(SL->SwitchCases)) {
638 // Emit the branch for this block.
639 emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
640 SL->SwitchCases.erase(SL->SwitchCases.begin());
641 return true;
642 }
643
644 // Okay, we decided not to do this, remove any inserted MBB's and clear
645 // SwitchCases.
646 for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
647 MF->erase(SL->SwitchCases[I].ThisBB);
648
649 SL->SwitchCases.clear();
650 }
651 }
652
653 // Create a CaseBlock record representing this branch.
654 SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
655 ConstantInt::getTrue(MF->getFunction().getContext()),
656 nullptr, Succ0MBB, Succ1MBB, &CurMBB,
657 CurBuilder->getDebugLoc());
658
659 // Use emitSwitchCase to actually insert the fast branch sequence for this
660 // cond branch.
661 emitSwitchCase(CB, &CurMBB, *CurBuilder);
662 return true;
663}
664
665void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
666 MachineBasicBlock *Dst,
667 BranchProbability Prob) {
668 if (!FuncInfo.BPI) {
669 Src->addSuccessorWithoutProb(Dst);
670 return;
671 }
672 if (Prob.isUnknown())
673 Prob = getEdgeProbability(Src, Dst);
674 Src->addSuccessor(Dst, Prob);
675}
676
677BranchProbability
678IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
679 const MachineBasicBlock *Dst) const {
680 const BasicBlock *SrcBB = Src->getBasicBlock();
681 const BasicBlock *DstBB = Dst->getBasicBlock();
682 if (!FuncInfo.BPI) {
683 // If BPI is not available, set the default probability as 1 / N, where N is
684 // the number of successors.
685 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
686 return BranchProbability(1, SuccSize);
687 }
688 return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
689}
690
691bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
692 using namespace SwitchCG;
693 // Extract cases from the switch.
694 const SwitchInst &SI = cast<SwitchInst>(U);
695 BranchProbabilityInfo *BPI = FuncInfo.BPI;
696 CaseClusterVector Clusters;
697 Clusters.reserve(SI.getNumCases());
698 for (const auto &I : SI.cases()) {
699 MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
700 assert(Succ && "Could not find successor mbb in mapping");
701 const ConstantInt *CaseVal = I.getCaseValue();
702 BranchProbability Prob =
703 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
704 : BranchProbability(1, SI.getNumCases() + 1);
705 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
706 }
707
708 MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());
709
710 // Cluster adjacent cases with the same destination. We do this at all
711 // optimization levels because it's cheap to do and will make codegen faster
712 // if there are many clusters.
713 sortAndRangeify(Clusters);
714
715 MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());
716
717 // If there is only the default destination, jump there directly.
718 if (Clusters.empty()) {
719 SwitchMBB->addSuccessor(DefaultMBB);
720 if (DefaultMBB != SwitchMBB->getNextNode())
721 MIB.buildBr(*DefaultMBB);
722 return true;
723 }
724
725 SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB, nullptr, nullptr);
726 SL->findBitTestClusters(Clusters, &SI);
727
728 LLVM_DEBUG({
729 dbgs() << "Case clusters: ";
730 for (const CaseCluster &C : Clusters) {
731 if (C.Kind == CC_JumpTable)
732 dbgs() << "JT:";
733 if (C.Kind == CC_BitTests)
734 dbgs() << "BT:";
735
736 C.Low->getValue().print(dbgs(), true);
737 if (C.Low != C.High) {
738 dbgs() << '-';
739 C.High->getValue().print(dbgs(), true);
740 }
741 dbgs() << ' ';
742 }
743 dbgs() << '\n';
744 });
745
746 assert(!Clusters.empty());
747 SwitchWorkList WorkList;
748 CaseClusterIt First = Clusters.begin();
749 CaseClusterIt Last = Clusters.end() - 1;
750 auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
751 WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
752
753 while (!WorkList.empty()) {
754 SwitchWorkListItem W = WorkList.pop_back_val();
755
756 unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
757 // For optimized builds, lower large range as a balanced binary tree.
758 if (NumClusters > 3 &&
759 MF->getTarget().getOptLevel() != CodeGenOptLevel::None &&
760 !DefaultMBB->getParent()->getFunction().hasMinSize()) {
761 splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB, MIB);
762 continue;
763 }
764
765 if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
766 return false;
767 }
768 return true;
769}
770
771void IRTranslator::splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
772 const SwitchCG::SwitchWorkListItem &W,
773 Value *Cond, MachineBasicBlock *SwitchMBB,
774 MachineIRBuilder &MIB) {
775 using namespace SwitchCG;
776 assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
777 "Clusters not sorted?");
778 assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
779
780 auto [LastLeft, FirstRight, LeftProb, RightProb] =
781 SL->computeSplitWorkItemInfo(W);
782
783 // Use the first element on the right as pivot since we will make less-than
784 // comparisons against it.
785 CaseClusterIt PivotCluster = FirstRight;
786 assert(PivotCluster > W.FirstCluster);
787 assert(PivotCluster <= W.LastCluster);
788
789 CaseClusterIt FirstLeft = W.FirstCluster;
790 CaseClusterIt LastRight = W.LastCluster;
791
792 const ConstantInt *Pivot = PivotCluster->Low;
793
794 // New blocks will be inserted immediately after the current one.
795 MachineFunction::iterator BBI(W.MBB);
796 ++BBI;
797
798 // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
799 // we can branch to its destination directly if it's squeezed exactly in
800 // between the known lower bound and Pivot - 1.
801 MachineBasicBlock *LeftMBB;
802 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
803 FirstLeft->Low == W.GE &&
804 (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
805 LeftMBB = FirstLeft->MBB;
806 } else {
807 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
808 FuncInfo.MF->insert(BBI, LeftMBB);
809 WorkList.push_back(
810 {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
811 }
812
813 // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
814 // single cluster, RHS.Low == Pivot, and we can branch to its destination
815 // directly if RHS.High equals the current upper bound.
816 MachineBasicBlock *RightMBB;
817 if (FirstRight == LastRight && FirstRight->Kind == CC_Range && W.LT &&
818 (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
819 RightMBB = FirstRight->MBB;
820 } else {
821 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
822 FuncInfo.MF->insert(BBI, RightMBB);
823 WorkList.push_back(
824 {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
825 }
826
827 // Create the CaseBlock record that will be used to lower the branch.
828 CaseBlock CB(ICmpInst::Predicate::ICMP_SLT, false, Cond, Pivot, nullptr,
829 LeftMBB, RightMBB, W.MBB, MIB.getDebugLoc(), LeftProb,
830 RightProb);
831
832 if (W.MBB == SwitchMBB)
833 emitSwitchCase(CB, SwitchMBB, MIB);
834 else
835 SL->SwitchCases.push_back(CB);
836}
837
838void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
839 MachineBasicBlock *MBB) {
840 // Emit the code for the jump table
841 assert(JT.Reg != -1U && "Should lower JT Header first!");
842 MachineIRBuilder MIB(*MBB->getParent());
843 MIB.setMBB(*MBB);
844 MIB.setDebugLoc(CurBuilder->getDebugLoc());
845
846 Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
847 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
848
849 auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
850 MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
851}
852
853bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
854 SwitchCG::JumpTableHeader &JTH,
855 MachineBasicBlock *HeaderBB) {
856 MachineIRBuilder MIB(*HeaderBB->getParent());
857 MIB.setMBB(*HeaderBB);
858 MIB.setDebugLoc(CurBuilder->getDebugLoc());
859
860 const Value &SValue = *JTH.SValue;
861 // Subtract the lowest switch case value from the value being switched on.
862 const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
863 Register SwitchOpReg = getOrCreateVReg(SValue);
864 auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
865 auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);
866
867 // This value may be smaller or larger than the target's pointer type, and
868 // may therefore require extension or truncation.
869 auto *PtrIRTy = PointerType::getUnqual(SValue.getContext());
870 const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
871 Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);
872
873 JT.Reg = Sub.getReg(0);
874
875 if (JTH.FallthroughUnreachable) {
876 if (JT.MBB != HeaderBB->getNextNode())
877 MIB.buildBr(*JT.MBB);
878 return true;
879 }
880
881 // Emit the range check for the jump table, and branch to the default block
882 // for the switch statement if the value being switched on exceeds the
883 // largest case in the switch.
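// For example, for a switch whose cases span First = 10 to Last = 14, the
// header compares Sub = (value - 10), zero-extended or truncated to pointer
// width, against 4 and branches to the default block when Sub is
// unsigned-greater-than 4; otherwise Sub is used to index the jump table.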
884 auto Cst = getOrCreateVReg(
885 *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
886 Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
887 auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);
888
889 auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);
890
891 // Avoid emitting unnecessary branches to the next block.
892 if (JT.MBB != HeaderBB->getNextNode())
893 BrCond = MIB.buildBr(*JT.MBB);
894 return true;
895}
896
897void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
898 MachineBasicBlock *SwitchBB,
899 MachineIRBuilder &MIB) {
900 Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
901 Register Cond;
902 DebugLoc OldDbgLoc = MIB.getDebugLoc();
903 MIB.setDebugLoc(CB.DbgLoc);
904 MIB.setMBB(*CB.ThisBB);
905
906 if (CB.PredInfo.NoCmp) {
907 // Branch or fall through to TrueBB.
908 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
909 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
910 CB.ThisBB);
911 CB.ThisBB->normalizeSuccProbs();
912 if (CB.TrueBB != CB.ThisBB->getNextNode())
913 MIB.buildBr(*CB.TrueBB);
914 MIB.setDebugLoc(OldDbgLoc);
915 return;
916 }
917
918 const LLT i1Ty = LLT::scalar(1);
919 // Build the compare.
920 if (!CB.CmpMHS) {
921 const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
922 // For conditional branch lowering, we might try to do something silly like
923 // emit an G_ICMP to compare an existing G_ICMP i1 result with true. If so,
924 // just re-use the existing condition vreg.
925 if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && CI->isOne() &&
926 CB.PredInfo.Pred == CmpInst::ICMP_EQ) {
927 Cond = CondLHS;
928 } else {
929 Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
930 if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
931 Cond =
932 MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
933 else
934 Cond =
935 MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
936 }
937 } else {
939 "Can only handle SLE ranges");
940
941 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
942 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
943
944 Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
945 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
946 Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
947 Cond =
948 MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
949 } else {
950 const LLT CmpTy = MRI->getType(CmpOpReg);
951 auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
952 auto Diff = MIB.buildConstant(CmpTy, High - Low);
953 Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
954 }
955 }
956
957 // Update successor info
958 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
959
960 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
961 CB.ThisBB);
962
963 // TrueBB and FalseBB are always different unless the incoming IR is
964 // degenerate. This only happens when running llc on weird IR.
965 if (CB.TrueBB != CB.FalseBB)
966 addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
967 CB.ThisBB->normalizeSuccProbs();
968
969 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
970 CB.ThisBB);
971
972 MIB.buildBrCond(Cond, *CB.TrueBB);
973 MIB.buildBr(*CB.FalseBB);
974 MIB.setDebugLoc(OldDbgLoc);
975}
976
977bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
978 MachineBasicBlock *SwitchMBB,
979 MachineBasicBlock *CurMBB,
980 MachineBasicBlock *DefaultMBB,
981 MachineIRBuilder &MIB,
982 MachineFunction::iterator BBI,
983 BranchProbability UnhandledProbs,
984 SwitchCG::CaseClusterIt I,
985 MachineBasicBlock *Fallthrough,
986 bool FallthroughUnreachable) {
987 using namespace SwitchCG;
988 MachineFunction *CurMF = SwitchMBB->getParent();
989 // FIXME: Optimize away range check based on pivot comparisons.
990 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
991 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
992 BranchProbability DefaultProb = W.DefaultProb;
993
994 // The jump block hasn't been inserted yet; insert it here.
995 MachineBasicBlock *JumpMBB = JT->MBB;
996 CurMF->insert(BBI, JumpMBB);
997
998 // Since the jump table block is separate from the switch block, we need
999 // to keep track of it as a machine predecessor to the default block,
1000 // otherwise we lose the phi edges.
1001 addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
1002 CurMBB);
1003 addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
1004 JumpMBB);
1005
1006 auto JumpProb = I->Prob;
1007 auto FallthroughProb = UnhandledProbs;
1008
1009 // If the default statement is a target of the jump table, we evenly
1010 // distribute the default probability to successors of CurMBB. Also
1011 // update the probability on the edge from JumpMBB to Fallthrough.
1012 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
1013 SE = JumpMBB->succ_end();
1014 SI != SE; ++SI) {
1015 if (*SI == DefaultMBB) {
1016 JumpProb += DefaultProb / 2;
1017 FallthroughProb -= DefaultProb / 2;
1018 JumpMBB->setSuccProbability(SI, DefaultProb / 2);
1019 JumpMBB->normalizeSuccProbs();
1020 } else {
1021 // Also record edges from the jump table block to its successors.
1022 addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
1023 JumpMBB);
1024 }
1025 }
1026
1027 if (FallthroughUnreachable)
1028 JTH->FallthroughUnreachable = true;
1029
1030 if (!JTH->FallthroughUnreachable)
1031 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
1032 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
1033 CurMBB->normalizeSuccProbs();
1034
1035 // The jump table header will be inserted in our current block, do the
1036 // range check, and fall through to our fallthrough block.
1037 JTH->HeaderBB = CurMBB;
1038 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
1039
1040 // If we're in the right place, emit the jump table header right now.
1041 if (CurMBB == SwitchMBB) {
1042 if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
1043 return false;
1044 JTH->Emitted = true;
1045 }
1046 return true;
1047}
1048bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
1049 Value *Cond,
1050 MachineBasicBlock *Fallthrough,
1051 bool FallthroughUnreachable,
1052 BranchProbability UnhandledProbs,
1053 MachineBasicBlock *CurMBB,
1054 MachineIRBuilder &MIB,
1055 MachineBasicBlock *SwitchMBB) {
1056 using namespace SwitchCG;
1057 const Value *RHS, *LHS, *MHS;
1058 CmpInst::Predicate Pred;
1059 if (I->Low == I->High) {
1060 // Check Cond == I->Low.
1061 Pred = CmpInst::ICMP_EQ;
1062 LHS = Cond;
1063 RHS = I->Low;
1064 MHS = nullptr;
1065 } else {
1066 // Check I->Low <= Cond <= I->High.
1067 Pred = CmpInst::ICMP_SLE;
1068 LHS = I->Low;
1069 MHS = Cond;
1070 RHS = I->High;
1071 }
1072
1073 // If Fallthrough is unreachable, fold away the comparison.
1074 // The false probability is the sum of all unhandled cases.
1075 CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
1076 CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);
1077
1078 emitSwitchCase(CB, SwitchMBB, MIB);
1079 return true;
1080}
1081
1082void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
1083 MachineBasicBlock *SwitchBB) {
1084 MachineIRBuilder &MIB = *CurBuilder;
1085 MIB.setMBB(*SwitchBB);
1086
1087 // Subtract the minimum value.
1088 Register SwitchOpReg = getOrCreateVReg(*B.SValue);
1089
1090 LLT SwitchOpTy = MRI->getType(SwitchOpReg);
1091 Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
1092 auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);
1093
1094 Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
1095 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1096
1097 LLT MaskTy = SwitchOpTy;
1098 if (MaskTy.getSizeInBits() > PtrTy.getSizeInBits() ||
1099 !llvm::has_single_bit<uint32_t>(MaskTy.getSizeInBits()))
1100 MaskTy = LLT::scalar(PtrTy.getSizeInBits());
1101 else {
1102 // Ensure that the type will fit the mask value.
1103 for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
1104 if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
1105 // Switch table case ranges are encoded into a series of masks.
1106 // Just use pointer type, it's guaranteed to fit.
1107 MaskTy = LLT::scalar(PtrTy.getSizeInBits());
1108 break;
1109 }
1110 }
1111 }
1112 Register SubReg = RangeSub.getReg(0);
1113 if (SwitchOpTy != MaskTy)
1114 SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);
1115
1116 B.RegVT = getMVTForLLT(MaskTy);
1117 B.Reg = SubReg;
1118
1119 MachineBasicBlock *MBB = B.Cases[0].ThisBB;
1120
1121 if (!B.FallthroughUnreachable)
1122 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
1123 addSuccessorWithProb(SwitchBB, MBB, B.Prob);
1124
1125 SwitchBB->normalizeSuccProbs();
1126
1127 if (!B.FallthroughUnreachable) {
1128 // Conditional branch to the default block.
1129 auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
1130 auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
1131 RangeSub, RangeCst);
1132 MIB.buildBrCond(RangeCmp, *B.Default);
1133 }
1134
1135 // Avoid emitting unnecessary branches to the next block.
1136 if (MBB != SwitchBB->getNextNode())
1137 MIB.buildBr(*MBB);
1138}
1139
1140void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
1141 MachineBasicBlock *NextMBB,
1142 BranchProbability BranchProbToNext,
1143 Register Reg, SwitchCG::BitTestCase &B,
1144 MachineBasicBlock *SwitchBB) {
1145 MachineIRBuilder &MIB = *CurBuilder;
1146 MIB.setMBB(*SwitchBB);
1147
1148 LLT SwitchTy = getLLTForMVT(BB.RegVT);
1149 Register Cmp;
1150 unsigned PopCount = llvm::popcount(B.Mask);
1151 if (PopCount == 1) {
1152 // Testing for a single bit; just compare the shift count with what it
1153 // would need to be to shift a 1 bit in that position.
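// For example, a mask of 0b0100 has its only set bit at index 2, which is
// countr_zero(Mask), so the test reduces to comparing Reg with the constant 2.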
1154 auto MaskTrailingZeros =
1155 MIB.buildConstant(SwitchTy, llvm::countr_zero(B.Mask));
1156 Cmp =
1157 MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
1158 .getReg(0);
1159 } else if (PopCount == BB.Range) {
1160 // There is only one zero bit in the range, test for it directly.
1161 auto MaskTrailingOnes =
1162 MIB.buildConstant(SwitchTy, llvm::countr_one(B.Mask));
1163 Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
1164 .getReg(0);
1165 } else {
1166 // Make desired shift.
1167 auto CstOne = MIB.buildConstant(SwitchTy, 1);
1168 auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);
1169
1170 // Emit bit tests and jumps.
1171 auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
1172 auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
1173 auto CstZero = MIB.buildConstant(SwitchTy, 0);
1174 Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
1175 .getReg(0);
1176 }
1177
1178 // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
1179 addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
1180 // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
1181 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
1182 // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
1183 // one as they are relative probabilities (and thus work more like weights),
1184 // and hence we need to normalize them to let the sum of them become one.
1185 SwitchBB->normalizeSuccProbs();
1186
1187 // Record the fact that the IR edge from the header to the bit test target
1188 // will go through our new block. Needed for PHIs to have nodes added.
1189 addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
1190 SwitchBB);
1191
1192 MIB.buildBrCond(Cmp, *B.TargetBB);
1193
1194 // Avoid emitting unnecessary branches to the next block.
1195 if (NextMBB != SwitchBB->getNextNode())
1196 MIB.buildBr(*NextMBB);
1197}
1198
1199bool IRTranslator::lowerBitTestWorkItem(
1200 SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
1201 MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
1202 MachineIRBuilder &MIB, MachineFunction::iterator BBI,
1203 BranchProbability DefaultProb, BranchProbability UnhandledProbs,
1204 SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
1205 bool FallthroughUnreachable) {
1206 using namespace SwitchCG;
1207 MachineFunction *CurMF = SwitchMBB->getParent();
1208 // FIXME: Optimize away range check based on pivot comparisons.
1209 BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
1210 // The bit test blocks haven't been inserted yet; insert them here.
1211 for (BitTestCase &BTC : BTB->Cases)
1212 CurMF->insert(BBI, BTC.ThisBB);
1213
1214 // Fill in fields of the BitTestBlock.
1215 BTB->Parent = CurMBB;
1216 BTB->Default = Fallthrough;
1217
1218 BTB->DefaultProb = UnhandledProbs;
1219 // If the cases in bit test don't form a contiguous range, we evenly
1220 // distribute the probability on the edge to Fallthrough to two
1221 // successors of CurMBB.
1222 if (!BTB->ContiguousRange) {
1223 BTB->Prob += DefaultProb / 2;
1224 BTB->DefaultProb -= DefaultProb / 2;
1225 }
1226
1227 if (FallthroughUnreachable)
1228 BTB->FallthroughUnreachable = true;
1229
1230 // If we're in the right place, emit the bit test header right now.
1231 if (CurMBB == SwitchMBB) {
1232 emitBitTestHeader(*BTB, SwitchMBB);
1233 BTB->Emitted = true;
1234 }
1235 return true;
1236}
1237
1238bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
1239 Value *Cond,
1240 MachineBasicBlock *SwitchMBB,
1241 MachineBasicBlock *DefaultMBB,
1242 MachineIRBuilder &MIB) {
1243 using namespace SwitchCG;
1244 MachineFunction *CurMF = FuncInfo.MF;
1245 MachineBasicBlock *NextMBB = nullptr;
1246 MachineFunction::iterator BBI(W.MBB);
1247 if (++BBI != FuncInfo.MF->end())
1248 NextMBB = &*BBI;
1249
1250 if (EnableOpts) {
1251 // Here, we order cases by probability so the most likely case will be
1252 // checked first. However, two clusters can have the same probability in
1253 // which case their relative ordering is non-deterministic. So we use Low
1254 // as a tie-breaker as clusters are guaranteed to never overlap.
1255 llvm::sort(W.FirstCluster, W.LastCluster + 1,
1256 [](const CaseCluster &a, const CaseCluster &b) {
1257 return a.Prob != b.Prob
1258 ? a.Prob > b.Prob
1259 : a.Low->getValue().slt(b.Low->getValue());
1260 });
1261
1262 // Rearrange the case blocks so that the last one falls through if possible
1263 // without changing the order of probabilities.
1264 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
1265 --I;
1266 if (I->Prob > W.LastCluster->Prob)
1267 break;
1268 if (I->Kind == CC_Range && I->MBB == NextMBB) {
1269 std::swap(*I, *W.LastCluster);
1270 break;
1271 }
1272 }
1273 }
1274
1275 // Compute total probability.
1276 BranchProbability DefaultProb = W.DefaultProb;
1277 BranchProbability UnhandledProbs = DefaultProb;
1278 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
1279 UnhandledProbs += I->Prob;
1280
1281 MachineBasicBlock *CurMBB = W.MBB;
1282 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
1283 bool FallthroughUnreachable = false;
1284 MachineBasicBlock *Fallthrough;
1285 if (I == W.LastCluster) {
1286 // For the last cluster, fall through to the default destination.
1287 Fallthrough = DefaultMBB;
1288 FallthroughUnreachable = isa<UnreachableInst>(
1289 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
1290 } else {
1291 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
1292 CurMF->insert(BBI, Fallthrough);
1293 }
1294 UnhandledProbs -= I->Prob;
1295
1296 switch (I->Kind) {
1297 case CC_BitTests: {
1298 if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1299 DefaultProb, UnhandledProbs, I, Fallthrough,
1300 FallthroughUnreachable)) {
1301 LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");
1302 return false;
1303 }
1304 break;
1305 }
1306
1307 case CC_JumpTable: {
1308 if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1309 UnhandledProbs, I, Fallthrough,
1310 FallthroughUnreachable)) {
1311 LLVM_DEBUG(dbgs() << "Failed to lower jump table");
1312 return false;
1313 }
1314 break;
1315 }
1316 case CC_Range: {
1317 if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
1318 FallthroughUnreachable, UnhandledProbs,
1319 CurMBB, MIB, SwitchMBB)) {
1320 LLVM_DEBUG(dbgs() << "Failed to lower switch range");
1321 return false;
1322 }
1323 break;
1324 }
1325 }
1326 CurMBB = Fallthrough;
1327 }
1328
1329 return true;
1330}
1331
1332bool IRTranslator::translateIndirectBr(const User &U,
1333 MachineIRBuilder &MIRBuilder) {
1334 const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
1335
1336 const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
1337 MIRBuilder.buildBrIndirect(Tgt);
1338
1339 // Link successors.
1340 SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
1341 MachineBasicBlock &CurBB = MIRBuilder.getMBB();
1342 for (const BasicBlock *Succ : successors(&BrInst)) {
1343 // It's legal for indirectbr instructions to have duplicate blocks in the
1344 // destination list. We don't allow this in MIR. Skip anything that's
1345 // already a successor.
1346 if (!AddedSuccessors.insert(Succ).second)
1347 continue;
1348 CurBB.addSuccessor(&getMBB(*Succ));
1349 }
1350
1351 return true;
1352}
1353
1354static bool isSwiftError(const Value *V) {
1355 if (auto Arg = dyn_cast<Argument>(V))
1356 return Arg->hasSwiftErrorAttr();
1357 if (auto AI = dyn_cast<AllocaInst>(V))
1358 return AI->isSwiftError();
1359 return false;
1360}
1361
1362bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
1363 const LoadInst &LI = cast<LoadInst>(U);
1364 TypeSize StoreSize = DL->getTypeStoreSize(LI.getType());
1365 if (StoreSize.isZero())
1366 return true;
1367
1368 ArrayRef<Register> Regs = getOrCreateVRegs(LI);
1369 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
1370 Register Base = getOrCreateVReg(*LI.getPointerOperand());
1371 AAMDNodes AAInfo = LI.getAAMetadata();
1372
1373 const Value *Ptr = LI.getPointerOperand();
1374 Type *OffsetIRTy = DL->getIndexType(Ptr->getType());
1375 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1376
1377 if (CLI->supportSwiftError() && isSwiftError(Ptr)) {
1378 assert(Regs.size() == 1 && "swifterror should be single pointer");
1379 Register VReg =
1380 SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), Ptr);
1381 MIRBuilder.buildCopy(Regs[0], VReg);
1382 return true;
1383 }
1384
1385 MachineMemOperand::Flags Flags =
1386 TLI->getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
1387 if (AA && !(Flags & MachineMemOperand::MOInvariant)) {
1388 if (AA->pointsToConstantMemory(
1389 MemoryLocation(Ptr, LocationSize::precise(StoreSize), AAInfo))) {
1390 Flags |= MachineMemOperand::MOInvariant;
1391 }
1392 }
1393
1394 const MDNode *Ranges =
1395 Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
1396 for (unsigned i = 0; i < Regs.size(); ++i) {
1397 Register Addr;
1398 MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
1399
1400 MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
1401 Align BaseAlign = getMemOpAlign(LI);
1402 auto MMO = MF->getMachineMemOperand(
1403 Ptr, Flags, MRI->getType(Regs[i]),
1404 commonAlignment(BaseAlign, Offsets[i] / 8), AAInfo, Ranges,
1405 LI.getSyncScopeID(), LI.getOrdering());
1406 MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
1407 }
1408
1409 return true;
1410}
1411
1412bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
1413 const StoreInst &SI = cast<StoreInst>(U);
1414 if (DL->getTypeStoreSize(SI.getValueOperand()->getType()).isZero())
1415 return true;
1416
1417 ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
1418 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
1419 Register Base = getOrCreateVReg(*SI.getPointerOperand());
1420
1421 Type *OffsetIRTy = DL->getIndexType(SI.getPointerOperandType());
1422 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1423
1424 if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
1425 assert(Vals.size() == 1 && "swifterror should be single pointer");
1426
1427 Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
1428 SI.getPointerOperand());
1429 MIRBuilder.buildCopy(VReg, Vals[0]);
1430 return true;
1431 }
1432
1433 MachineMemOperand::Flags Flags = TLI->getStoreMemOperandFlags(SI, *DL);
1434
1435 for (unsigned i = 0; i < Vals.size(); ++i) {
1436 Register Addr;
1437 MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
1438
1439 MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
1440 Align BaseAlign = getMemOpAlign(SI);
1441 auto MMO = MF->getMachineMemOperand(
1442 Ptr, Flags, MRI->getType(Vals[i]),
1443 commonAlignment(BaseAlign, Offsets[i] / 8), SI.getAAMetadata(), nullptr,
1444 SI.getSyncScopeID(), SI.getOrdering());
1445 MIRBuilder.buildStore(Vals[i], Addr, *MMO);
1446 }
1447 return true;
1448}
1449
1450static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
1451 const Value *Src = U.getOperand(0);
1452 Type *Int32Ty = Type::getInt32Ty(U.getContext());
1453
1454 // getIndexedOffsetInType is designed for GEPs, so the first index is the
1455 // usual array element rather than looking into the actual aggregate.
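// For example, with a typical data layout, extractvalue {i32, {i64, i8}} %a, 1, 0
// becomes the index list {0, 1, 0}; the inner struct starts at byte 8, so the
// helper returns 8 * 8 = 64, i.e. the bit offset of the extracted element.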
1456 SmallVector<Value *, 4> Indices;
1457 Indices.push_back(ConstantInt::get(Int32Ty, 0));
1458
1459 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
1460 for (auto Idx : EVI->indices())
1461 Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1462 } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
1463 for (auto Idx : IVI->indices())
1464 Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1465 } else {
1466 for (unsigned i = 1; i < U.getNumOperands(); ++i)
1467 Indices.push_back(U.getOperand(i));
1468 }
1469
1470 return 8 * static_cast<uint64_t>(
1471 DL.getIndexedOffsetInType(Src->getType(), Indices));
1472}
1473
1474bool IRTranslator::translateExtractValue(const User &U,
1475 MachineIRBuilder &MIRBuilder) {
1476 const Value *Src = U.getOperand(0);
1477 uint64_t Offset = getOffsetFromIndices(U, *DL);
1478 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1479 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
1480 unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
1481 auto &DstRegs = allocateVRegs(U);
1482
1483 for (unsigned i = 0; i < DstRegs.size(); ++i)
1484 DstRegs[i] = SrcRegs[Idx++];
1485
1486 return true;
1487}
1488
1489bool IRTranslator::translateInsertValue(const User &U,
1490 MachineIRBuilder &MIRBuilder) {
1491 const Value *Src = U.getOperand(0);
1492 uint64_t Offset = getOffsetFromIndices(U, *DL);
1493 auto &DstRegs = allocateVRegs(U);
1494 ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
1495 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1496 ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
1497 auto *InsertedIt = InsertedRegs.begin();
1498
1499 for (unsigned i = 0; i < DstRegs.size(); ++i) {
1500 if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
1501 DstRegs[i] = *InsertedIt++;
1502 else
1503 DstRegs[i] = SrcRegs[i];
1504 }
1505
1506 return true;
1507}
1508
1509bool IRTranslator::translateSelect(const User &U,
1510 MachineIRBuilder &MIRBuilder) {
1511 Register Tst = getOrCreateVReg(*U.getOperand(0));
1512 ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
1513 ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
1514 ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
1515
1516 uint32_t Flags = 0;
1517 if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
1518 Flags = MachineInstr::copyFlagsFromInstruction(*SI);
1519
1520 for (unsigned i = 0; i < ResRegs.size(); ++i) {
1521 MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
1522 }
1523
1524 return true;
1525}
1526
1527bool IRTranslator::translateCopy(const User &U, const Value &V,
1528 MachineIRBuilder &MIRBuilder) {
1529 Register Src = getOrCreateVReg(V);
1530 auto &Regs = *VMap.getVRegs(U);
1531 if (Regs.empty()) {
1532 Regs.push_back(Src);
1533 VMap.getOffsets(U)->push_back(0);
1534 } else {
1535 // If we already assigned a vreg for this instruction, we can't change that.
1536 // Emit a copy to satisfy the users we already emitted.
1537 MIRBuilder.buildCopy(Regs[0], Src);
1538 }
1539 return true;
1540}
1541
1542bool IRTranslator::translateBitCast(const User &U,
1543 MachineIRBuilder &MIRBuilder) {
1544 // If we're bitcasting to the source type, we can reuse the source vreg.
1545 if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
1546 getLLTForType(*U.getType(), *DL)) {
1547 // If the source is a ConstantInt then it was probably created by
1548 // ConstantHoisting and we should leave it alone.
1549 if (isa<ConstantInt>(U.getOperand(0)))
1550 return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
1551 MIRBuilder);
1552 return translateCopy(U, *U.getOperand(0), MIRBuilder);
1553 }
1554
1555 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
1556}
1557
1558bool IRTranslator::translateCast(unsigned Opcode, const User &U,
1559 MachineIRBuilder &MIRBuilder) {
1560 if (U.getType()->getScalarType()->isBFloatTy() ||
1561 U.getOperand(0)->getType()->getScalarType()->isBFloatTy())
1562 return false;
1563
1564 uint32_t Flags = 0;
1565 if (const Instruction *I = dyn_cast<Instruction>(&U))
1566 Flags = MachineInstr::copyFlagsFromInstruction(*I);
1567
1568 Register Op = getOrCreateVReg(*U.getOperand(0));
1569 Register Res = getOrCreateVReg(U);
1570 MIRBuilder.buildInstr(Opcode, {Res}, {Op}, Flags);
1571 return true;
1572}
1573
1574bool IRTranslator::translateGetElementPtr(const User &U,
1575 MachineIRBuilder &MIRBuilder) {
1576 Value &Op0 = *U.getOperand(0);
1577 Register BaseReg = getOrCreateVReg(Op0);
1578 Type *PtrIRTy = Op0.getType();
1579 LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1580 Type *OffsetIRTy = DL->getIndexType(PtrIRTy);
1581 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1582
1583 uint32_t Flags = 0;
1584 if (isa<Instruction>(U)) {
1585 const Instruction &I = cast<Instruction>(U);
1586 Flags = MachineInstr::copyFlagsFromInstruction(I);
1587 }
1588
1589 // Normalize Vector GEP - all scalar operands should be converted to the
1590 // splat vector.
1591 unsigned VectorWidth = 0;
1592
1593 // True if we should use a splat vector; using VectorWidth alone is not
1594 // sufficient.
1595 bool WantSplatVector = false;
1596 if (auto *VT = dyn_cast<VectorType>(U.getType())) {
1597 VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
1598 // We don't produce 1 x N vectors; those are treated as scalars.
1599 WantSplatVector = VectorWidth > 1;
1600 }
1601
1602 // We might need to splat the base pointer into a vector if the offsets
1603 // are vectors.
1604 if (WantSplatVector && !PtrTy.isVector()) {
1605 BaseReg = MIRBuilder
1606 .buildSplatBuildVector(LLT::fixed_vector(VectorWidth, PtrTy),
1607 BaseReg)
1608 .getReg(0);
1609 PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
1610 PtrTy = getLLTForType(*PtrIRTy, *DL);
1611 OffsetIRTy = DL->getIndexType(PtrIRTy);
1612 OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1613 }
1614
1615 int64_t Offset = 0;
1616 for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
1617 GTI != E; ++GTI) {
1618 const Value *Idx = GTI.getOperand();
1619 if (StructType *StTy = GTI.getStructTypeOrNull()) {
1620 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
1621 Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
1622 continue;
1623 } else {
1624 uint64_t ElementSize = GTI.getSequentialElementStride(*DL);
1625
1626 // If this is a scalar constant or a splat vector of constants,
1627 // handle it quickly.
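// For example, for getelementptr i32, ptr %p, i64 3 the index is a constant
// and ElementSize is 4, so the running Offset simply grows by 12 here and a
// single G_PTR_ADD is emitted once all constant contributions are folded.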
1628 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
1629 if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {
1630 Offset += ElementSize * *Val;
1631 continue;
1632 }
1633 }
1634
1635 if (Offset != 0) {
1636 auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
1637 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
1638 .getReg(0);
1639 Offset = 0;
1640 }
1641
1642 Register IdxReg = getOrCreateVReg(*Idx);
1643 LLT IdxTy = MRI->getType(IdxReg);
1644 if (IdxTy != OffsetTy) {
1645 if (!IdxTy.isVector() && WantSplatVector) {
1646 IdxReg = MIRBuilder
1647 .buildSplatBuildVector(LLT::fixed_vector(VectorWidth, IdxTy),
1648 IdxReg)
1649 .getReg(0);
1650 }
1651
1652 IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
1653 }
1654
1655 // N = N + Idx * ElementSize;
1656 // Avoid doing it for ElementSize of 1.
1657 Register GepOffsetReg;
1658 if (ElementSize != 1) {
1659 auto ElementSizeMIB = MIRBuilder.buildConstant(
1660 getLLTForType(*OffsetIRTy, *DL), ElementSize);
1661 GepOffsetReg =
1662 MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
1663 } else
1664 GepOffsetReg = IdxReg;
1665
1666 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
1667 }
1668 }
1669
1670 if (Offset != 0) {
1671 auto OffsetMIB =
1672 MIRBuilder.buildConstant(OffsetTy, Offset);
1673
1674 if (int64_t(Offset) >= 0 && cast<GEPOperator>(U).isInBounds())
1675 Flags |= MachineInstr::MIFlag::NoUWrap;
1676
1677 MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),
1678 Flags);
1679 return true;
1680 }
1681
1682 MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
1683 return true;
1684}
1685
1686bool IRTranslator::translateMemFunc(const CallInst &CI,
1687 MachineIRBuilder &MIRBuilder,
1688 unsigned Opcode) {
1689 const Value *SrcPtr = CI.getArgOperand(1);
1690 // If the source is undef, then just emit a nop.
1691 if (isa<UndefValue>(SrcPtr))
1692 return true;
1693
1694 SmallVector<Register, 3> SrcRegs;
1695
1696 unsigned MinPtrSize = UINT_MAX;
1697 for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
1698 Register SrcReg = getOrCreateVReg(**AI);
1699 LLT SrcTy = MRI->getType(SrcReg);
1700 if (SrcTy.isPointer())
1701 MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);
1702 SrcRegs.push_back(SrcReg);
1703 }
1704
1705 LLT SizeTy = LLT::scalar(MinPtrSize);
1706
1707 // The size operand should be the minimum of the pointer sizes.
1708 Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
1709 if (MRI->getType(SizeOpReg) != SizeTy)
1710 SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0);
1711
1712 auto ICall = MIRBuilder.buildInstr(Opcode);
1713 for (Register SrcReg : SrcRegs)
1714 ICall.addUse(SrcReg);
1715
1716 Align DstAlign;
1717 Align SrcAlign;
1718 unsigned IsVol =
1719 cast<ConstantInt>(CI.getArgOperand(CI.arg_size() - 1))->getZExtValue();
1720
1721 ConstantInt *CopySize = nullptr;
1722
1723 if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
1724 DstAlign = MCI->getDestAlign().valueOrOne();
1725 SrcAlign = MCI->getSourceAlign().valueOrOne();
1726 CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
1727 } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
1728 DstAlign = MCI->getDestAlign().valueOrOne();
1729 SrcAlign = MCI->getSourceAlign().valueOrOne();
1730 CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
1731 } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
1732 DstAlign = MMI->getDestAlign().valueOrOne();
1733 SrcAlign = MMI->getSourceAlign().valueOrOne();
1734 CopySize = dyn_cast<ConstantInt>(MMI->getArgOperand(2));
1735 } else {
1736 auto *MSI = cast<MemSetInst>(&CI);
1737 DstAlign = MSI->getDestAlign().valueOrOne();
1738 }
1739
1740 if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
1741 // We need to propagate the tail call flag from the IR inst as an argument.
1742 // Otherwise, we have to pessimize and assume later that we cannot tail call
1743 // any memory intrinsics.
1744 ICall.addImm(CI.isTailCall() ? 1 : 0);
1745 }
1746
1747 // Create mem operands to store the alignment and volatile info.
1748 MachineMemOperand::Flags LoadFlags = MachineMemOperand::MOLoad;
1749 MachineMemOperand::Flags StoreFlags = MachineMemOperand::MOStore;
1750 if (IsVol) {
1751 LoadFlags |= MachineMemOperand::MOVolatile;
1752 StoreFlags |= MachineMemOperand::MOVolatile;
1753 }
1754
1755 AAMDNodes AAInfo = CI.getAAMetadata();
1756 if (AA && CopySize &&
1757 AA->pointsToConstantMemory(MemoryLocation(
1758 SrcPtr, LocationSize::precise(CopySize->getZExtValue()), AAInfo))) {
1759 LoadFlags |= MachineMemOperand::MOInvariant;
1760
1761 // FIXME: pointsToConstantMemory probably does not imply dereferenceable,
1762 // but the previous usage implied it did. Probably should check
1763 // isDereferenceableAndAlignedPointer.
1764 LoadFlags |= MachineMemOperand::MODereferenceable;
1765 }
1766
1767 ICall.addMemOperand(
1768 MF->getMachineMemOperand(MachinePointerInfo(CI.getArgOperand(0)),
1769 StoreFlags, 1, DstAlign, AAInfo));
1770 if (Opcode != TargetOpcode::G_MEMSET)
1771 ICall.addMemOperand(MF->getMachineMemOperand(
1772 MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));
1773
1774 return true;
1775}
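// Illustrative example (a sketch, not verbatim MIR): a call such as
//   call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %n, i1 false)
// lowers to roughly G_MEMCPY %dst(p0), %src(p0), %n(s64), 0 where the trailing
// immediate is the tail-call flag added above, and the instruction carries a
// store memoperand for the destination plus a load memoperand for the source.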
1776
1777bool IRTranslator::translateTrap(const CallInst &CI,
1778 MachineIRBuilder &MIRBuilder,
1779 unsigned Opcode) {
1780 StringRef TrapFuncName =
1781 CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
1782 if (TrapFuncName.empty()) {
1783 if (Opcode == TargetOpcode::G_UBSANTRAP) {
1784 uint64_t Code = cast<ConstantInt>(CI.getOperand(0))->getZExtValue();
1785 MIRBuilder.buildInstr(Opcode, {}, ArrayRef<llvm::SrcOp>{Code});
1786 } else {
1787 MIRBuilder.buildInstr(Opcode);
1788 }
1789 return true;
1790 }
1791
1792 CallLowering::CallLoweringInfo Info;
1793 if (Opcode == TargetOpcode::G_UBSANTRAP)
1794 Info.OrigArgs.push_back({getOrCreateVRegs(*CI.getArgOperand(0)),
1795 CI.getArgOperand(0)->getType(), 0});
1796
1797 Info.Callee = MachineOperand::CreateES(TrapFuncName.data());
1798 Info.CB = &CI;
1799 Info.OrigRet = {Register(), Type::getVoidTy(CI.getContext()), 0};
1800 return CLI->lowerCall(MIRBuilder, Info);
1801}
1802
1803bool IRTranslator::translateVectorInterleave2Intrinsic(
1804 const CallInst &CI, MachineIRBuilder &MIRBuilder) {
1805 assert(CI.getIntrinsicID() == Intrinsic::vector_interleave2 &&
1806 "This function can only be called on the interleave2 intrinsic!");
1807 // Canonicalize interleave2 to G_SHUFFLE_VECTOR (similar to SelectionDAG).
1808 Register Op0 = getOrCreateVReg(*CI.getOperand(0));
1809 Register Op1 = getOrCreateVReg(*CI.getOperand(1));
1810 Register Res = getOrCreateVReg(CI);
1811
1812 LLT OpTy = MRI->getType(Op0);
1813 MIRBuilder.buildShuffleVector(Res, Op0, Op1,
1814 createInterleaveMask(OpTy.getNumElements(), 2));
1815
1816 return true;
1817}
1818
1819bool IRTranslator::translateVectorDeinterleave2Intrinsic(
1820 const CallInst &CI, MachineIRBuilder &MIRBuilder) {
1821 assert(CI.getIntrinsicID() == Intrinsic::vector_deinterleave2 &&
1822 "This function can only be called on the deinterleave2 intrinsic!");
1823 // Canonicalize deinterleave2 to shuffles that extract sub-vectors (similar to
1824 // SelectionDAG).
1825 Register Op = getOrCreateVReg(*CI.getOperand(0));
1826 auto Undef = MIRBuilder.buildUndef(MRI->getType(Op));
1827 ArrayRef<Register> Res = getOrCreateVRegs(CI);
1828
1829 LLT ResTy = MRI->getType(Res[0]);
1830 MIRBuilder.buildShuffleVector(Res[0], Op, Undef,
1831 createStrideMask(0, 2, ResTy.getNumElements()));
1832 MIRBuilder.buildShuffleVector(Res[1], Op, Undef,
1833 createStrideMask(1, 2, ResTy.getNumElements()));
1834
1835 return true;
1836}
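// Illustrative example: with fixed-width operands, interleave2 of two
// <4 x i32> values uses the mask <0,4,1,5,2,6,3,7>, and deinterleave2 of a
// <8 x i32> value uses the stride-2 masks <0,2,4,6> and <1,3,5,7>, mirroring
// the SelectionDAG canonicalization mentioned above.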
1837
1838void IRTranslator::getStackGuard(Register DstReg,
1839 MachineIRBuilder &MIRBuilder) {
1840 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1841 MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
1842 auto MIB =
1843 MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
1844
1845 Value *Global = TLI->getSDagStackGuard(*MF->getFunction().getParent());
1846 if (!Global)
1847 return;
1848
1849 unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
1850 LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));
1851
1852 MachinePointerInfo MPInfo(Global);
1853 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
1854 MachineMemOperand::MODereferenceable;
1855 MachineMemOperand *MemRef = MF->getMachineMemOperand(
1856 MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
1857 MIB.setMemRefs({MemRef});
1858}
1859
1860bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
1861 MachineIRBuilder &MIRBuilder) {
1862 ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
1863 MIRBuilder.buildInstr(
1864 Op, {ResRegs[0], ResRegs[1]},
1865 {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});
1866
1867 return true;
1868}
1869
1870bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
1871 MachineIRBuilder &MIRBuilder) {
1872 Register Dst = getOrCreateVReg(CI);
1873 Register Src0 = getOrCreateVReg(*CI.getOperand(0));
1874 Register Src1 = getOrCreateVReg(*CI.getOperand(1));
1875 uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
1876 MIRBuilder.buildInstr(Op, {Dst}, { Src0, Src1, Scale });
1877 return true;
1878}
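// Illustrative example (a sketch): a fixed-point call such as
//   %r = call i32 @llvm.smul.fix.i32(i32 %a, i32 %b, i32 31)
// becomes roughly %r:_(s32) = G_SMULFIX %a, %b, 31, with the scale carried as
// an immediate operand.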
1879
1880unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
1881 switch (ID) {
1882 default:
1883 break;
1884 case Intrinsic::bswap:
1885 return TargetOpcode::G_BSWAP;
1886 case Intrinsic::bitreverse:
1887 return TargetOpcode::G_BITREVERSE;
1888 case Intrinsic::fshl:
1889 return TargetOpcode::G_FSHL;
1890 case Intrinsic::fshr:
1891 return TargetOpcode::G_FSHR;
1892 case Intrinsic::ceil:
1893 return TargetOpcode::G_FCEIL;
1894 case Intrinsic::cos:
1895 return TargetOpcode::G_FCOS;
1896 case Intrinsic::ctpop:
1897 return TargetOpcode::G_CTPOP;
1898 case Intrinsic::exp:
1899 return TargetOpcode::G_FEXP;
1900 case Intrinsic::exp2:
1901 return TargetOpcode::G_FEXP2;
1902 case Intrinsic::exp10:
1903 return TargetOpcode::G_FEXP10;
1904 case Intrinsic::fabs:
1905 return TargetOpcode::G_FABS;
1906 case Intrinsic::copysign:
1907 return TargetOpcode::G_FCOPYSIGN;
1908 case Intrinsic::minnum:
1909 return TargetOpcode::G_FMINNUM;
1910 case Intrinsic::maxnum:
1911 return TargetOpcode::G_FMAXNUM;
1912 case Intrinsic::minimum:
1913 return TargetOpcode::G_FMINIMUM;
1914 case Intrinsic::maximum:
1915 return TargetOpcode::G_FMAXIMUM;
1916 case Intrinsic::canonicalize:
1917 return TargetOpcode::G_FCANONICALIZE;
1918 case Intrinsic::floor:
1919 return TargetOpcode::G_FFLOOR;
1920 case Intrinsic::fma:
1921 return TargetOpcode::G_FMA;
1922 case Intrinsic::log:
1923 return TargetOpcode::G_FLOG;
1924 case Intrinsic::log2:
1925 return TargetOpcode::G_FLOG2;
1926 case Intrinsic::log10:
1927 return TargetOpcode::G_FLOG10;
1928 case Intrinsic::ldexp:
1929 return TargetOpcode::G_FLDEXP;
1930 case Intrinsic::nearbyint:
1931 return TargetOpcode::G_FNEARBYINT;
1932 case Intrinsic::pow:
1933 return TargetOpcode::G_FPOW;
1934 case Intrinsic::powi:
1935 return TargetOpcode::G_FPOWI;
1936 case Intrinsic::rint:
1937 return TargetOpcode::G_FRINT;
1938 case Intrinsic::round:
1939 return TargetOpcode::G_INTRINSIC_ROUND;
1940 case Intrinsic::roundeven:
1941 return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
1942 case Intrinsic::sin:
1943 return TargetOpcode::G_FSIN;
1944 case Intrinsic::sqrt:
1945 return TargetOpcode::G_FSQRT;
1946 case Intrinsic::tan:
1947 return TargetOpcode::G_FTAN;
1948 case Intrinsic::trunc:
1949 return TargetOpcode::G_INTRINSIC_TRUNC;
1950 case Intrinsic::readcyclecounter:
1951 return TargetOpcode::G_READCYCLECOUNTER;
1952 case Intrinsic::readsteadycounter:
1953 return TargetOpcode::G_READSTEADYCOUNTER;
1954 case Intrinsic::ptrmask:
1955 return TargetOpcode::G_PTRMASK;
1956 case Intrinsic::lrint:
1957 return TargetOpcode::G_INTRINSIC_LRINT;
1958 case Intrinsic::llrint:
1959 return TargetOpcode::G_INTRINSIC_LLRINT;
1960 // FADD/FMUL require checking the FMF, so are handled elsewhere.
1961 case Intrinsic::vector_reduce_fmin:
1962 return TargetOpcode::G_VECREDUCE_FMIN;
1963 case Intrinsic::vector_reduce_fmax:
1964 return TargetOpcode::G_VECREDUCE_FMAX;
1965 case Intrinsic::vector_reduce_fminimum:
1966 return TargetOpcode::G_VECREDUCE_FMINIMUM;
1967 case Intrinsic::vector_reduce_fmaximum:
1968 return TargetOpcode::G_VECREDUCE_FMAXIMUM;
1969 case Intrinsic::vector_reduce_add:
1970 return TargetOpcode::G_VECREDUCE_ADD;
1971 case Intrinsic::vector_reduce_mul:
1972 return TargetOpcode::G_VECREDUCE_MUL;
1973 case Intrinsic::vector_reduce_and:
1974 return TargetOpcode::G_VECREDUCE_AND;
1975 case Intrinsic::vector_reduce_or:
1976 return TargetOpcode::G_VECREDUCE_OR;
1977 case Intrinsic::vector_reduce_xor:
1978 return TargetOpcode::G_VECREDUCE_XOR;
1979 case Intrinsic::vector_reduce_smax:
1980 return TargetOpcode::G_VECREDUCE_SMAX;
1981 case Intrinsic::vector_reduce_smin:
1982 return TargetOpcode::G_VECREDUCE_SMIN;
1983 case Intrinsic::vector_reduce_umax:
1984 return TargetOpcode::G_VECREDUCE_UMAX;
1985 case Intrinsic::vector_reduce_umin:
1986 return TargetOpcode::G_VECREDUCE_UMIN;
1987 case Intrinsic::lround:
1988 return TargetOpcode::G_LROUND;
1989 case Intrinsic::llround:
1990 return TargetOpcode::G_LLROUND;
1991 case Intrinsic::get_fpenv:
1992 return TargetOpcode::G_GET_FPENV;
1993 case Intrinsic::get_fpmode:
1994 return TargetOpcode::G_GET_FPMODE;
1995 }
1996 return Intrinsic::not_intrinsic;
1997}
1998
1999 bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
2000 Intrinsic::ID ID,
2001 MachineIRBuilder &MIRBuilder) {
2002
2003 unsigned Op = getSimpleIntrinsicOpcode(ID);
2004
2005 // Is this a simple intrinsic?
2006 if (Op == Intrinsic::not_intrinsic)
2007 return false;
2008
2009 // Yes. Let's translate it.
2010 SmallVector<llvm::SrcOp, 4> VRegs;
2011 for (const auto &Arg : CI.args())
2012 VRegs.push_back(getOrCreateVReg(*Arg));
2013
2014 MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
2015 MachineInstr::copyFlagsFromInstruction(CI));
2016 return true;
2017}
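// Illustrative example (a sketch): because llvm.sqrt maps to G_FSQRT in the
// table above, %y = call nnan double @llvm.sqrt.f64(double %x) becomes roughly
// %y:_(s64) = nnan G_FSQRT %x, with the call's fast-math flags copied over.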
2018
2019 // TODO: Include ConstrainedOps.def when all strict instructions are defined.
2020 static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
2021 switch (ID) {
2022 case Intrinsic::experimental_constrained_fadd:
2023 return TargetOpcode::G_STRICT_FADD;
2024 case Intrinsic::experimental_constrained_fsub:
2025 return TargetOpcode::G_STRICT_FSUB;
2026 case Intrinsic::experimental_constrained_fmul:
2027 return TargetOpcode::G_STRICT_FMUL;
2028 case Intrinsic::experimental_constrained_fdiv:
2029 return TargetOpcode::G_STRICT_FDIV;
2030 case Intrinsic::experimental_constrained_frem:
2031 return TargetOpcode::G_STRICT_FREM;
2032 case Intrinsic::experimental_constrained_fma:
2033 return TargetOpcode::G_STRICT_FMA;
2034 case Intrinsic::experimental_constrained_sqrt:
2035 return TargetOpcode::G_STRICT_FSQRT;
2036 case Intrinsic::experimental_constrained_ldexp:
2037 return TargetOpcode::G_STRICT_FLDEXP;
2038 default:
2039 return 0;
2040 }
2041}
2042
2043bool IRTranslator::translateConstrainedFPIntrinsic(
2044 const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
2045 fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();
2046
2047 unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
2048 if (!Opcode)
2049 return false;
2050
2051 uint32_t Flags = MachineInstr::copyFlagsFromInstruction(FPI);
2052 if (EB == fp::ExceptionBehavior::ebIgnore)
2053 Flags |= MachineInstr::NoFPExcept;
2054
2055 SmallVector<llvm::SrcOp, 4> VRegs;
2056 for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I)
2057 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(I)));
2058
2059 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
2060 return true;
2061}
2062
2063std::optional<MCRegister> IRTranslator::getArgPhysReg(Argument &Arg) {
2064 auto VRegs = getOrCreateVRegs(Arg);
2065 if (VRegs.size() != 1)
2066 return std::nullopt;
2067
2068 // Arguments are lowered as a copy of a livein physical register.
2069 auto *VRegDef = MF->getRegInfo().getVRegDef(VRegs[0]);
2070 if (!VRegDef || !VRegDef->isCopy())
2071 return std::nullopt;
2072 return VRegDef->getOperand(1).getReg().asMCReg();
2073}
2074
2075bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val,
2076 const DILocalVariable *Var,
2077 const DIExpression *Expr,
2078 const DebugLoc &DL,
2079 MachineIRBuilder &MIRBuilder) {
2080 auto *Arg = dyn_cast<Argument>(Val);
2081 if (!Arg)
2082 return false;
2083
2084 if (!Expr->isEntryValue())
2085 return false;
2086
2087 std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
2088 if (!PhysReg) {
2089 LLVM_DEBUG(dbgs() << "Dropping dbg." << (isDeclare ? "declare" : "value")
2090 << ": expression is entry_value but "
2091 << "couldn't find a physical register\n");
2092 LLVM_DEBUG(dbgs() << *Var << "\n");
2093 return true;
2094 }
2095
2096 if (isDeclare) {
2097 // Append an op deref to account for the fact that this is a dbg_declare.
2098 Expr = DIExpression::append(Expr, dwarf::DW_OP_deref);
2099 MF->setVariableDbgInfo(Var, Expr, *PhysReg, DL);
2100 } else {
2101 MIRBuilder.buildDirectDbgValue(*PhysReg, Var, Expr);
2102 }
2103
2104 return true;
2105}
2106
2107 static unsigned getConvergenceEntryOpcode(Intrinsic::ID ID) {
2108 switch (ID) {
2109 default:
2110 llvm_unreachable("Unexpected intrinsic");
2111 case Intrinsic::experimental_convergence_anchor:
2112 return TargetOpcode::CONVERGENCECTRL_ANCHOR;
2113 case Intrinsic::experimental_convergence_entry:
2114 return TargetOpcode::CONVERGENCECTRL_ENTRY;
2115 case Intrinsic::experimental_convergence_loop:
2116 return TargetOpcode::CONVERGENCECTRL_LOOP;
2117 }
2118}
2119
2120bool IRTranslator::translateConvergenceControlIntrinsic(
2121 const CallInst &CI, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder) {
2122 MachineInstrBuilder MIB = MIRBuilder.buildInstr(getConvergenceEntryOpcode(ID));
2123 Register OutputReg = getOrCreateConvergenceTokenVReg(CI);
2124 MIB.addDef(OutputReg);
2125
2126 if (ID == Intrinsic::experimental_convergence_loop) {
2127 auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl);
2128 assert(Bundle && "Expected a convergence control token.");
2129 Register InputReg =
2130 getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());
2131 MIB.addUse(InputReg);
2132 }
2133
2134 return true;
2135}
2136
2137bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
2138 MachineIRBuilder &MIRBuilder) {
2139 if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
2140 if (ORE->enabled()) {
2141 if (MemoryOpRemark::canHandle(MI, *LibInfo)) {
2142 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
2143 R.visit(MI);
2144 }
2145 }
2146 }
2147
2148 // If this is a simple intrinsic (that is, we just need to add a def of
2149 // a vreg, and uses for each arg operand), then translate it.
2150 if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
2151 return true;
2152
2153 switch (ID) {
2154 default:
2155 break;
2156 case Intrinsic::lifetime_start:
2157 case Intrinsic::lifetime_end: {
2158 // No stack colouring in O0, discard region information.
2159 if (MF->getTarget().getOptLevel() == CodeGenOptLevel::None)
2160 return true;
2161
2162 unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
2163 : TargetOpcode::LIFETIME_END;
2164
2165 // Get the underlying objects for the location passed on the lifetime
2166 // marker.
2167 SmallVector<const Value *, 4> Allocas;
2168 getUnderlyingObjects(CI.getArgOperand(1), Allocas);
2169
2170 // Iterate over each underlying object, creating lifetime markers for each
2171 // static alloca. Quit if we find a non-static alloca.
2172 for (const Value *V : Allocas) {
2173 const AllocaInst *AI = dyn_cast<AllocaInst>(V);
2174 if (!AI)
2175 continue;
2176
2177 if (!AI->isStaticAlloca())
2178 return true;
2179
2180 MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
2181 }
2182 return true;
2183 }
2184 case Intrinsic::dbg_declare: {
2185 const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
2186 assert(DI.getVariable() && "Missing variable");
2187 translateDbgDeclareRecord(DI.getAddress(), DI.hasArgList(), DI.getVariable(),
2188 DI.getExpression(), DI.getDebugLoc(), MIRBuilder);
2189 return true;
2190 }
2191 case Intrinsic::dbg_label: {
2192 const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
2193 assert(DI.getLabel() && "Missing label");
2194
2195 assert(DI.getLabel()->isValidLocationForIntrinsic(
2196 MIRBuilder.getDebugLoc()) &&
2197 "Expected inlined-at fields to agree");
2198
2199 MIRBuilder.buildDbgLabel(DI.getLabel());
2200 return true;
2201 }
2202 case Intrinsic::vaend:
2203 // No target I know of cares about va_end. Certainly no in-tree target
2204 // does. Simplest intrinsic ever!
2205 return true;
2206 case Intrinsic::vastart: {
2207 Value *Ptr = CI.getArgOperand(0);
2208 unsigned ListSize = TLI->getVaListSizeInBits(*DL) / 8;
2209 Align Alignment = getKnownAlignment(Ptr, *DL);
2210
2211 MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
2212 .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
2213 MachineMemOperand::MOStore,
2214 ListSize, Alignment));
2215 return true;
2216 }
2217 case Intrinsic::dbg_assign:
2218 // A dbg.assign is a dbg.value with more information about stack locations,
2219 // typically produced during optimisation of variables with leaked
2220 // addresses. We can treat it like a normal dbg_value intrinsic here; to
2221 // benefit from the full analysis of stack/SSA locations, GlobalISel would
2222 // need to register for and use the AssignmentTrackingAnalysis pass.
2223 [[fallthrough]];
2224 case Intrinsic::dbg_value: {
2225 // This form of DBG_VALUE is target-independent.
2226 const DbgValueInst &DI = cast<DbgValueInst>(CI);
2227 translateDbgValueRecord(DI.getValue(), DI.hasArgList(), DI.getVariable(),
2228 DI.getExpression(), DI.getDebugLoc(), MIRBuilder);
2229 return true;
2230 }
2231 case Intrinsic::uadd_with_overflow:
2232 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
2233 case Intrinsic::sadd_with_overflow:
2234 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
2235 case Intrinsic::usub_with_overflow:
2236 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
2237 case Intrinsic::ssub_with_overflow:
2238 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
2239 case Intrinsic::umul_with_overflow:
2240 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
2241 case Intrinsic::smul_with_overflow:
2242 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
2243 case Intrinsic::uadd_sat:
2244 return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
2245 case Intrinsic::sadd_sat:
2246 return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
2247 case Intrinsic::usub_sat:
2248 return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
2249 case Intrinsic::ssub_sat:
2250 return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
2251 case Intrinsic::ushl_sat:
2252 return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
2253 case Intrinsic::sshl_sat:
2254 return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
2255 case Intrinsic::umin:
2256 return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
2257 case Intrinsic::umax:
2258 return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
2259 case Intrinsic::smin:
2260 return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
2261 case Intrinsic::smax:
2262 return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
2263 case Intrinsic::abs:
2264 // TODO: Preserve "int min is poison" arg in GMIR?
2265 return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
2266 case Intrinsic::smul_fix:
2267 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
2268 case Intrinsic::umul_fix:
2269 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
2270 case Intrinsic::smul_fix_sat:
2271 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
2272 case Intrinsic::umul_fix_sat:
2273 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
2274 case Intrinsic::sdiv_fix:
2275 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
2276 case Intrinsic::udiv_fix:
2277 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
2278 case Intrinsic::sdiv_fix_sat:
2279 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
2280 case Intrinsic::udiv_fix_sat:
2281 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
2282 case Intrinsic::fmuladd: {
2283 const TargetMachine &TM = MF->getTarget();
2284 Register Dst = getOrCreateVReg(CI);
2285 Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
2286 Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
2287 Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
2288 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
2289 TLI->isFMAFasterThanFMulAndFAdd(*MF,
2290 TLI->getValueType(*DL, CI.getType()))) {
2291 // TODO: Revisit this to see if we should move this part of the
2292 // lowering to the combiner.
2293 MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
2294 MachineInstr::copyFlagsFromInstruction(CI));
2295 } else {
2296 LLT Ty = getLLTForType(*CI.getType(), *DL);
2297 auto FMul = MIRBuilder.buildFMul(
2298 Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
2299 MIRBuilder.buildFAdd(Dst, FMul, Op2,
2300 MachineInstr::copyFlagsFromInstruction(CI));
2301 }
2302 return true;
2303 }
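  // Illustrative note: the fmuladd case above therefore emits a single G_FMA
  // when fp-contraction is permitted and the target reports FMA as fast, and
  // an unfused G_FMUL followed by G_FADD otherwise.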
2304 case Intrinsic::convert_from_fp16:
2305 // FIXME: This intrinsic should probably be removed from the IR.
2306 MIRBuilder.buildFPExt(getOrCreateVReg(CI),
2307 getOrCreateVReg(*CI.getArgOperand(0)),
2308 MachineInstr::copyFlagsFromInstruction(CI));
2309 return true;
2310 case Intrinsic::convert_to_fp16:
2311 // FIXME: This intrinsic should probably be removed from the IR.
2312 MIRBuilder.buildFPTrunc(getOrCreateVReg(CI),
2313 getOrCreateVReg(*CI.getArgOperand(0)),
2314 MachineInstr::copyFlagsFromInstruction(CI));
2315 return true;
2316 case Intrinsic::frexp: {
2317 ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
2318 MIRBuilder.buildFFrexp(VRegs[0], VRegs[1],
2319 getOrCreateVReg(*CI.getArgOperand(0)),
2320 MachineInstr::copyFlagsFromInstruction(CI));
2321 return true;
2322 }
2323 case Intrinsic::memcpy_inline:
2324 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
2325 case Intrinsic::memcpy:
2326 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
2327 case Intrinsic::memmove:
2328 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
2329 case Intrinsic::memset:
2330 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
2331 case Intrinsic::eh_typeid_for: {
2332 GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
2333 Register Reg = getOrCreateVReg(CI);
2334 unsigned TypeID = MF->getTypeIDFor(GV);
2335 MIRBuilder.buildConstant(Reg, TypeID);
2336 return true;
2337 }
2338 case Intrinsic::objectsize:
2339 llvm_unreachable("llvm.objectsize.* should have been lowered already");
2340
2341 case Intrinsic::is_constant:
2342 llvm_unreachable("llvm.is.constant.* should have been lowered already");
2343
2344 case Intrinsic::stackguard:
2345 getStackGuard(getOrCreateVReg(CI), MIRBuilder);
2346 return true;
2347 case Intrinsic::stackprotector: {
2348 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2349 Register GuardVal;
2350 if (TLI->useLoadStackGuardNode()) {
2351 GuardVal = MRI->createGenericVirtualRegister(PtrTy);
2352 getStackGuard(GuardVal, MIRBuilder);
2353 } else
2354 GuardVal = getOrCreateVReg(*CI.getArgOperand(0)); // The guard's value.
2355
2356 AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
2357 int FI = getOrCreateFrameIndex(*Slot);
2358 MF->getFrameInfo().setStackProtectorIndex(FI);
2359
2360 MIRBuilder.buildStore(
2361 GuardVal, getOrCreateVReg(*Slot),
2362 *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
2363 MachineMemOperand::MOStore |
2364 MachineMemOperand::MOVolatile,
2365 PtrTy, Align(8)));
2366 return true;
2367 }
2368 case Intrinsic::stacksave: {
2369 MIRBuilder.buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});
2370 return true;
2371 }
2372 case Intrinsic::stackrestore: {
2373 MIRBuilder.buildInstr(TargetOpcode::G_STACKRESTORE, {},
2374 {getOrCreateVReg(*CI.getArgOperand(0))});
2375 return true;
2376 }
2377 case Intrinsic::cttz:
2378 case Intrinsic::ctlz: {
2379 ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
2380 bool isTrailing = ID == Intrinsic::cttz;
2381 unsigned Opcode = isTrailing
2382 ? Cst->isZero() ? TargetOpcode::G_CTTZ
2383 : TargetOpcode::G_CTTZ_ZERO_UNDEF
2384 : Cst->isZero() ? TargetOpcode::G_CTLZ
2385 : TargetOpcode::G_CTLZ_ZERO_UNDEF;
2386 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
2387 {getOrCreateVReg(*CI.getArgOperand(0))});
2388 return true;
2389 }
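  // Illustrative note: the i1 operand above selects the zero-is-poison form,
  // e.g. llvm.cttz(%x, i1 true) becomes G_CTTZ_ZERO_UNDEF while
  // llvm.cttz(%x, i1 false) becomes G_CTTZ.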
2390 case Intrinsic::invariant_start: {
2391 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2392 Register Undef = MRI->createGenericVirtualRegister(PtrTy);
2393 MIRBuilder.buildUndef(Undef);
2394 return true;
2395 }
2396 case Intrinsic::invariant_end:
2397 return true;
2398 case Intrinsic::expect:
2399 case Intrinsic::annotation:
2400 case Intrinsic::ptr_annotation:
2401 case Intrinsic::launder_invariant_group:
2402 case Intrinsic::strip_invariant_group: {
2403 // Drop the intrinsic, but forward the value.
2404 MIRBuilder.buildCopy(getOrCreateVReg(CI),
2405 getOrCreateVReg(*CI.getArgOperand(0)));
2406 return true;
2407 }
2408 case Intrinsic::assume:
2409 case Intrinsic::experimental_noalias_scope_decl:
2410 case Intrinsic::var_annotation:
2411 case Intrinsic::sideeffect:
2412 // Discard annotate attributes, assumptions, and artificial side-effects.
2413 return true;
2414 case Intrinsic::read_volatile_register:
2415 case Intrinsic::read_register: {
2416 Value *Arg = CI.getArgOperand(0);
2417 MIRBuilder
2418 .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
2419 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
2420 return true;
2421 }
2422 case Intrinsic::write_register: {
2423 Value *Arg = CI.getArgOperand(0);
2424 MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
2425 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
2426 .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
2427 return true;
2428 }
2429 case Intrinsic::localescape: {
2430 MachineBasicBlock &EntryMBB = MF->front();
2431 StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName());
2432
2433 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
2434 // is the same on all targets.
2435 for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
2436 Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts();
2437 if (isa<ConstantPointerNull>(Arg))
2438 continue; // Skip null pointers. They represent a hole in index space.
2439
2440 int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
2441 MCSymbol *FrameAllocSym =
2442 MF->getMMI().getContext().getOrCreateFrameAllocSymbol(EscapedName,
2443 Idx);
2444
2445 // This should be inserted at the start of the entry block.
2446 auto LocalEscape =
2447 MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE)
2448 .addSym(FrameAllocSym)
2449 .addFrameIndex(FI);
2450
2451 EntryMBB.insert(EntryMBB.begin(), LocalEscape);
2452 }
2453
2454 return true;
2455 }
2456 case Intrinsic::vector_reduce_fadd:
2457 case Intrinsic::vector_reduce_fmul: {
2458 // Need to check for the reassoc flag to decide whether we want a
2459 // sequential reduction opcode or not.
2460 Register Dst = getOrCreateVReg(CI);
2461 Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0));
2462 Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1));
2463 unsigned Opc = 0;
2464 if (!CI.hasAllowReassoc()) {
2465 // The sequential ordering case.
2466 Opc = ID == Intrinsic::vector_reduce_fadd
2467 ? TargetOpcode::G_VECREDUCE_SEQ_FADD
2468 : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
2469 MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
2470 MachineInstr::copyFlagsFromInstruction(CI));
2471 return true;
2472 }
2473 // We split the operation into a separate G_FADD/G_FMUL + the reduce,
2474 // since the associativity doesn't matter.
2475 unsigned ScalarOpc;
2476 if (ID == Intrinsic::vector_reduce_fadd) {
2477 Opc = TargetOpcode::G_VECREDUCE_FADD;
2478 ScalarOpc = TargetOpcode::G_FADD;
2479 } else {
2480 Opc = TargetOpcode::G_VECREDUCE_FMUL;
2481 ScalarOpc = TargetOpcode::G_FMUL;
2482 }
2483 LLT DstTy = MRI->getType(Dst);
2484 auto Rdx = MIRBuilder.buildInstr(
2485 Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI));
2486 MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
2487 MachineInstr::copyFlagsFromInstruction(CI));
2488
2489 return true;
2490 }
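  // Illustrative note: with reassoc,
  //   call reassoc float @llvm.vector.reduce.fadd(float %acc, <4 x float> %v)
  // becomes G_VECREDUCE_FADD of %v followed by a G_FADD with %acc; without
  // reassoc it stays a single ordered G_VECREDUCE_SEQ_FADD %acc, %v.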
2491 case Intrinsic::trap:
2492 return translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);
2493 case Intrinsic::debugtrap:
2494 return translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);
2495 case Intrinsic::ubsantrap:
2496 return translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);
2497 case Intrinsic::allow_runtime_check:
2498 case Intrinsic::allow_ubsan_check:
2499 MIRBuilder.buildCopy(getOrCreateVReg(CI),
2500 getOrCreateVReg(*ConstantInt::getTrue(CI.getType())));
2501 return true;
2502 case Intrinsic::amdgcn_cs_chain:
2503 return translateCallBase(CI, MIRBuilder);
2504 case Intrinsic::fptrunc_round: {
2505 uint32_t Flags = MachineInstr::copyFlagsFromInstruction(CI);
2506
2507 // Convert the metadata argument to a constant integer
2508 Metadata *MD = cast<MetadataAsValue>(CI.getArgOperand(1))->getMetadata();
2509 std::optional<RoundingMode> RoundMode =
2510 convertStrToRoundingMode(cast<MDString>(MD)->getString());
2511
2512 // Add the Rounding mode as an integer
2513 MIRBuilder
2514 .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
2515 {getOrCreateVReg(CI)},
2516 {getOrCreateVReg(*CI.getArgOperand(0))}, Flags)
2517 .addImm((int)*RoundMode);
2518
2519 return true;
2520 }
2521 case Intrinsic::is_fpclass: {
2522 Value *FpValue = CI.getOperand(0);
2523 ConstantInt *TestMaskValue = cast<ConstantInt>(CI.getOperand(1));
2524
2525 MIRBuilder
2526 .buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
2527 {getOrCreateVReg(*FpValue)})
2528 .addImm(TestMaskValue->getZExtValue());
2529
2530 return true;
2531 }
2532 case Intrinsic::set_fpenv: {
2533 Value *FPEnv = CI.getOperand(0);
2534 MIRBuilder.buildInstr(TargetOpcode::G_SET_FPENV, {},
2535 {getOrCreateVReg(*FPEnv)});
2536 return true;
2537 }
2538 case Intrinsic::reset_fpenv: {
2539 MIRBuilder.buildInstr(TargetOpcode::G_RESET_FPENV, {}, {});
2540 return true;
2541 }
2542 case Intrinsic::set_fpmode: {
2543 Value *FPState = CI.getOperand(0);
2544 MIRBuilder.buildInstr(TargetOpcode::G_SET_FPMODE, {},
2545 { getOrCreateVReg(*FPState) });
2546 return true;
2547 }
2548 case Intrinsic::reset_fpmode: {
2549 MIRBuilder.buildInstr(TargetOpcode::G_RESET_FPMODE, {}, {});
2550 return true;
2551 }
2552 case Intrinsic::vscale: {
2553 MIRBuilder.buildVScale(getOrCreateVReg(CI), 1);
2554 return true;
2555 }
2556 case Intrinsic::prefetch: {
2557 Value *Addr = CI.getOperand(0);
2558 unsigned RW = cast<ConstantInt>(CI.getOperand(1))->getZExtValue();
2559 unsigned Locality = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
2560 unsigned CacheType = cast<ConstantInt>(CI.getOperand(3))->getZExtValue();
2561
2562 auto Flags = RW ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
2563 auto &MMO = *MF->getMachineMemOperand(MachinePointerInfo(Addr), Flags,
2564 LLT(), Align());
2565
2566 MIRBuilder.buildPrefetch(getOrCreateVReg(*Addr), RW, Locality, CacheType,
2567 MMO);
2568
2569 return true;
2570 }
2571
2572 case Intrinsic::vector_interleave2:
2573 case Intrinsic::vector_deinterleave2: {
2574 // Both intrinsics have at least one operand.
2575 Value *Op0 = CI.getOperand(0);
2576 LLT ResTy = getLLTForType(*Op0->getType(), MIRBuilder.getDataLayout());
2577 if (!ResTy.isFixedVector())
2578 return false;
2579
2580 if (CI.getIntrinsicID() == Intrinsic::vector_interleave2)
2581 return translateVectorInterleave2Intrinsic(CI, MIRBuilder);
2582
2583 return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);
2584 }
2585
2586#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
2587 case Intrinsic::INTRINSIC:
2588#include "llvm/IR/ConstrainedOps.def"
2589 return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
2590 MIRBuilder);
2591 case Intrinsic::experimental_convergence_anchor:
2592 case Intrinsic::experimental_convergence_entry:
2593 case Intrinsic::experimental_convergence_loop:
2594 return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder);
2595 }
2596 return false;
2597}
2598
2599bool IRTranslator::translateInlineAsm(const CallBase &CB,
2600 MachineIRBuilder &MIRBuilder) {
2601
2602 const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
2603
2604 if (!ALI) {
2605 LLVM_DEBUG(
2606 dbgs() << "Inline asm lowering is not supported for this target yet\n");
2607 return false;
2608 }
2609
2610 return ALI->lowerInlineAsm(
2611 MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
2612}
2613
2614bool IRTranslator::translateCallBase(const CallBase &CB,
2615 MachineIRBuilder &MIRBuilder) {
2616 ArrayRef<Register> Res = getOrCreateVRegs(CB);
2617
2618 SmallVector<ArrayRef<Register>, 8> Args;
2619 Register SwiftInVReg = 0;
2620 Register SwiftErrorVReg = 0;
2621 for (const auto &Arg : CB.args()) {
2622 if (CLI->supportSwiftError() && isSwiftError(Arg)) {
2623 assert(SwiftInVReg == 0 && "Expected only one swift error argument");
2624 LLT Ty = getLLTForType(*Arg->getType(), *DL);
2625 SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
2626 MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
2627 &CB, &MIRBuilder.getMBB(), Arg));
2628 Args.emplace_back(ArrayRef(SwiftInVReg));
2629 SwiftErrorVReg =
2630 SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
2631 continue;
2632 }
2633 Args.push_back(getOrCreateVRegs(*Arg));
2634 }
2635
2636 if (auto *CI = dyn_cast<CallInst>(&CB)) {
2637 if (ORE->enabled()) {
2638 if (MemoryOpRemark::canHandle(CI, *LibInfo)) {
2639 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
2640 R.visit(CI);
2641 }
2642 }
2643 }
2644
2645 std::optional<CallLowering::PtrAuthInfo> PAI;
2646 if (CB.countOperandBundlesOfType(LLVMContext::OB_ptrauth)) {
2647 // Functions should never be ptrauth-called directly.
2648 assert(!CB.getCalledFunction() && "invalid direct ptrauth call");
2649
2650 auto PAB = CB.getOperandBundle("ptrauth");
2651 const Value *Key = PAB->Inputs[0];
2652 const Value *Discriminator = PAB->Inputs[1];
2653
2654 Register DiscReg = getOrCreateVReg(*Discriminator);
2655 PAI = CallLowering::PtrAuthInfo{cast<ConstantInt>(Key)->getZExtValue(),
2656 DiscReg};
2657 }
2658
2659 Register ConvergenceCtrlToken = 0;
2660 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
2661 const auto &Token = *Bundle->Inputs[0].get();
2662 ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);
2663 }
2664
2665 // We don't set HasCalls on MFI here yet because call lowering may decide to
2666 // optimize into tail calls. Instead, we defer that to selection where a final
2667 // scan is done to check if any instructions are calls.
2668 bool Success = CLI->lowerCall(
2669 MIRBuilder, CB, Res, Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,
2670 [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
2671
2672 // Check if we just inserted a tail call.
2673 if (Success) {
2674 assert(!HasTailCall && "Can't tail call return twice from block?");
2675 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
2676 HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
2677 }
2678
2679 return Success;
2680}
2681
2682bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
2683 const CallInst &CI = cast<CallInst>(U);
2684 auto TII = MF->getTarget().getIntrinsicInfo();
2685 const Function *F = CI.getCalledFunction();
2686
2687 // FIXME: support Windows dllimport function calls and calls through
2688 // weak symbols.
2689 if (F && (F->hasDLLImportStorageClass() ||
2690 (MF->getTarget().getTargetTriple().isOSWindows() &&
2691 F->hasExternalWeakLinkage())))
2692 return false;
2693
2694 // FIXME: support control flow guard targets.
2695 if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2696 return false;
2697
2698 // FIXME: support statepoints and related.
2699 if (isa<GCStatepointInst, GCRelocateInst, GCResultInst>(U))
2700 return false;
2701
2702 if (CI.isInlineAsm())
2703 return translateInlineAsm(CI, MIRBuilder);
2704
2705 diagnoseDontCall(CI);
2706
2707 Intrinsic::ID ID = Intrinsic::not_intrinsic;
2708 if (F && F->isIntrinsic()) {
2709 ID = F->getIntrinsicID();
2710 if (TII && ID == Intrinsic::not_intrinsic)
2711 ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
2712 }
2713
2714 if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
2715 return translateCallBase(CI, MIRBuilder);
2716
2717 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
2718
2719 if (translateKnownIntrinsic(CI, ID, MIRBuilder))
2720 return true;
2721
2722 ArrayRef<Register> ResultRegs;
2723 if (!CI.getType()->isVoidTy())
2724 ResultRegs = getOrCreateVRegs(CI);
2725
2726 // Ignore the callsite attributes. Backend code is most likely not expecting
2727 // an intrinsic to sometimes have side effects and sometimes not.
2728 MachineInstrBuilder MIB = MIRBuilder.buildIntrinsic(ID, ResultRegs);
2729 if (isa<FPMathOperator>(CI))
2730 MIB->copyIRFlags(CI);
2731
2732 for (const auto &Arg : enumerate(CI.args())) {
2733 // If this is required to be an immediate, don't materialize it in a
2734 // register.
2735 if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
2736 if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
2737 // imm arguments are more convenient than cimm (and realistically
2738 // probably sufficient), so use them.
2739 assert(CI->getBitWidth() <= 64 &&
2740 "large intrinsic immediates not handled");
2741 MIB.addImm(CI->getSExtValue());
2742 } else {
2743 MIB.addFPImm(cast<ConstantFP>(Arg.value()));
2744 }
2745 } else if (auto *MDVal = dyn_cast<MetadataAsValue>(Arg.value())) {
2746 auto *MD = MDVal->getMetadata();
2747 auto *MDN = dyn_cast<MDNode>(MD);
2748 if (!MDN) {
2749 if (auto *ConstMD = dyn_cast<ConstantAsMetadata>(MD))
2750 MDN = MDNode::get(MF->getFunction().getContext(), ConstMD);
2751 else // This was probably an MDString.
2752 return false;
2753 }
2754 MIB.addMetadata(MDN);
2755 } else {
2756 ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
2757 if (VRegs.size() > 1)
2758 return false;
2759 MIB.addUse(VRegs[0]);
2760 }
2761 }
2762
2763 // Add a MachineMemOperand if it is a target mem intrinsic.
2764 TargetLowering::IntrinsicInfo Info;
2765 // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
2766 if (TLI->getTgtMemIntrinsic(Info, CI, *MF, ID)) {
2767 Align Alignment = Info.align.value_or(
2768 DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
2769 LLT MemTy = Info.memVT.isSimple()
2770 ? getLLTForMVT(Info.memVT.getSimpleVT())
2771 : LLT::scalar(Info.memVT.getStoreSizeInBits());
2772
2773 // TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic
2774 // didn't yield anything useful.
2775 MachinePointerInfo MPI;
2776 if (Info.ptrVal)
2777 MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
2778 else if (Info.fallbackAddressSpace)
2779 MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
2780 MIB.addMemOperand(
2781 MF->getMachineMemOperand(MPI, Info.flags, MemTy, Alignment, CI.getAAMetadata()));
2782 }
2783
2784 if (CI.isConvergent()) {
2785 if (auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl)) {
2786 auto *Token = Bundle->Inputs[0].get();
2787 Register TokenReg = getOrCreateVReg(*Token);
2788 MIB.addUse(TokenReg, RegState::Implicit);
2789 }
2790 }
2791
2792 return true;
2793}
2794
2795bool IRTranslator::findUnwindDestinations(
2796 const BasicBlock *EHPadBB,
2797 BranchProbability Prob,
2798 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2799 &UnwindDests) {
2800 EHPersonality Personality = classifyEHPersonality(
2801 EHPadBB->getParent()->getFunction().getPersonalityFn());
2802 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2803 bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2804 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2805 bool IsSEH = isAsynchronousEHPersonality(Personality);
2806
2807 if (IsWasmCXX) {
2808 // Ignore this for now.
2809 return false;
2810 }
2811
2812 while (EHPadBB) {
2813 const Instruction *Pad = EHPadBB->getFirstNonPHI();
2814 BasicBlock *NewEHPadBB = nullptr;
2815 if (isa<LandingPadInst>(Pad)) {
2816 // Stop on landingpads. They are not funclets.
2817 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2818 break;
2819 }
2820 if (isa<CleanupPadInst>(Pad)) {
2821 // Stop on cleanup pads. Cleanups are always funclet entries for all known
2822 // personalities.
2823 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2824 UnwindDests.back().first->setIsEHScopeEntry();
2825 UnwindDests.back().first->setIsEHFuncletEntry();
2826 break;
2827 }
2828 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2829 // Add the catchpad handlers to the possible destinations.
2830 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2831 UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
2832 // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2833 if (IsMSVCCXX || IsCoreCLR)
2834 UnwindDests.back().first->setIsEHFuncletEntry();
2835 if (!IsSEH)
2836 UnwindDests.back().first->setIsEHScopeEntry();
2837 }
2838 NewEHPadBB = CatchSwitch->getUnwindDest();
2839 } else {
2840 continue;
2841 }
2842
2843 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2844 if (BPI && NewEHPadBB)
2845 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2846 EHPadBB = NewEHPadBB;
2847 }
2848 return true;
2849}
2850
2851bool IRTranslator::translateInvoke(const User &U,
2852 MachineIRBuilder &MIRBuilder) {
2853 const InvokeInst &I = cast<InvokeInst>(U);
2854 MCContext &Context = MF->getContext();
2855
2856 const BasicBlock *ReturnBB = I.getSuccessor(0);
2857 const BasicBlock *EHPadBB = I.getSuccessor(1);
2858
2859 const Function *Fn = I.getCalledFunction();
2860
2861 // FIXME: support invoking patchpoint and statepoint intrinsics.
2862 if (Fn && Fn->isIntrinsic())
2863 return false;
2864
2865 // FIXME: support whatever these are.
2866 if (I.hasDeoptState())
2867 return false;
2868
2869 // FIXME: support control flow guard targets.
2870 if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2871 return false;
2872
2873 // FIXME: support Windows exception handling.
2874 if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI()))
2875 return false;
2876
2877 // FIXME: support Windows dllimport function calls and calls through
2878 // weak symbols.
2879 if (Fn && (Fn->hasDLLImportStorageClass() ||
2880 (MF->getTarget().getTargetTriple().isOSWindows() &&
2881 Fn->hasExternalWeakLinkage())))
2882 return false;
2883
2884 bool LowerInlineAsm = I.isInlineAsm();
2885 bool NeedEHLabel = true;
2886
2887 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
2888 // the region covered by the try.
2889 MCSymbol *BeginSymbol = nullptr;
2890 if (NeedEHLabel) {
2891 MIRBuilder.buildInstr(TargetOpcode::G_INVOKE_REGION_START);
2892 BeginSymbol = Context.createTempSymbol();
2893 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
2894 }
2895
2896 if (LowerInlineAsm) {
2897 if (!translateInlineAsm(I, MIRBuilder))
2898 return false;
2899 } else if (!translateCallBase(I, MIRBuilder))
2900 return false;
2901
2902 MCSymbol *EndSymbol = nullptr;
2903 if (NeedEHLabel) {
2904 EndSymbol = Context.createTempSymbol();
2905 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
2906 }
2907
2908 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2909 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2910 MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
2911 BranchProbability EHPadBBProb =
2912 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2913 : BranchProbability::getZero();
2914
2915 if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
2916 return false;
2917
2918 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
2919 &ReturnMBB = getMBB(*ReturnBB);
2920 // Update successor info.
2921 addSuccessorWithProb(InvokeMBB, &ReturnMBB);
2922 for (auto &UnwindDest : UnwindDests) {
2923 UnwindDest.first->setIsEHPad();
2924 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2925 }
2926 InvokeMBB->normalizeSuccProbs();
2927
2928 if (NeedEHLabel) {
2929 assert(BeginSymbol && "Expected a begin symbol!");
2930 assert(EndSymbol && "Expected an end symbol!");
2931 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
2932 }
2933
2934 MIRBuilder.buildBr(ReturnMBB);
2935 return true;
2936}
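// Illustrative note: a lowered invoke therefore ends up bracketed by EH_LABEL
// symbols around the call, gains the unwind destination(s) found above as
// extra MBB successors, and finishes with an unconditional G_BR to the
// normal-return block.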
2937
2938bool IRTranslator::translateCallBr(const User &U,
2939 MachineIRBuilder &MIRBuilder) {
2940 // FIXME: Implement this.
2941 return false;
2942}
2943
2944bool IRTranslator::translateLandingPad(const User &U,
2945 MachineIRBuilder &MIRBuilder) {
2946 const LandingPadInst &LP = cast<LandingPadInst>(U);
2947
2948 MachineBasicBlock &MBB = MIRBuilder.getMBB();
2949
2950 MBB.setIsEHPad();
2951
2952 // If there aren't registers to copy the values into (e.g., during SjLj
2953 // exceptions), then don't bother.
2954 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
2955 if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 &&
2956 TLI->getExceptionSelectorRegister(PersonalityFn) == 0)
2957 return true;
2958
2959 // If landingpad's return type is token type, we don't create DAG nodes
2960 // for its exception pointer and selector value. The extraction of exception
2961 // pointer or selector value from token type landingpads is not currently
2962 // supported.
2963 if (LP.getType()->isTokenTy())
2964 return true;
2965
2966 // Add a label to mark the beginning of the landing pad. Deletion of the
2967 // landing pad can thus be detected via the MachineModuleInfo.
2968 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
2969 .addSym(MF->addLandingPad(&MBB));
2970
2971 // If the unwinder does not preserve all registers, ensure that the
2972 // function marks the clobbered registers as used.
2973 const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
2974 if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
2975 MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
2976
2977 LLT Ty = getLLTForType(*LP.getType(), *DL);
2978 Register Undef = MRI->createGenericVirtualRegister(Ty);
2979 MIRBuilder.buildUndef(Undef);
2980
2981 SmallVector<LLT, 2> Tys;
2982 for (Type *Ty : cast<StructType>(LP.getType())->elements())
2983 Tys.push_back(getLLTForType(*Ty, *DL));
2984 assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
2985
2986 // Mark exception register as live in.
2987 Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn);
2988 if (!ExceptionReg)
2989 return false;
2990
2991 MBB.addLiveIn(ExceptionReg);
2992 ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
2993 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
2994
2995 Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn);
2996 if (!SelectorReg)
2997 return false;
2998
2999 MBB.addLiveIn(SelectorReg);
3000 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
3001 MIRBuilder.buildCopy(PtrVReg, SelectorReg);
3002 MIRBuilder.buildCast(ResRegs[1], PtrVReg);
3003
3004 return true;
3005}
3006
3007bool IRTranslator::translateAlloca(const User &U,
3008 MachineIRBuilder &MIRBuilder) {
3009 auto &AI = cast<AllocaInst>(U);
3010
3011 if (AI.isSwiftError())
3012 return true;
3013
3014 if (AI.isStaticAlloca()) {
3015 Register Res = getOrCreateVReg(AI);
3016 int FI = getOrCreateFrameIndex(AI);
3017 MIRBuilder.buildFrameIndex(Res, FI);
3018 return true;
3019 }
3020
3021 // FIXME: support stack probing for Windows.
3022 if (MF->getTarget().getTargetTriple().isOSWindows())
3023 return false;
3024
3025 // Now we're in the harder dynamic case.
3026 Register NumElts = getOrCreateVReg(*AI.getArraySize());
3027 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
3028 LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
3029 if (MRI->getType(NumElts) != IntPtrTy) {
3030 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
3031 MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
3032 NumElts = ExtElts;
3033 }
3034
3035 Type *Ty = AI.getAllocatedType();
3036
3037 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
3038 Register TySize =
3039 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
3040 MIRBuilder.buildMul(AllocSize, NumElts, TySize);
3041
3042 // Round the size of the allocation up to the stack alignment size
3043 // by adding SA-1 to the size. This doesn't overflow because we're computing
3044 // an address inside an alloca.
3045 Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
3046 auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
3047 auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
3048 MachineInstr::NoUWrap);
3049 auto AlignCst =
3050 MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
3051 auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
3052
3053 Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
3054 if (Alignment <= StackAlign)
3055 Alignment = Align(1);
3056 MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);
3057
3058 MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
3059 assert(MF->getFrameInfo().hasVarSizedObjects());
3060 return true;
3061}
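// Illustrative note: the dynamic path above computes
//   Size        = NumElts * sizeof(Ty)
//   AlignedSize = (Size + StackAlign - 1) & ~(StackAlign - 1)
// and feeds AlignedSize into G_DYN_STACKALLOC, so e.g. a 40-byte request with
// a 16-byte stack alignment is rounded up to 48 bytes.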
3062
3063bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
3064 // FIXME: We may need more info about the type. Because of how LLT works,
3065 // we're completely discarding the i64/double distinction here (amongst
3066 // others). Fortunately the ABIs I know of where that matters don't use va_arg
3067 // anyway but that's not guaranteed.
3068 MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
3069 {getOrCreateVReg(*U.getOperand(0)),
3070 DL->getABITypeAlign(U.getType()).value()});
3071 return true;
3072}
3073
3074 bool IRTranslator::translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder) {
3075 if (!MF->getTarget().Options.TrapUnreachable)
3076 return true;
3077
3078 auto &UI = cast<UnreachableInst>(U);
3079 // We may be able to ignore unreachable behind a noreturn call.
3080 if (MF->getTarget().Options.NoTrapAfterNoreturn) {
3081 const BasicBlock &BB = *UI.getParent();
3082 if (&UI != &BB.front()) {
3083 BasicBlock::const_iterator PredI =
3084 std::prev(BasicBlock::const_iterator(UI));
3085 if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
3086 if (Call->doesNotReturn())
3087 return true;
3088 }
3089 }
3090 }
3091
3092 MIRBuilder.buildTrap();
3093 return true;
3094}
3095
3096bool IRTranslator::translateInsertElement(const User &U,
3097 MachineIRBuilder &MIRBuilder) {
3098 // If it is a <1 x Ty> vector, use the scalar as it is
3099 // not a legal vector type in LLT.
3100 if (auto *FVT = dyn_cast<FixedVectorType>(U.getType());
3101 FVT && FVT->getNumElements() == 1)
3102 return translateCopy(U, *U.getOperand(1), MIRBuilder);
3103
3104 Register Res = getOrCreateVReg(U);
3105 Register Val = getOrCreateVReg(*U.getOperand(0));
3106 Register Elt = getOrCreateVReg(*U.getOperand(1));
3107 unsigned PreferredVecIdxWidth = TLI->getVectorIdxTy(*DL).getSizeInBits();
3108 Register Idx;
3109 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(2))) {
3110 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3111 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3112 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
3113 Idx = getOrCreateVReg(*NewIdxCI);
3114 }
3115 }
3116 if (!Idx)
3117 Idx = getOrCreateVReg(*U.getOperand(2));
3118 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
3119 const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
3120 Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
3121 }
3122 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
3123 return true;
3124}
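// Illustrative note: constant indices are normalized to the target's preferred
// vector-index width, so on a target where getVectorIdxTy() is i64 an i32
// index constant of 3 is rebuilt as an i64 constant before the
// G_INSERT_VECTOR_ELT is created; variable indices are zero-extended or
// truncated instead.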
3125
3126bool IRTranslator::translateExtractElement(const User &U,
3127 MachineIRBuilder &MIRBuilder) {
3128 // If it is a <1 x Ty> vector, use the scalar as it is
3129 // not a legal vector type in LLT.
3130 if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
3131 return translateCopy(U, *U.getOperand(0), MIRBuilder);
3132
3133 Register Res = getOrCreateVReg(U);
3134 Register Val = getOrCreateVReg(*U.getOperand(0));
3135 unsigned PreferredVecIdxWidth = TLI->getVectorIdxTy(*DL).getSizeInBits();
3136 Register Idx;
3137 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
3138 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3139 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3140 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
3141 Idx = getOrCreateVReg(*NewIdxCI);
3142 }
3143 }
3144 if (!Idx)
3145 Idx = getOrCreateVReg(*U.getOperand(1));
3146 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
3147 const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
3148 Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
3149 }
3150 MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
3151 return true;
3152}
3153
3154bool IRTranslator::translateShuffleVector(const User &U,
3155 MachineIRBuilder &MIRBuilder) {
3156 // A ShuffleVector that operates on scalable vectors is a splat vector
3157 // where the value of the splat vector is the 0th element of the first
3158 // operand, since the index mask operand is the zeroinitializer (undef and
3159 // poison are treated as zeroinitializer here).
3160 if (U.getOperand(0)->getType()->isScalableTy()) {
3161 Value *Op0 = U.getOperand(0);
3162 auto SplatVal = MIRBuilder.buildExtractVectorElementConstant(
3163 LLT::scalar(Op0->getType()->getScalarSizeInBits()),
3164 getOrCreateVReg(*Op0), 0);
3165 MIRBuilder.buildSplatVector(getOrCreateVReg(U), SplatVal);
3166 return true;
3167 }
3168
3169 ArrayRef<int> Mask;
3170 if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
3171 Mask = SVI->getShuffleMask();
3172 else
3173 Mask = cast<ConstantExpr>(U).getShuffleMask();
3174 ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
3175 MIRBuilder
3176 .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
3177 {getOrCreateVReg(*U.getOperand(0)),
3178 getOrCreateVReg(*U.getOperand(1))})
3179 .addShuffleMask(MaskAlloc);
3180 return true;
3181}
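// Illustrative example (a sketch): a scalable splat such as
//   shufflevector <vscale x 4 x i32> %v, <vscale x 4 x i32> poison,
//                 <vscale x 4 x i32> zeroinitializer
// takes the early path above: element 0 of %v is extracted and rebroadcast via
// G_SPLAT_VECTOR, while fixed-length shuffles keep their mask on a
// G_SHUFFLE_VECTOR.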
3182
3183bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
3184 const PHINode &PI = cast<PHINode>(U);
3185
3187 for (auto Reg : getOrCreateVRegs(PI)) {
3188 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
3189 Insts.push_back(MIB.getInstr());
3190 }
3191
3192 PendingPHIs.emplace_back(&PI, std::move(Insts));
3193 return true;
3194}
3195
3196bool IRTranslator::translateAtomicCmpXchg(const User &U,
3197 MachineIRBuilder &MIRBuilder) {
3198 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
3199
3200 auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
3201
3202 auto Res = getOrCreateVRegs(I);
3203 Register OldValRes = Res[0];
3204 Register SuccessRes = Res[1];
3205 Register Addr = getOrCreateVReg(*I.getPointerOperand());
3206 Register Cmp = getOrCreateVReg(*I.getCompareOperand());
3207 Register NewVal = getOrCreateVReg(*I.getNewValOperand());
3208
3209 MIRBuilder.buildAtomicCmpXchgWithSuccess(
3210 OldValRes, SuccessRes, Addr, Cmp, NewVal,
3211 *MF->getMachineMemOperand(
3212 MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
3213 getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
3214 I.getSuccessOrdering(), I.getFailureOrdering()));
3215 return true;
3216}
3217
3218bool IRTranslator::translateAtomicRMW(const User &U,
3219 MachineIRBuilder &MIRBuilder) {
3220 const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
3221 auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
3222
3223 Register Res = getOrCreateVReg(I);
3224 Register Addr = getOrCreateVReg(*I.getPointerOperand());
3225 Register Val = getOrCreateVReg(*I.getValOperand());
3226
3227 unsigned Opcode = 0;
3228 switch (I.getOperation()) {
3229 default:
3230 return false;
3231 case AtomicRMWInst::Xchg:
3232 Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
3233 break;
3234 case AtomicRMWInst::Add:
3235 Opcode = TargetOpcode::G_ATOMICRMW_ADD;
3236 break;
3237 case AtomicRMWInst::Sub:
3238 Opcode = TargetOpcode::G_ATOMICRMW_SUB;
3239 break;
3240 case AtomicRMWInst::And:
3241 Opcode = TargetOpcode::G_ATOMICRMW_AND;
3242 break;
3243 case AtomicRMWInst::Nand:
3244 Opcode = TargetOpcode::G_ATOMICRMW_NAND;
3245 break;
3246 case AtomicRMWInst::Or:
3247 Opcode = TargetOpcode::G_ATOMICRMW_OR;
3248 break;
3249 case AtomicRMWInst::Xor:
3250 Opcode = TargetOpcode::G_ATOMICRMW_XOR;
3251 break;
3252 case AtomicRMWInst::Max:
3253 Opcode = TargetOpcode::G_ATOMICRMW_MAX;
3254 break;
3255 case AtomicRMWInst::Min:
3256 Opcode = TargetOpcode::G_ATOMICRMW_MIN;
3257 break;
3258 case AtomicRMWInst::UMax:
3259 Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
3260 break;
3261 case AtomicRMWInst::UMin:
3262 Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
3263 break;
3264 case AtomicRMWInst::FAdd:
3265 Opcode = TargetOpcode::G_ATOMICRMW_FADD;
3266 break;
3267 case AtomicRMWInst::FSub:
3268 Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
3269 break;
3270 case AtomicRMWInst::FMax:
3271 Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
3272 break;
3273 case AtomicRMWInst::FMin:
3274 Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
3275 break;
3276 case AtomicRMWInst::UIncWrap:
3277 Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
3278 break;
3279 case AtomicRMWInst::UDecWrap:
3280 Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
3281 break;
3282 }
3283
3284 MIRBuilder.buildAtomicRMW(
3285 Opcode, Res, Addr, Val,
3286 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
3287 Flags, MRI->getType(Val), getMemOpAlign(I),
3288 I.getAAMetadata(), nullptr, I.getSyncScopeID(),
3289 I.getOrdering()));
3290 return true;
3291}
3292
3293bool IRTranslator::translateFence(const User &U,
3294 MachineIRBuilder &MIRBuilder) {
3295 const FenceInst &Fence = cast<FenceInst>(U);
3296 MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
3297 Fence.getSyncScopeID());
3298 return true;
3299}
3300
3301bool IRTranslator::translateFreeze(const User &U,
3302 MachineIRBuilder &MIRBuilder) {
3303 const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
3304 const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));
3305
3306 assert(DstRegs.size() == SrcRegs.size() &&
3307 "Freeze with different source and destination type?");
3308
3309 for (unsigned I = 0; I < DstRegs.size(); ++I) {
3310 MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
3311 }
3312
3313 return true;
3314}
3315
3316void IRTranslator::finishPendingPhis() {
3317#ifndef NDEBUG
3318 DILocationVerifier Verifier;
3319 GISelObserverWrapper WrapperObserver(&Verifier);
3320 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
3321#endif // ifndef NDEBUG
3322 for (auto &Phi : PendingPHIs) {
3323 const PHINode *PI = Phi.first;
3324 if (PI->getType()->isEmptyTy())
3325 continue;
3326 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
3327 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
3328 EntryBuilder->setDebugLoc(PI->getDebugLoc());
3329#ifndef NDEBUG
3330 Verifier.setCurrentInst(PI);
3331#endif // ifndef NDEBUG
3332
3333 SmallSet<const MachineBasicBlock *, 16> SeenPreds;
3334 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
3335 auto IRPred = PI->getIncomingBlock(i);
3336 ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
3337 for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
3338 if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
3339 continue;
3340 SeenPreds.insert(Pred);
3341 for (unsigned j = 0; j < ValRegs.size(); ++j) {
3342 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
3343 MIB.addUse(ValRegs[j]);
3344 MIB.addMBB(Pred);
3345 }
3346 }
3347 }
3348 }
3349}
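The nested loops above append one (value, predecessor) operand pair per machine predecessor, and SeenPreds prevents adding the same predecessor twice when several IR edges collapse onto one machine edge. A minimal standalone sketch of that dedup-before-append pattern, using standard containers and illustrative ids:

// Illustrative sketch of the SeenPreds dedup: append each predecessor's value
// at most once even if several IR edges collapse onto the same machine edge.
#include <cstdio>
#include <set>
#include <utility>
#include <vector>

int main() {
  std::vector<int> Preds = {1, 2, 2, 3};   // machine predecessors (with a dup)
  std::set<int> SeenPreds;                 // plays the role of SeenPreds
  std::vector<std::pair<int, int>> PhiOps; // (value reg, pred block) pairs
  int ValReg = 7;
  for (int Pred : Preds) {
    if (!SeenPreds.insert(Pred).second)
      continue;                            // already added for this pred
    PhiOps.push_back({ValReg, Pred});
  }
  std::printf("phi operand pairs: %zu\n", PhiOps.size()); // 3, not 4
  return 0;
}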
3350
3351void IRTranslator::translateDbgValueRecord(Value *V, bool HasArgList,
3352 const DILocalVariable *Variable,
3353 const DIExpression *Expression,
3354 const DebugLoc &DL,
3355 MachineIRBuilder &MIRBuilder) {
3356 assert(Variable->isValidLocationForIntrinsic(DL) &&
3357 "Expected inlined-at fields to agree");
3358 // Act as if we're handling a debug intrinsic.
3359 MIRBuilder.setDebugLoc(DL);
3360
3361 if (!V || HasArgList) {
3362 // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to
3363 // terminate any prior location.
3364 MIRBuilder.buildIndirectDbgValue(0, Variable, Expression);
3365 return;
3366 }
3367
3368 if (const auto *CI = dyn_cast<Constant>(V)) {
3369 MIRBuilder.buildConstDbgValue(*CI, Variable, Expression);
3370 return;
3371 }
3372
3373 if (auto *AI = dyn_cast<AllocaInst>(V);
3374 AI && AI->isStaticAlloca() && Expression->startsWithDeref()) {
3375 // If the value is an alloca and the expression starts with a
3376 // dereference, track a stack slot instead of a register, as registers
3377 // may be clobbered.
3378 auto ExprOperands = Expression->getElements();
3379 auto *ExprDerefRemoved =
3380 DIExpression::get(AI->getContext(), ExprOperands.drop_front());
3381 MIRBuilder.buildFIDbgValue(getOrCreateFrameIndex(*AI), Variable,
3382 ExprDerefRemoved);
3383 return;
3384 }
3385 if (translateIfEntryValueArgument(false, V, Variable, Expression, DL,
3386 MIRBuilder))
3387 return;
3388 for (Register Reg : getOrCreateVRegs(*V)) {
3389 // FIXME: This does not handle register-indirect values at offset 0. The
3390 // direct/indirect thing shouldn't really be handled by something as
3391 // implicit as reg+noreg vs reg+imm in the first place, but it seems
3392 // pretty baked in right now.
3393 MIRBuilder.buildDirectDbgValue(Reg, Variable, Expression);
3394 }
3395 return;
3396}
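The routine above is effectively a decision ladder: no single value (or a variadic location list) produces an undef DBG_VALUE, a constant produces a constant DBG_VALUE, a dereferenced static alloca produces a frame-index DBG_VALUE, and everything else gets one direct DBG_VALUE per register. A minimal standalone sketch of that classification order (the struct and enum here are stand-ins, not LLVM types):

// Illustrative sketch of the classification order used above.
#include <cstdio>

enum class DbgKind { Undef, Constant, FrameIndex, Register };

struct Loc {
  bool HasValue, HasArgList, IsConstant, IsStaticAllocaDeref;
};

static DbgKind classify(const Loc &L) {
  if (!L.HasValue || L.HasArgList) return DbgKind::Undef;
  if (L.IsConstant)                return DbgKind::Constant;
  if (L.IsStaticAllocaDeref)       return DbgKind::FrameIndex;
  return DbgKind::Register;
}

int main() {
  Loc L{true, false, false, true};
  std::printf("kind=%d\n", static_cast<int>(classify(L))); // FrameIndex
  return 0;
}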
3397
3398void IRTranslator::translateDbgDeclareRecord(Value *Address, bool HasArgList,
3399 const DILocalVariable *Variable,
3400 const DIExpression *Expression,
3401 const DebugLoc &DL,
3402 MachineIRBuilder &MIRBuilder) {
3403 if (!Address || isa<UndefValue>(Address)) {
3404 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *Variable << "\n");
3405 return;
3406 }
3407
3408 assert(Variable->isValidLocationForIntrinsic(DL) &&
3409 "Expected inlined-at fields to agree");
3410 auto AI = dyn_cast<AllocaInst>(Address);
3411 if (AI && AI->isStaticAlloca()) {
3412 // Static allocas are tracked at the MF level, no need for DBG_VALUE
3413 // instructions (in fact, they get ignored if they *do* exist).
3414 MF->setVariableDbgInfo(Variable, Expression,
3415 getOrCreateFrameIndex(*AI), DL);
3416 return;
3417 }
3418
3419 if (translateIfEntryValueArgument(true, Address, Variable,
3420 Expression, DL,
3421 MIRBuilder))
3422 return;
3423
3424 // A dbg.declare describes the address of a source variable, so lower it
3425 // into an indirect DBG_VALUE.
3426 MIRBuilder.setDebugLoc(DL);
3427 MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
3428 Variable, Expression);
3429 return;
3430}
3431
3432void IRTranslator::translateDbgInfo(const Instruction &Inst,
3433 MachineIRBuilder &MIRBuilder) {
3434 for (DbgRecord &DR : Inst.getDbgRecordRange()) {
3435 if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
3436 MIRBuilder.setDebugLoc(DLR->getDebugLoc());
3437 assert(DLR->getLabel() && "Missing label");
3438 assert(DLR->getLabel()->isValidLocationForIntrinsic(
3439 MIRBuilder.getDebugLoc()) &&
3440 "Expected inlined-at fields to agree");
3441 MIRBuilder.buildDbgLabel(DLR->getLabel());
3442 continue;
3443 }
3444 DbgVariableRecord &DVR = cast<DbgVariableRecord>(DR);
3445 const DILocalVariable *Variable = DVR.getVariable();
3446 const DIExpression *Expression = DVR.getExpression();
3447 Value *V = DVR.getVariableLocationOp(0);
3448 if (DVR.isDbgDeclare())
3449 translateDbgDeclareRecord(V, DVR.hasArgList(), Variable, Expression,
3450 DVR.getDebugLoc(), MIRBuilder);
3451 else
3452 translateDbgValueRecord(V, DVR.hasArgList(), Variable, Expression,
3453 DVR.getDebugLoc(), MIRBuilder);
3454 }
3455}
3456
3457bool IRTranslator::translate(const Instruction &Inst) {
3458 CurBuilder->setDebugLoc(Inst.getDebugLoc());
3459 CurBuilder->setPCSections(Inst.getMetadata(LLVMContext::MD_pcsections));
3460 CurBuilder->setMMRAMetadata(Inst.getMetadata(LLVMContext::MD_mmra));
3461
3462 if (TLI->fallBackToDAGISel(Inst))
3463 return false;
3464
3465 switch (Inst.getOpcode()) {
3466#define HANDLE_INST(NUM, OPCODE, CLASS) \
3467 case Instruction::OPCODE: \
3468 return translate##OPCODE(Inst, *CurBuilder.get());
3469#include "llvm/IR/Instruction.def"
3470 default:
3471 return false;
3472 }
3473}
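The switch above is generated by the HANDLE_INST X-macro from llvm/IR/Instruction.def, so every IR opcode dispatches to the matching translateOPCODE member. A miniature standalone X-macro showing the same expansion technique (the macro, enum, and translate functions below are stand-ins for the real ones):

// Illustrative miniature of the Instruction.def-style dispatch; translate##OPCODE
// expands to translateRet / translateAdd just as in the real switch.
#include <cstdio>

#define MY_HANDLE_INST(OPCODE) \
  case Opcode::OPCODE:         \
    return translate##OPCODE();

enum class Opcode { Ret, Add };

static bool translateRet() { std::puts("translate Ret"); return true; }
static bool translateAdd() { std::puts("translate Add"); return true; }

static bool translate(Opcode Op) {
  switch (Op) {
    MY_HANDLE_INST(Ret)
    MY_HANDLE_INST(Add)
  }
  return false; // unknown opcode: signal fallback
}

int main() { return translate(Opcode::Add) ? 0 : 1; }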
3474
3475bool IRTranslator::translate(const Constant &C, Register Reg) {
3476 // We only emit constants into the entry block from here. To prevent jumpy
3477 // debug behaviour remove debug line.
3478 if (auto CurrInstDL = CurBuilder->getDL())
3479 EntryBuilder->setDebugLoc(DebugLoc());
3480
3481 if (auto CI = dyn_cast<ConstantInt>(&C))
3482 EntryBuilder->buildConstant(Reg, *CI);
3483 else if (auto CF = dyn_cast<ConstantFP>(&C))
3484 EntryBuilder->buildFConstant(Reg, *CF);
3485 else if (isa<UndefValue>(C))
3486 EntryBuilder->buildUndef(Reg);
3487 else if (isa<ConstantPointerNull>(C))
3488 EntryBuilder->buildConstant(Reg, 0);
3489 else if (auto GV = dyn_cast<GlobalValue>(&C))
3490 EntryBuilder->buildGlobalValue(Reg, GV);
3491 else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
3492 if (!isa<FixedVectorType>(CAZ->getType()))
3493 return false;
3494 // Return the scalar if it is a <1 x Ty> vector.
3495 unsigned NumElts = CAZ->getElementCount().getFixedValue();
3496 if (NumElts == 1)
3497 return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder);
3498 SmallVector<Register, 4> Ops;
3499 for (unsigned I = 0; I < NumElts; ++I) {
3500 Constant &Elt = *CAZ->getElementValue(I);
3501 Ops.push_back(getOrCreateVReg(Elt));
3502 }
3503 EntryBuilder->buildBuildVector(Reg, Ops);
3504 } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
3505 // Return the scalar if it is a <1 x Ty> vector.
3506 if (CV->getNumElements() == 1)
3507 return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);
3508 SmallVector<Register, 4> Ops;
3509 for (unsigned i = 0; i < CV->getNumElements(); ++i) {
3510 Constant &Elt = *CV->getElementAsConstant(i);
3511 Ops.push_back(getOrCreateVReg(Elt));
3512 }
3513 EntryBuilder->buildBuildVector(Reg, Ops);
3514 } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
3515 switch(CE->getOpcode()) {
3516#define HANDLE_INST(NUM, OPCODE, CLASS) \
3517 case Instruction::OPCODE: \
3518 return translate##OPCODE(*CE, *EntryBuilder.get());
3519#include "llvm/IR/Instruction.def"
3520 default:
3521 return false;
3522 }
3523 } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
3524 if (CV->getNumOperands() == 1)
3525 return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
3526 SmallVector<Register, 4> Ops;
3527 for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
3528 Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
3529 }
3530 EntryBuilder->buildBuildVector(Reg, Ops);
3531 } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
3532 EntryBuilder->buildBlockAddress(Reg, BA);
3533 } else
3534 return false;
3535
3536 return true;
3537}
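For the vector-constant paths above, a <1 x Ty> constant degenerates to a plain scalar copy, while larger vectors gather one register per element into a G_BUILD_VECTOR. A minimal standalone sketch of that shape decision (the printed opcodes and register numbers are illustrative only):

// Illustrative sketch of the <1 x Ty> special case: one element becomes a
// plain scalar copy, more elements become a build-vector of element regs.
#include <cstdio>
#include <vector>

static void emitVectorConstant(const std::vector<int> &ElementRegs) {
  if (ElementRegs.size() == 1) {
    std::printf("COPY %%r%d\n", ElementRegs[0]);
    return;
  }
  std::printf("G_BUILD_VECTOR");
  for (int Reg : ElementRegs)
    std::printf(" %%r%d", Reg);
  std::printf("\n");
}

int main() {
  emitVectorConstant({5});       // <1 x Ty>: scalar copy
  emitVectorConstant({5, 6, 7}); // <3 x Ty>: build-vector
  return 0;
}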
3538
3539bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
3540 MachineBasicBlock &MBB) {
3541 for (auto &BTB : SL->BitTestCases) {
3542 // Emit header first, if it wasn't already emitted.
3543 if (!BTB.Emitted)
3544 emitBitTestHeader(BTB, BTB.Parent);
3545
3546 BranchProbability UnhandledProb = BTB.Prob;
3547 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
3548 UnhandledProb -= BTB.Cases[j].ExtraProb;
3549 // Set the current basic block to the mbb we wish to insert the code into
3550 MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
3551 // If all cases cover a contiguous range, it is not necessary to jump to
3552 // the default block after the last bit test fails. This is because the
3553 // range check during bit test header creation has guaranteed that every
3554 // case here doesn't go outside the range. In this case, there is no need
3555 // to perform the last bit test, as it will always be true. Instead, make
3556 // the second-to-last bit-test fall through to the target of the last bit
3557 // test, and delete the last bit test.
3558
3559 MachineBasicBlock *NextMBB;
3560 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3561 // Second-to-last bit-test with contiguous range: fall through to the
3562 // target of the final bit test.
3563 NextMBB = BTB.Cases[j + 1].TargetBB;
3564 } else if (j + 1 == ej) {
3565 // For the last bit test, fall through to Default.
3566 NextMBB = BTB.Default;
3567 } else {
3568 // Otherwise, fall through to the next bit test.
3569 NextMBB = BTB.Cases[j + 1].ThisBB;
3570 }
3571
3572 emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);
3573
3574 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3575 // We need to record the replacement phi edge here that normally
3576 // happens in emitBitTestCase before we delete the case, otherwise the
3577 // phi edge will be lost.
3578 addMachineCFGPred({BTB.Parent->getBasicBlock(),
3579 BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
3580 MBB);
3581 // Since we're not going to use the final bit test, remove it.
3582 BTB.Cases.pop_back();
3583 break;
3584 }
3585 }
3586 // This is "default" BB. We have two jumps to it. From "header" BB and from
3587 // last "case" BB, unless the latter was skipped.
3588 CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
3589 BTB.Default->getBasicBlock()};
3590 addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
3591 if (!BTB.ContiguousRange) {
3592 addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
3593 }
3594 }
3595 SL->BitTestCases.clear();
3596
3597 for (auto &JTCase : SL->JTCases) {
3598 // Emit header first, if it wasn't already emitted.
3599 if (!JTCase.first.Emitted)
3600 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
3601
3602 emitJumpTable(JTCase.second, JTCase.second.MBB);
3603 }
3604 SL->JTCases.clear();
3605
3606 for (auto &SwCase : SL->SwitchCases)
3607 emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
3608 SL->SwitchCases.clear();
3609
3610 // Check if we need to generate stack-protector guard checks.
3611 StackProtector &SP = getAnalysis<StackProtector>();
3612 if (SP.shouldEmitSDCheck(BB)) {
3613 bool FunctionBasedInstrumentation =
3614 TLI->getSSPStackGuardCheck(*MF->getFunction().getParent());
3615 SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
3616 }
3617 // Handle stack protector.
3618 if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
3619 LLVM_DEBUG(dbgs() << "Unimplemented stack protector case\n");
3620 return false;
3621 } else if (SPDescriptor.shouldEmitStackProtector()) {
3622 MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
3623 MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();
3624
3625 // Find the split point to split the parent mbb. At the same time copy all
3626 // physical registers used in the tail of parent mbb into virtual registers
3627 // before the split point and back into physical registers after the split
3628 // point. This prevents us needing to deal with Live-ins and many other
3629 // register allocation issues caused by us splitting the parent mbb. The
3630 // register allocator will clean up said virtual copies later on.
3631 MachineBasicBlock::iterator SplitPoint = findSplitPointForStackProtector(
3632 ParentMBB, *MF->getSubtarget().getInstrInfo());
3633
3634 // Splice the terminator of ParentMBB into SuccessMBB.
3635 SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
3636 ParentMBB->end());
3637
3638 // Add compare/jump on neq/jump to the parent BB.
3639 if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
3640 return false;
3641
3642 // CodeGen Failure MBB if we have not codegened it yet.
3643 MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
3644 if (FailureMBB->empty()) {
3645 if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
3646 return false;
3647 }
3648
3649 // Clear the Per-BB State.
3650 SPDescriptor.resetPerBBState();
3651 }
3652 return true;
3653}
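For the bit-test chain handled above, each case normally falls through to the next test, the last test falls through to the default block, and with a contiguous (or unreachable-fallthrough) range the second-to-last test jumps straight to the final target so the provably-true last test can be deleted. A minimal standalone sketch of that fall-through selection (the block ids are made up):

// Illustrative sketch of the fall-through choice for bit-test case j out of
// ej cases, mirroring the NextMBB selection above.
#include <cstdio>

static int pickNext(unsigned j, unsigned ej, bool ContiguousRange,
                    int LastTargetBB, int DefaultBB, int NextTestBB) {
  if (ContiguousRange && j + 2 == ej)
    return LastTargetBB; // skip the provably-true final test
  if (j + 1 == ej)
    return DefaultBB;    // last test falls through to default
  return NextTestBB;     // otherwise on to the next bit test
}

int main() {
  // 3 cases, contiguous range: case 1 (0-based) jumps straight to case 2's target.
  std::printf("next=%d\n", pickNext(1, 3, true, /*LastTarget*/ 42,
                                    /*Default*/ 7, /*NextTest*/ 8));
  return 0;
}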
3654
3655bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
3656 MachineBasicBlock *ParentBB) {
3657 CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
3658 // First create the loads to the guard/stack slot for the comparison.
3659 Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
3660 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
3661 LLT PtrMemTy = getLLTForMVT(TLI->getPointerMemTy(*DL));
3662
3663 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
3664 int FI = MFI.getStackProtectorIndex();
3665
3666 Register Guard;
3667 Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
3668 const Module &M = *ParentBB->getParent()->getFunction().getParent();
3669 Align Align = DL->getPrefTypeAlign(PointerType::getUnqual(M.getContext()));
3670
3671 // Generate code to load the content of the guard slot.
3672 Register GuardVal =
3673 CurBuilder
3674 ->buildLoad(PtrMemTy, StackSlotPtr,
3675 MachinePointerInfo::getFixedStack(*MF, FI), Align,
3676 MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile)
3677 .getReg(0);
3678
3679 if (TLI->useStackGuardXorFP()) {
3680 LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
3681 return false;
3682 }
3683
3684 // Retrieve guard check function, nullptr if instrumentation is inlined.
3685 if (const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M)) {
3686 // This path is currently untestable on GlobalISel, since the only platform
3687 // that needs this seems to be Windows, and we fall back on that currently.
3688 // The code still lives here in case that changes.
3689 // Silence warning about unused variable until the code below that uses
3690 // 'GuardCheckFn' is enabled.
3691 (void)GuardCheckFn;
3692 return false;
3693#if 0
3694 // The target provides a guard check function to validate the guard value.
3695 // Generate a call to that function with the content of the guard slot as
3696 // argument.
3697 FunctionType *FnTy = GuardCheckFn->getFunctionType();
3698 assert(FnTy->getNumParams() == 1 && "Invalid function signature");
3699 ISD::ArgFlagsTy Flags;
3700 if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
3701 Flags.setInReg();
3702 CallLowering::ArgInfo GuardArgInfo(
3703 {GuardVal, FnTy->getParamType(0), {Flags}});
3704
3705 CallLowering::CallLoweringInfo Info;
3706 Info.OrigArgs.push_back(GuardArgInfo);
3707 Info.CallConv = GuardCheckFn->getCallingConv();
3708 Info.Callee = MachineOperand::CreateGA(GuardCheckFn, 0);
3709 Info.OrigRet = {Register(), FnTy->getReturnType()};
3710 if (!CLI->lowerCall(MIRBuilder, Info)) {
3711 LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
3712 return false;
3713 }
3714 return true;
3715#endif
3716 }
3717
3718 // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
3719 // Otherwise, emit a volatile load to retrieve the stack guard value.
3720 if (TLI->useLoadStackGuardNode()) {
3721 Guard =
3722 MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
3723 getStackGuard(Guard, *CurBuilder);
3724 } else {
3725 // TODO: test using android subtarget when we support @llvm.thread.pointer.
3726 const Value *IRGuard = TLI->getSDagStackGuard(M);
3727 Register GuardPtr = getOrCreateVReg(*IRGuard);
3728
3729 Guard = CurBuilder
3730 ->buildLoad(PtrMemTy, GuardPtr,
3731 MachinePointerInfo::getFixedStack(*MF, FI), Align,
3732 MachineMemOperand::MOLoad |
3733 MachineMemOperand::MOVolatile)
3734 .getReg(0);
3735 }
3736
3737 // Perform the comparison.
3738 auto Cmp =
3739 CurBuilder->buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Guard, GuardVal);
3740 // If the guard/stackslot do not equal, branch to failure MBB.
3741 CurBuilder->buildBrCond(Cmp, *SPD.getFailureMBB());
3742 // Otherwise branch to success MBB.
3743 CurBuilder->buildBr(*SPD.getSuccessMBB());
3744 return true;
3745}
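Conceptually the parent-block code above loads the reference guard and the on-stack copy, compares them, and branches to the failure block on mismatch and to the success block otherwise. A minimal standalone sketch of that shape using plain values instead of stack slots and vregs:

// Illustrative sketch of the guard-check shape: compare and branch to the
// failure path on mismatch, otherwise continue to the success path.
#include <cstdio>
#include <cstdlib>

static void stackProtectorCheck(long Guard, long StackSlotCopy) {
  if (Guard != StackSlotCopy) { // G_ICMP ne + G_BRCOND to the failure MBB
    std::puts("stack smashing detected");
    std::abort();
  }
  // fall through / G_BR to the success MBB
}

int main() {
  stackProtectorCheck(0x5eed, 0x5eed);
  std::puts("guard intact");
  return 0;
}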
3746
3747bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
3748 MachineBasicBlock *FailureBB) {
3749 CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
3750
3751 const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
3752 const char *Name = TLI->getLibcallName(Libcall);
3753
3754 CallLowering::CallLoweringInfo Info;
3755 Info.CallConv = TLI->getLibcallCallingConv(Libcall);
3756 Info.Callee = MachineOperand::CreateES(Name);
3757 Info.OrigRet = {Register(), Type::getVoidTy(MF->getFunction().getContext()),
3758 0};
3759 if (!CLI->lowerCall(*CurBuilder, Info)) {
3760 LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
3761 return false;
3762 }
3763
3764 // On PS4/PS5, the "return address" must still be within the calling
3765 // function, even if it's at the very end, so emit an explicit TRAP here.
3766 // WebAssembly needs an unreachable instruction after a non-returning call,
3767 // because the function return type can be different from __stack_chk_fail's
3768 // return type (void).
3769 const TargetMachine &TM = MF->getTarget();
3770 if (TM.getTargetTriple().isPS() || TM.getTargetTriple().isWasm()) {
3771 LLVM_DEBUG(dbgs() << "Unhandled trap emission for stack protector fail\n");
3772 return false;
3773 }
3774 return true;
3775}
3776
3777void IRTranslator::finalizeFunction() {
3778 // Release the memory used by the different maps we
3779 // needed during the translation.
3780 PendingPHIs.clear();
3781 VMap.reset();
3782 FrameIndices.clear();
3783 MachinePreds.clear();
3784 // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
3785 // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid
3786 // destroying it twice (in ~IRTranslator() and ~LLVMContext())
3787 EntryBuilder.reset();
3788 CurBuilder.reset();
3789 FuncInfo.clear();
3790 SPDescriptor.resetPerFunctionState();
3791}
3792
3793/// Returns true if a BasicBlock \p BB within a variadic function contains a
3794/// variadic musttail call.
3795static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
3796 if (!IsVarArg)
3797 return false;
3798
3799 // Walk the block backwards, because tail calls usually only appear at the end
3800 // of a block.
3801 return llvm::any_of(llvm::reverse(BB), [](const Instruction &I) {
3802 const auto *CI = dyn_cast<CallInst>(&I);
3803 return CI && CI->isMustTailCall();
3804 });
3805}
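llvm::any_of over llvm::reverse(BB) scans the block from the back, since a musttail call can only sit near the terminator. A standalone equivalent using only the standard library (the Inst struct is a stand-in):

// Illustrative standalone equivalent of the reverse any_of scan above.
#include <algorithm>
#include <cstdio>
#include <vector>

struct Inst { bool IsMustTailCall; };

static bool hasMustTail(const std::vector<Inst> &Block) {
  return std::any_of(Block.rbegin(), Block.rend(),
                     [](const Inst &I) { return I.IsMustTailCall; });
}

int main() {
  std::vector<Inst> BB = {{false}, {false}, {true}};
  std::printf("must-tail found: %d\n", hasMustTail(BB));
  return 0;
}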
3806
3807 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
3808 MF = &CurMF;
3809 const Function &F = MF->getFunction();
3810 GISelCSEAnalysisWrapper &Wrapper =
3811 getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
3812 // Set the CSEConfig and run the analysis.
3813 GISelCSEInfo *CSEInfo = nullptr;
3814 TPC = &getAnalysis<TargetPassConfig>();
3815 bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
3816 ? EnableCSEInIRTranslator
3817 : TPC->isGISelCSEEnabled();
3818 TLI = MF->getSubtarget().getTargetLowering();
3819
3820 if (EnableCSE) {
3821 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3822 CSEInfo = &Wrapper.get(TPC->getCSEConfig());
3823 EntryBuilder->setCSEInfo(CSEInfo);
3824 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3825 CurBuilder->setCSEInfo(CSEInfo);
3826 } else {
3827 EntryBuilder = std::make_unique<MachineIRBuilder>();
3828 CurBuilder = std::make_unique<MachineIRBuilder>();
3829 }
3830 CLI = MF->getSubtarget().getCallLowering();
3831 CurBuilder->setMF(*MF);
3832 EntryBuilder->setMF(*MF);
3833 MRI = &MF->getRegInfo();
3834 DL = &F.getParent()->getDataLayout();
3835 ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
3836 const TargetMachine &TM = MF->getTarget();
3837 TM.resetTargetOptions(F);
3838 EnableOpts = OptLevel != CodeGenOptLevel::None && !skipFunction(F);
3839 FuncInfo.MF = MF;
3840 if (EnableOpts) {
3841 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
3842 FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
3843 } else {
3844 AA = nullptr;
3845 FuncInfo.BPI = nullptr;
3846 }
3847
3848 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
3849 MF->getFunction());
3850 LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
3851 FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
3852
3853 SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
3854 SL->init(*TLI, TM, *DL);
3855
3856 assert(PendingPHIs.empty() && "stale PHIs");
3857
3858 // Targets which want to use big endian can enable it using
3859 // enableBigEndian()
3860 if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
3861 // Currently we don't properly handle big endian code.
3862 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3863 F.getSubprogram(), &F.getEntryBlock());
3864 R << "unable to translate in big endian mode";
3865 reportTranslationError(*MF, *TPC, *ORE, R);
3866 }
3867
3868 // Release the per-function state when we return, whether we succeeded or not.
3869 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
3870
3871 // Setup a separate basic-block for the arguments and constants
3872 MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
3873 MF->push_back(EntryBB);
3874 EntryBuilder->setMBB(*EntryBB);
3875
3876 DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
3877 SwiftError.setFunction(CurMF);
3878 SwiftError.createEntriesInEntryBlock(DbgLoc);
3879
3880 bool IsVarArg = F.isVarArg();
3881 bool HasMustTailInVarArgFn = false;
3882
3883 // Create all blocks, in IR order, to preserve the layout.
3884 for (const BasicBlock &BB: F) {
3885 auto *&MBB = BBToMBB[&BB];
3886
3887 MBB = MF->CreateMachineBasicBlock(&BB);
3888 MF->push_back(MBB);
3889
3890 if (BB.hasAddressTaken())
3891 MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));
3892
3893 if (!HasMustTailInVarArgFn)
3894 HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
3895 }
3896
3897 MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
3898
3899 // Make our arguments/constants entry block fallthrough to the IR entry block.
3900 EntryBB->addSuccessor(&getMBB(F.front()));
3901
3902 if (CLI->fallBackToDAGISel(*MF)) {
3903 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3904 F.getSubprogram(), &F.getEntryBlock());
3905 R << "unable to lower function: " << ore::NV("Prototype", F.getType());
3906 reportTranslationError(*MF, *TPC, *ORE, R);
3907 return false;
3908 }
3909
3910 // Lower the actual args into this basic block.
3911 SmallVector<ArrayRef<Register>, 8> VRegArgs;
3912 for (const Argument &Arg: F.args()) {
3913 if (DL->getTypeStoreSize(Arg.getType()).isZero())
3914 continue; // Don't handle zero sized types.
3915 ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
3916 VRegArgs.push_back(VRegs);
3917
3918 if (Arg.hasSwiftErrorAttr()) {
3919 assert(VRegs.size() == 1 && "Too many vregs for Swift error");
3920 SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
3921 }
3922 }
3923
3924 if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
3925 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3926 F.getSubprogram(), &F.getEntryBlock());
3927 R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
3928 reportTranslationError(*MF, *TPC, *ORE, R);
3929 return false;
3930 }
3931
3932 // Need to visit defs before uses when translating instructions.
3933 GISelObserverWrapper WrapperObserver;
3934 if (EnableCSE && CSEInfo)
3935 WrapperObserver.addObserver(CSEInfo);
3936 {
3937 ReversePostOrderTraversal<const Function *> RPOT(&F);
3938#ifndef NDEBUG
3939 DILocationVerifier Verifier;
3940 WrapperObserver.addObserver(&Verifier);
3941#endif // ifndef NDEBUG
3942 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
3943 RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
3944 for (const BasicBlock *BB : RPOT) {
3945 MachineBasicBlock &MBB = getMBB(*BB);
3946 // Set the insertion point of all the following translations to
3947 // the end of this basic block.
3948 CurBuilder->setMBB(MBB);
3949 HasTailCall = false;
3950 for (const Instruction &Inst : *BB) {
3951 // If we translated a tail call in the last step, then we know
3952 // everything after the call is either a return, or something that is
3953 // handled by the call itself. (E.g. a lifetime marker or assume
3954 // intrinsic.) In this case, we should stop translating the block and
3955 // move on.
3956 if (HasTailCall)
3957 break;
3958#ifndef NDEBUG
3959 Verifier.setCurrentInst(&Inst);
3960#endif // ifndef NDEBUG
3961
3962 // Translate any debug-info attached to the instruction.
3963 translateDbgInfo(Inst, *CurBuilder.get());
3964
3965 if (translate(Inst))
3966 continue;
3967
3968 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3969 Inst.getDebugLoc(), BB);
3970 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
3971
3972 if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
3973 std::string InstStrStorage;
3974 raw_string_ostream InstStr(InstStrStorage);
3975 InstStr << Inst;
3976
3977 R << ": '" << InstStr.str() << "'";
3978 }
3979
3980 reportTranslationError(*MF, *TPC, *ORE, R);
3981 return false;
3982 }
3983
3984 if (!finalizeBasicBlock(*BB, MBB)) {
3985 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3986 BB->getTerminator()->getDebugLoc(), BB);
3987 R << "unable to translate basic block";
3988 reportTranslationError(*MF, *TPC, *ORE, R);
3989 return false;
3990 }
3991 }
3992#ifndef NDEBUG
3993 WrapperObserver.removeObserver(&Verifier);
3994#endif
3995 }
3996
3997 finishPendingPhis();
3998
3999 SwiftError.propagateVRegs();
4000
4001 // Merge the argument lowering and constants block with its single
4002 // successor, the LLVM-IR entry block. We want the basic block to
4003 // be maximal.
4004 assert(EntryBB->succ_size() == 1 &&
4005 "Custom BB used for lowering should have only one successor");
4006 // Get the successor of the current entry block.
4007 MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
4008 assert(NewEntryBB.pred_size() == 1 &&
4009 "LLVM-IR entry block has a predecessor!?");
4010 // Move all the instruction from the current entry block to the
4011 // new entry block.
4012 NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
4013 EntryBB->end());
4014
4015 // Update the live-in information for the new entry block.
4016 for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
4017 NewEntryBB.addLiveIn(LiveIn);
4018 NewEntryBB.sortUniqueLiveIns();
4019
4020 // Get rid of the now empty basic block.
4021 EntryBB->removeSuccessor(&NewEntryBB);
4022 MF->remove(EntryBB);
4023 MF->deleteMachineBasicBlock(EntryBB);
4024
4025 assert(&MF->front() == &NewEntryBB &&
4026 "New entry wasn't next in the list of basic block!");
4027
4028 // Initialize stack protector information.
4029 StackProtector &SP = getAnalysis<StackProtector>();
4030 SP.copyToMachineFrameInfo(MF->getFrameInfo());
4031
4032 return false;
4033}
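Blocks are translated in reverse post-order so that, PHIs aside, a value's definition is visited before its uses; the PHIs themselves are filled in afterwards by finishPendingPhis. A minimal standalone sketch of computing a reverse post-order for a small CFG (the adjacency lists are made up):

// Illustrative sketch of reverse post-order on a tiny CFG, the visitation
// order used by the translator so defs are seen before uses.
#include <algorithm>
#include <cstdio>
#include <vector>

static void postOrder(int BB, const std::vector<std::vector<int>> &Succs,
                      std::vector<bool> &Seen, std::vector<int> &Out) {
  Seen[BB] = true;
  for (int S : Succs[BB])
    if (!Seen[S])
      postOrder(S, Succs, Seen, Out);
  Out.push_back(BB);
}

int main() {
  // 0 -> {1, 2}, 1 -> {3}, 2 -> {3}, 3 -> {}
  std::vector<std::vector<int>> Succs = {{1, 2}, {3}, {3}, {}};
  std::vector<bool> Seen(Succs.size(), false);
  std::vector<int> PO;
  postOrder(0, Succs, Seen, PO);
  std::reverse(PO.begin(), PO.end()); // reverse post-order: 0 first, 3 last
  for (int BB : PO)
    std::printf("bb%d ", BB);
  std::printf("\n");
  return 0;
}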
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
Checks whether the given location points to constant memory, or if OrLocal is true whether it points ...
Class for arbitrary precision integers.
Definition: APInt.h:77
an instruction to allocate memory on the stack
Definition: Instructions.h:60
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:158
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:133
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:108
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:126
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:104
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:154
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
iterator begin() const
Definition: ArrayRef.h:153
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
An immutable pass that tracks lazily created AssumptionCache objects.
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:540
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:749
@ Add
*p = old + v
Definition: Instructions.h:765
@ FAdd
*p = old + v
Definition: Instructions.h:786
@ Min
*p = old <signed v ? old : v
Definition: Instructions.h:779
@ Or
*p = old | v
Definition: Instructions.h:773
@ Sub
*p = old - v
Definition: Instructions.h:767
@ And
*p = old & v
Definition: Instructions.h:769
@ Xor
*p = old ^ v
Definition: Instructions.h:775
@ FSub
*p = old - v
Definition: Instructions.h:789
@ UIncWrap
Increment one up to a maximum value.
Definition: Instructions.h:801
@ Max
*p = old >signed v ? old : v
Definition: Instructions.h:777
@ UMin
*p = old <unsigned v ? old : v
Definition: Instructions.h:783
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
Definition: Instructions.h:797
@ UMax
*p = old >unsigned v ? old : v
Definition: Instructions.h:781
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
Definition: Instructions.h:793
@ UDecWrap
Decrement one until a minimum value or zero.
Definition: Instructions.h:805
@ Nand
*p = ~(old & v)
Definition: Instructions.h:771
Attribute getFnAttr(Attribute::AttrKind Kind) const
Return the attribute object that exists for the function.
Definition: Attributes.h:847
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:349
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:640
InstListType::const_iterator const_iterator
Definition: BasicBlock.h:166
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:360
const Instruction & front() const
Definition: BasicBlock.h:453
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:206
const Instruction * getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
Definition: BasicBlock.cpp:379
const Instruction & back() const
Definition: BasicBlock.h:455
Legacy analysis pass which computes BlockFrequencyInfo.
Conditional or Unconditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Legacy analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1494
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1817
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Definition: InstrTypes.h:2428
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1750
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1670
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Definition: InstrTypes.h:2404
Value * getCalledOperand() const
Definition: InstrTypes.h:1743
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1695
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1676
bool isConvergent() const
Determine if the invoke is convergent.
Definition: InstrTypes.h:2312
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1686
unsigned arg_size() const
Definition: InstrTypes.h:1693
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1827
This class represents a function call, abstracting a target machine's calling convention.
bool isTailCall() const
bool isMustTailCall() const
bool checkReturnTypeForCallConv(MachineFunction &MF) const
Toplevel function to check the return type based on the target calling convention.
virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, ArrayRef< ArrayRef< Register > > VRegs, FunctionLoweringInfo &FLI) const
This hook must be implemented to lower the incoming (formal) arguments, described by VRegs,...
Definition: CallLowering.h:554
virtual bool enableBigEndian() const
For targets which want to use big-endian can enable it with enableBigEndian() hook.
Definition: CallLowering.h:603
virtual bool supportSwiftError() const
Definition: CallLowering.h:457
virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, ArrayRef< Register > VRegs, FunctionLoweringInfo &FLI, Register SwiftErrorVReg) const
This hook must be implemented to lower outgoing return values, described by Val, into the specified v...
Definition: CallLowering.h:522
virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const
This hook must be implemented to lower the given call instruction, including argument and return valu...
Definition: CallLowering.h:566
virtual bool fallBackToDAGISel(const MachineFunction &MF) const
Definition: CallLowering.h:540
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:983
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:993
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:1010
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:1022
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:1023
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:1016
@ ICMP_EQ
equal
Definition: InstrTypes.h:1014
@ ICMP_NE
not equal
Definition: InstrTypes.h:1015
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:1019
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:995
bool isFPPredicate() const
Definition: InstrTypes.h:1122
bool isIntPredicate() const
Definition: InstrTypes.h:1123
This is the shared class of boolean and integer constants.
Definition: Constants.h:81
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:850
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:206
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:155
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:146
This is an important base class in LLVM.
Definition: Constant.h:41
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:417
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
unsigned getNonMetadataArgCount() const
DWARF expression.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this label.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
Definition: DataLayout.h:410
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:720
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
Definition: DataLayout.cpp:905
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:504
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:672
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Definition: DataLayout.h:472
Align getPointerABIAlignment(unsigned AS) const
Layout pointer alignment.
Definition: DataLayout.cpp:742
This represents the llvm.dbg.declare instruction.
Value * getAddress() const
This represents the llvm.dbg.label instruction.
DILabel * getLabel() const
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
This represents the llvm.dbg.value instruction.
Value * getValue(unsigned OpIdx=0) const
DILocalVariable * getVariable() const
DIExpression * getExpression() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
A debug info location.
Definition: DebugLoc.h:33
Class representing an expression and its matching format.
This instruction extracts a struct member or array element value from an aggregate value.
This instruction compares its operands according to the predicate given to the constructor.
An instruction for ordering other memory operations.
Definition: Instructions.h:461
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
Definition: Instructions.h:499
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Definition: Instructions.h:488
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition: Type.cpp:692
BranchProbabilityInfo * BPI
void clear()
clear - Clear out all the function-specific state.
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition: Pass.cpp:178
const BasicBlock & getEntryBlock() const
Definition: Function.h:790
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1830
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition: Function.h:685
Constant * getPersonalityFn() const
Get the personality function associated with this function.
Definition: Function.cpp:1922
const Function & getFunction() const
Definition: Function.h:162
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition: Function.h:237
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:356
The actual analysis pass wrapper.
Definition: CSEInfo.h:222
Simple wrapper that does the following.
Definition: CSEInfo.h:204
The CSE Analysis object.
Definition: CSEInfo.h:69
Abstract class that contains various methods for clients to notify about changes.
Simple wrapper observer that takes several observers, and calls each one for each event.
void removeObserver(GISelChangeObserver *O)
void addObserver(GISelChangeObserver *O)
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
Definition: GlobalValue.h:566
bool hasExternalWeakLinkage() const
Definition: GlobalValue.h:528
bool hasDLLImportStorageClass() const
Definition: GlobalValue.h:277
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:655
bool isTailCall(const MachineInstr &MI) const override
This instruction compares its operands according to the predicate given to the constructor.
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
IRTranslator(CodeGenOptLevel OptLevel=CodeGenOptLevel::None)
static char ID
Definition: IRTranslator.h:68
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
Indirect Branch Instruction.
bool lowerInlineAsm(MachineIRBuilder &MIRBuilder, const CallBase &CB, std::function< ArrayRef< Register >(const Value &Val)> GetOrCreateVRegs) const
Lower the given inline asm call instruction GetOrCreateVRegs is a callback to materialize a register ...
This instruction inserts a struct field of array element value into an aggregate value.
iterator_range< simple_ilist< DbgRecord >::iterator > getDbgRecordRange() const
Return a range over the DbgRecords attached to this instruction.
Definition: Instruction.h:84
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:454
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
Definition: Instruction.h:341
const BasicBlock * getParent() const
Definition: Instruction.h:152
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:359
AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
Definition: Metadata.cpp:1706
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:252
bool hasAllowReassoc() const LLVM_READONLY
Determine whether the allow-reassociation flag is set.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:55
Invoke instruction.
constexpr LLT changeElementType(LLT NewEltTy) const
If this type is a vector, return a vector with the same number of elements but the new element type.
Definition: LowLevelType.h:214
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
Definition: LowLevelType.h:57
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:193
constexpr bool isPointer() const
Definition: LowLevelType.h:149
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
Definition: LowLevelType.h:100
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
Definition: LowLevelType.h:178
The landingpad instruction holds all of the information necessary to generate correct exception handl...
An instruction for reading from memory.
Definition: Instructions.h:185
Value * getPointerOperand()
Definition: Instructions.h:281
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:246
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:256
static LocationSize precise(uint64_t Value)
Context object for machine code objects.
Definition: MCContext.h:81
MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
Definition: MCContext.cpp:322
MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
Definition: MCContext.cpp:215
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:40
Metadata node.
Definition: Metadata.h:1067
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1541
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
unsigned pred_size() const
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
void setAddressTakenIRBlock(BasicBlock *BB)
Set this block to reflect that it corresponds to an IR-level basic block with a BlockAddress.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
std::vector< MachineBasicBlock * >::iterator succ_iterator
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void sortUniqueLiveIns()
Sorts and uniques the LiveIns vector.
bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
void setIsEHPad(bool V=true)
Indicates the block is a landing pad.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
int CreateVariableSizedObject(Align Alignment, const AllocaInst *Alloca)
Notify the MachineFrameInfo object that a variable sized object has been created.
void setHasMustTailInVarArgFunc(bool B)
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
void push_back(MachineBasicBlock *MBB)
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MCSymbol * addLandingPad(MachineBasicBlock *LandingPad)
Add a new panding pad, and extract the exception handling information from the landingpad instruction...
void deleteMachineBasicBlock(MachineBasicBlock *MBB)
DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
MachineModuleInfo & getMMI() const
void remove(iterator MBBI)
void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr, int Slot, const DILocation *Loc)
Collect information used to emit debugging information of a variable in a stack slot.
const MachineBasicBlock & front() const
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void erase(iterator MBBI)
void insert(iterator MBBI, MachineBasicBlock *MBB)
Helper class to build MachineInstr.
MachineInstrBuilder buildFMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildFreeze(const DstOp &Dst, const SrcOp &Src)
Build and insert Dst = G_FREEZE Src.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ADD Op0, Op1.
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildFPExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPEXT Op.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, const SrcOp &Src2, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FMA Op0, Op1, Op2.
MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_MUL Op0, Op1.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_SUB Op0, Op1.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in memory at Reg (suitably modified by Expr).
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction specifying that Variable is given by C (suitably modified by Expr).
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildExtractVectorElementConstant(const DstOp &Res, const SrcOp &Val, const int Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Reg (suitably modified by Expr).
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instruction specifying that Label is given.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in the stack slot specified by FI (suitably modified by Expr).
void setDebugLoc(const DebugLoc &DL)
Set the debug location to DL for all the next build instructions.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
const DebugLoc & getDebugLoc()
Get the current instruction's debug location.
MachineInstrBuilder buildTrap(bool Debug=false)
Build and insert G_TRAP or G_DEBUGTRAP.
MachineInstrBuilder buildFFrexp(const DstOp &Fract, const DstOp &Exp, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)
Build and insert Fract, Exp = G_FFREXP Src.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
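Taken together, the MachineIRBuilder entries above form the small fluent API that IRTranslator drives: the builder is pointed at a block and a debug location, and each build* call creates one generic instruction and returns a MachineInstrBuilder whose result register can feed the next call. A minimal sketch follows, assuming an already initialized builder and two 32-bit virtual registers; the helper name emitSelectMin and the operation it emits are assumptions for illustration, not code from this file:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

// Illustrative helper (hypothetical): emit Min = (A < B) ? A : B into MBB and
// then branch unconditionally to Dest.
static Register emitSelectMin(MachineIRBuilder &MIRBuilder,
                              MachineBasicBlock &MBB, MachineBasicBlock &Dest,
                              Register A, Register B, const DebugLoc &DL) {
  // Point the builder at the end of MBB; subsequent build* calls insert there
  // and are tagged with DL.
  MIRBuilder.setMBB(MBB);
  MIRBuilder.setDebugLoc(DL);

  LLT S1 = LLT::scalar(1);
  LLT S32 = LLT::scalar(32);

  // Cond = G_ICMP slt A, B  (a fresh s1 virtual register is created for Cond).
  auto Cond = MIRBuilder.buildICmp(CmpInst::ICMP_SLT, S1, A, B);
  // Min = G_SELECT Cond, A, B
  auto Min = MIRBuilder.buildSelect(S32, Cond, A, B);
  // G_BR Dest
  MIRBuilder.buildBr(Dest);
  return Min.getReg(0);
}

Note the convention used throughout these methods: passing an LLT as the destination operand asks the builder to create a fresh virtual register of that type, while passing an existing Register reuses it.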