1//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the IRTranslator class.
10//===----------------------------------------------------------------------===//
11
14#include "llvm/ADT/STLExtras.h"
15#include "llvm/ADT/ScopeExit.h"
16#include "llvm/ADT/SmallSet.h"
21#include "llvm/Analysis/Loads.h"
52#include "llvm/IR/BasicBlock.h"
53#include "llvm/IR/CFG.h"
54#include "llvm/IR/Constant.h"
55#include "llvm/IR/Constants.h"
56#include "llvm/IR/DataLayout.h"
59#include "llvm/IR/Function.h"
61#include "llvm/IR/InlineAsm.h"
62#include "llvm/IR/InstrTypes.h"
65#include "llvm/IR/Intrinsics.h"
66#include "llvm/IR/IntrinsicsAMDGPU.h"
67#include "llvm/IR/LLVMContext.h"
68#include "llvm/IR/Metadata.h"
70#include "llvm/IR/Statepoint.h"
71#include "llvm/IR/Type.h"
72#include "llvm/IR/User.h"
73#include "llvm/IR/Value.h"
75#include "llvm/MC/MCContext.h"
76#include "llvm/Pass.h"
79#include "llvm/Support/Debug.h"
87#include <algorithm>
88#include <cassert>
89#include <cstdint>
90#include <iterator>
91#include <optional>
92#include <string>
93#include <utility>
94#include <vector>
95
96#define DEBUG_TYPE "irtranslator"
97
98using namespace llvm;
99
100static cl::opt<bool>
101 EnableCSEInIRTranslator("enable-cse-in-irtranslator",
102 cl::desc("Should enable CSE in irtranslator"),
103 cl::Optional, cl::init(false));
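// Being a standard cl::opt, the flag above can be toggled from the llc
// command line when experimenting with CSE during translation, e.g.:
//   llc -global-isel -enable-cse-in-irtranslator=1 test.ll
// It defaults to off (cl::init(false)).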
104char IRTranslator::ID = 0;
105
106INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
107 false, false)
108INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
109INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
110INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
111INITIALIZE_PASS_DEPENDENCY(StackProtector)
112INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
113INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
114 false, false)
115
116static void reportTranslationError(MachineFunction &MF,
117 const TargetPassConfig &TPC,
118 OptimizationRemarkEmitter &ORE,
119 OptimizationRemarkMissed &R) {
120 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
121
122 // Print the function name explicitly if we don't have a debug location (which
123 // makes the diagnostic less useful) or if we're going to emit a raw error.
124 if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
125 R << (" (in function: " + MF.getName() + ")").str();
126
127 if (TPC.isGlobalISelAbortEnabled())
128 report_fatal_error(Twine(R.getMsg()));
129 else
130 ORE.emit(R);
131}
132
133IRTranslator::IRTranslator(CodeGenOptLevel optlevel)
134 : MachineFunctionPass(ID), OptLevel(optlevel) {}
135
136#ifndef NDEBUG
137namespace {
138/// Verify that every instruction created has the same DILocation as the
139/// instruction being translated.
140class DILocationVerifier : public GISelChangeObserver {
141 const Instruction *CurrInst = nullptr;
142
143public:
144 DILocationVerifier() = default;
145 ~DILocationVerifier() = default;
146
147 const Instruction *getCurrentInst() const { return CurrInst; }
148 void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }
149
150 void erasingInstr(MachineInstr &MI) override {}
151 void changingInstr(MachineInstr &MI) override {}
152 void changedInstr(MachineInstr &MI) override {}
153
154 void createdInstr(MachineInstr &MI) override {
155 assert(getCurrentInst() && "Inserted instruction without a current MI");
156
157 // Only print the check message if we're actually checking it.
158#ifndef NDEBUG
159 LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
160 << " was copied to " << MI);
161#endif
162 // We allow insts in the entry block to have no debug loc because
163 // they could have originated from constants, and we don't want a jumpy
164 // debug experience.
165 assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
166 (MI.getParent()->isEntryBlock() && !MI.getDebugLoc()) ||
167 (MI.isDebugInstr())) &&
168 "Line info was not transferred to all instructions");
169 }
170};
171} // namespace
172#endif // ifndef NDEBUG
173
174
175void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
176 AU.addRequired<StackProtector>();
177 AU.addRequired<TargetPassConfig>();
178 AU.addRequired<GISelCSEAnalysisWrapperPass>();
179 AU.addRequired<AssumptionCacheTracker>();
180 if (OptLevel != CodeGenOptLevel::None) {
181 AU.addRequired<BranchProbabilityInfoWrapperPass>();
182 AU.addRequired<AAResultsWrapperPass>();
183 }
184 AU.addRequired<TargetLibraryInfoWrapperPass>();
185 AU.addPreserved<TargetLibraryInfoWrapperPass>();
186 getSelectionDAGFallbackAnalysisUsage(AU);
187 MachineFunctionPass::getAnalysisUsage(AU);
188}
189
190ArrayRef<Register>
191IRTranslator::allocateVRegs(const Value &Val) {
192 auto VRegsIt = VMap.findVRegs(Val);
193 if (VRegsIt != VMap.vregs_end())
194 return *VRegsIt->second;
195 auto *Regs = VMap.getVRegs(Val);
196 auto *Offsets = VMap.getOffsets(Val);
197 SmallVector<LLT, 4> SplitTys;
198 computeValueLLTs(*DL, *Val.getType(), SplitTys,
199 Offsets->empty() ? Offsets : nullptr);
200 for (unsigned i = 0; i < SplitTys.size(); ++i)
201 Regs->push_back(0);
202 return *Regs;
203}
204
205ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
206 auto VRegsIt = VMap.findVRegs(Val);
207 if (VRegsIt != VMap.vregs_end())
208 return *VRegsIt->second;
209
210 if (Val.getType()->isVoidTy())
211 return *VMap.getVRegs(Val);
212
213 // Create entry for this type.
214 auto *VRegs = VMap.getVRegs(Val);
215 auto *Offsets = VMap.getOffsets(Val);
216
217 if (!Val.getType()->isTokenTy())
218 assert(Val.getType()->isSized() &&
219 "Don't know how to create an empty vreg");
220
221 SmallVector<LLT, 4> SplitTys;
222 computeValueLLTs(*DL, *Val.getType(), SplitTys,
223 Offsets->empty() ? Offsets : nullptr);
224
225 if (!isa<Constant>(Val)) {
226 for (auto Ty : SplitTys)
227 VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
228 return *VRegs;
229 }
230
231 if (Val.getType()->isAggregateType()) {
232 // UndefValue, ConstantAggregateZero
233 auto &C = cast<Constant>(Val);
234 unsigned Idx = 0;
235 while (auto Elt = C.getAggregateElement(Idx++)) {
236 auto EltRegs = getOrCreateVRegs(*Elt);
237 llvm::copy(EltRegs, std::back_inserter(*VRegs));
238 }
239 } else {
240 assert(SplitTys.size() == 1 && "unexpectedly split LLT");
241 VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
242 bool Success = translate(cast<Constant>(Val), VRegs->front());
243 if (!Success) {
244 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
246 &MF->getFunction().getEntryBlock());
247 R << "unable to translate constant: " << ore::NV("Type", Val.getType());
248 reportTranslationError(*MF, *TPC, *ORE, R);
249 return *VRegs;
250 }
251 }
252
253 return *VRegs;
254}
255
256int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
257 auto MapEntry = FrameIndices.find(&AI);
258 if (MapEntry != FrameIndices.end())
259 return MapEntry->second;
260
261 uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
262 uint64_t Size =
263 ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
264
265 // Always allocate at least one byte.
266 Size = std::max<uint64_t>(Size, 1u);
267
268 int &FI = FrameIndices[&AI];
269 FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
270 return FI;
271}
272
273Align IRTranslator::getMemOpAlign(const Instruction &I) {
274 if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
275 return SI->getAlign();
276 if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
277 return LI->getAlign();
278 if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
279 return AI->getAlign();
280 if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
281 return AI->getAlign();
282
283 OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
284 R << "unable to translate memop: " << ore::NV("Opcode", &I);
285 reportTranslationError(*MF, *TPC, *ORE, R);
286 return Align(1);
287}
288
289MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
290 MachineBasicBlock *MBB = FuncInfo.getMBB(&BB);
291 assert(MBB && "BasicBlock was not encountered before");
292 return *MBB;
293}
294
295void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
296 assert(NewPred && "new predecessor must be a real MachineBasicBlock");
297 MachinePreds[Edge].push_back(NewPred);
298}
299
300bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
301 MachineIRBuilder &MIRBuilder) {
302 // Get or create a virtual register for each value.
303 // Unless the value is a Constant => loadimm cst?
304 // or inline constant each time?
305 // Creation of a virtual register needs to have a size.
306 Register Op0 = getOrCreateVReg(*U.getOperand(0));
307 Register Op1 = getOrCreateVReg(*U.getOperand(1));
308 Register Res = getOrCreateVReg(U);
309 uint32_t Flags = 0;
310 if (isa<Instruction>(U)) {
311 const Instruction &I = cast<Instruction>(U);
312 Flags = MachineInstr::copyFlagsFromInstruction(I);
313 }
314
315 MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
316 return true;
317}
318
319bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
320 MachineIRBuilder &MIRBuilder) {
321 Register Op0 = getOrCreateVReg(*U.getOperand(0));
322 Register Res = getOrCreateVReg(U);
323 uint32_t Flags = 0;
324 if (isa<Instruction>(U)) {
325 const Instruction &I = cast<Instruction>(U);
326 Flags = MachineInstr::copyFlagsFromInstruction(I);
327 }
328 MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
329 return true;
330}
331
332bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
333 return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
334}
335
336bool IRTranslator::translateCompare(const User &U,
337 MachineIRBuilder &MIRBuilder) {
338 auto *CI = cast<CmpInst>(&U);
339 Register Op0 = getOrCreateVReg(*U.getOperand(0));
340 Register Op1 = getOrCreateVReg(*U.getOperand(1));
341 Register Res = getOrCreateVReg(U);
342 CmpInst::Predicate Pred = CI->getPredicate();
343 if (CmpInst::isIntPredicate(Pred))
344 MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
345 else if (Pred == CmpInst::FCMP_FALSE)
346 MIRBuilder.buildCopy(
347 Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
348 else if (Pred == CmpInst::FCMP_TRUE)
349 MIRBuilder.buildCopy(
350 Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
351 else {
352 uint32_t Flags = 0;
353 if (CI)
354 Flags = MachineInstr::copyFlagsFromInstruction(*CI);
355 MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);
356 }
357
358 return true;
359}
360
361bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
362 const ReturnInst &RI = cast<ReturnInst>(U);
363 const Value *Ret = RI.getReturnValue();
364 if (Ret && DL->getTypeStoreSize(Ret->getType()).isZero())
365 Ret = nullptr;
366
367 ArrayRef<Register> VRegs;
368 if (Ret)
369 VRegs = getOrCreateVRegs(*Ret);
370
371 Register SwiftErrorVReg = 0;
372 if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
373 SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
374 &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
375 }
376
377 // The target may change the insertion point, but
378 // this is not important as a return is the last instruction
379 // of the block anyway.
380 return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
381}
382
383void IRTranslator::emitBranchForMergedCondition(
384 const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
385 MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
386 BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
387 // If the leaf of the tree is a comparison, merge the condition into
388 // the caseblock.
389 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
390 CmpInst::Predicate Condition;
391 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
392 Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
393 } else {
394 const FCmpInst *FC = cast<FCmpInst>(Cond);
395 Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
396 }
397
398 SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
399 BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
400 CurBuilder->getDebugLoc(), TProb, FProb);
401 SL->SwitchCases.push_back(CB);
402 return;
403 }
404
405 // Create a CaseBlock record representing this branch.
406 CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
407 SwitchCG::CaseBlock CB(
408 Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
409 nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
410 SL->SwitchCases.push_back(CB);
411}
412
413static bool isValInBlock(const Value *V, const BasicBlock *BB) {
414 if (const Instruction *I = dyn_cast<Instruction>(V))
415 return I->getParent() == BB;
416 return true;
417}
418
419void IRTranslator::findMergedConditions(
420 const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
421 MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
422 Instruction::BinaryOps Opc, BranchProbability TProb,
423 BranchProbability FProb, bool InvertCond) {
424 using namespace PatternMatch;
425 assert((Opc == Instruction::And || Opc == Instruction::Or) &&
426 "Expected Opc to be AND/OR");
427 // Skip over a NOT that is not part of the tree and remember to invert the
428 // op and operands at the next level.
429 Value *NotCond;
430 if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
431 isValInBlock(NotCond, CurBB->getBasicBlock())) {
432 findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
433 !InvertCond);
434 return;
435 }
436
437 const Instruction *BOp = dyn_cast<Instruction>(Cond);
438 const Value *BOpOp0, *BOpOp1;
439 // Compute the effective opcode for Cond, taking into account whether it needs
440 // to be inverted, e.g.
441 // and (not (or A, B)), C
442 // gets lowered as
443 // and (and (not A, not B), C)
444 Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
445 if (BOp) {
446 BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
447 ? Instruction::And
448 : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
449 ? Instruction::Or
450 : (Instruction::BinaryOps)0);
451 if (InvertCond) {
452 if (BOpc == Instruction::And)
453 BOpc = Instruction::Or;
454 else if (BOpc == Instruction::Or)
455 BOpc = Instruction::And;
456 }
457 }
458
459 // If this node is not part of the or/and tree, emit it as a branch.
460 // Note that all nodes in the tree should have same opcode.
461 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
462 if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
463 !isValInBlock(BOpOp0, CurBB->getBasicBlock()) ||
464 !isValInBlock(BOpOp1, CurBB->getBasicBlock())) {
465 emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
466 InvertCond);
467 return;
468 }
469
470 // Create TmpBB after CurBB.
471 MachineFunction::iterator BBI(CurBB);
472 MachineBasicBlock *TmpBB =
473 MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
474 CurBB->getParent()->insert(++BBI, TmpBB);
475
476 if (Opc == Instruction::Or) {
477 // Codegen X | Y as:
478 // BB1:
479 // jmp_if_X TBB
480 // jmp TmpBB
481 // TmpBB:
482 // jmp_if_Y TBB
483 // jmp FBB
484 //
485
486 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
487 // The requirement is that
488 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
489 // = TrueProb for original BB.
490 // Assuming the original probabilities are A and B, one choice is to set
491 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
492 // A/(1+B) and 2B/(1+B). This choice assumes that
493 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
494 // Another choice is to assume TrueProb for BB1 equals TrueProb for
495 // TmpBB, but the math is more complicated.
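// Worked example: take TProb = A = 3/4 and FProb = B = 1/4. BB1 gets
// {A/2, A/2 + B} = {3/8, 5/8}; normalizing {A/2, B} = {3/8, 1/4} gives TmpBB
// {3/5, 2/5}. The requirement above holds: 3/8 + 5/8 * 3/5 = 3/8 + 3/8 = 3/4 = A.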
496
497 auto NewTrueProb = TProb / 2;
498 auto NewFalseProb = TProb / 2 + FProb;
499 // Emit the LHS condition.
500 findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
501 NewFalseProb, InvertCond);
502
503 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
504 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
505 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
506 // Emit the RHS condition into TmpBB.
507 findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
508 Probs[1], InvertCond);
509 } else {
510 assert(Opc == Instruction::And && "Unknown merge op!");
511 // Codegen X & Y as:
512 // BB1:
513 // jmp_if_X TmpBB
514 // jmp FBB
515 // TmpBB:
516 // jmp_if_Y TBB
517 // jmp FBB
518 //
519 // This requires creation of TmpBB after CurBB.
520
521 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
522 // The requirement is that
523 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
524 // = FalseProb for original BB.
525 // Assuming the original probabilities are A and B, one choice is to set
526 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
527 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
528 // TrueProb for BB1 * FalseProb for TmpBB.
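// Worked example: with TProb = A = 3/4 and FProb = B = 1/4, BB1 gets
// {A + B/2, B/2} = {7/8, 1/8}; normalizing {A, B/2} = {3/4, 1/8} gives TmpBB
// {6/7, 1/7}, and indeed 1/8 + 7/8 * 1/7 = 1/8 + 1/8 = 1/4 = B.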
529
530 auto NewTrueProb = TProb + FProb / 2;
531 auto NewFalseProb = FProb / 2;
532 // Emit the LHS condition.
533 findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
534 NewFalseProb, InvertCond);
535
536 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
537 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
538 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
539 // Emit the RHS condition into TmpBB.
540 findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
541 Probs[1], InvertCond);
542 }
543}
544
545bool IRTranslator::shouldEmitAsBranches(
546 const std::vector<SwitchCG::CaseBlock> &Cases) {
547 // For multiple cases, it's better to emit as branches.
548 if (Cases.size() != 2)
549 return true;
550
551 // If this is two comparisons of the same values or'd or and'd together, they
552 // will get folded into a single comparison, so don't emit two blocks.
553 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
554 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
555 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
556 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
557 return false;
558 }
559
560 // Handle: (X != null) | (Y != null) --> (X|Y) != 0
561 // Handle: (X == null) & (Y == null) --> (X|Y) == 0
562 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
563 Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
564 isa<Constant>(Cases[0].CmpRHS) &&
565 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
566 if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
567 Cases[0].TrueBB == Cases[1].ThisBB)
568 return false;
569 if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
570 Cases[0].FalseBB == Cases[1].ThisBB)
571 return false;
572 }
573
574 return true;
575}
576
577bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
578 const BranchInst &BrInst = cast<BranchInst>(U);
579 auto &CurMBB = MIRBuilder.getMBB();
580 auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));
581
582 if (BrInst.isUnconditional()) {
583 // If the unconditional target is the layout successor, fallthrough.
584 if (OptLevel == CodeGenOptLevel::None ||
585 !CurMBB.isLayoutSuccessor(Succ0MBB))
586 MIRBuilder.buildBr(*Succ0MBB);
587
588 // Link successors.
589 for (const BasicBlock *Succ : successors(&BrInst))
590 CurMBB.addSuccessor(&getMBB(*Succ));
591 return true;
592 }
593
594 // If this condition is one of the special cases we handle, do special stuff
595 // now.
596 const Value *CondVal = BrInst.getCondition();
597 MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));
598
599 // If this is a series of conditions that are or'd or and'd together, emit
600 // this as a sequence of branches instead of setcc's with and/or operations.
601 // As long as jumps are not expensive (exceptions for multi-use logic ops,
602 // unpredictable branches, and vector extracts because those jumps are likely
603 // expensive for any target), this should improve performance.
604 // For example, instead of something like:
605 // cmp A, B
606 // C = seteq
607 // cmp D, E
608 // F = setle
609 // or C, F
610 // jnz foo
611 // Emit:
612 // cmp A, B
613 // je foo
614 // cmp D, E
615 // jle foo
616 using namespace PatternMatch;
617 const Instruction *CondI = dyn_cast<Instruction>(CondVal);
618 if (!TLI->isJumpExpensive() && CondI && CondI->hasOneUse() &&
619 !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
620 Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
621 Value *Vec;
622 const Value *BOp0, *BOp1;
623 if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
624 Opcode = Instruction::And;
625 else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
626 Opcode = Instruction::Or;
627
628 if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
629 match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
630 findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
631 getEdgeProbability(&CurMBB, Succ0MBB),
632 getEdgeProbability(&CurMBB, Succ1MBB),
633 /*InvertCond=*/false);
634 assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");
635
636 // Allow some cases to be rejected.
637 if (shouldEmitAsBranches(SL->SwitchCases)) {
638 // Emit the branch for this block.
639 emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
640 SL->SwitchCases.erase(SL->SwitchCases.begin());
641 return true;
642 }
643
644 // Okay, we decided not to do this, remove any inserted MBB's and clear
645 // SwitchCases.
646 for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
647 MF->erase(SL->SwitchCases[I].ThisBB);
648
649 SL->SwitchCases.clear();
650 }
651 }
652
653 // Create a CaseBlock record representing this branch.
654 SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
655 ConstantInt::getTrue(MF->getFunction().getContext()),
656 nullptr, Succ0MBB, Succ1MBB, &CurMBB,
657 CurBuilder->getDebugLoc());
658
659 // Use emitSwitchCase to actually insert the fast branch sequence for this
660 // cond branch.
661 emitSwitchCase(CB, &CurMBB, *CurBuilder);
662 return true;
663}
664
665void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
666 MachineBasicBlock *Dst,
667 BranchProbability Prob) {
668 if (!FuncInfo.BPI) {
669 Src->addSuccessorWithoutProb(Dst);
670 return;
671 }
672 if (Prob.isUnknown())
673 Prob = getEdgeProbability(Src, Dst);
674 Src->addSuccessor(Dst, Prob);
675}
676
677BranchProbability
678IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
679 const MachineBasicBlock *Dst) const {
680 const BasicBlock *SrcBB = Src->getBasicBlock();
681 const BasicBlock *DstBB = Dst->getBasicBlock();
682 if (!FuncInfo.BPI) {
683 // If BPI is not available, set the default probability as 1 / N, where N is
684 // the number of successors.
685 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
686 return BranchProbability(1, SuccSize);
687 }
688 return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
689}
690
691bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
692 using namespace SwitchCG;
693 // Extract cases from the switch.
694 const SwitchInst &SI = cast<SwitchInst>(U);
695 BranchProbabilityInfo *BPI = FuncInfo.BPI;
696 CaseClusterVector Clusters;
697 Clusters.reserve(SI.getNumCases());
698 for (const auto &I : SI.cases()) {
699 MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
700 assert(Succ && "Could not find successor mbb in mapping");
701 const ConstantInt *CaseVal = I.getCaseValue();
702 BranchProbability Prob =
703 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
704 : BranchProbability(1, SI.getNumCases() + 1);
705 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
706 }
707
708 MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());
709
710 // Cluster adjacent cases with the same destination. We do this at all
711 // optimization levels because it's cheap to do and will make codegen faster
712 // if there are many clusters.
713 sortAndRangeify(Clusters);
714
715 MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());
716
717 // If there is only the default destination, jump there directly.
718 if (Clusters.empty()) {
719 SwitchMBB->addSuccessor(DefaultMBB);
720 if (DefaultMBB != SwitchMBB->getNextNode())
721 MIB.buildBr(*DefaultMBB);
722 return true;
723 }
724
725 SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB, nullptr, nullptr);
726 SL->findBitTestClusters(Clusters, &SI);
727
728 LLVM_DEBUG({
729 dbgs() << "Case clusters: ";
730 for (const CaseCluster &C : Clusters) {
731 if (C.Kind == CC_JumpTable)
732 dbgs() << "JT:";
733 if (C.Kind == CC_BitTests)
734 dbgs() << "BT:";
735
736 C.Low->getValue().print(dbgs(), true);
737 if (C.Low != C.High) {
738 dbgs() << '-';
739 C.High->getValue().print(dbgs(), true);
740 }
741 dbgs() << ' ';
742 }
743 dbgs() << '\n';
744 });
745
746 assert(!Clusters.empty());
747 SwitchWorkList WorkList;
748 CaseClusterIt First = Clusters.begin();
749 CaseClusterIt Last = Clusters.end() - 1;
750 auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
751 WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
752
753 while (!WorkList.empty()) {
754 SwitchWorkListItem W = WorkList.pop_back_val();
755
756 unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
757 // For optimized builds, lower large range as a balanced binary tree.
758 if (NumClusters > 3 &&
760 !DefaultMBB->getParent()->getFunction().hasMinSize()) {
761 splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB, MIB);
762 continue;
763 }
764
765 if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
766 return false;
767 }
768 return true;
769}
770
771void IRTranslator::splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
772 const SwitchCG::SwitchWorkListItem &W,
773 Value *Cond, MachineBasicBlock *SwitchMBB,
774 MachineIRBuilder &MIB) {
775 using namespace SwitchCG;
776 assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
777 "Clusters not sorted?");
778 assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
779
780 auto [LastLeft, FirstRight, LeftProb, RightProb] =
781 SL->computeSplitWorkItemInfo(W);
782
783 // Use the first element on the right as pivot since we will make less-than
784 // comparisons against it.
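// Illustrative example (hypothetical clusters): for sorted cases
// {0..3} -> A, {10} -> B, {100} -> C with the split landing at the {10}
// cluster, the left work item covers {0..3}, the right one covers {10, 100},
// and the current block branches on Cond < 10 to pick a side.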
785 CaseClusterIt PivotCluster = FirstRight;
786 assert(PivotCluster > W.FirstCluster);
787 assert(PivotCluster <= W.LastCluster);
788
789 CaseClusterIt FirstLeft = W.FirstCluster;
790 CaseClusterIt LastRight = W.LastCluster;
791
792 const ConstantInt *Pivot = PivotCluster->Low;
793
794 // New blocks will be inserted immediately after the current one.
795 MachineFunction::iterator BBI(W.MBB);
796 ++BBI;
797
798 // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
799 // we can branch to its destination directly if it's squeezed exactly in
800 // between the known lower bound and Pivot - 1.
801 MachineBasicBlock *LeftMBB;
802 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
803 FirstLeft->Low == W.GE &&
804 (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
805 LeftMBB = FirstLeft->MBB;
806 } else {
807 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
808 FuncInfo.MF->insert(BBI, LeftMBB);
809 WorkList.push_back(
810 {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
811 }
812
813 // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
814 // single cluster, RHS.Low == Pivot, and we can branch to its destination
815 // directly if RHS.High equals the current upper bound.
816 MachineBasicBlock *RightMBB;
817 if (FirstRight == LastRight && FirstRight->Kind == CC_Range && W.LT &&
818 (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
819 RightMBB = FirstRight->MBB;
820 } else {
821 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
822 FuncInfo.MF->insert(BBI, RightMBB);
823 WorkList.push_back(
824 {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
825 }
826
827 // Create the CaseBlock record that will be used to lower the branch.
828 CaseBlock CB(ICmpInst::Predicate::ICMP_SLT, false, Cond, Pivot, nullptr,
829 LeftMBB, RightMBB, W.MBB, MIB.getDebugLoc(), LeftProb,
830 RightProb);
831
832 if (W.MBB == SwitchMBB)
833 emitSwitchCase(CB, SwitchMBB, MIB);
834 else
835 SL->SwitchCases.push_back(CB);
836}
837
838void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
839 MachineBasicBlock *MBB) {
840 // Emit the code for the jump table
841 assert(JT.Reg != -1U && "Should lower JT Header first!");
842 MachineIRBuilder &MIB = *CurBuilder;
843 MIB.setMBB(*MBB);
844 MIB.setDebugLoc(CurBuilder->getDebugLoc());
845
846 Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
847 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
848
849 auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
850 MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
851}
852
853bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
854 SwitchCG::JumpTableHeader &JTH,
855 MachineBasicBlock *HeaderBB) {
856 MachineIRBuilder MIB(*HeaderBB->getParent());
857 MIB.setMBB(*HeaderBB);
858 MIB.setDebugLoc(CurBuilder->getDebugLoc());
859
860 const Value &SValue = *JTH.SValue;
861 // Subtract the lowest switch case value from the value being switched on.
862 const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
863 Register SwitchOpReg = getOrCreateVReg(SValue);
864 auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
865 auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);
866
867 // This value may be smaller or larger than the target's pointer type, and
868 // therefore require extension or truncation.
869 auto *PtrIRTy = PointerType::getUnqual(SValue.getContext());
870 const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
871 Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);
872
873 JT.Reg = Sub.getReg(0);
874
875 if (JTH.FallthroughUnreachable) {
876 if (JT.MBB != HeaderBB->getNextNode())
877 MIB.buildBr(*JT.MBB);
878 return true;
879 }
880
881 // Emit the range check for the jump table, and branch to the default block
882 // for the switch statement if the value being switched on exceeds the
883 // largest case in the switch.
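// Illustrative example (hypothetical case values): for cases 10..20,
// JTH.First = 10 and JTH.Last = 20, so Cst = 10. Switching on 15 gives
// Sub = 5, which is not UGT 10, so control falls through to the jump table;
// switching on 42 gives Sub = 32 > 10 and branches to the default block.
// Because the compare is unsigned, values below 10 wrap to a large Sub and
// also reach the default block.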
884 auto Cst = getOrCreateVReg(
885 *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
886 Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
887 auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);
888
889 auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);
890
891 // Avoid emitting unnecessary branches to the next block.
892 if (JT.MBB != HeaderBB->getNextNode())
893 BrCond = MIB.buildBr(*JT.MBB);
894 return true;
895}
896
897void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
898 MachineBasicBlock *SwitchBB,
899 MachineIRBuilder &MIB) {
900 Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
901 Register Cond;
902 DebugLoc OldDbgLoc = MIB.getDebugLoc();
903 MIB.setDebugLoc(CB.DbgLoc);
904 MIB.setMBB(*CB.ThisBB);
905
906 if (CB.PredInfo.NoCmp) {
907 // Branch or fall through to TrueBB.
908 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
909 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
910 CB.ThisBB);
911 CB.ThisBB->normalizeSuccProbs();
912 if (CB.TrueBB != CB.ThisBB->getNextNode())
913 MIB.buildBr(*CB.TrueBB);
914 MIB.setDebugLoc(OldDbgLoc);
915 return;
916 }
917
918 const LLT i1Ty = LLT::scalar(1);
919 // Build the compare.
920 if (!CB.CmpMHS) {
921 const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
922 // For conditional branch lowering, we might try to do something silly like
923 // emit an G_ICMP to compare an existing G_ICMP i1 result with true. If so,
924 // just re-use the existing condition vreg.
925 if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && CI->isOne() &&
926 CB.PredInfo.Pred == CmpInst::ICMP_EQ)
927 Cond = CondLHS;
928 } else {
929 Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
930 if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
931 Cond =
932 MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
933 else
934 Cond =
935 MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
936 }
937 } else {
939 "Can only handle SLE ranges");
940
941 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
942 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
943
944 Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
945 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
946 Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
947 Cond =
948 MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
949 } else {
950 const LLT CmpTy = MRI->getType(CmpOpReg);
951 auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
952 auto Diff = MIB.buildConstant(CmpTy, High - Low);
953 Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
954 }
955 }
956
957 // Update successor info
958 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
959
960 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
961 CB.ThisBB);
962
963 // TrueBB and FalseBB are always different unless the incoming IR is
964 // degenerate. This only happens when running llc on weird IR.
965 if (CB.TrueBB != CB.FalseBB)
966 addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
967 CB.ThisBB->normalizeSuccProbs();
968
969 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
970 CB.ThisBB);
971
972 MIB.buildBrCond(Cond, *CB.TrueBB);
973 MIB.buildBr(*CB.FalseBB);
974 MIB.setDebugLoc(OldDbgLoc);
975}
976
977bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
978 MachineBasicBlock *SwitchMBB,
979 MachineBasicBlock *CurMBB,
980 MachineBasicBlock *DefaultMBB,
981 MachineIRBuilder &MIB,
982 MachineFunction::iterator BBI,
983 BranchProbability UnhandledProbs,
984 SwitchCG::CaseClusterIt I,
985 MachineBasicBlock *Fallthrough,
986 bool FallthroughUnreachable) {
987 using namespace SwitchCG;
988 MachineFunction *CurMF = SwitchMBB->getParent();
989 // FIXME: Optimize away range check based on pivot comparisons.
990 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
991 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
992 BranchProbability DefaultProb = W.DefaultProb;
993
994 // The jump block hasn't been inserted yet; insert it here.
995 MachineBasicBlock *JumpMBB = JT->MBB;
996 CurMF->insert(BBI, JumpMBB);
997
998 // Since the jump table block is separate from the switch block, we need
999 // to keep track of it as a machine predecessor to the default block,
1000 // otherwise we lose the phi edges.
1001 addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
1002 CurMBB);
1003 addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
1004 JumpMBB);
1005
1006 auto JumpProb = I->Prob;
1007 auto FallthroughProb = UnhandledProbs;
1008
1009 // If the default statement is a target of the jump table, we evenly
1010 // distribute the default probability to successors of CurMBB. Also
1011 // update the probability on the edge from JumpMBB to Fallthrough.
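// Illustrative numbers: with DefaultProb = 1/4, the JumpMBB -> default edge
// is set to 1/8, the CurMBB -> JumpMBB probability grows by 1/8, and the
// CurMBB -> Fallthrough probability shrinks by the same 1/8.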
1012 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
1013 SE = JumpMBB->succ_end();
1014 SI != SE; ++SI) {
1015 if (*SI == DefaultMBB) {
1016 JumpProb += DefaultProb / 2;
1017 FallthroughProb -= DefaultProb / 2;
1018 JumpMBB->setSuccProbability(SI, DefaultProb / 2);
1019 JumpMBB->normalizeSuccProbs();
1020 } else {
1021 // Also record edges from the jump table block to its successors.
1022 addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
1023 JumpMBB);
1024 }
1025 }
1026
1027 if (FallthroughUnreachable)
1028 JTH->FallthroughUnreachable = true;
1029
1030 if (!JTH->FallthroughUnreachable)
1031 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
1032 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
1033 CurMBB->normalizeSuccProbs();
1034
1035 // The jump table header will be inserted in our current block, do the
1036 // range check, and fall through to our fallthrough block.
1037 JTH->HeaderBB = CurMBB;
1038 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
1039
1040 // If we're in the right place, emit the jump table header right now.
1041 if (CurMBB == SwitchMBB) {
1042 if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
1043 return false;
1044 JTH->Emitted = true;
1045 }
1046 return true;
1047}
1048bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
1049 Value *Cond,
1050 MachineBasicBlock *Fallthrough,
1051 bool FallthroughUnreachable,
1052 BranchProbability UnhandledProbs,
1053 MachineBasicBlock *CurMBB,
1054 MachineIRBuilder &MIB,
1055 MachineBasicBlock *SwitchMBB) {
1056 using namespace SwitchCG;
1057 const Value *RHS, *LHS, *MHS;
1058 CmpInst::Predicate Pred;
1059 if (I->Low == I->High) {
1060 // Check Cond == I->Low.
1061 Pred = CmpInst::ICMP_EQ;
1062 LHS = Cond;
1063 RHS = I->Low;
1064 MHS = nullptr;
1065 } else {
1066 // Check I->Low <= Cond <= I->High.
1067 Pred = CmpInst::ICMP_SLE;
1068 LHS = I->Low;
1069 MHS = Cond;
1070 RHS = I->High;
1071 }
1072
1073 // If Fallthrough is unreachable, fold away the comparison.
1074 // The false probability is the sum of all unhandled cases.
1075 CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
1076 CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);
1077
1078 emitSwitchCase(CB, SwitchMBB, MIB);
1079 return true;
1080}
1081
1082void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
1083 MachineBasicBlock *SwitchBB) {
1084 MachineIRBuilder &MIB = *CurBuilder;
1085 MIB.setMBB(*SwitchBB);
1086
1087 // Subtract the minimum value.
1088 Register SwitchOpReg = getOrCreateVReg(*B.SValue);
1089
1090 LLT SwitchOpTy = MRI->getType(SwitchOpReg);
1091 Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
1092 auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);
1093
1094 Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
1095 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1096
1097 LLT MaskTy = SwitchOpTy;
1098 if (MaskTy.getSizeInBits() > PtrTy.getSizeInBits() ||
1099 !llvm::has_single_bit<uint32_t>(MaskTy.getSizeInBits()))
1100 MaskTy = LLT::scalar(PtrTy.getSizeInBits());
1101 else {
1102 // Ensure that the type will fit the mask value.
1103 for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
1104 if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
1105 // Switch table case ranges are encoded into a series of masks.
1106 // Just use pointer type, it's guaranteed to fit.
1107 MaskTy = LLT::scalar(PtrTy.getSizeInBits());
1108 break;
1109 }
1110 }
1111 }
1112 Register SubReg = RangeSub.getReg(0);
1113 if (SwitchOpTy != MaskTy)
1114 SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);
1115
1116 B.RegVT = getMVTForLLT(MaskTy);
1117 B.Reg = SubReg;
1118
1119 MachineBasicBlock *MBB = B.Cases[0].ThisBB;
1120
1121 if (!B.FallthroughUnreachable)
1122 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
1123 addSuccessorWithProb(SwitchBB, MBB, B.Prob);
1124
1125 SwitchBB->normalizeSuccProbs();
1126
1127 if (!B.FallthroughUnreachable) {
1128 // Conditional branch to the default block.
1129 auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
1130 auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
1131 RangeSub, RangeCst);
1132 MIB.buildBrCond(RangeCmp, *B.Default);
1133 }
1134
1135 // Avoid emitting unnecessary branches to the next block.
1136 if (MBB != SwitchBB->getNextNode())
1137 MIB.buildBr(*MBB);
1138}
1139
1140void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
1141 MachineBasicBlock *NextMBB,
1142 BranchProbability BranchProbToNext,
1143 Register Reg, SwitchCG::BitTestCase &B,
1144 MachineBasicBlock *SwitchBB) {
1145 MachineIRBuilder &MIB = *CurBuilder;
1146 MIB.setMBB(*SwitchBB);
1147
1148 LLT SwitchTy = getLLTForMVT(BB.RegVT);
1149 Register Cmp;
1150 unsigned PopCount = llvm::popcount(B.Mask);
1151 if (PopCount == 1) {
1152 // Testing for a single bit; just compare the shift count with what it
1153 // would need to be to shift a 1 bit in that position.
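// Illustrative example (hypothetical mask): if B.Mask is 0b00100000, then
// countr_zero(B.Mask) is 5, so instead of materializing (1 << Reg) & Mask
// this path emits the cheaper compare Reg == 5.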
1154 auto MaskTrailingZeros =
1155 MIB.buildConstant(SwitchTy, llvm::countr_zero(B.Mask));
1156 Cmp =
1157 MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
1158 .getReg(0);
1159 } else if (PopCount == BB.Range) {
1160 // There is only one zero bit in the range, test for it directly.
1161 auto MaskTrailingOnes =
1162 MIB.buildConstant(SwitchTy, llvm::countr_one(B.Mask));
1163 Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
1164 .getReg(0);
1165 } else {
1166 // Make desired shift.
1167 auto CstOne = MIB.buildConstant(SwitchTy, 1);
1168 auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);
1169
1170 // Emit bit tests and jumps.
1171 auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
1172 auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
1173 auto CstZero = MIB.buildConstant(SwitchTy, 0);
1174 Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
1175 .getReg(0);
1176 }
1177
1178 // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
1179 addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
1180 // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
1181 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
1182 // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
1183 // one as they are relative probabilities (and thus work more like weights),
1184 // and hence we need to normalize them to let the sum of them become one.
1185 SwitchBB->normalizeSuccProbs();
1186
1187 // Record the fact that the IR edge from the header to the bit test target
1188 // will go through our new block. Needed for PHIs to have nodes added.
1189 addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
1190 SwitchBB);
1191
1192 MIB.buildBrCond(Cmp, *B.TargetBB);
1193
1194 // Avoid emitting unnecessary branches to the next block.
1195 if (NextMBB != SwitchBB->getNextNode())
1196 MIB.buildBr(*NextMBB);
1197}
1198
1199bool IRTranslator::lowerBitTestWorkItem(
1200 SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
1201 MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
1202 MachineIRBuilder &MIB, MachineFunction::iterator BBI,
1203 BranchProbability DefaultProb, BranchProbability UnhandledProbs,
1204 SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
1205 bool FallthroughUnreachable) {
1206 using namespace SwitchCG;
1207 MachineFunction *CurMF = SwitchMBB->getParent();
1208 // FIXME: Optimize away range check based on pivot comparisons.
1209 BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
1210 // The bit test blocks haven't been inserted yet; insert them here.
1211 for (BitTestCase &BTC : BTB->Cases)
1212 CurMF->insert(BBI, BTC.ThisBB);
1213
1214 // Fill in fields of the BitTestBlock.
1215 BTB->Parent = CurMBB;
1216 BTB->Default = Fallthrough;
1217
1218 BTB->DefaultProb = UnhandledProbs;
1219 // If the cases in bit test don't form a contiguous range, we evenly
1220 // distribute the probability on the edge to Fallthrough to two
1221 // successors of CurMBB.
1222 if (!BTB->ContiguousRange) {
1223 BTB->Prob += DefaultProb / 2;
1224 BTB->DefaultProb -= DefaultProb / 2;
1225 }
1226
1227 if (FallthroughUnreachable)
1228 BTB->FallthroughUnreachable = true;
1229
1230 // If we're in the right place, emit the bit test header right now.
1231 if (CurMBB == SwitchMBB) {
1232 emitBitTestHeader(*BTB, SwitchMBB);
1233 BTB->Emitted = true;
1234 }
1235 return true;
1236}
1237
1238bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
1239 Value *Cond,
1240 MachineBasicBlock *SwitchMBB,
1241 MachineBasicBlock *DefaultMBB,
1242 MachineIRBuilder &MIB) {
1243 using namespace SwitchCG;
1244 MachineFunction *CurMF = FuncInfo.MF;
1245 MachineBasicBlock *NextMBB = nullptr;
1246 MachineFunction::iterator BBI(W.MBB);
1247 if (++BBI != FuncInfo.MF->end())
1248 NextMBB = &*BBI;
1249
1250 if (EnableOpts) {
1251 // Here, we order cases by probability so the most likely case will be
1252 // checked first. However, two clusters can have the same probability in
1253 // which case their relative ordering is non-deterministic. So we use Low
1254 // as a tie-breaker as clusters are guaranteed to never overlap.
1255 llvm::sort(W.FirstCluster, W.LastCluster + 1,
1256 [](const CaseCluster &a, const CaseCluster &b) {
1257 return a.Prob != b.Prob
1258 ? a.Prob > b.Prob
1259 : a.Low->getValue().slt(b.Low->getValue());
1260 });
1261
1262 // Rearrange the case blocks so that the last one falls through if possible
1263 // without changing the order of probabilities.
1264 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
1265 --I;
1266 if (I->Prob > W.LastCluster->Prob)
1267 break;
1268 if (I->Kind == CC_Range && I->MBB == NextMBB) {
1269 std::swap(*I, *W.LastCluster);
1270 break;
1271 }
1272 }
1273 }
1274
1275 // Compute total probability.
1276 BranchProbability DefaultProb = W.DefaultProb;
1277 BranchProbability UnhandledProbs = DefaultProb;
1278 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
1279 UnhandledProbs += I->Prob;
1280
1281 MachineBasicBlock *CurMBB = W.MBB;
1282 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
1283 bool FallthroughUnreachable = false;
1284 MachineBasicBlock *Fallthrough;
1285 if (I == W.LastCluster) {
1286 // For the last cluster, fall through to the default destination.
1287 Fallthrough = DefaultMBB;
1288 FallthroughUnreachable = isa<UnreachableInst>(
1289 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
1290 } else {
1291 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
1292 CurMF->insert(BBI, Fallthrough);
1293 }
1294 UnhandledProbs -= I->Prob;
1295
1296 switch (I->Kind) {
1297 case CC_BitTests: {
1298 if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1299 DefaultProb, UnhandledProbs, I, Fallthrough,
1300 FallthroughUnreachable)) {
1301 LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");
1302 return false;
1303 }
1304 break;
1305 }
1306
1307 case CC_JumpTable: {
1308 if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1309 UnhandledProbs, I, Fallthrough,
1310 FallthroughUnreachable)) {
1311 LLVM_DEBUG(dbgs() << "Failed to lower jump table");
1312 return false;
1313 }
1314 break;
1315 }
1316 case CC_Range: {
1317 if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
1318 FallthroughUnreachable, UnhandledProbs,
1319 CurMBB, MIB, SwitchMBB)) {
1320 LLVM_DEBUG(dbgs() << "Failed to lower switch range");
1321 return false;
1322 }
1323 break;
1324 }
1325 }
1326 CurMBB = Fallthrough;
1327 }
1328
1329 return true;
1330}
1331
1332bool IRTranslator::translateIndirectBr(const User &U,
1333 MachineIRBuilder &MIRBuilder) {
1334 const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
1335
1336 const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
1337 MIRBuilder.buildBrIndirect(Tgt);
1338
1339 // Link successors.
1340 SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
1341 MachineBasicBlock &CurBB = MIRBuilder.getMBB();
1342 for (const BasicBlock *Succ : successors(&BrInst)) {
1343 // It's legal for indirectbr instructions to have duplicate blocks in the
1344 // destination list. We don't allow this in MIR. Skip anything that's
1345 // already a successor.
1346 if (!AddedSuccessors.insert(Succ).second)
1347 continue;
1348 CurBB.addSuccessor(&getMBB(*Succ));
1349 }
1350
1351 return true;
1352}
1353
1354static bool isSwiftError(const Value *V) {
1355 if (auto Arg = dyn_cast<Argument>(V))
1356 return Arg->hasSwiftErrorAttr();
1357 if (auto AI = dyn_cast<AllocaInst>(V))
1358 return AI->isSwiftError();
1359 return false;
1360}
1361
1362bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
1363 const LoadInst &LI = cast<LoadInst>(U);
1364 TypeSize StoreSize = DL->getTypeStoreSize(LI.getType());
1365 if (StoreSize.isZero())
1366 return true;
1367
1368 ArrayRef<Register> Regs = getOrCreateVRegs(LI);
1369 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
1370 Register Base = getOrCreateVReg(*LI.getPointerOperand());
1371 AAMDNodes AAInfo = LI.getAAMetadata();
1372
1373 const Value *Ptr = LI.getPointerOperand();
1374 Type *OffsetIRTy = DL->getIndexType(Ptr->getType());
1375 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1376
1377 if (CLI->supportSwiftError() && isSwiftError(Ptr)) {
1378 assert(Regs.size() == 1 && "swifterror should be single pointer");
1379 Register VReg =
1380 SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), Ptr);
1381 MIRBuilder.buildCopy(Regs[0], VReg);
1382 return true;
1383 }
1384
1385 MachineMemOperand::Flags Flags =
1386 TLI->getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
1387 if (AA && !(Flags & MachineMemOperand::MOInvariant)) {
1388 if (AA->pointsToConstantMemory(
1389 MemoryLocation(Ptr, LocationSize::precise(StoreSize), AAInfo))) {
1391 }
1392 }
1393
1394 const MDNode *Ranges =
1395 Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
1396 for (unsigned i = 0; i < Regs.size(); ++i) {
1397 Register Addr;
1398 MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
1399
1400 MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
1401 Align BaseAlign = getMemOpAlign(LI);
1402 auto MMO = MF->getMachineMemOperand(
1403 Ptr, Flags, MRI->getType(Regs[i]),
1404 commonAlignment(BaseAlign, Offsets[i] / 8), AAInfo, Ranges,
1405 LI.getSyncScopeID(), LI.getOrdering());
1406 MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
1407 }
1408
1409 return true;
1410}
1411
1412bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
1413 const StoreInst &SI = cast<StoreInst>(U);
1414 if (DL->getTypeStoreSize(SI.getValueOperand()->getType()).isZero())
1415 return true;
1416
1417 ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
1418 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
1419 Register Base = getOrCreateVReg(*SI.getPointerOperand());
1420
1421 Type *OffsetIRTy = DL->getIndexType(SI.getPointerOperandType());
1422 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1423
1424 if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
1425 assert(Vals.size() == 1 && "swifterror should be single pointer");
1426
1427 Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
1428 SI.getPointerOperand());
1429 MIRBuilder.buildCopy(VReg, Vals[0]);
1430 return true;
1431 }
1432
1433 MachineMemOperand::Flags Flags = TLI->getStoreMemOperandFlags(SI, *DL);
1434
1435 for (unsigned i = 0; i < Vals.size(); ++i) {
1436 Register Addr;
1437 MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
1438
1439 MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
1440 Align BaseAlign = getMemOpAlign(SI);
1441 auto MMO = MF->getMachineMemOperand(
1442 Ptr, Flags, MRI->getType(Vals[i]),
1443 commonAlignment(BaseAlign, Offsets[i] / 8), SI.getAAMetadata(), nullptr,
1444 SI.getSyncScopeID(), SI.getOrdering());
1445 MIRBuilder.buildStore(Vals[i], Addr, *MMO);
1446 }
1447 return true;
1448}
1449
1450static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
1451 const Value *Src = U.getOperand(0);
1452 Type *Int32Ty = Type::getInt32Ty(U.getContext());
1453
1454 // getIndexedOffsetInType is designed for GEPs, so the first index is the
1455 // usual array element rather than looking into the actual aggregate.
1456 SmallVector<Value *, 1> Indices;
1457 Indices.push_back(ConstantInt::get(Int32Ty, 0));
1458
1459 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
1460 for (auto Idx : EVI->indices())
1461 Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1462 } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
1463 for (auto Idx : IVI->indices())
1464 Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1465 } else {
1466 for (Value *Op : drop_begin(U.operands()))
1467 Indices.push_back(Op);
1468 }
1469
1470 return 8 * static_cast<uint64_t>(
1471 DL.getIndexedOffsetInType(Src->getType(), Indices));
1472}
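// Worked example (hypothetical type, assuming a typical 64-bit DataLayout):
// for extractvalue {i32, {i64, i8}} %agg, 1, 0 the indices become [0, 1, 0];
// element 1 of the outer struct starts at byte 8 because of i64 alignment, so
// getIndexedOffsetInType returns 8 and this helper returns 64 (bits).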
1473
1474bool IRTranslator::translateExtractValue(const User &U,
1475 MachineIRBuilder &MIRBuilder) {
1476 const Value *Src = U.getOperand(0);
1477 uint64_t Offset = getOffsetFromIndices(U, *DL);
1478 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1479 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
1480 unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
1481 auto &DstRegs = allocateVRegs(U);
1482
1483 for (unsigned i = 0; i < DstRegs.size(); ++i)
1484 DstRegs[i] = SrcRegs[Idx++];
1485
1486 return true;
1487}
1488
1489bool IRTranslator::translateInsertValue(const User &U,
1490 MachineIRBuilder &MIRBuilder) {
1491 const Value *Src = U.getOperand(0);
1492 uint64_t Offset = getOffsetFromIndices(U, *DL);
1493 auto &DstRegs = allocateVRegs(U);
1494 ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
1495 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1496 ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
1497 auto *InsertedIt = InsertedRegs.begin();
1498
1499 for (unsigned i = 0; i < DstRegs.size(); ++i) {
1500 if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
1501 DstRegs[i] = *InsertedIt++;
1502 else
1503 DstRegs[i] = SrcRegs[i];
1504 }
1505
1506 return true;
1507}
1508
1509bool IRTranslator::translateSelect(const User &U,
1510 MachineIRBuilder &MIRBuilder) {
1511 Register Tst = getOrCreateVReg(*U.getOperand(0));
1512 ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
1513 ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
1514 ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
1515
1516 uint32_t Flags = 0;
1517 if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
1518 Flags = MachineInstr::copyFlagsFromInstruction(*SI);
1519
1520 for (unsigned i = 0; i < ResRegs.size(); ++i) {
1521 MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
1522 }
1523
1524 return true;
1525}
1526
1527bool IRTranslator::translateCopy(const User &U, const Value &V,
1528 MachineIRBuilder &MIRBuilder) {
1529 Register Src = getOrCreateVReg(V);
1530 auto &Regs = *VMap.getVRegs(U);
1531 if (Regs.empty()) {
1532 Regs.push_back(Src);
1533 VMap.getOffsets(U)->push_back(0);
1534 } else {
1535 // If we already assigned a vreg for this instruction, we can't change that.
1536 // Emit a copy to satisfy the users we already emitted.
1537 MIRBuilder.buildCopy(Regs[0], Src);
1538 }
1539 return true;
1540}
1541
1542bool IRTranslator::translateBitCast(const User &U,
1543 MachineIRBuilder &MIRBuilder) {
1544 // If we're bitcasting to the source type, we can reuse the source vreg.
1545 if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
1546 getLLTForType(*U.getType(), *DL)) {
1547 // If the source is a ConstantInt then it was probably created by
1548 // ConstantHoisting and we should leave it alone.
1549 if (isa<ConstantInt>(U.getOperand(0)))
1550 return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
1551 MIRBuilder);
1552 return translateCopy(U, *U.getOperand(0), MIRBuilder);
1553 }
1554
1555 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
1556}
1557
1558bool IRTranslator::translateCast(unsigned Opcode, const User &U,
1559 MachineIRBuilder &MIRBuilder) {
1560 if (U.getType()->getScalarType()->isBFloatTy() ||
1561 U.getOperand(0)->getType()->getScalarType()->isBFloatTy())
1562 return false;
1563
1564 uint32_t Flags = 0;
1565 if (const Instruction *I = dyn_cast<Instruction>(&U))
1566 Flags = MachineInstr::copyFlagsFromInstruction(*I);
1567
1568 Register Op = getOrCreateVReg(*U.getOperand(0));
1569 Register Res = getOrCreateVReg(U);
1570 MIRBuilder.buildInstr(Opcode, {Res}, {Op}, Flags);
1571 return true;
1572}
1573
1574bool IRTranslator::translateGetElementPtr(const User &U,
1575 MachineIRBuilder &MIRBuilder) {
1576 Value &Op0 = *U.getOperand(0);
1577 Register BaseReg = getOrCreateVReg(Op0);
1578 Type *PtrIRTy = Op0.getType();
1579 LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1580 Type *OffsetIRTy = DL->getIndexType(PtrIRTy);
1581 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1582
1583 uint32_t Flags = 0;
1584 if (const Instruction *I = dyn_cast<Instruction>(&U))
1585 Flags = MachineInstr::copyFlagsFromInstruction(*I);
1586
1587 // Normalize Vector GEP - all scalar operands should be converted to the
1588 // splat vector.
1589 unsigned VectorWidth = 0;
1590
1591 // True if we should use a splat vector; using VectorWidth alone is not
1592 // sufficient.
1593 bool WantSplatVector = false;
1594 if (auto *VT = dyn_cast<VectorType>(U.getType())) {
1595 VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
1596 // We don't produce 1 x N vectors; those are treated as scalars.
1597 WantSplatVector = VectorWidth > 1;
1598 }
1599
1600 // We might need to splat the base pointer into a vector if the offsets
1601 // are vectors.
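// Illustrative example (hypothetical IR): for
//   getelementptr i32, ptr %base, <4 x i64> %offs
// the scalar %base is splatted to a <4 x ptr> build_vector so that the
// per-lane pointer arithmetic below proceeds uniformly on vectors.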
1602 if (WantSplatVector && !PtrTy.isVector()) {
1603 BaseReg = MIRBuilder
1604 .buildSplatBuildVector(LLT::fixed_vector(VectorWidth, PtrTy),
1605 BaseReg)
1606 .getReg(0);
1607 PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
1608 PtrTy = getLLTForType(*PtrIRTy, *DL);
1609 OffsetIRTy = DL->getIndexType(PtrIRTy);
1610 OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1611 }
1612
1613 int64_t Offset = 0;
1614 for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
1615 GTI != E; ++GTI) {
1616 const Value *Idx = GTI.getOperand();
1617 if (StructType *StTy = GTI.getStructTypeOrNull()) {
1618 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
1619 Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
1620 continue;
1621 } else {
1622 uint64_t ElementSize = GTI.getSequentialElementStride(*DL);
1623
1624 // If this is a scalar constant or a splat vector of constants,
1625 // handle it quickly.
1626 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
1627 if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {
1628 Offset += ElementSize * *Val;
1629 continue;
1630 }
1631 }
1632
1633 if (Offset != 0) {
1634 auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
1635 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
1636 .getReg(0);
1637 Offset = 0;
1638 }
1639
1640 Register IdxReg = getOrCreateVReg(*Idx);
1641 LLT IdxTy = MRI->getType(IdxReg);
1642 if (IdxTy != OffsetTy) {
1643 if (!IdxTy.isVector() && WantSplatVector) {
1644 IdxReg = MIRBuilder
1646 IdxReg)
1647 .getReg(0);
1648 }
1649
1650 IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
1651 }
1652
1653 // N = N + Idx * ElementSize;
1654 // Avoid doing it for ElementSize of 1.
1655 Register GepOffsetReg;
1656 if (ElementSize != 1) {
1657 auto ElementSizeMIB = MIRBuilder.buildConstant(
1658 getLLTForType(*OffsetIRTy, *DL), ElementSize);
1659 GepOffsetReg =
1660 MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
1661 } else
1662 GepOffsetReg = IdxReg;
1663
1664 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
1665 }
1666 }
1667
1668 if (Offset != 0) {
1669 auto OffsetMIB =
1670 MIRBuilder.buildConstant(OffsetTy, Offset);
1671
1672 if (int64_t(Offset) >= 0 && cast<GEPOperator>(U).isInBounds())
1673 Flags |= MachineInstr::MIFlag::NoUWrap;
1674
1675 MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),
1676 Flags);
1677 return true;
1678 }
1679
1680 MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
1681 return true;
1682}
1683
1684bool IRTranslator::translateMemFunc(const CallInst &CI,
1685 MachineIRBuilder &MIRBuilder,
1686 unsigned Opcode) {
1687 const Value *SrcPtr = CI.getArgOperand(1);
1688 // If the source is undef, then just emit a nop.
1689 if (isa<UndefValue>(SrcPtr))
1690 return true;
1691
1692 SmallVector<Register, 3> SrcRegs;
1693
1694 unsigned MinPtrSize = UINT_MAX;
1695 for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
1696 Register SrcReg = getOrCreateVReg(**AI);
1697 LLT SrcTy = MRI->getType(SrcReg);
1698 if (SrcTy.isPointer())
1699 MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);
1700 SrcRegs.push_back(SrcReg);
1701 }
1702
1703 LLT SizeTy = LLT::scalar(MinPtrSize);
1704
1705 // The size operand should be the minimum of the pointer sizes.
1706 Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
1707 if (MRI->getType(SizeOpReg) != SizeTy)
1708 SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0);
1709
1710 auto ICall = MIRBuilder.buildInstr(Opcode);
1711 for (Register SrcReg : SrcRegs)
1712 ICall.addUse(SrcReg);
1713
1714 Align DstAlign;
1715 Align SrcAlign;
1716 unsigned IsVol =
1717 cast<ConstantInt>(CI.getArgOperand(CI.arg_size() - 1))->getZExtValue();
1718
1719 ConstantInt *CopySize = nullptr;
1720
1721 if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
1722 DstAlign = MCI->getDestAlign().valueOrOne();
1723 SrcAlign = MCI->getSourceAlign().valueOrOne();
1724 CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
1725 } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
1726 DstAlign = MCI->getDestAlign().valueOrOne();
1727 SrcAlign = MCI->getSourceAlign().valueOrOne();
1728 CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
1729 } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
1730 DstAlign = MMI->getDestAlign().valueOrOne();
1731 SrcAlign = MMI->getSourceAlign().valueOrOne();
1732 CopySize = dyn_cast<ConstantInt>(MMI->getArgOperand(2));
1733 } else {
1734 auto *MSI = cast<MemSetInst>(&CI);
1735 DstAlign = MSI->getDestAlign().valueOrOne();
1736 }
1737
1738 if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
1739 // We need to propagate the tail call flag from the IR inst as an argument.
1740 // Otherwise, we have to pessimize and assume later that we cannot tail call
1741 // any memory intrinsics.
1742 ICall.addImm(CI.isTailCall() ? 1 : 0);
1743 }
1744
1745 // Create mem operands to store the alignment and volatile info.
1746   MachineMemOperand::Flags LoadFlags = MachineMemOperand::MOLoad;
1747   MachineMemOperand::Flags StoreFlags = MachineMemOperand::MOStore;
1748   if (IsVol) {
1749 LoadFlags |= MachineMemOperand::MOVolatile;
1750 StoreFlags |= MachineMemOperand::MOVolatile;
1751 }
1752
1753 AAMDNodes AAInfo = CI.getAAMetadata();
1754 if (AA && CopySize &&
1755       AA->pointsToConstantMemory(MemoryLocation(
1756           SrcPtr, LocationSize::precise(CopySize->getZExtValue()), AAInfo))) {
1757 LoadFlags |= MachineMemOperand::MOInvariant;
1758
1759 // FIXME: pointsToConstantMemory probably does not imply dereferenceable,
1760 // but the previous usage implied it did. Probably should check
1761 // isDereferenceableAndAlignedPointer.
1762     LoadFlags |= MachineMemOperand::MODereferenceable;
1763   }
1764
1765 ICall.addMemOperand(
1766       MF->getMachineMemOperand(MachinePointerInfo(CI.getArgOperand(0)),
1767                                StoreFlags, 1, DstAlign, AAInfo));
1768 if (Opcode != TargetOpcode::G_MEMSET)
1769 ICall.addMemOperand(MF->getMachineMemOperand(
1770 MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));
1771
1772 return true;
1773}
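// Rough example (assumed IR, not taken from this file): a call like
//   call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %n, i1 false)
// is expected to become something along the lines of
//   G_MEMCPY %dst(p0), %src(p0), %n(s64), 0
//       :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
// where the trailing 0 is the tail-call immediate added above and the two
// memory operands record the destination/source alignment and volatility.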
1774
1775bool IRTranslator::translateTrap(const CallInst &CI,
1776 MachineIRBuilder &MIRBuilder,
1777 unsigned Opcode) {
1778 StringRef TrapFuncName =
1779 CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
1780 if (TrapFuncName.empty()) {
1781 if (Opcode == TargetOpcode::G_UBSANTRAP) {
1782 uint64_t Code = cast<ConstantInt>(CI.getOperand(0))->getZExtValue();
1783 MIRBuilder.buildInstr(Opcode, {}, ArrayRef<llvm::SrcOp>{Code});
1784 } else {
1785 MIRBuilder.buildInstr(Opcode);
1786 }
1787 return true;
1788 }
1789
1790   CallLowering::CallLoweringInfo Info;
1791   if (Opcode == TargetOpcode::G_UBSANTRAP)
1792 Info.OrigArgs.push_back({getOrCreateVRegs(*CI.getArgOperand(0)),
1793 CI.getArgOperand(0)->getType(), 0});
1794
1795 Info.Callee = MachineOperand::CreateES(TrapFuncName.data());
1796 Info.CB = &CI;
1797 Info.OrigRet = {Register(), Type::getVoidTy(CI.getContext()), 0};
1798 return CLI->lowerCall(MIRBuilder, Info);
1799}
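// Sketch of the two paths above (hypothetical IR and attribute value): with no
// "trap-func-name" attribute, `call void @llvm.ubsantrap(i8 42)` should lower
// directly to
//   G_UBSANTRAP 42
// whereas with "trap-func-name"="__my_trap" the same call is lowered as an
// ordinary call to __my_trap through CLI->lowerCall, passing the check code
// as the only argument.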
1800
1801bool IRTranslator::translateVectorInterleave2Intrinsic(
1802 const CallInst &CI, MachineIRBuilder &MIRBuilder) {
1803 assert(CI.getIntrinsicID() == Intrinsic::vector_interleave2 &&
1804 "This function can only be called on the interleave2 intrinsic!");
1805 // Canonicalize interleave2 to G_SHUFFLE_VECTOR (similar to SelectionDAG).
1806 Register Op0 = getOrCreateVReg(*CI.getOperand(0));
1807 Register Op1 = getOrCreateVReg(*CI.getOperand(1));
1808 Register Res = getOrCreateVReg(CI);
1809
1810 LLT OpTy = MRI->getType(Op0);
1811 MIRBuilder.buildShuffleVector(Res, Op0, Op1,
1812                                 createInterleaveMask(OpTy.getNumElements(), 2));
1813
1814 return true;
1815}
1816
1817bool IRTranslator::translateVectorDeinterleave2Intrinsic(
1818 const CallInst &CI, MachineIRBuilder &MIRBuilder) {
1819 assert(CI.getIntrinsicID() == Intrinsic::vector_deinterleave2 &&
1820 "This function can only be called on the deinterleave2 intrinsic!");
1821 // Canonicalize deinterleave2 to shuffles that extract sub-vectors (similar to
1822 // SelectionDAG).
1823 Register Op = getOrCreateVReg(*CI.getOperand(0));
1824 auto Undef = MIRBuilder.buildUndef(MRI->getType(Op));
1825 ArrayRef<Register> Res = getOrCreateVRegs(CI);
1826
1827 LLT ResTy = MRI->getType(Res[0]);
1828 MIRBuilder.buildShuffleVector(Res[0], Op, Undef,
1829 createStrideMask(0, 2, ResTy.getNumElements()));
1830 MIRBuilder.buildShuffleVector(Res[1], Op, Undef,
1831 createStrideMask(1, 2, ResTy.getNumElements()));
1832
1833 return true;
1834}
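// Mask sketch for the two canonicalizations above, assuming <4 x i32>
// operands: interleave2 uses createInterleaveMask(4, 2), i.e.
//   G_SHUFFLE_VECTOR ..., shufflemask(0, 4, 1, 5, 2, 6, 3, 7)
// while deinterleave2 of an <8 x i32> input uses the stride masks
//   shufflemask(0, 2, 4, 6) and shufflemask(1, 3, 5, 7)
// to extract the even and odd elements respectively.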
1835
1836void IRTranslator::getStackGuard(Register DstReg,
1837 MachineIRBuilder &MIRBuilder) {
1838   const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1839   MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
1840 auto MIB =
1841 MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
1842
1843   Value *Global = TLI->getSDagStackGuard(*MF->getFunction().getParent());
1844   if (!Global)
1845 return;
1846
1847 unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
1848 LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));
1849
1850 MachinePointerInfo MPInfo(Global);
1851   auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
1852                MachineMemOperand::MODereferenceable;
1853   MachineMemOperand *MemRef = MF->getMachineMemOperand(
1854       MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
1855 MIB.setMemRefs({MemRef});
1856}
1857
1858bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
1859 MachineIRBuilder &MIRBuilder) {
1860 ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
1861 MIRBuilder.buildInstr(
1862 Op, {ResRegs[0], ResRegs[1]},
1863 {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});
1864
1865 return true;
1866}
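// Example (assumed IR): `%r = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)`
// maps the two result registers created above onto a single instruction:
//   %val:_(s32), %ovf:_(s1) = G_UADDO %a, %b
// The same pattern covers the signed and multiply variants via G_SADDO,
// G_USUBO, G_SSUBO, G_UMULO and G_SMULO.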
1867
1868bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
1869 MachineIRBuilder &MIRBuilder) {
1870 Register Dst = getOrCreateVReg(CI);
1871 Register Src0 = getOrCreateVReg(*CI.getOperand(0));
1872 Register Src1 = getOrCreateVReg(*CI.getOperand(1));
1873 uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
1874 MIRBuilder.buildInstr(Op, {Dst}, { Src0, Src1, Scale });
1875 return true;
1876}
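// Example (assumed IR): `call i32 @llvm.smul.fix.i32(i32 %a, i32 %b, i32 7)`
// would be emitted as `%d:_(s32) = G_SMULFIX %a, %b, 7`, where the trailing
// immediate is the fixed-point scale (the full product is shifted right by 7).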
1877
1878unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
1879 switch (ID) {
1880 default:
1881 break;
1882 case Intrinsic::acos:
1883 return TargetOpcode::G_FACOS;
1884 case Intrinsic::asin:
1885 return TargetOpcode::G_FASIN;
1886 case Intrinsic::atan:
1887 return TargetOpcode::G_FATAN;
1888 case Intrinsic::bswap:
1889 return TargetOpcode::G_BSWAP;
1890 case Intrinsic::bitreverse:
1891 return TargetOpcode::G_BITREVERSE;
1892 case Intrinsic::fshl:
1893 return TargetOpcode::G_FSHL;
1894 case Intrinsic::fshr:
1895 return TargetOpcode::G_FSHR;
1896 case Intrinsic::ceil:
1897 return TargetOpcode::G_FCEIL;
1898 case Intrinsic::cos:
1899 return TargetOpcode::G_FCOS;
1900 case Intrinsic::cosh:
1901 return TargetOpcode::G_FCOSH;
1902 case Intrinsic::ctpop:
1903 return TargetOpcode::G_CTPOP;
1904 case Intrinsic::exp:
1905 return TargetOpcode::G_FEXP;
1906 case Intrinsic::exp2:
1907 return TargetOpcode::G_FEXP2;
1908 case Intrinsic::exp10:
1909 return TargetOpcode::G_FEXP10;
1910 case Intrinsic::fabs:
1911 return TargetOpcode::G_FABS;
1912 case Intrinsic::copysign:
1913 return TargetOpcode::G_FCOPYSIGN;
1914 case Intrinsic::minnum:
1915 return TargetOpcode::G_FMINNUM;
1916 case Intrinsic::maxnum:
1917 return TargetOpcode::G_FMAXNUM;
1918 case Intrinsic::minimum:
1919 return TargetOpcode::G_FMINIMUM;
1920 case Intrinsic::maximum:
1921 return TargetOpcode::G_FMAXIMUM;
1922 case Intrinsic::canonicalize:
1923 return TargetOpcode::G_FCANONICALIZE;
1924 case Intrinsic::floor:
1925 return TargetOpcode::G_FFLOOR;
1926 case Intrinsic::fma:
1927 return TargetOpcode::G_FMA;
1928 case Intrinsic::log:
1929 return TargetOpcode::G_FLOG;
1930 case Intrinsic::log2:
1931 return TargetOpcode::G_FLOG2;
1932 case Intrinsic::log10:
1933 return TargetOpcode::G_FLOG10;
1934 case Intrinsic::ldexp:
1935 return TargetOpcode::G_FLDEXP;
1936 case Intrinsic::nearbyint:
1937 return TargetOpcode::G_FNEARBYINT;
1938 case Intrinsic::pow:
1939 return TargetOpcode::G_FPOW;
1940 case Intrinsic::powi:
1941 return TargetOpcode::G_FPOWI;
1942 case Intrinsic::rint:
1943 return TargetOpcode::G_FRINT;
1944 case Intrinsic::round:
1945 return TargetOpcode::G_INTRINSIC_ROUND;
1946 case Intrinsic::roundeven:
1947 return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
1948 case Intrinsic::sin:
1949 return TargetOpcode::G_FSIN;
1950 case Intrinsic::sinh:
1951 return TargetOpcode::G_FSINH;
1952 case Intrinsic::sqrt:
1953 return TargetOpcode::G_FSQRT;
1954 case Intrinsic::tan:
1955 return TargetOpcode::G_FTAN;
1956 case Intrinsic::tanh:
1957 return TargetOpcode::G_FTANH;
1958 case Intrinsic::trunc:
1959 return TargetOpcode::G_INTRINSIC_TRUNC;
1960 case Intrinsic::readcyclecounter:
1961 return TargetOpcode::G_READCYCLECOUNTER;
1962 case Intrinsic::readsteadycounter:
1963 return TargetOpcode::G_READSTEADYCOUNTER;
1964 case Intrinsic::ptrmask:
1965 return TargetOpcode::G_PTRMASK;
1966 case Intrinsic::lrint:
1967 return TargetOpcode::G_INTRINSIC_LRINT;
1968 case Intrinsic::llrint:
1969 return TargetOpcode::G_INTRINSIC_LLRINT;
1970 // FADD/FMUL require checking the FMF, so are handled elsewhere.
1971 case Intrinsic::vector_reduce_fmin:
1972 return TargetOpcode::G_VECREDUCE_FMIN;
1973 case Intrinsic::vector_reduce_fmax:
1974 return TargetOpcode::G_VECREDUCE_FMAX;
1975 case Intrinsic::vector_reduce_fminimum:
1976 return TargetOpcode::G_VECREDUCE_FMINIMUM;
1977 case Intrinsic::vector_reduce_fmaximum:
1978 return TargetOpcode::G_VECREDUCE_FMAXIMUM;
1979 case Intrinsic::vector_reduce_add:
1980 return TargetOpcode::G_VECREDUCE_ADD;
1981 case Intrinsic::vector_reduce_mul:
1982 return TargetOpcode::G_VECREDUCE_MUL;
1983 case Intrinsic::vector_reduce_and:
1984 return TargetOpcode::G_VECREDUCE_AND;
1985 case Intrinsic::vector_reduce_or:
1986 return TargetOpcode::G_VECREDUCE_OR;
1987 case Intrinsic::vector_reduce_xor:
1988 return TargetOpcode::G_VECREDUCE_XOR;
1989 case Intrinsic::vector_reduce_smax:
1990 return TargetOpcode::G_VECREDUCE_SMAX;
1991 case Intrinsic::vector_reduce_smin:
1992 return TargetOpcode::G_VECREDUCE_SMIN;
1993 case Intrinsic::vector_reduce_umax:
1994 return TargetOpcode::G_VECREDUCE_UMAX;
1995 case Intrinsic::vector_reduce_umin:
1996 return TargetOpcode::G_VECREDUCE_UMIN;
1997 case Intrinsic::experimental_vector_compress:
1998 return TargetOpcode::G_VECTOR_COMPRESS;
1999 case Intrinsic::lround:
2000 return TargetOpcode::G_LROUND;
2001 case Intrinsic::llround:
2002 return TargetOpcode::G_LLROUND;
2003 case Intrinsic::get_fpenv:
2004 return TargetOpcode::G_GET_FPENV;
2005 case Intrinsic::get_fpmode:
2006 return TargetOpcode::G_GET_FPMODE;
2007 }
2008   return Intrinsic::not_intrinsic;
2009}
2010
2011bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
2012                                             Intrinsic::ID ID,
2013                                             MachineIRBuilder &MIRBuilder) {
2014
2015 unsigned Op = getSimpleIntrinsicOpcode(ID);
2016
2017 // Is this a simple intrinsic?
2018   if (Op == Intrinsic::not_intrinsic)
2019     return false;
2020
2021 // Yes. Let's translate it.
2022   SmallVector<llvm::SrcOp, 4> VRegs;
2023   for (const auto &Arg : CI.args())
2024 VRegs.push_back(getOrCreateVReg(*Arg));
2025
2026 MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
2027                         MachineInstr::copyFlagsFromInstruction(CI));
2028   return true;
2029}
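// Example of the "simple" path (hypothetical values): `call float @llvm.sqrt.f32(float %x)`
// maps through getSimpleIntrinsicOpcode to
//   %r:_(s32) = G_FSQRT %x
// with any fast-math flags present on the call copied onto the instruction.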
2030
2031// TODO: Include ConstrainedOps.def when all strict instructions are defined.
2032static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
2033   switch (ID) {
2034 case Intrinsic::experimental_constrained_fadd:
2035 return TargetOpcode::G_STRICT_FADD;
2036 case Intrinsic::experimental_constrained_fsub:
2037 return TargetOpcode::G_STRICT_FSUB;
2038 case Intrinsic::experimental_constrained_fmul:
2039 return TargetOpcode::G_STRICT_FMUL;
2040 case Intrinsic::experimental_constrained_fdiv:
2041 return TargetOpcode::G_STRICT_FDIV;
2042 case Intrinsic::experimental_constrained_frem:
2043 return TargetOpcode::G_STRICT_FREM;
2044 case Intrinsic::experimental_constrained_fma:
2045 return TargetOpcode::G_STRICT_FMA;
2046 case Intrinsic::experimental_constrained_sqrt:
2047 return TargetOpcode::G_STRICT_FSQRT;
2048 case Intrinsic::experimental_constrained_ldexp:
2049 return TargetOpcode::G_STRICT_FLDEXP;
2050 default:
2051 return 0;
2052 }
2053}
2054
2055bool IRTranslator::translateConstrainedFPIntrinsic(
2056 const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
2057   fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();
2058
2059 unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
2060 if (!Opcode)
2061 return false;
2062
2063   uint32_t Flags = MachineInstr::copyFlagsFromInstruction(FPI);
2064   if (EB == fp::ExceptionBehavior::ebIgnore)
2065     Flags |= MachineInstr::NoFPExcept;
2066
2067   SmallVector<llvm::SrcOp, 4> VRegs;
2068 for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I)
2069 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(I)));
2070
2071 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
2072 return true;
2073}
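// Sketch (assumed IR): a constrained add such as
//   call double @llvm.experimental.constrained.fadd.f64(double %a, double %b,
//       metadata !"round.dynamic", metadata !"fpexcept.ignore")
// becomes `%r:_(s64) = G_STRICT_FADD %a, %b`; with "fpexcept.ignore" the
// NoFPExcept MI flag is expected to be set, while "fpexcept.strict" leaves it
// clear so later passes must preserve the exception behaviour.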
2074
2075std::optional<MCRegister> IRTranslator::getArgPhysReg(Argument &Arg) {
2076 auto VRegs = getOrCreateVRegs(Arg);
2077 if (VRegs.size() != 1)
2078 return std::nullopt;
2079
2080 // Arguments are lowered as a copy of a livein physical register.
2081 auto *VRegDef = MF->getRegInfo().getVRegDef(VRegs[0]);
2082 if (!VRegDef || !VRegDef->isCopy())
2083 return std::nullopt;
2084 return VRegDef->getOperand(1).getReg().asMCReg();
2085}
2086
2087bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val,
2088 const DILocalVariable *Var,
2089 const DIExpression *Expr,
2090 const DebugLoc &DL,
2091 MachineIRBuilder &MIRBuilder) {
2092 auto *Arg = dyn_cast<Argument>(Val);
2093 if (!Arg)
2094 return false;
2095
2096 if (!Expr->isEntryValue())
2097 return false;
2098
2099 std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
2100 if (!PhysReg) {
2101 LLVM_DEBUG(dbgs() << "Dropping dbg." << (isDeclare ? "declare" : "value")
2102 << ": expression is entry_value but "
2103 << "couldn't find a physical register\n");
2104 LLVM_DEBUG(dbgs() << *Var << "\n");
2105 return true;
2106 }
2107
2108 if (isDeclare) {
2109 // Append an op deref to account for the fact that this is a dbg_declare.
2110 Expr = DIExpression::append(Expr, dwarf::DW_OP_deref);
2111 MF->setVariableDbgInfo(Var, Expr, *PhysReg, DL);
2112 } else {
2113 MIRBuilder.buildDirectDbgValue(*PhysReg, Var, Expr);
2114 }
2115
2116 return true;
2117}
2118
2120 switch (ID) {
2121 default:
2122 llvm_unreachable("Unexpected intrinsic");
2123 case Intrinsic::experimental_convergence_anchor:
2124 return TargetOpcode::CONVERGENCECTRL_ANCHOR;
2125 case Intrinsic::experimental_convergence_entry:
2126 return TargetOpcode::CONVERGENCECTRL_ENTRY;
2127 case Intrinsic::experimental_convergence_loop:
2128 return TargetOpcode::CONVERGENCECTRL_LOOP;
2129 }
2130}
2131
2132bool IRTranslator::translateConvergenceControlIntrinsic(
2133 const CallInst &CI, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder) {
2135 Register OutputReg = getOrCreateConvergenceTokenVReg(CI);
2136 MIB.addDef(OutputReg);
2137
2138 if (ID == Intrinsic::experimental_convergence_loop) {
2139     auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl);
2140     assert(Bundle && "Expected a convergence control token.");
2141 Register InputReg =
2142 getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());
2143 MIB.addUse(InputReg);
2144 }
2145
2146 return true;
2147}
2148
2149bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
2150 MachineIRBuilder &MIRBuilder) {
2151 if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
2152 if (ORE->enabled()) {
2153 if (MemoryOpRemark::canHandle(MI, *LibInfo)) {
2154 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
2155 R.visit(MI);
2156 }
2157 }
2158 }
2159
2160   // If this is a simple intrinsic (that is, we just need to add a def of
2161   // a vreg and uses for each arg operand), then translate it.
2162 if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
2163 return true;
2164
2165 switch (ID) {
2166 default:
2167 break;
2168 case Intrinsic::lifetime_start:
2169 case Intrinsic::lifetime_end: {
2170 // No stack colouring in O0, discard region information.
2171     if (MF->getTarget().getOptLevel() == CodeGenOptLevel::None)
2172       return true;
2173
2174 unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
2175 : TargetOpcode::LIFETIME_END;
2176
2177 // Get the underlying objects for the location passed on the lifetime
2178 // marker.
2179     SmallVector<const Value *, 4> Allocas;
2180     getUnderlyingObjects(CI.getArgOperand(1), Allocas);
2181
2182 // Iterate over each underlying object, creating lifetime markers for each
2183 // static alloca. Quit if we find a non-static alloca.
2184 for (const Value *V : Allocas) {
2185 const AllocaInst *AI = dyn_cast<AllocaInst>(V);
2186 if (!AI)
2187 continue;
2188
2189 if (!AI->isStaticAlloca())
2190 return true;
2191
2192 MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
2193 }
2194 return true;
2195 }
2196 case Intrinsic::dbg_declare: {
2197 const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
2198 assert(DI.getVariable() && "Missing variable");
2199 translateDbgDeclareRecord(DI.getAddress(), DI.hasArgList(), DI.getVariable(),
2200 DI.getExpression(), DI.getDebugLoc(), MIRBuilder);
2201 return true;
2202 }
2203 case Intrinsic::dbg_label: {
2204 const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
2205 assert(DI.getLabel() && "Missing label");
2206
2207     assert(DI.getLabel()->isValidLocationForIntrinsic(
2208                MIRBuilder.getDebugLoc()) &&
2209 "Expected inlined-at fields to agree");
2210
2211 MIRBuilder.buildDbgLabel(DI.getLabel());
2212 return true;
2213 }
2214 case Intrinsic::vaend:
2215 // No target I know of cares about va_end. Certainly no in-tree target
2216 // does. Simplest intrinsic ever!
2217 return true;
2218 case Intrinsic::vastart: {
2219 Value *Ptr = CI.getArgOperand(0);
2220 unsigned ListSize = TLI->getVaListSizeInBits(*DL) / 8;
2221 Align Alignment = getKnownAlignment(Ptr, *DL);
2222
2223 MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
2224 .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
2225                                                  MachineMemOperand::MOStore,
2226                                                  ListSize, Alignment));
2227 return true;
2228 }
2229 case Intrinsic::dbg_assign:
2230 // A dbg.assign is a dbg.value with more information about stack locations,
2231 // typically produced during optimisation of variables with leaked
2232 // addresses. We can treat it like a normal dbg_value intrinsic here; to
2233 // benefit from the full analysis of stack/SSA locations, GlobalISel would
2234 // need to register for and use the AssignmentTrackingAnalysis pass.
2235 [[fallthrough]];
2236 case Intrinsic::dbg_value: {
2237 // This form of DBG_VALUE is target-independent.
2238 const DbgValueInst &DI = cast<DbgValueInst>(CI);
2239 translateDbgValueRecord(DI.getValue(), DI.hasArgList(), DI.getVariable(),
2240 DI.getExpression(), DI.getDebugLoc(), MIRBuilder);
2241 return true;
2242 }
2243 case Intrinsic::uadd_with_overflow:
2244 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
2245 case Intrinsic::sadd_with_overflow:
2246 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
2247 case Intrinsic::usub_with_overflow:
2248 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
2249 case Intrinsic::ssub_with_overflow:
2250 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
2251 case Intrinsic::umul_with_overflow:
2252 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
2253 case Intrinsic::smul_with_overflow:
2254 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
2255 case Intrinsic::uadd_sat:
2256 return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
2257 case Intrinsic::sadd_sat:
2258 return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
2259 case Intrinsic::usub_sat:
2260 return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
2261 case Intrinsic::ssub_sat:
2262 return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
2263 case Intrinsic::ushl_sat:
2264 return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
2265 case Intrinsic::sshl_sat:
2266 return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
2267 case Intrinsic::umin:
2268 return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
2269 case Intrinsic::umax:
2270 return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
2271 case Intrinsic::smin:
2272 return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
2273 case Intrinsic::smax:
2274 return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
2275 case Intrinsic::abs:
2276 // TODO: Preserve "int min is poison" arg in GMIR?
2277 return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
2278 case Intrinsic::smul_fix:
2279 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
2280 case Intrinsic::umul_fix:
2281 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
2282 case Intrinsic::smul_fix_sat:
2283 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
2284 case Intrinsic::umul_fix_sat:
2285 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
2286 case Intrinsic::sdiv_fix:
2287 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
2288 case Intrinsic::udiv_fix:
2289 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
2290 case Intrinsic::sdiv_fix_sat:
2291 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
2292 case Intrinsic::udiv_fix_sat:
2293 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
2294 case Intrinsic::fmuladd: {
2295 const TargetMachine &TM = MF->getTarget();
2296 Register Dst = getOrCreateVReg(CI);
2297 Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
2298 Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
2299 Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
2300 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
2301         TLI->isFMAFasterThanFMulAndFAdd(*MF,
2302                                         TLI->getValueType(*DL, CI.getType()))) {
2303 // TODO: Revisit this to see if we should move this part of the
2304 // lowering to the combiner.
2305 MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
2306                           MachineInstr::copyFlagsFromInstruction(CI));
2307     } else {
2308 LLT Ty = getLLTForType(*CI.getType(), *DL);
2309 auto FMul = MIRBuilder.buildFMul(
2310 Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
2311 MIRBuilder.buildFAdd(Dst, FMul, Op2,
2312                            MachineInstr::copyFlagsFromInstruction(CI));
2313     }
2314 return true;
2315 }
2316 case Intrinsic::convert_from_fp16:
2317 // FIXME: This intrinsic should probably be removed from the IR.
2318 MIRBuilder.buildFPExt(getOrCreateVReg(CI),
2319 getOrCreateVReg(*CI.getArgOperand(0)),
2320                           MachineInstr::copyFlagsFromInstruction(CI));
2321     return true;
2322 case Intrinsic::convert_to_fp16:
2323 // FIXME: This intrinsic should probably be removed from the IR.
2324 MIRBuilder.buildFPTrunc(getOrCreateVReg(CI),
2325 getOrCreateVReg(*CI.getArgOperand(0)),
2326                             MachineInstr::copyFlagsFromInstruction(CI));
2327     return true;
2328 case Intrinsic::frexp: {
2329 ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
2330 MIRBuilder.buildFFrexp(VRegs[0], VRegs[1],
2331 getOrCreateVReg(*CI.getArgOperand(0)),
2332                            MachineInstr::copyFlagsFromInstruction(CI));
2333     return true;
2334 }
2335 case Intrinsic::memcpy_inline:
2336 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
2337 case Intrinsic::memcpy:
2338 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
2339 case Intrinsic::memmove:
2340 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
2341 case Intrinsic::memset:
2342 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
2343 case Intrinsic::eh_typeid_for: {
2344     GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
2345     Register Reg = getOrCreateVReg(CI);
2346 unsigned TypeID = MF->getTypeIDFor(GV);
2347 MIRBuilder.buildConstant(Reg, TypeID);
2348 return true;
2349 }
2350 case Intrinsic::objectsize:
2351 llvm_unreachable("llvm.objectsize.* should have been lowered already");
2352
2353 case Intrinsic::is_constant:
2354 llvm_unreachable("llvm.is.constant.* should have been lowered already");
2355
2356 case Intrinsic::stackguard:
2357 getStackGuard(getOrCreateVReg(CI), MIRBuilder);
2358 return true;
2359 case Intrinsic::stackprotector: {
2360 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2361 Register GuardVal;
2362 if (TLI->useLoadStackGuardNode()) {
2363 GuardVal = MRI->createGenericVirtualRegister(PtrTy);
2364 getStackGuard(GuardVal, MIRBuilder);
2365 } else
2366 GuardVal = getOrCreateVReg(*CI.getArgOperand(0)); // The guard's value.
2367
2368 AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
2369 int FI = getOrCreateFrameIndex(*Slot);
2370     MF->getFrameInfo().setStackProtectorIndex(FI);
2371
2372 MIRBuilder.buildStore(
2373 GuardVal, getOrCreateVReg(*Slot),
2374         *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
2375                                   MachineMemOperand::MOStore |
2376                                       MachineMemOperand::MOVolatile,
2377                                   PtrTy, Align(8)));
2378 return true;
2379 }
2380 case Intrinsic::stacksave: {
2381 MIRBuilder.buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});
2382 return true;
2383 }
2384 case Intrinsic::stackrestore: {
2385 MIRBuilder.buildInstr(TargetOpcode::G_STACKRESTORE, {},
2386 {getOrCreateVReg(*CI.getArgOperand(0))});
2387 return true;
2388 }
2389 case Intrinsic::cttz:
2390 case Intrinsic::ctlz: {
2391 ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
2392 bool isTrailing = ID == Intrinsic::cttz;
2393 unsigned Opcode = isTrailing
2394 ? Cst->isZero() ? TargetOpcode::G_CTTZ
2395 : TargetOpcode::G_CTTZ_ZERO_UNDEF
2396 : Cst->isZero() ? TargetOpcode::G_CTLZ
2397 : TargetOpcode::G_CTLZ_ZERO_UNDEF;
2398 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
2399 {getOrCreateVReg(*CI.getArgOperand(0))});
2400 return true;
2401 }
2402 case Intrinsic::invariant_start: {
2403 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2404     Register Undef = MRI->createGenericVirtualRegister(PtrTy);
2405     MIRBuilder.buildUndef(Undef);
2406 return true;
2407 }
2408 case Intrinsic::invariant_end:
2409 return true;
2410 case Intrinsic::expect:
2411 case Intrinsic::annotation:
2412 case Intrinsic::ptr_annotation:
2413 case Intrinsic::launder_invariant_group:
2414 case Intrinsic::strip_invariant_group: {
2415 // Drop the intrinsic, but forward the value.
2416 MIRBuilder.buildCopy(getOrCreateVReg(CI),
2417 getOrCreateVReg(*CI.getArgOperand(0)));
2418 return true;
2419 }
2420 case Intrinsic::assume:
2421 case Intrinsic::experimental_noalias_scope_decl:
2422 case Intrinsic::var_annotation:
2423 case Intrinsic::sideeffect:
2424 // Discard annotate attributes, assumptions, and artificial side-effects.
2425 return true;
2426 case Intrinsic::read_volatile_register:
2427 case Intrinsic::read_register: {
2428 Value *Arg = CI.getArgOperand(0);
2429 MIRBuilder
2430 .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
2431 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
2432 return true;
2433 }
2434 case Intrinsic::write_register: {
2435 Value *Arg = CI.getArgOperand(0);
2436 MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
2437 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
2438 .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
2439 return true;
2440 }
2441 case Intrinsic::localescape: {
2442 MachineBasicBlock &EntryMBB = MF->front();
2443     StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName());
2444
2445 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
2446 // is the same on all targets.
2447 for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
2448       Value *Arg = CI.getArgOperand(Idx);
2449       if (isa<ConstantPointerNull>(Arg))
2450 continue; // Skip null pointers. They represent a hole in index space.
2451
2452 int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
2453 MCSymbol *FrameAllocSym =
2454 MF->getContext().getOrCreateFrameAllocSymbol(EscapedName, Idx);
2455
2456 // This should be inserted at the start of the entry block.
2457 auto LocalEscape =
2458 MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE)
2459 .addSym(FrameAllocSym)
2460 .addFrameIndex(FI);
2461
2462 EntryMBB.insert(EntryMBB.begin(), LocalEscape);
2463 }
2464
2465 return true;
2466 }
2467 case Intrinsic::vector_reduce_fadd:
2468 case Intrinsic::vector_reduce_fmul: {
2469 // Need to check for the reassoc flag to decide whether we want a
2470 // sequential reduction opcode or not.
2471 Register Dst = getOrCreateVReg(CI);
2472 Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0));
2473 Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1));
2474 unsigned Opc = 0;
2475 if (!CI.hasAllowReassoc()) {
2476 // The sequential ordering case.
2477 Opc = ID == Intrinsic::vector_reduce_fadd
2478 ? TargetOpcode::G_VECREDUCE_SEQ_FADD
2479 : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
2480 MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
2481                             MachineInstr::copyFlagsFromInstruction(CI));
2482       return true;
2483 }
2484 // We split the operation into a separate G_FADD/G_FMUL + the reduce,
2485 // since the associativity doesn't matter.
2486 unsigned ScalarOpc;
2487 if (ID == Intrinsic::vector_reduce_fadd) {
2488 Opc = TargetOpcode::G_VECREDUCE_FADD;
2489 ScalarOpc = TargetOpcode::G_FADD;
2490 } else {
2491 Opc = TargetOpcode::G_VECREDUCE_FMUL;
2492 ScalarOpc = TargetOpcode::G_FMUL;
2493 }
2494 LLT DstTy = MRI->getType(Dst);
2495 auto Rdx = MIRBuilder.buildInstr(
2496 Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI));
2497 MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
2498                           MachineInstr::copyFlagsFromInstruction(CI));
2499
2500 return true;
2501 }
2502 case Intrinsic::trap:
2503 return translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);
2504 case Intrinsic::debugtrap:
2505 return translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);
2506 case Intrinsic::ubsantrap:
2507 return translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);
2508 case Intrinsic::allow_runtime_check:
2509 case Intrinsic::allow_ubsan_check:
2510 MIRBuilder.buildCopy(getOrCreateVReg(CI),
2511 getOrCreateVReg(*ConstantInt::getTrue(CI.getType())));
2512 return true;
2513 case Intrinsic::amdgcn_cs_chain:
2514 return translateCallBase(CI, MIRBuilder);
2515 case Intrinsic::fptrunc_round: {
2516     uint32_t Flags = MachineInstr::copyFlagsFromInstruction(CI);
2517
2518 // Convert the metadata argument to a constant integer
2519 Metadata *MD = cast<MetadataAsValue>(CI.getArgOperand(1))->getMetadata();
2520 std::optional<RoundingMode> RoundMode =
2521 convertStrToRoundingMode(cast<MDString>(MD)->getString());
2522
2523 // Add the Rounding mode as an integer
2524 MIRBuilder
2525 .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
2526 {getOrCreateVReg(CI)},
2527 {getOrCreateVReg(*CI.getArgOperand(0))}, Flags)
2528 .addImm((int)*RoundMode);
2529
2530 return true;
2531 }
2532 case Intrinsic::is_fpclass: {
2533 Value *FpValue = CI.getOperand(0);
2534 ConstantInt *TestMaskValue = cast<ConstantInt>(CI.getOperand(1));
2535
2536 MIRBuilder
2537 .buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
2538 {getOrCreateVReg(*FpValue)})
2539 .addImm(TestMaskValue->getZExtValue());
2540
2541 return true;
2542 }
2543 case Intrinsic::set_fpenv: {
2544 Value *FPEnv = CI.getOperand(0);
2545 MIRBuilder.buildSetFPEnv(getOrCreateVReg(*FPEnv));
2546 return true;
2547 }
2548 case Intrinsic::reset_fpenv:
2549 MIRBuilder.buildResetFPEnv();
2550 return true;
2551 case Intrinsic::set_fpmode: {
2552 Value *FPState = CI.getOperand(0);
2553 MIRBuilder.buildSetFPMode(getOrCreateVReg(*FPState));
2554 return true;
2555 }
2556 case Intrinsic::reset_fpmode:
2557 MIRBuilder.buildResetFPMode();
2558 return true;
2559 case Intrinsic::vscale: {
2560 MIRBuilder.buildVScale(getOrCreateVReg(CI), 1);
2561 return true;
2562 }
2563 case Intrinsic::scmp:
2564 MIRBuilder.buildSCmp(getOrCreateVReg(CI),
2565 getOrCreateVReg(*CI.getOperand(0)),
2566 getOrCreateVReg(*CI.getOperand(1)));
2567 return true;
2568 case Intrinsic::ucmp:
2569 MIRBuilder.buildUCmp(getOrCreateVReg(CI),
2570 getOrCreateVReg(*CI.getOperand(0)),
2571 getOrCreateVReg(*CI.getOperand(1)));
2572 return true;
2573 case Intrinsic::prefetch: {
2574 Value *Addr = CI.getOperand(0);
2575 unsigned RW = cast<ConstantInt>(CI.getOperand(1))->getZExtValue();
2576 unsigned Locality = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
2577 unsigned CacheType = cast<ConstantInt>(CI.getOperand(3))->getZExtValue();
2578
2579     auto Flags = RW ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
2580     auto &MMO = *MF->getMachineMemOperand(MachinePointerInfo(Addr), Flags,
2581 LLT(), Align());
2582
2583 MIRBuilder.buildPrefetch(getOrCreateVReg(*Addr), RW, Locality, CacheType,
2584 MMO);
2585
2586 return true;
2587 }
2588
2589 case Intrinsic::vector_interleave2:
2590 case Intrinsic::vector_deinterleave2: {
2591 // Both intrinsics have at least one operand.
2592 Value *Op0 = CI.getOperand(0);
2593 LLT ResTy = getLLTForType(*Op0->getType(), MIRBuilder.getDataLayout());
2594 if (!ResTy.isFixedVector())
2595 return false;
2596
2597 if (CI.getIntrinsicID() == Intrinsic::vector_interleave2)
2598 return translateVectorInterleave2Intrinsic(CI, MIRBuilder);
2599
2600 return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);
2601 }
2602
2603#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
2604 case Intrinsic::INTRINSIC:
2605#include "llvm/IR/ConstrainedOps.def"
2606 return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
2607 MIRBuilder);
2608 case Intrinsic::experimental_convergence_anchor:
2609 case Intrinsic::experimental_convergence_entry:
2610 case Intrinsic::experimental_convergence_loop:
2611 return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder);
2612 }
2613 return false;
2614}
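// Worked example for the fmuladd case above (assumed target behaviour): given
//   %r = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
// a target whose isFMAFasterThanFMulAndFAdd returns true (and fusion not set
// to Strict) gets a single `G_FMA %a, %b, %c`, otherwise the call is split
// into `%t = G_FMUL %a, %b` followed by `G_FADD %t, %c`, with the call's FP
// flags copied onto both instructions.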
2615
2616bool IRTranslator::translateInlineAsm(const CallBase &CB,
2617 MachineIRBuilder &MIRBuilder) {
2618
2619   const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
2620
2621 if (!ALI) {
2622 LLVM_DEBUG(
2623 dbgs() << "Inline asm lowering is not supported for this target yet\n");
2624 return false;
2625 }
2626
2627 return ALI->lowerInlineAsm(
2628 MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
2629}
2630
2631bool IRTranslator::translateCallBase(const CallBase &CB,
2632 MachineIRBuilder &MIRBuilder) {
2633 ArrayRef<Register> Res = getOrCreateVRegs(CB);
2634
2635   SmallVector<ArrayRef<Register>, 8> Args;
2636   Register SwiftInVReg = 0;
2637 Register SwiftErrorVReg = 0;
2638 for (const auto &Arg : CB.args()) {
2639 if (CLI->supportSwiftError() && isSwiftError(Arg)) {
2640 assert(SwiftInVReg == 0 && "Expected only one swift error argument");
2641 LLT Ty = getLLTForType(*Arg->getType(), *DL);
2642 SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
2643 MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
2644 &CB, &MIRBuilder.getMBB(), Arg));
2645 Args.emplace_back(ArrayRef(SwiftInVReg));
2646 SwiftErrorVReg =
2647 SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
2648 continue;
2649 }
2650 Args.push_back(getOrCreateVRegs(*Arg));
2651 }
2652
2653 if (auto *CI = dyn_cast<CallInst>(&CB)) {
2654 if (ORE->enabled()) {
2655 if (MemoryOpRemark::canHandle(CI, *LibInfo)) {
2656 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
2657 R.visit(CI);
2658 }
2659 }
2660 }
2661
2662 std::optional<CallLowering::PtrAuthInfo> PAI;
2663 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_ptrauth)) {
2664 // Functions should never be ptrauth-called directly.
2665 assert(!CB.getCalledFunction() && "invalid direct ptrauth call");
2666
2667 const Value *Key = Bundle->Inputs[0];
2668 const Value *Discriminator = Bundle->Inputs[1];
2669
2670 // Look through ptrauth constants to try to eliminate the matching bundle
2671 // and turn this into a direct call with no ptrauth.
2672 // CallLowering will use the raw pointer if it doesn't find the PAI.
2673 const auto *CalleeCPA = dyn_cast<ConstantPtrAuth>(CB.getCalledOperand());
2674 if (!CalleeCPA || !isa<Function>(CalleeCPA->getPointer()) ||
2675 !CalleeCPA->isKnownCompatibleWith(Key, Discriminator, *DL)) {
2676 // If we can't make it direct, package the bundle into PAI.
2677 Register DiscReg = getOrCreateVReg(*Discriminator);
2678 PAI = CallLowering::PtrAuthInfo{cast<ConstantInt>(Key)->getZExtValue(),
2679 DiscReg};
2680 }
2681 }
2682
2683 Register ConvergenceCtrlToken = 0;
2684 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
2685 const auto &Token = *Bundle->Inputs[0].get();
2686 ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);
2687 }
2688
2689 // We don't set HasCalls on MFI here yet because call lowering may decide to
2690 // optimize into tail calls. Instead, we defer that to selection where a final
2691 // scan is done to check if any instructions are calls.
2692 bool Success = CLI->lowerCall(
2693 MIRBuilder, CB, Res, Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,
2694 [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
2695
2696 // Check if we just inserted a tail call.
2697 if (Success) {
2698 assert(!HasTailCall && "Can't tail call return twice from block?");
2699     const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
2700     HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
2701 }
2702
2703 return Success;
2704}
2705
2706bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
2707 const CallInst &CI = cast<CallInst>(U);
2708 auto TII = MF->getTarget().getIntrinsicInfo();
2709 const Function *F = CI.getCalledFunction();
2710
2711 // FIXME: support Windows dllimport function calls and calls through
2712 // weak symbols.
2713 if (F && (F->hasDLLImportStorageClass() ||
2714             (MF->getTarget().getTargetTriple().isOSWindows() &&
2715              F->hasExternalWeakLinkage())))
2716 return false;
2717
2718 // FIXME: support control flow guard targets.
2719   if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2720     return false;
2721
2722 // FIXME: support statepoints and related.
2723 if (isa<GCStatepointInst, GCRelocateInst, GCResultInst>(U))
2724 return false;
2725
2726 if (CI.isInlineAsm())
2727 return translateInlineAsm(CI, MIRBuilder);
2728
2729 diagnoseDontCall(CI);
2730
2731   Intrinsic::ID ID = Intrinsic::not_intrinsic;
2732   if (F && F->isIntrinsic()) {
2733 ID = F->getIntrinsicID();
2734     if (TII && ID == Intrinsic::not_intrinsic)
2735       ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
2736 }
2737
2738 if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
2739 return translateCallBase(CI, MIRBuilder);
2740
2741 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
2742
2743 if (translateKnownIntrinsic(CI, ID, MIRBuilder))
2744 return true;
2745
2746 ArrayRef<Register> ResultRegs;
2747 if (!CI.getType()->isVoidTy())
2748 ResultRegs = getOrCreateVRegs(CI);
2749
2750 // Ignore the callsite attributes. Backend code is most likely not expecting
2751 // an intrinsic to sometimes have side effects and sometimes not.
2752 MachineInstrBuilder MIB = MIRBuilder.buildIntrinsic(ID, ResultRegs);
2753 if (isa<FPMathOperator>(CI))
2754 MIB->copyIRFlags(CI);
2755
2756 for (const auto &Arg : enumerate(CI.args())) {
2757 // If this is required to be an immediate, don't materialize it in a
2758 // register.
2759 if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
2760 if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
2761 // imm arguments are more convenient than cimm (and realistically
2762 // probably sufficient), so use them.
2763 assert(CI->getBitWidth() <= 64 &&
2764 "large intrinsic immediates not handled");
2765 MIB.addImm(CI->getSExtValue());
2766 } else {
2767 MIB.addFPImm(cast<ConstantFP>(Arg.value()));
2768 }
2769 } else if (auto *MDVal = dyn_cast<MetadataAsValue>(Arg.value())) {
2770 auto *MD = MDVal->getMetadata();
2771 auto *MDN = dyn_cast<MDNode>(MD);
2772 if (!MDN) {
2773 if (auto *ConstMD = dyn_cast<ConstantAsMetadata>(MD))
2774 MDN = MDNode::get(MF->getFunction().getContext(), ConstMD);
2775 else // This was probably an MDString.
2776 return false;
2777 }
2778 MIB.addMetadata(MDN);
2779 } else {
2780 ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
2781 if (VRegs.size() > 1)
2782 return false;
2783 MIB.addUse(VRegs[0]);
2784 }
2785 }
2786
2787 // Add a MachineMemOperand if it is a target mem intrinsic.
2788   TargetLowering::IntrinsicInfo Info;
2789   // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
2790 if (TLI->getTgtMemIntrinsic(Info, CI, *MF, ID)) {
2791 Align Alignment = Info.align.value_or(
2792 DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
2793 LLT MemTy = Info.memVT.isSimple()
2794 ? getLLTForMVT(Info.memVT.getSimpleVT())
2795 : LLT::scalar(Info.memVT.getStoreSizeInBits());
2796
2797 // TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic
2798 // didn't yield anything useful.
2799     MachinePointerInfo MPI;
2800     if (Info.ptrVal)
2801 MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
2802 else if (Info.fallbackAddressSpace)
2803 MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
2804 MIB.addMemOperand(
2805 MF->getMachineMemOperand(MPI, Info.flags, MemTy, Alignment, CI.getAAMetadata()));
2806 }
2807
2808 if (CI.isConvergent()) {
2809 if (auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl)) {
2810 auto *Token = Bundle->Inputs[0].get();
2811 Register TokenReg = getOrCreateVReg(*Token);
2812 MIB.addUse(TokenReg, RegState::Implicit);
2813 }
2814 }
2815
2816 return true;
2817}
2818
2819bool IRTranslator::findUnwindDestinations(
2820 const BasicBlock *EHPadBB,
2821 BranchProbability Prob,
2822 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2823 &UnwindDests) {
2824   EHPersonality Personality = classifyEHPersonality(
2825       EHPadBB->getParent()->getFunction().getPersonalityFn());
2826 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2827 bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2828 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2829 bool IsSEH = isAsynchronousEHPersonality(Personality);
2830
2831 if (IsWasmCXX) {
2832 // Ignore this for now.
2833 return false;
2834 }
2835
2836 while (EHPadBB) {
2837 const Instruction *Pad = EHPadBB->getFirstNonPHI();
2838 BasicBlock *NewEHPadBB = nullptr;
2839 if (isa<LandingPadInst>(Pad)) {
2840 // Stop on landingpads. They are not funclets.
2841 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2842 break;
2843 }
2844 if (isa<CleanupPadInst>(Pad)) {
2845 // Stop on cleanup pads. Cleanups are always funclet entries for all known
2846 // personalities.
2847 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2848 UnwindDests.back().first->setIsEHScopeEntry();
2849 UnwindDests.back().first->setIsEHFuncletEntry();
2850 break;
2851 }
2852 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2853 // Add the catchpad handlers to the possible destinations.
2854 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2855 UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
2856 // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2857 if (IsMSVCCXX || IsCoreCLR)
2858 UnwindDests.back().first->setIsEHFuncletEntry();
2859 if (!IsSEH)
2860 UnwindDests.back().first->setIsEHScopeEntry();
2861 }
2862 NewEHPadBB = CatchSwitch->getUnwindDest();
2863 } else {
2864 continue;
2865 }
2866
2867 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2868 if (BPI && NewEHPadBB)
2869 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2870 EHPadBB = NewEHPadBB;
2871 }
2872 return true;
2873}
2874
2875bool IRTranslator::translateInvoke(const User &U,
2876 MachineIRBuilder &MIRBuilder) {
2877 const InvokeInst &I = cast<InvokeInst>(U);
2878 MCContext &Context = MF->getContext();
2879
2880 const BasicBlock *ReturnBB = I.getSuccessor(0);
2881 const BasicBlock *EHPadBB = I.getSuccessor(1);
2882
2883 const Function *Fn = I.getCalledFunction();
2884
2885 // FIXME: support invoking patchpoint and statepoint intrinsics.
2886 if (Fn && Fn->isIntrinsic())
2887 return false;
2888
2889 // FIXME: support whatever these are.
2890 if (I.hasDeoptState())
2891 return false;
2892
2893 // FIXME: support control flow guard targets.
2894 if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2895 return false;
2896
2897 // FIXME: support Windows exception handling.
2898 if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI()))
2899 return false;
2900
2901 // FIXME: support Windows dllimport function calls and calls through
2902 // weak symbols.
2903 if (Fn && (Fn->hasDLLImportStorageClass() ||
2904              (MF->getTarget().getTargetTriple().isOSWindows() &&
2905               Fn->hasExternalWeakLinkage())))
2906 return false;
2907
2908 bool LowerInlineAsm = I.isInlineAsm();
2909 bool NeedEHLabel = true;
2910
2911 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
2912 // the region covered by the try.
2913 MCSymbol *BeginSymbol = nullptr;
2914 if (NeedEHLabel) {
2915 MIRBuilder.buildInstr(TargetOpcode::G_INVOKE_REGION_START);
2916 BeginSymbol = Context.createTempSymbol();
2917 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
2918 }
2919
2920 if (LowerInlineAsm) {
2921 if (!translateInlineAsm(I, MIRBuilder))
2922 return false;
2923 } else if (!translateCallBase(I, MIRBuilder))
2924 return false;
2925
2926 MCSymbol *EndSymbol = nullptr;
2927 if (NeedEHLabel) {
2928 EndSymbol = Context.createTempSymbol();
2929 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
2930 }
2931
2932   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2933   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2934 MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
2935 BranchProbability EHPadBBProb =
2936 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2937           : BranchProbability::getZero();
2938
2939 if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
2940 return false;
2941
2942 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
2943 &ReturnMBB = getMBB(*ReturnBB);
2944 // Update successor info.
2945 addSuccessorWithProb(InvokeMBB, &ReturnMBB);
2946 for (auto &UnwindDest : UnwindDests) {
2947 UnwindDest.first->setIsEHPad();
2948 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2949 }
2950 InvokeMBB->normalizeSuccProbs();
2951
2952 if (NeedEHLabel) {
2953 assert(BeginSymbol && "Expected a begin symbol!");
2954 assert(EndSymbol && "Expected an end symbol!");
2955 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
2956 }
2957
2958 MIRBuilder.buildBr(ReturnMBB);
2959 return true;
2960}
2961
2962bool IRTranslator::translateCallBr(const User &U,
2963 MachineIRBuilder &MIRBuilder) {
2964 // FIXME: Implement this.
2965 return false;
2966}
2967
2968bool IRTranslator::translateLandingPad(const User &U,
2969 MachineIRBuilder &MIRBuilder) {
2970 const LandingPadInst &LP = cast<LandingPadInst>(U);
2971
2972 MachineBasicBlock &MBB = MIRBuilder.getMBB();
2973
2974 MBB.setIsEHPad();
2975
2976 // If there aren't registers to copy the values into (e.g., during SjLj
2977 // exceptions), then don't bother.
2978 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
2979 if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 &&
2980 TLI->getExceptionSelectorRegister(PersonalityFn) == 0)
2981 return true;
2982
2983 // If landingpad's return type is token type, we don't create DAG nodes
2984 // for its exception pointer and selector value. The extraction of exception
2985 // pointer or selector value from token type landingpads is not currently
2986 // supported.
2987 if (LP.getType()->isTokenTy())
2988 return true;
2989
2990 // Add a label to mark the beginning of the landing pad. Deletion of the
2991 // landing pad can thus be detected via the MachineModuleInfo.
2992 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
2993 .addSym(MF->addLandingPad(&MBB));
2994
2995 // If the unwinder does not preserve all registers, ensure that the
2996 // function marks the clobbered registers as used.
2997   const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
2998   if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
2999     MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
3000
3001 LLT Ty = getLLTForType(*LP.getType(), *DL);
3002   Register Undef = MRI->createGenericVirtualRegister(Ty);
3003   MIRBuilder.buildUndef(Undef);
3004
3005   SmallVector<LLT, 2> Tys;
3006   for (Type *Ty : cast<StructType>(LP.getType())->elements())
3007 Tys.push_back(getLLTForType(*Ty, *DL));
3008 assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
3009
3010 // Mark exception register as live in.
3011 Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn);
3012 if (!ExceptionReg)
3013 return false;
3014
3015 MBB.addLiveIn(ExceptionReg);
3016 ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
3017 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
3018
3019 Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn);
3020 if (!SelectorReg)
3021 return false;
3022
3023 MBB.addLiveIn(SelectorReg);
3024 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
3025 MIRBuilder.buildCopy(PtrVReg, SelectorReg);
3026 MIRBuilder.buildCast(ResRegs[1], PtrVReg);
3027
3028 return true;
3029}
3030
3031bool IRTranslator::translateAlloca(const User &U,
3032 MachineIRBuilder &MIRBuilder) {
3033 auto &AI = cast<AllocaInst>(U);
3034
3035 if (AI.isSwiftError())
3036 return true;
3037
3038 if (AI.isStaticAlloca()) {
3039 Register Res = getOrCreateVReg(AI);
3040 int FI = getOrCreateFrameIndex(AI);
3041 MIRBuilder.buildFrameIndex(Res, FI);
3042 return true;
3043 }
3044
3045 // FIXME: support stack probing for Windows.
3046   if (MF->getTarget().getTargetTriple().isOSWindows())
3047     return false;
3048
3049 // Now we're in the harder dynamic case.
3050 Register NumElts = getOrCreateVReg(*AI.getArraySize());
3051 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
3052 LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
3053 if (MRI->getType(NumElts) != IntPtrTy) {
3054 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
3055 MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
3056 NumElts = ExtElts;
3057 }
3058
3059 Type *Ty = AI.getAllocatedType();
3060
3061 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
3062 Register TySize =
3063 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
3064 MIRBuilder.buildMul(AllocSize, NumElts, TySize);
3065
3066 // Round the size of the allocation up to the stack alignment size
3067 // by add SA-1 to the size. This doesn't overflow because we're computing
3068 // an address inside an alloca.
3069 Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
3070 auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
3071 auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
3072                                       MachineInstr::NoUWrap);
3073   auto AlignCst =
3074 MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
3075 auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
3076
3077 Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
3078 if (Alignment <= StackAlign)
3079 Alignment = Align(1);
3080 MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);
3081
3082 MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
3083   assert(MF->getFrameInfo().hasVarSizedObjects());
3084   return true;
3085}
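// Numeric sketch of the dynamic-alloca rounding above: with a 16-byte stack
// alignment and a computed AllocSize of 40 bytes, the emitted G_ADD/G_AND pair
// evaluates (40 + 15) & ~15 = 48, so G_DYN_STACKALLOC reserves 48 bytes.
// These concrete numbers are only an illustration.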
3086
3087bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
3088 // FIXME: We may need more info about the type. Because of how LLT works,
3089 // we're completely discarding the i64/double distinction here (amongst
3090 // others). Fortunately the ABIs I know of where that matters don't use va_arg
3091 // anyway but that's not guaranteed.
3092 MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
3093 {getOrCreateVReg(*U.getOperand(0)),
3094 DL->getABITypeAlign(U.getType()).value()});
3095 return true;
3096}
3097
3098bool IRTranslator::translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder) {
3099   if (!MF->getTarget().Options.TrapUnreachable)
3100     return true;
3101
3102 auto &UI = cast<UnreachableInst>(U);
3103
3104 // We may be able to ignore unreachable behind a noreturn call.
3105 if (const CallInst *Call = dyn_cast_or_null<CallInst>(UI.getPrevNode());
3106 Call && Call->doesNotReturn()) {
3107     if (MF->getTarget().Options.NoTrapAfterNoreturn)
3108       return true;
3109 // Do not emit an additional trap instruction.
3110 if (Call->isNonContinuableTrap())
3111 return true;
3112 }
3113
3114 MIRBuilder.buildTrap();
3115 return true;
3116}
3117
3118bool IRTranslator::translateInsertElement(const User &U,
3119 MachineIRBuilder &MIRBuilder) {
3120 // If it is a <1 x Ty> vector, use the scalar as it is
3121 // not a legal vector type in LLT.
3122 if (auto *FVT = dyn_cast<FixedVectorType>(U.getType());
3123 FVT && FVT->getNumElements() == 1)
3124 return translateCopy(U, *U.getOperand(1), MIRBuilder);
3125
3126 Register Res = getOrCreateVReg(U);
3127 Register Val = getOrCreateVReg(*U.getOperand(0));
3128 Register Elt = getOrCreateVReg(*U.getOperand(1));
3129 unsigned PreferredVecIdxWidth = TLI->getVectorIdxTy(*DL).getSizeInBits();
3130 Register Idx;
3131 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(2))) {
3132 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3133 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3134 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
3135 Idx = getOrCreateVReg(*NewIdxCI);
3136 }
3137 }
3138 if (!Idx)
3139 Idx = getOrCreateVReg(*U.getOperand(2));
3140 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
3141 const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
3142 Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
3143 }
3144 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
3145 return true;
3146}
3147
3148bool IRTranslator::translateExtractElement(const User &U,
3149 MachineIRBuilder &MIRBuilder) {
3150 // If it is a <1 x Ty> vector, use the scalar as it is
3151 // not a legal vector type in LLT.
3152 if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
3153 return translateCopy(U, *U.getOperand(0), MIRBuilder);
3154
3155 Register Res = getOrCreateVReg(U);
3156 Register Val = getOrCreateVReg(*U.getOperand(0));
3157 unsigned PreferredVecIdxWidth = TLI->getVectorIdxTy(*DL).getSizeInBits();
3158 Register Idx;
3159 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
3160 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3161 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3162 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
3163 Idx = getOrCreateVReg(*NewIdxCI);
3164 }
3165 }
3166 if (!Idx)
3167 Idx = getOrCreateVReg(*U.getOperand(1));
3168 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
3169 const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
3170 Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
3171 }
3172 MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
3173 return true;
3174}
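// Index-width example for the two translations above (hypothetical types):
// extracting element 2 of a <4 x i32> on a target whose preferred vector
// index type is s64 yields roughly
//   %idx:_(s64) = G_CONSTANT i64 2
//   %elt:_(s32) = G_EXTRACT_VECTOR_ELT %vec(<4 x s32>), %idx(s64)
// while a non-constant i32 index would first be widened with G_ZEXT.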
3175
3176bool IRTranslator::translateShuffleVector(const User &U,
3177 MachineIRBuilder &MIRBuilder) {
3178   // A ShuffleVector that operates on scalable vectors is a splat vector
3179 // where the value of the splat vector is the 0th element of the first
3180 // operand, since the index mask operand is the zeroinitializer (undef and
3181 // poison are treated as zeroinitializer here).
3182 if (U.getOperand(0)->getType()->isScalableTy()) {
3183 Value *Op0 = U.getOperand(0);
3184 auto SplatVal = MIRBuilder.buildExtractVectorElementConstant(
3186 getOrCreateVReg(*Op0), 0);
3187 MIRBuilder.buildSplatVector(getOrCreateVReg(U), SplatVal);
3188 return true;
3189 }
3190
3191   ArrayRef<int> Mask;
3192   if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
3193 Mask = SVI->getShuffleMask();
3194 else
3195 Mask = cast<ConstantExpr>(U).getShuffleMask();
3196 ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
3197 MIRBuilder
3198 .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
3199 {getOrCreateVReg(*U.getOperand(0)),
3200 getOrCreateVReg(*U.getOperand(1))})
3201 .addShuffleMask(MaskAlloc);
3202 return true;
3203}
3204
3205bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
3206 const PHINode &PI = cast<PHINode>(U);
3207
3209 for (auto Reg : getOrCreateVRegs(PI)) {
3210 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
3211 Insts.push_back(MIB.getInstr());
3212 }
3213
3214 PendingPHIs.emplace_back(&PI, std::move(Insts));
3215 return true;
3216}
3217
3218bool IRTranslator::translateAtomicCmpXchg(const User &U,
3219 MachineIRBuilder &MIRBuilder) {
3220 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
3221
3222 auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
3223
3224 auto Res = getOrCreateVRegs(I);
3225 Register OldValRes = Res[0];
3226 Register SuccessRes = Res[1];
3227 Register Addr = getOrCreateVReg(*I.getPointerOperand());
3228 Register Cmp = getOrCreateVReg(*I.getCompareOperand());
3229 Register NewVal = getOrCreateVReg(*I.getNewValOperand());
3230
3231   MIRBuilder.buildAtomicCmpXchgWithSuccess(
3232       OldValRes, SuccessRes, Addr, Cmp, NewVal,
3233       *MF->getMachineMemOperand(
3234 MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
3235 getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
3236 I.getSuccessOrdering(), I.getFailureOrdering()));
3237 return true;
3238}
3239
3240bool IRTranslator::translateAtomicRMW(const User &U,
3241 MachineIRBuilder &MIRBuilder) {
3242 const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
3243 auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
3244
3245 Register Res = getOrCreateVReg(I);
3246 Register Addr = getOrCreateVReg(*I.getPointerOperand());
3247 Register Val = getOrCreateVReg(*I.getValOperand());
3248
3249 unsigned Opcode = 0;
3250 switch (I.getOperation()) {
3251 default:
3252 return false;
3253   case AtomicRMWInst::Xchg:
3254     Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
3255 break;
3256 case AtomicRMWInst::Add:
3257 Opcode = TargetOpcode::G_ATOMICRMW_ADD;
3258 break;
3259 case AtomicRMWInst::Sub:
3260 Opcode = TargetOpcode::G_ATOMICRMW_SUB;
3261 break;
3262 case AtomicRMWInst::And:
3263 Opcode = TargetOpcode::G_ATOMICRMW_AND;
3264 break;
3265   case AtomicRMWInst::Nand:
3266     Opcode = TargetOpcode::G_ATOMICRMW_NAND;
3267 break;
3268 case AtomicRMWInst::Or:
3269 Opcode = TargetOpcode::G_ATOMICRMW_OR;
3270 break;
3271 case AtomicRMWInst::Xor:
3272 Opcode = TargetOpcode::G_ATOMICRMW_XOR;
3273 break;
3274 case AtomicRMWInst::Max:
3275 Opcode = TargetOpcode::G_ATOMICRMW_MAX;
3276 break;
3277 case AtomicRMWInst::Min:
3278 Opcode = TargetOpcode::G_ATOMICRMW_MIN;
3279 break;
3280   case AtomicRMWInst::UMax:
3281     Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
3282     break;
3283   case AtomicRMWInst::UMin:
3284     Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
3285     break;
3286   case AtomicRMWInst::FAdd:
3287     Opcode = TargetOpcode::G_ATOMICRMW_FADD;
3288     break;
3289   case AtomicRMWInst::FSub:
3290     Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
3291     break;
3292   case AtomicRMWInst::FMax:
3293     Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
3294     break;
3295   case AtomicRMWInst::FMin:
3296     Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
3297     break;
3298   case AtomicRMWInst::UIncWrap:
3299     Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
3300     break;
3301   case AtomicRMWInst::UDecWrap:
3302     Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
3303     break;
3304 }
3305
3306 MIRBuilder.buildAtomicRMW(
3307 Opcode, Res, Addr, Val,
3308 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
3309 Flags, MRI->getType(Val), getMemOpAlign(I),
3310 I.getAAMetadata(), nullptr, I.getSyncScopeID(),
3311 I.getOrdering()));
3312 return true;
3313}
3314
3315bool IRTranslator::translateFence(const User &U,
3316 MachineIRBuilder &MIRBuilder) {
3317 const FenceInst &Fence = cast<FenceInst>(U);
3318 MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
3319 Fence.getSyncScopeID());
3320 return true;
3321}
3322
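// Aggregate (struct/array) values may be split across several virtual
// registers, so freeze is translated component-wise: one G_FREEZE per
// destination/source register pair.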
3323bool IRTranslator::translateFreeze(const User &U,
3324 MachineIRBuilder &MIRBuilder) {
3325 const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
3326 const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));
3327
3328 assert(DstRegs.size() == SrcRegs.size() &&
3329 "Freeze with different source and destination type?");
3330
3331 for (unsigned I = 0; I < DstRegs.size(); ++I) {
3332 MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
3333 }
3334
3335 return true;
3336}
3337
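// finishPendingPhis runs once every block has been translated: for each
// recorded G_PHI it appends one (value, machine predecessor) operand pair per
// incoming edge, skipping machine predecessors already handled for this PHI so
// that split CFG edges do not produce duplicate operands.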
3338void IRTranslator::finishPendingPhis() {
3339#ifndef NDEBUG
3340 DILocationVerifier Verifier;
3341 GISelObserverWrapper WrapperObserver(&Verifier);
3342 RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
3343#endif // ifndef NDEBUG
3344 for (auto &Phi : PendingPHIs) {
3345 const PHINode *PI = Phi.first;
3346 if (PI->getType()->isEmptyTy())
3347 continue;
3348 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
3349 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
3350 EntryBuilder->setDebugLoc(PI->getDebugLoc());
3351#ifndef NDEBUG
3352 Verifier.setCurrentInst(PI);
3353#endif // ifndef NDEBUG
3354
3355 SmallSet<const MachineBasicBlock *, 16> SeenPreds;
3356 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
3357 auto IRPred = PI->getIncomingBlock(i);
3358 ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
3359 for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
3360 if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
3361 continue;
3362 SeenPreds.insert(Pred);
3363 for (unsigned j = 0; j < ValRegs.size(); ++j) {
3364 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
3365 MIB.addUse(ValRegs[j]);
3366 MIB.addMBB(Pred);
3367 }
3368 }
3369 }
3370 }
3371}
3372
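// Lowering of dbg.value-style records, in decreasing order of specificity: an
// empty or variadic location becomes an undef DBG_VALUE (terminating any
// earlier location), constants become constant DBG_VALUEs, a static alloca
// behind a deref expression is tracked through its frame index, entry-value
// arguments are handled separately, and everything else gets one
// register-based DBG_VALUE per component vreg.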
3373void IRTranslator::translateDbgValueRecord(Value *V, bool HasArgList,
3374 const DILocalVariable *Variable,
3375 const DIExpression *Expression,
3376 const DebugLoc &DL,
3377 MachineIRBuilder &MIRBuilder) {
3378 assert(Variable->isValidLocationForIntrinsic(DL) &&
3379 "Expected inlined-at fields to agree");
3380 // Act as if we're handling a debug intrinsic.
3381 MIRBuilder.setDebugLoc(DL);
3382
3383 if (!V || HasArgList) {
3384 // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to
3385 // terminate any prior location.
3386 MIRBuilder.buildIndirectDbgValue(0, Variable, Expression);
3387 return;
3388 }
3389
3390 if (const auto *CI = dyn_cast<Constant>(V)) {
3391 MIRBuilder.buildConstDbgValue(*CI, Variable, Expression);
3392 return;
3393 }
3394
3395 if (auto *AI = dyn_cast<AllocaInst>(V);
3396 AI && AI->isStaticAlloca() && Expression->startsWithDeref()) {
3397 // If the value is an alloca and the expression starts with a
3398 // dereference, track a stack slot instead of a register, as registers
3399 // may be clobbered.
3400 auto ExprOperands = Expression->getElements();
3401 auto *ExprDerefRemoved =
3402 DIExpression::get(AI->getContext(), ExprOperands.drop_front());
3403 MIRBuilder.buildFIDbgValue(getOrCreateFrameIndex(*AI), Variable,
3404 ExprDerefRemoved);
3405 return;
3406 }
3407 if (translateIfEntryValueArgument(false, V, Variable, Expression, DL,
3408 MIRBuilder))
3409 return;
3410 for (Register Reg : getOrCreateVRegs(*V)) {
3411 // FIXME: This does not handle register-indirect values at offset 0. The
3412 // direct/indirect thing shouldn't really be handled by something as
3413 // implicit as reg+noreg vs reg+imm in the first place, but it seems
3414 // pretty baked in right now.
3415 MIRBuilder.buildDirectDbgValue(Reg, Variable, Expression);
3416 }
3417 return;
3418}
3419
3420void IRTranslator::translateDbgDeclareRecord(Value *Address, bool HasArgList,
3421 const DILocalVariable *Variable,
3422 const DIExpression *Expression,
3423 const DebugLoc &DL,
3424 MachineIRBuilder &MIRBuilder) {
3425 if (!Address || isa<UndefValue>(Address)) {
3426 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *Variable << "\n");
3427 return;
3428 }
3429
3430 assert(Variable->isValidLocationForIntrinsic(DL) &&
3431 "Expected inlined-at fields to agree");
3432 auto AI = dyn_cast<AllocaInst>(Address);
3433 if (AI && AI->isStaticAlloca()) {
3434 // Static allocas are tracked at the MF level, no need for DBG_VALUE
3435 // instructions (in fact, they get ignored if they *do* exist).
3436 MF->setVariableDbgInfo(Variable, Expression,
3437 getOrCreateFrameIndex(*AI), DL);
3438 return;
3439 }
3440
3441 if (translateIfEntryValueArgument(true, Address, Variable,
3442 Expression, DL,
3443 MIRBuilder))
3444 return;
3445
3446 // A dbg.declare describes the address of a source variable, so lower it
3447 // into an indirect DBG_VALUE.
3448 MIRBuilder.setDebugLoc(DL);
3449 MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
3450 Variable, Expression);
3451 return;
3452}
3453
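// translateDbgInfo handles the non-intrinsic (DbgRecord) form of debug info
// attached to an instruction: DbgLabelRecords become DBG_LABELs, and
// DbgVariableRecords are routed to the declare/value paths above.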
3454void IRTranslator::translateDbgInfo(const Instruction &Inst,
3455 MachineIRBuilder &MIRBuilder) {
3456 for (DbgRecord &DR : Inst.getDbgRecordRange()) {
3457 if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
3458 MIRBuilder.setDebugLoc(DLR->getDebugLoc());
3459 assert(DLR->getLabel() && "Missing label");
3460 assert(DLR->getLabel()->isValidLocationForIntrinsic(
3461 MIRBuilder.getDebugLoc()) &&
3462 "Expected inlined-at fields to agree");
3463 MIRBuilder.buildDbgLabel(DLR->getLabel());
3464 continue;
3465 }
3466 DbgVariableRecord &DVR = cast<DbgVariableRecord>(DR);
3467 const DILocalVariable *Variable = DVR.getVariable();
3468 const DIExpression *Expression = DVR.getExpression();
3469 Value *V = DVR.getVariableLocationOp(0);
3470 if (DVR.isDbgDeclare())
3471 translateDbgDeclareRecord(V, DVR.hasArgList(), Variable, Expression,
3472 DVR.getDebugLoc(), MIRBuilder);
3473 else
3474 translateDbgValueRecord(V, DVR.hasArgList(), Variable, Expression,
3475 DVR.getDebugLoc(), MIRBuilder);
3476 }
3477}
3478
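// The switch below is generated from Instruction.def: each IR opcode expands
// to a case that forwards to the corresponding translate<Opcode>() method, so
// unknown or unhandled opcodes simply fall back to DAGISel by returning false.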
3479bool IRTranslator::translate(const Instruction &Inst) {
3480 CurBuilder->setDebugLoc(Inst.getDebugLoc());
3481 CurBuilder->setPCSections(Inst.getMetadata(LLVMContext::MD_pcsections));
3482 CurBuilder->setMMRAMetadata(Inst.getMetadata(LLVMContext::MD_mmra));
3483
3484 if (TLI->fallBackToDAGISel(Inst))
3485 return false;
3486
3487 switch (Inst.getOpcode()) {
3488#define HANDLE_INST(NUM, OPCODE, CLASS) \
3489 case Instruction::OPCODE: \
3490 return translate##OPCODE(Inst, *CurBuilder.get());
3491#include "llvm/IR/Instruction.def"
3492 default:
3493 return false;
3494 }
3495}
3496
3497bool IRTranslator::translate(const Constant &C, Register Reg) {
3498 // We only emit constants into the entry block from here. To prevent a jumpy
3499 // debugging experience, drop the debug location.
3500 if (auto CurrInstDL = CurBuilder->getDL())
3501 EntryBuilder->setDebugLoc(DebugLoc());
3502
3503 if (auto CI = dyn_cast<ConstantInt>(&C))
3504 EntryBuilder->buildConstant(Reg, *CI);
3505 else if (auto CF = dyn_cast<ConstantFP>(&C))
3506 EntryBuilder->buildFConstant(Reg, *CF);
3507 else if (isa<UndefValue>(C))
3508 EntryBuilder->buildUndef(Reg);
3509 else if (isa<ConstantPointerNull>(C))
3510 EntryBuilder->buildConstant(Reg, 0);
3511 else if (auto GV = dyn_cast<GlobalValue>(&C))
3512 EntryBuilder->buildGlobalValue(Reg, GV);
3513 else if (auto CPA = dyn_cast<ConstantPtrAuth>(&C)) {
3514 Register Addr = getOrCreateVReg(*CPA->getPointer());
3515 Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());
3516 EntryBuilder->buildConstantPtrAuth(Reg, CPA, Addr, AddrDisc);
3517 } else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
3518 if (!isa<FixedVectorType>(CAZ->getType()))
3519 return false;
3520 // Return the scalar if it is a <1 x Ty> vector.
3521 unsigned NumElts = CAZ->getElementCount().getFixedValue();
3522 if (NumElts == 1)
3523 return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder);
3524 SmallVector<Register, 4> Ops;
3525 for (unsigned I = 0; I < NumElts; ++I) {
3526 Constant &Elt = *CAZ->getElementValue(I);
3527 Ops.push_back(getOrCreateVReg(Elt));
3528 }
3529 EntryBuilder->buildBuildVector(Reg, Ops);
3530 } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
3531 // Return the scalar if it is a <1 x Ty> vector.
3532 if (CV->getNumElements() == 1)
3533 return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);
3534 SmallVector<Register, 4> Ops;
3535 for (unsigned i = 0; i < CV->getNumElements(); ++i) {
3536 Constant &Elt = *CV->getElementAsConstant(i);
3537 Ops.push_back(getOrCreateVReg(Elt));
3538 }
3539 EntryBuilder->buildBuildVector(Reg, Ops);
3540 } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
3541 switch(CE->getOpcode()) {
3542#define HANDLE_INST(NUM, OPCODE, CLASS) \
3543 case Instruction::OPCODE: \
3544 return translate##OPCODE(*CE, *EntryBuilder.get());
3545#include "llvm/IR/Instruction.def"
3546 default:
3547 return false;
3548 }
3549 } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
3550 if (CV->getNumOperands() == 1)
3551 return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
3552 SmallVector<Register, 4> Ops;
3553 for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
3554 Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
3555 }
3556 EntryBuilder->buildBuildVector(Reg, Ops);
3557 } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
3558 EntryBuilder->buildBlockAddress(Reg, BA);
3559 } else
3560 return false;
3561
3562 return true;
3563}
3564
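// finalizeBasicBlock emits everything the switch lowering deferred for this
// block (bit-test headers and cases, jump-table headers and dispatch, and
// plain compare-and-branch clusters) and then, if required, the per-block
// stack protector check.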
3565bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
3566 MachineBasicBlock &MBB) {
3567 for (auto &BTB : SL->BitTestCases) {
3568 // Emit header first, if it wasn't already emitted.
3569 if (!BTB.Emitted)
3570 emitBitTestHeader(BTB, BTB.Parent);
3571
3572 BranchProbability UnhandledProb = BTB.Prob;
3573 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
3574 UnhandledProb -= BTB.Cases[j].ExtraProb;
3575 // Set the current basic block to the mbb we wish to insert the code into
3576 MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
3577 // If all cases cover a contiguous range, it is not necessary to jump to
3578 // the default block after the last bit test fails. This is because the
3579 // range check during bit test header creation has guaranteed that every
3580 // case here doesn't go outside the range. In this case, there is no need
3581 // to perform the last bit test, as it will always be true. Instead, make
3582 // the second-to-last bit-test fall through to the target of the last bit
3583 // test, and delete the last bit test.
3584
3585 MachineBasicBlock *NextMBB;
3586 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3587 // Second-to-last bit-test with contiguous range: fall through to the
3588 // target of the final bit test.
3589 NextMBB = BTB.Cases[j + 1].TargetBB;
3590 } else if (j + 1 == ej) {
3591 // For the last bit test, fall through to Default.
3592 NextMBB = BTB.Default;
3593 } else {
3594 // Otherwise, fall through to the next bit test.
3595 NextMBB = BTB.Cases[j + 1].ThisBB;
3596 }
3597
3598 emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);
3599
3600 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3601 // We need to record the replacement phi edge here that normally
3602 // happens in emitBitTestCase before we delete the case, otherwise the
3603 // phi edge will be lost.
3604 addMachineCFGPred({BTB.Parent->getBasicBlock(),
3605 BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
3606 MBB);
3607 // Since we're not going to use the final bit test, remove it.
3608 BTB.Cases.pop_back();
3609 break;
3610 }
3611 }
3612 // This is the "default" BB. There are two jumps to it: from the "header" BB
3613 // and from the last "case" BB, unless the latter was skipped.
3614 CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
3615 BTB.Default->getBasicBlock()};
3616 addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
3617 if (!BTB.ContiguousRange) {
3618 addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
3619 }
3620 }
3621 SL->BitTestCases.clear();
3622
3623 for (auto &JTCase : SL->JTCases) {
3624 // Emit header first, if it wasn't already emitted.
3625 if (!JTCase.first.Emitted)
3626 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
3627
3628 emitJumpTable(JTCase.second, JTCase.second.MBB);
3629 }
3630 SL->JTCases.clear();
3631
3632 for (auto &SwCase : SL->SwitchCases)
3633 emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
3634 SL->SwitchCases.clear();
3635
3636 // Check if we need to generate stack-protector guard checks.
3637 StackProtector &SP = getAnalysis<StackProtector>();
3638 if (SP.shouldEmitSDCheck(BB)) {
3639 bool FunctionBasedInstrumentation =
3640 TLI->getSSPStackGuardCheck(*MF->getFunction().getParent());
3641 SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
3642 }
3643 // Handle stack protector.
3644 if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
3645 LLVM_DEBUG(dbgs() << "Unimplemented stack protector case\n");
3646 return false;
3647 } else if (SPDescriptor.shouldEmitStackProtector()) {
3648 MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
3649 MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();
3650
3651 // Find the split point to split the parent mbb. At the same time copy all
3652 // physical registers used in the tail of parent mbb into virtual registers
3653 // before the split point and back into physical registers after the split
3654 // point. This prevents us needing to deal with Live-ins and many other
3655 // register allocation issues caused by us splitting the parent mbb. The
3656 // register allocator will clean up said virtual copies later on.
3657 MachineBasicBlock::iterator SplitPoint = findSplitPointForStackProtector(
3658 ParentMBB, *MF->getSubtarget().getInstrInfo());
3659
3660 // Splice the terminator of ParentMBB into SuccessMBB.
3661 SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
3662 ParentMBB->end());
3663
3664 // Add compare/jump on neq/jump to the parent BB.
3665 if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
3666 return false;
3667
3668 // Generate code for the failure MBB if we have not done so yet.
3669 MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
3670 if (FailureMBB->empty()) {
3671 if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
3672 return false;
3673 }
3674
3675 // Clear the Per-BB State.
3676 SPDescriptor.resetPerBBState();
3677 }
3678 return true;
3679}
3680
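// The parent-block check below loads the value saved in the stack protector
// slot and the expected guard value, compares them, and branches. Roughly (an
// illustrative sketch; register names and types depend on the target):
//   %slot:_(p0) = G_FRAME_INDEX <stack protector slot>
//   %stored:_(s64) = G_LOAD %slot :: (volatile load (s64))
//   %ne:_(s1) = G_ICMP intpred(ne), %guard, %stored
//   G_BRCOND %ne, %bb.failure
//   G_BR %bb.success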
3681bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
3682 MachineBasicBlock *ParentBB) {
3683 CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
3684 // First create the loads to the guard/stack slot for the comparison.
3685 Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
3686 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
3687 LLT PtrMemTy = getLLTForMVT(TLI->getPointerMemTy(*DL));
3688
3689 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
3690 int FI = MFI.getStackProtectorIndex();
3691
3692 Register Guard;
3693 Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
3694 const Module &M = *ParentBB->getParent()->getFunction().getParent();
3695 Align Align = DL->getPrefTypeAlign(PointerType::getUnqual(M.getContext()));
3696
3697 // Generate code to load the content of the guard slot.
3698 Register GuardVal =
3699 CurBuilder
3700 ->buildLoad(PtrMemTy, StackSlotPtr,
3701 MachinePointerInfo::getFixedStack(*MF, FI), Align,
3702 MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile)
3703 .getReg(0);
3704
3705 if (TLI->useStackGuardXorFP()) {
3706 LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
3707 return false;
3708 }
3709
3710 // Retrieve guard check function, nullptr if instrumentation is inlined.
3711 if (const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M)) {
3712 // This path is currently untestable on GlobalISel, since the only platform
3713 // that needs this seems to be Windows, and we fall back on that currently.
3714 // The code still lives here in case that changes.
3715 // Silence warning about unused variable until the code below that uses
3716 // 'GuardCheckFn' is enabled.
3717 (void)GuardCheckFn;
3718 return false;
3719#if 0
3720 // The target provides a guard check function to validate the guard value.
3721 // Generate a call to that function with the content of the guard slot as
3722 // argument.
3723 FunctionType *FnTy = GuardCheckFn->getFunctionType();
3724 assert(FnTy->getNumParams() == 1 && "Invalid function signature");
3725 ISD::ArgFlagsTy Flags;
3726 if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
3727 Flags.setInReg();
3728 CallLowering::ArgInfo GuardArgInfo(
3729 {GuardVal, FnTy->getParamType(0), {Flags}});
3730
3731 CallLowering::CallLoweringInfo Info;
3732 Info.OrigArgs.push_back(GuardArgInfo);
3733 Info.CallConv = GuardCheckFn->getCallingConv();
3734 Info.Callee = MachineOperand::CreateGA(GuardCheckFn, 0);
3735 Info.OrigRet = {Register(), FnTy->getReturnType()};
3736 if (!CLI->lowerCall(MIRBuilder, Info)) {
3737 LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
3738 return false;
3739 }
3740 return true;
3741#endif
3742 }
3743
3744 // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
3745 // Otherwise, emit a volatile load to retrieve the stack guard value.
3746 if (TLI->useLoadStackGuardNode()) {
3747 Guard =
3748 MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
3749 getStackGuard(Guard, *CurBuilder);
3750 } else {
3751 // TODO: test using android subtarget when we support @llvm.thread.pointer.
3752 const Value *IRGuard = TLI->getSDagStackGuard(M);
3753 Register GuardPtr = getOrCreateVReg(*IRGuard);
3754
3755 Guard = CurBuilder
3756 ->buildLoad(PtrMemTy, GuardPtr,
3760 .getReg(0);
3761 }
3762
3763 // Perform the comparison.
3764 auto Cmp =
3765 CurBuilder->buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Guard, GuardVal);
3766 // If the guard/stackslot do not equal, branch to failure MBB.
3767 CurBuilder->buildBrCond(Cmp, *SPD.getFailureMBB());
3768 // Otherwise branch to success MBB.
3769 CurBuilder->buildBr(*SPD.getSuccessMBB());
3770 return true;
3771}
3772
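// The failure block is lowered to a call to the stack-protector-fail libcall
// (__stack_chk_fail on most targets); targets that additionally require an
// explicit trap or unreachable after the call (PS4/PS5, WebAssembly) are not
// handled here yet and cause a fallback.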
3773bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
3774 MachineBasicBlock *FailureBB) {
3775 CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
3776
3777 const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
3778 const char *Name = TLI->getLibcallName(Libcall);
3779
3780 CallLowering::CallLoweringInfo Info;
3781 Info.CallConv = TLI->getLibcallCallingConv(Libcall);
3782 Info.Callee = MachineOperand::CreateES(Name);
3783 Info.OrigRet = {Register(), Type::getVoidTy(MF->getFunction().getContext()),
3784 0};
3785 if (!CLI->lowerCall(*CurBuilder, Info)) {
3786 LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
3787 return false;
3788 }
3789
3790 // On PS4/PS5, the "return address" must still be within the calling
3791 // function, even if it's at the very end, so emit an explicit TRAP here.
3792 // WebAssembly needs an unreachable instruction after a non-returning call,
3793 // because the function return type can be different from __stack_chk_fail's
3794 // return type (void).
3795 const TargetMachine &TM = MF->getTarget();
3796 if (TM.getTargetTriple().isPS() || TM.getTargetTriple().isWasm()) {
3797 LLVM_DEBUG(dbgs() << "Unhandled trap emission for stack protector fail\n");
3798 return false;
3799 }
3800 return true;
3801}
3802
3803void IRTranslator::finalizeFunction() {
3804 // Release the memory used by the different maps we
3805 // needed during the translation.
3806 PendingPHIs.clear();
3807 VMap.reset();
3808 FrameIndices.clear();
3809 MachinePreds.clear();
3810 // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
3811 // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid
3812 // destroying it twice (in ~IRTranslator() and ~LLVMContext())
3813 EntryBuilder.reset();
3814 CurBuilder.reset();
3815 FuncInfo.clear();
3816 SPDescriptor.resetPerFunctionState();
3817}
3818
3819/// Returns true if a BasicBlock \p BB within a variadic function contains a
3820/// variadic musttail call.
3821static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
3822 if (!IsVarArg)
3823 return false;
3824
3825 // Walk the block backwards, because tail calls usually only appear at the end
3826 // of a block.
3827 return llvm::any_of(llvm::reverse(BB), [](const Instruction &I) {
3828 const auto *CI = dyn_cast<CallInst>(&I);
3829 return CI && CI->isMustTailCall();
3830 });
3831}
3832
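// Top-level driver: set up the builders (CSE-aware if enabled), create one
// MachineBasicBlock per IR block plus a separate entry block for arguments and
// constants, lower the formal arguments, translate every instruction in
// reverse post-order, resolve pending PHIs, and finally merge the synthetic
// entry block into the real one.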
3833 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
3834 MF = &CurMF;
3835 const Function &F = MF->getFunction();
3836 GISelCSEAnalysisWrapper &Wrapper =
3837 getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
3838 // Set the CSEConfig and run the analysis.
3839 GISelCSEInfo *CSEInfo = nullptr;
3840 TPC = &getAnalysis<TargetPassConfig>();
3841 bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
3842 ? EnableCSEInIRTranslator
3843 : TPC->isGISelCSEEnabled();
3844 TLI = MF->getSubtarget().getTargetLowering();
3845
3846 if (EnableCSE) {
3847 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3848 CSEInfo = &Wrapper.get(TPC->getCSEConfig());
3849 EntryBuilder->setCSEInfo(CSEInfo);
3850 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3851 CurBuilder->setCSEInfo(CSEInfo);
3852 } else {
3853 EntryBuilder = std::make_unique<MachineIRBuilder>();
3854 CurBuilder = std::make_unique<MachineIRBuilder>();
3855 }
3856 CLI = MF->getSubtarget().getCallLowering();
3857 CurBuilder->setMF(*MF);
3858 EntryBuilder->setMF(*MF);
3859 MRI = &MF->getRegInfo();
3860 DL = &F.getDataLayout();
3861 ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
3862 const TargetMachine &TM = MF->getTarget();
3863 TM.resetTargetOptions(F);
3864 EnableOpts = OptLevel != CodeGenOptLevel::None && !skipFunction(F);
3865 FuncInfo.MF = MF;
3866 if (EnableOpts) {
3867 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
3868 FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
3869 } else {
3870 AA = nullptr;
3871 FuncInfo.BPI = nullptr;
3872 }
3873
3874 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
3875 MF->getFunction());
3876 LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
3877 FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
3878
3879 SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
3880 SL->init(*TLI, TM, *DL);
3881
3882 assert(PendingPHIs.empty() && "stale PHIs");
3883
3884 // Targets which want to use big endian can enable it using
3885 // enableBigEndian()
3886 if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
3887 // Currently we don't properly handle big endian code.
3888 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3889 F.getSubprogram(), &F.getEntryBlock());
3890 R << "unable to translate in big endian mode";
3891 reportTranslationError(*MF, *TPC, *ORE, R);
3892 return false;
3893 }
3894
3895 // Release the per-function state when we return, whether we succeeded or not.
3896 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
3897
3898 // Setup a separate basic-block for the arguments and constants
3899 MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
3900 MF->push_back(EntryBB);
3901 EntryBuilder->setMBB(*EntryBB);
3902
3903 DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
3904 SwiftError.setFunction(CurMF);
3905 SwiftError.createEntriesInEntryBlock(DbgLoc);
3906
3907 bool IsVarArg = F.isVarArg();
3908 bool HasMustTailInVarArgFn = false;
3909
3910 // Create all blocks, in IR order, to preserve the layout.
3911 FuncInfo.MBBMap.resize(F.getMaxBlockNumber());
3912 for (const BasicBlock &BB: F) {
3913 auto *&MBB = FuncInfo.MBBMap[BB.getNumber()];
3914
3915 MBB = MF->CreateMachineBasicBlock(&BB);
3916 MF->push_back(MBB);
3917
3918 if (BB.hasAddressTaken())
3919 MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));
3920
3921 if (!HasMustTailInVarArgFn)
3922 HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
3923 }
3924
3925 MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
3926
3927 // Make our arguments/constants entry block fallthrough to the IR entry block.
3928 EntryBB->addSuccessor(&getMBB(F.front()));
3929
3930 if (CLI->fallBackToDAGISel(*MF)) {
3931 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3932 F.getSubprogram(), &F.getEntryBlock());
3933 R << "unable to lower function: " << ore::NV("Prototype", F.getType());
3934 reportTranslationError(*MF, *TPC, *ORE, R);
3935 return false;
3936 }
3937
3938 // Lower the actual args into this basic block.
3939 SmallVector<ArrayRef<Register>, 8> VRegArgs;
3940 for (const Argument &Arg: F.args()) {
3941 if (DL->getTypeStoreSize(Arg.getType()).isZero())
3942 continue; // Don't handle zero sized types.
3943 ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
3944 VRegArgs.push_back(VRegs);
3945
3946 if (Arg.hasSwiftErrorAttr()) {
3947 assert(VRegs.size() == 1 && "Too many vregs for Swift error");
3948 SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
3949 }
3950 }
3951
3952 if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
3953 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3954 F.getSubprogram(), &F.getEntryBlock());
3955 R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
3956 reportTranslationError(*MF, *TPC, *ORE, R);
3957 return false;
3958 }
3959
3960 // Need to visit defs before uses when translating instructions.
3961 GISelObserverWrapper WrapperObserver;
3962 if (EnableCSE && CSEInfo)
3963 WrapperObserver.addObserver(CSEInfo);
3964 {
3965 ReversePostOrderTraversal<const Function *> RPOT(&F);
3966#ifndef NDEBUG
3967 DILocationVerifier Verifier;
3968 WrapperObserver.addObserver(&Verifier);
3969#endif // ifndef NDEBUG
3970 RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
3971 for (const BasicBlock *BB : RPOT) {
3972 MachineBasicBlock &MBB = getMBB(*BB);
3973 // Set the insertion point of all the following translations to
3974 // the end of this basic block.
3975 CurBuilder->setMBB(MBB);
3976 HasTailCall = false;
3977 for (const Instruction &Inst : *BB) {
3978 // If we translated a tail call in the last step, then we know
3979 // everything after the call is either a return, or something that is
3980 // handled by the call itself. (E.g. a lifetime marker or assume
3981 // intrinsic.) In this case, we should stop translating the block and
3982 // move on.
3983 if (HasTailCall)
3984 break;
3985#ifndef NDEBUG
3986 Verifier.setCurrentInst(&Inst);
3987#endif // ifndef NDEBUG
3988
3989 // Translate any debug-info attached to the instruction.
3990 translateDbgInfo(Inst, *CurBuilder);
3991
3992 if (translate(Inst))
3993 continue;
3994
3995 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3996 Inst.getDebugLoc(), BB);
3997 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
3998
3999 if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
4000 std::string InstStrStorage;
4001 raw_string_ostream InstStr(InstStrStorage);
4002 InstStr << Inst;
4003
4004 R << ": '" << InstStrStorage << "'";
4005 }
4006
4007 reportTranslationError(*MF, *TPC, *ORE, R);
4008 return false;
4009 }
4010
4011 if (!finalizeBasicBlock(*BB, MBB)) {
4012 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
4013 BB->getTerminator()->getDebugLoc(), BB);
4014 R << "unable to translate basic block";
4015 reportTranslationError(*MF, *TPC, *ORE, R);
4016 return false;
4017 }
4018 }
4019#ifndef NDEBUG
4020 WrapperObserver.removeObserver(&Verifier);
4021#endif
4022 }
4023
4024 finishPendingPhis();
4025
4026 SwiftError.propagateVRegs();
4027
4028 // Merge the argument lowering and constants block with its single
4029 // successor, the LLVM-IR entry block. We want the basic block to
4030 // be maximal.
4031 assert(EntryBB->succ_size() == 1 &&
4032 "Custom BB used for lowering should have only one successor");
4033 // Get the successor of the current entry block.
4034 MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
4035 assert(NewEntryBB.pred_size() == 1 &&
4036 "LLVM-IR entry block has a predecessor!?");
4037 // Move all the instruction from the current entry block to the
4038 // new entry block.
4039 NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
4040 EntryBB->end());
4041
4042 // Update the live-in information for the new entry block.
4043 for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
4044 NewEntryBB.addLiveIn(LiveIn);
4045 NewEntryBB.sortUniqueLiveIns();
4046
4047 // Get rid of the now empty basic block.
4048 EntryBB->removeSuccessor(&NewEntryBB);
4049 MF->remove(EntryBB);
4050 MF->deleteMachineBasicBlock(EntryBB);
4051
4052 assert(&MF->front() == &NewEntryBB &&
4053 "New entry wasn't next in the list of basic block!");
4054
4055 // Initialize stack protector information.
4056 StackProtector &SP = getAnalysis<StackProtector>();
4057 SP.copyToMachineFrameInfo(MF->getFrameInfo());
4058
4059 return false;
4060}
unsigned SubReg
#define Success
aarch64 promote const
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
Provides analysis for continuously CSEing during GISel passes.
This file implements a version of MachineIRBuilder which CSEs insts within a MachineBasicBlock.
This file describes how to lower LLVM calls to machine code calls.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Addr
std::string Name
uint64_t Size
This contains common code to allow clients to notify changes to machine instr.
const HexagonInstrInfo * TII
IRTranslator LLVM IR static false void reportTranslationError(MachineFunction &MF, const TargetPassConfig &TPC, OptimizationRemarkEmitter &ORE, OptimizationRemarkMissed &R)
static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB)
Returns true if a BasicBlock BB within a variadic function contains a variadic musttail call.
static unsigned getConvOpcode(Intrinsic::ID ID)
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL)
static unsigned getConstrainedOpcode(Intrinsic::ID ID)
IRTranslator LLVM IR MI
#define DEBUG_TYPE
static cl::opt< bool > EnableCSEInIRTranslator("enable-cse-in-irtranslator", cl::desc("Should enable CSE in irtranslator"), cl::Optional, cl::init(false))
static bool isValInBlock(const Value *V, const BasicBlock *BB)
static bool isSwiftError(const Value *V)
This file declares the IRTranslator pass.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This file describes how to lower LLVM inline asm to machine code INLINEASM.
Legalize the Machine IR a function s Machine IR
Definition: Legalizer.cpp:81
Implement a low-level type suitable for MachineInstr level instruction selection.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file declares the MachineIRBuilder class.
unsigned const TargetRegisterInfo * TRI
This file contains the declarations for metadata subclasses.
uint64_t High
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:55
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:57
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:52
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
Value * RHS
Value * LHS
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
Checks whether the given location points to constant memory, or if OrLocal is true whether it points ...
Class for arbitrary precision integers.
Definition: APInt.h:78
an instruction to allocate memory on the stack
Definition: Instructions.h:61
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:147
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:122
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:97
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:115
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:93
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:154
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
iterator begin() const
Definition: ArrayRef.h:153
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
An immutable pass that tracks lazily created AssumptionCache objects.
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:495
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:696
@ Add
*p = old + v
Definition: Instructions.h:712
@ FAdd
*p = old + v
Definition: Instructions.h:733
@ Min
*p = old <signed v ? old : v
Definition: Instructions.h:726
@ Or
*p = old | v
Definition: Instructions.h:720
@ Sub
*p = old - v
Definition: Instructions.h:714
@ And
*p = old & v
Definition: Instructions.h:716
@ Xor
*p = old ^ v
Definition: Instructions.h:722
@ FSub
*p = old - v
Definition: Instructions.h:736
@ UIncWrap
Increment one up to a maximum value.
Definition: Instructions.h:748
@ Max
*p = old >signed v ? old : v
Definition: Instructions.h:724
@ UMin
*p = old <unsigned v ? old : v
Definition: Instructions.h:730
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
Definition: Instructions.h:744
@ UMax
*p = old >unsigned v ? old : v
Definition: Instructions.h:728
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
Definition: Instructions.h:740
@ UDecWrap
Decrement one until a minimum value or zero.
Definition: Instructions.h:752
@ Nand
*p = ~(old & v)
Definition: Instructions.h:718
Attribute getFnAttr(Attribute::AttrKind Kind) const
Return the attribute object that exists for the function.
Definition: Attributes.h:864
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:392
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
unsigned getNumber() const
Definition: BasicBlock.h:104
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:658
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:367
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
const Instruction * getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
Definition: BasicBlock.cpp:386
const Instruction & back() const
Definition: BasicBlock.h:473
Legacy analysis pass which computes BlockFrequencyInfo.
Conditional or Unconditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Legacy analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1236
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1532
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Definition: InstrTypes.h:2143
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1465
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1385
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Definition: InstrTypes.h:2119
Value * getCalledOperand() const
Definition: InstrTypes.h:1458
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1410
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1391
bool isConvergent() const
Determine if the invoke is convergent.
Definition: InstrTypes.h:2027
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1401
unsigned arg_size() const
Definition: InstrTypes.h:1408
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1542
This class represents a function call, abstracting a target machine's calling convention.
bool isTailCall() const
bool isMustTailCall() const
bool checkReturnTypeForCallConv(MachineFunction &MF) const
Toplevel function to check the return type based on the target calling convention.
virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, ArrayRef< ArrayRef< Register > > VRegs, FunctionLoweringInfo &FLI) const
This hook must be implemented to lower the incoming (formal) arguments, described by VRegs,...
Definition: CallLowering.h:554
virtual bool enableBigEndian() const
For targets which want to use big-endian can enable it with enableBigEndian() hook.
Definition: CallLowering.h:603
virtual bool supportSwiftError() const
Definition: CallLowering.h:457
virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, ArrayRef< Register > VRegs, FunctionLoweringInfo &FLI, Register SwiftErrorVReg) const
This hook must be implemented to lower outgoing return values, described by Val, into the specified v...
Definition: CallLowering.h:522
virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const
This hook must be implemented to lower the given call instruction, including argument and return valu...
Definition: CallLowering.h:566
virtual bool fallBackToDAGISel(const MachineFunction &MF) const
Definition: CallLowering.h:540
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:747
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:757
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:774
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:786
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:787
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:780
@ ICMP_EQ
equal
Definition: InstrTypes.h:778
@ ICMP_NE
not equal
Definition: InstrTypes.h:779
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:783
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:759
bool isFPPredicate() const
Definition: InstrTypes.h:864
bool isIntPredicate() const
Definition: InstrTypes.h:865
This is the shared class of boolean and integer constants.
Definition: Constants.h:81
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:850
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:206
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:155
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:146
This is an important base class in LLVM.
Definition: Constant.h:42
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:417
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
unsigned getNonMetadataArgCount() const
DWARF expression.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this label.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
Definition: DataLayout.h:367
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:695
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
Definition: DataLayout.cpp:873
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:461
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:621
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Definition: DataLayout.h:429
Align getPointerABIAlignment(unsigned AS) const
Layout pointer alignment.
Definition: DataLayout.cpp:717
This represents the llvm.dbg.declare instruction.
Value * getAddress() const
This represents the llvm.dbg.label instruction.
DILabel * getLabel() const
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
This represents the llvm.dbg.value instruction.
Value * getValue(unsigned OpIdx=0) const
DILocalVariable * getVariable() const
DIExpression * getExpression() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
A debug info location.
Definition: DebugLoc.h:33
Class representing an expression and its matching format.
This instruction extracts a struct member or array element value from an aggregate value.
This instruction compares its operands according to the predicate given to the constructor.
An instruction for ordering other memory operations.
Definition: Instructions.h:420
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
Definition: Instructions.h:454
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Definition: Instructions.h:443
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition: Type.cpp:680
BranchProbabilityInfo * BPI
void clear()
clear - Clear out all the function-specific state.
MachineBasicBlock * getMBB(const BasicBlock *BB) const
SmallVector< MachineBasicBlock * > MBBMap
A mapping from LLVM basic block number to their machine block.
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition: Pass.cpp:178
const BasicBlock & getEntryBlock() const
Definition: Function.h:807
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1837
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition: Function.h:702
Constant * getPersonalityFn() const
Get the personality function associated with this function.
Definition: Function.cpp:1993
const Function & getFunction() const
Definition: Function.h:170
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition: Function.h:254
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:380
The actual analysis pass wrapper.
Definition: CSEInfo.h:222
Simple wrapper that does the following.
Definition: CSEInfo.h:204
The CSE Analysis object.
Definition: CSEInfo.h:69
Abstract class that contains various methods for clients to notify about changes.
Simple wrapper observer that takes several observers, and calls each one for each event.
void removeObserver(GISelChangeObserver *O)
void addObserver(GISelChangeObserver *O)
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
Definition: GlobalValue.h:567
bool hasExternalWeakLinkage() const
Definition: GlobalValue.h:529
bool hasDLLImportStorageClass() const
Definition: GlobalValue.h:278
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
bool isTailCall(const MachineInstr &MI) const override
This instruction compares its operands according to the predicate given to the constructor.
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
IRTranslator(CodeGenOptLevel OptLevel=CodeGenOptLevel::None)
static char ID
Definition: IRTranslator.h:68
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
Indirect Branch Instruction.
bool lowerInlineAsm(MachineIRBuilder &MIRBuilder, const CallBase &CB, std::function< ArrayRef< Register >(const Value &Val)> GetOrCreateVRegs) const
Lower the given inline asm call instruction GetOrCreateVRegs is a callback to materialize a register ...
This instruction inserts a struct field of array element value into an aggregate value.
iterator_range< simple_ilist< DbgRecord >::iterator > getDbgRecordRange() const
Return a range over the DbgRecords attached to this instruction.
Definition: Instruction.h:104
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:466
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
Definition: Instruction.h:363
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:381
AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
Definition: Metadata.cpp:1713
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:274
bool hasAllowReassoc() const LLVM_READONLY
Determine whether the allow-reassociation flag is set.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:55
Invoke instruction.
constexpr LLT changeElementType(LLT NewEltTy) const
If this type is a vector, return a vector with the same number of elements but the new element type.
Definition: LowLevelType.h:214
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
Definition: LowLevelType.h:57
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:193
constexpr bool isPointer() const
Definition: LowLevelType.h:149
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
Definition: LowLevelType.h:100
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
Definition: LowLevelType.h:178
The landingpad instruction holds all of the information necessary to generate correct exception handl...
An instruction for reading from memory.
Definition: Instructions.h:174
Value * getPointerOperand()
Definition: Instructions.h:253
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:218
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:228
static LocationSize precise(uint64_t Value)
Context object for machine code objects.
Definition: MCContext.h:83
MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
Definition: MCContext.cpp:346
MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
Definition: MCContext.cpp:236
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
Metadata node.
Definition: Metadata.h:1069
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1542
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
unsigned pred_size() const
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
void setAddressTakenIRBlock(BasicBlock *BB)
Set this block to reflect that it corresponds to an IR-level basic block with a BlockAddress.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
void sortUniqueLiveIns()
Sorts and uniques the LiveIns vector.
bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
void setIsEHPad(bool V=true)
Indicates the block is a landing pad.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
int CreateVariableSizedObject(Align Alignment, const AllocaInst *Alloca)
Notify the MachineFrameInfo object that a variable sized object has been created.
void setHasMustTailInVarArgFunc(bool B)
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
void push_back(MachineBasicBlock *MBB)
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MCSymbol * addLandingPad(MachineBasicBlock *LandingPad)
Add a new panding pad, and extract the exception handling information from the landingpad instruction...
void deleteMachineBasicBlock(MachineBasicBlock *MBB)
DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
void remove(iterator MBBI)
void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr, int Slot, const DILocation *Loc)
Collect information used to emit debugging information of a variable in a stack slot.
const MachineBasicBlock & front() const
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void erase(iterator MBBI)
void insert(iterator MBBI, MachineBasicBlock *MBB)
Helper class to build MachineInstr.
MachineInstrBuilder buildFMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildFreeze(const DstOp &Dst, const SrcOp &Src)
Build and insert Dst = G_FREEZE Src.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ADD Op0, Op1.
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildResetFPMode()
Build and insert G_RESET_FPMODE.
MachineInstrBuilder buildFPExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPEXT Op.
MachineInstrBuilder buildUCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_UCMP Op0, Op1.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildSCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_SCMP Op0, Op1.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, const SrcOp &Src2, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FMA Op0, Op1, Op2.
MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_MUL Op0, Op1.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_SUB Op0, Op1.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildSetFPMode(const SrcOp &Src)
Build and insert G_SET_FPMODE Src.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction specifying that Variable is given by C (suitably modified b...
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildExtractVectorElementConstant(const DstOp &Res, const SrcOp &Val, const int Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instruction specifying that Label is given.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in th...
MachineInstrBuilder buildResetFPEnv()
Build and insert G_RESET_FPENV.
void setDebugLoc(const DebugLoc &DL)
Set the debug location to DL for all the next build instructions.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr,...
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
const DebugLoc & getDebugLoc()
Get the current instruction's debug location.
MachineInstrBuilder buildTrap(bool Debug=false)
Build and insert G_TRAP or G_DEBUGTRAP.
MachineInstrBuilder buildFFrexp(const DstOp &Fract, const DstOp &Exp, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)
Build and insert Fract, Exp = G_FFREXP Src.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)
Build and insert G_PREFETCH Addr, RW, Locality, CacheType.
const DataLayout & getDataLayout() const
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)
Build and insert Res = G_SPLAT_VECTOR Val.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
MachineInstrBuilder buildFAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FADD Op0, Op1.
MachineInstrBuilder buildSetFPEnv(const SrcOp &Src)
Build and insert G_SET_FPENV Src.
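The buildXXX helpers above belong to MachineIRBuilder; a minimal sketch of how such a builder is typically driven, assuming the insertion point and debug location were already set (the helper name and registers are hypothetical, not part of the generated listing):
  #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
  #include "llvm/IR/InstrTypes.h"
  using namespace llvm;

  // Compare two generic vregs and branch on the result.
  static void emitCompareAndBranch(MachineIRBuilder &MIRBuilder, Register LHS,
                                   Register RHS, MachineBasicBlock &TrueBB,
                                   MachineBasicBlock &FalseBB) {
    // G_ICMP yields an s1 value for scalar operands.
    auto Cmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1), LHS, RHS);
    MIRBuilder.buildBrCond(Cmp.getReg(0), TrueBB); // conditional edge
    MIRBuilder.buildBr(FalseBB);                   // fallthrough edge
  }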
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
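The addDef/addUse/addImm accessors above chain, which is how operands get attached when no dedicated buildXXX wrapper exists; a hedged sketch (the opcode and registers are placeholders supplied by the caller):
  // Emit `DstReg = Opcode SrcReg, Imm` operand-by-operand; every add* helper
  // returns the builder, so the calls chain.
  static MachineInstrBuilder buildGeneric(MachineIRBuilder &MIRBuilder,
                                          unsigned Opcode, Register DstReg,
                                          Register SrcReg, int64_t Imm) {
    return MIRBuilder.buildInstr(Opcode)
        .addDef(DstReg) // result operand
        .addUse(SrcReg) // register input
        .addImm(Imm);   // immediate input
  }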
Representation of each machine instruction.
Definition: MachineInstr.h:69
void copyIRFlags(const Instruction &I)
Copy all flags to MachineInstr MIFlags.
static uint32_t copyFlagsFromInstruction(const Instruction &I)
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:579
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
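These flags are or'd together when a memory access is described to the backend; a minimal sketch of building an MMO and a load with it, assuming MF, MIRBuilder, and PtrReg come from the surrounding translation code and the pointer info and alignment here are purely illustrative:
  // Describe an invariant, dereferenceable 32-bit load and emit it.
  MachineMemOperand::Flags MMOFlags = MachineMemOperand::MOLoad |
                                      MachineMemOperand::MODereferenceable |
                                      MachineMemOperand::MOInvariant;
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(), MMOFlags, LLT::scalar(32), Align(4));
  auto Load = MIRBuilder.buildLoad(LLT::scalar(32), PtrReg, *MMO);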
static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
void addPhysRegsUsedFromRegMask(const uint32_t *RegMask)
addPhysRegsUsedFromRegMask - Mark any registers not in RegMask as used.
Representation for a specific memory location.
Root of the metadata hierarchy.
Definition: Metadata.h:62
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
The optimization diagnostic interface.
Diagnostic information for missed-optimization remarks.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
Definition: DerivedTypes.h:662
Class to install both of the above.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
Definition: Register.h:110
Return a value (possibly void) from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents the LLVM 'select' instruction.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:367
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:502
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
void initialize(const BasicBlock *BB, MachineBasicBlock *MBB, bool FunctionBasedInstrumentation)
Initialize the stack protector descriptor structure for a new basic block.
MachineBasicBlock * getSuccessMBB()
void resetPerBBState()
Reset state that changes when we handle different basic blocks.
void resetPerFunctionState()
Reset state that only changes when we switch functions.
MachineBasicBlock * getFailureMBB()
MachineBasicBlock * getParentMBB()
bool shouldEmitStackProtector() const
Returns true if all fields of the stack protector descriptor are initialized implying that we should/...
bool shouldEmitFunctionBasedCheckStackProtector() const
bool shouldEmitSDCheck(const BasicBlock &BB) const
void copyToMachineFrameInfo(MachineFrameInfo &MFI) const
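Taken together, the descriptor above is consulted once per translated block; a rough sketch of that check, assuming SPDescriptor, BB, and the surrounding emission code are provided by the caller:
  // Decide whether this block needs a stack protector check, then reset the
  // per-block portion of the descriptor.
  if (SPDescriptor.shouldEmitStackProtector() &&
      SPDescriptor.shouldEmitSDCheck(BB)) {
    // The guard comparison is emitted in SPDescriptor.getParentMBB(),
    // branching to getSuccessMBB() on a match and getFailureMBB() otherwise.
  }
  SPDescriptor.resetPerBBState();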
An instruction for storing to memory.
Definition: Instructions.h:290
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:131
TypeSize getElementOffset(unsigned Idx) const
Definition: DataLayout.h:600
Class to represent struct types.
Definition: DerivedTypes.h:216
bool createEntriesInEntryBlock(DebugLoc DbgLoc)
Create initial definitions of swifterror values in the entry block of the current function.
void setFunction(MachineFunction &MF)
Initialize data structures for specified new function.
void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, Register)
Set the swifterror virtual register in the VRegDefMap for this basic block.
Register getOrCreateVRegUseAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a use of a swifterror by an instruction.
Register getOrCreateVRegDefAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a def of a swifterror by an instruction.
const Value * getFunctionArg() const
Get the (unique) function argument that was marked swifterror, or nullptr if this function has no swi...
void propagateVRegs()
Propagate assigned swifterror vregs through a function, synthesizing PHI nodes when needed to maintai...
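The swifterror helpers above are members of SwiftErrorValueTracking (declared in llvm/CodeGen/SwiftErrorValueTracking.h) and follow a per-function lifecycle; a hedged sketch, with the tracker, function, and the instruction/block/value being visited all supplied by the caller:
  // Per-function setup, per-instruction queries, then PHI synthesis at the end.
  SwiftError.setFunction(MF);                        // reset per-function state
  SwiftError.createEntriesInEntryBlock(EntryDbgLoc); // initial defs in the entry block
  // ... while translating, each swifterror use/def resolves to a vreg:
  Register VReg = SwiftError.getOrCreateVRegUseAt(&I, &MBB, SwiftErrorVal);
  // ... once every block is translated, stitch the vregs together with PHIs:
  SwiftError.propagateVRegs();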
Multiway switch.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
virtual unsigned getVaListSizeInBits(const DataLayout &DL) const
Returns the size of the platform's va_list object.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
bool isJumpExpensive() const
Return true if Flow Control is an expensive operation that should be avoided.
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual bool fallBackToDAGISel(const Instruction &Inst) const
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
virtual const TargetIntrinsicInfo * getIntrinsicInfo() const
If intrinsic information is available, return it. If not, return null.
const Triple & getTargetTriple() const
TargetOptions Options
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
Target-Independent Code Generator Pass Configuration Options.
virtual std::unique_ptr< CSEConfigBase > getCSEConfig() const
Returns the CSEConfig object to use for the current optimization level.
virtual bool isGISelCSEEnabled() const
Check whether continuous CSE should be enabled in GISel passes.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const InlineAsmLowering * getInlineAsmLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const CallLowering * getCallLowering() const
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
bool isOSWindows() const
Tests whether the OS is Windows.
Definition: Triple.h:624
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
TypeID
Definitions of all of the base types for the Type system.
Definition: Type.h:54
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:298
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition: Type.h:291
static IntegerType * getInt32Ty(LLVMContext &C)
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:221
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
Value * getOperand(unsigned i) const
Definition: User.h:169
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:694
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
constexpr bool isZero() const
Definition: TypeSize.h:156
const ParentTy * getParent() const
Definition: ilist_node.h:32
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:353
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:661
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: Lint.cpp:86
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Key
PAL metadata keys.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:875
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
OneUse_match< T > m_OneUse(const T &SubPattern)
Definition: PatternMatch.h:67
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
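The m_* helpers above compose into declarative matchers over IR; a small self-contained example of the style (the helper name is made up):
  #include "llvm/IR/PatternMatch.h"
  using namespace llvm;
  using namespace llvm::PatternMatch;

  // Return the operand X if V is a single-use `xor X, -1` (in either operand
  // order), otherwise nullptr.
  static Value *matchSingleUseNot(Value *V) {
    Value *X;
    if (match(V, m_OneUse(m_Not(m_Value(X)))))
      return X;
    return nullptr;
  }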
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Undef
Value of the register doesn't matter.
Offsets
Offsets in bytes from the start of the input buffer.
Definition: SIInstrInfo.h:1589
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
std::vector< CaseCluster > CaseClusterVector
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
CaseClusterVector::iterator CaseClusterIt
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
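These cluster kinds drive switch lowering; a rough sketch of the normalization step (both names live in llvm/CodeGen/SwitchLoweringUtils.h), assuming Clusters is filled with one CC_Range entry per case of the switch being translated:
  // Sort by case value and merge adjacent ranges with the same destination
  // before choosing between ranges, jump tables, and bit tests.
  SwitchCG::CaseClusterVector Clusters;
  // ... append one CC_Range cluster per case here ...
  SwitchCG::sortAndRangeify(Clusters);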
@ CE
Windows NT (Windows on ARM)
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
ExceptionBehavior
Exception behavior used for floating point operations.
Definition: FPEnv.h:38
@ ebIgnore
This corresponds to "fpexcept.ignore".
Definition: FPEnv.h:39
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< PhiNode * > Phi
Definition: RDFGraph.h:390
NodeAddr< CodeNode * > Code
Definition: RDFGraph.h:388
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
@ Offset
Definition: DWP.cpp:480
int popcount(T Value) noexcept
Count the number of set bits in a value.
Definition: bit.h:385
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
Definition: MathExtras.h:255
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
Definition: ScopeExit.h:59
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition: STLExtras.h:2431
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
Definition: bit.h:307
void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
MVT getMVTForLLT(LLT Ty)
Get a rough equivalent of an MVT for a given LLT.
gep_type_iterator gep_type_end(const User *GEP)
MachineBasicBlock::iterator findSplitPointForStackProtector(MachineBasicBlock *BB, const TargetInstrInfo &TII)
Find the split point at which to splice the end of BB into its success stack protector check machine ...
LLT getLLTForMVT(MVT Ty)
Get a rough equivalent of an LLT for a given MVT.
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant bit, stopping at the first 1.
Definition: bit.h:215
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
Definition: Local.h:242
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:419
void computeValueLLTs(const DataLayout &DL, Type &Ty, SmallVectorImpl< LLT > &ValueTys, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
computeValueLLTs - Given an LLVM IR type, compute a sequence of LLTs that represent all the individua...
Definition: Analysis.cpp:141
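computeValueLLTs (declared in llvm/CodeGen/Analysis.h) is how an aggregate IR value gets split into the leaf LLTs that receive virtual registers; a minimal sketch, assuming DL and V come from the enclosing code:
  // Split V's type into leaf LLTs plus the byte offset of each leaf.
  SmallVector<LLT, 4> ValueTys;
  SmallVector<uint64_t, 4> Offsets;
  computeValueLLTs(DL, *V->getType(), ValueTys, &Offsets);
  // ValueTys[i] / Offsets[i] now describe the i-th component; one generic
  // virtual register per entry would be created from here.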
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1647
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:54
@ Global
Append to llvm.global_dtors.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
Definition: Utils.cpp:1168
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1961
llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ FMul
Product of floats.
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1824
std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition: FPEnv.cpp:24
gep_type_iterator gep_type_begin(const User *GEP)
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
Definition: Analysis.cpp:177
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition: Alignment.h:212
unsigned succ_size(const MachineBasicBlock *BB)
LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
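getLLTForType is the usual single-type entry point; a one-line hedged example, with I and MF standing in for the instruction and machine function in scope:
  // Map an IR value's type onto its size-and-shape-only low-level type.
  LLT Ty = getLLTForType(*I.getType(), MF.getDataLayout());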
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:760
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
Pair of physical register and lane mask.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
static bool canHandle(const Instruction *I, const TargetLibraryInfo &TLI)
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
struct PredInfoPair PredInfo