1//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the IRTranslator class.
10//===----------------------------------------------------------------------===//
11
14#include "llvm/ADT/STLExtras.h"
15#include "llvm/ADT/ScopeExit.h"
20#include "llvm/Analysis/Loads.h"
50#include "llvm/IR/BasicBlock.h"
51#include "llvm/IR/CFG.h"
52#include "llvm/IR/Constant.h"
53#include "llvm/IR/Constants.h"
54#include "llvm/IR/DataLayout.h"
57#include "llvm/IR/Function.h"
59#include "llvm/IR/InlineAsm.h"
60#include "llvm/IR/InstrTypes.h"
63#include "llvm/IR/Intrinsics.h"
64#include "llvm/IR/IntrinsicsAMDGPU.h"
65#include "llvm/IR/LLVMContext.h"
66#include "llvm/IR/Metadata.h"
68#include "llvm/IR/Statepoint.h"
69#include "llvm/IR/Type.h"
70#include "llvm/IR/User.h"
71#include "llvm/IR/Value.h"
73#include "llvm/MC/MCContext.h"
74#include "llvm/Pass.h"
77#include "llvm/Support/Debug.h"
84#include <algorithm>
85#include <cassert>
86#include <cstdint>
87#include <iterator>
88#include <optional>
89#include <string>
90#include <utility>
91#include <vector>
92
93#define DEBUG_TYPE "irtranslator"
94
95using namespace llvm;
96
97static cl::opt<bool>
98 EnableCSEInIRTranslator("enable-cse-in-irtranslator",
99 cl::desc("Should enable CSE in irtranslator"),
100 cl::Optional, cl::init(false));
101char IRTranslator::ID = 0;
102
103INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
104 false, false)
110INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
 111 false, false)
 112
113static void reportTranslationError(MachineFunction &MF,
 114 OptimizationRemarkEmitter &ORE,
 115 OptimizationRemarkMissed &R) {
 116 MF.getProperties().setFailedISel();
117 bool IsGlobalISelAbortEnabled =
118 MF.getTarget().Options.GlobalISelAbort == GlobalISelAbortMode::Enable;
119
120 // Print the function name explicitly if we don't have a debug location (which
121 // makes the diagnostic less useful) or if we're going to emit a raw error.
122 if (!R.getLocation().isValid() || IsGlobalISelAbortEnabled)
123 R << (" (in function: " + MF.getName() + ")").str();
124
125 if (IsGlobalISelAbortEnabled)
126 report_fatal_error(Twine(R.getMsg()));
127 else
128 ORE.emit(R);
129}
130
131IRTranslator::IRTranslator(CodeGenOptLevel optlevel)
 132 : MachineFunctionPass(ID), OptLevel(optlevel) {}
133
134#ifndef NDEBUG
135namespace {
136/// Verify that every instruction created has the same DILocation as the
137/// instruction being translated.
138class DILocationVerifier : public GISelChangeObserver {
139 const Instruction *CurrInst = nullptr;
140
141public:
142 DILocationVerifier() = default;
143 ~DILocationVerifier() override = default;
144
145 const Instruction *getCurrentInst() const { return CurrInst; }
146 void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }
147
148 void erasingInstr(MachineInstr &MI) override {}
149 void changingInstr(MachineInstr &MI) override {}
150 void changedInstr(MachineInstr &MI) override {}
151
152 void createdInstr(MachineInstr &MI) override {
153 assert(getCurrentInst() && "Inserted instruction without a current MI");
154
155 // Only print the check message if we're actually checking it.
156#ifndef NDEBUG
157 LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
158 << " was copied to " << MI);
159#endif
160 // We allow insts in the entry block to have no debug loc because
161 // they could have originated from constants, and we don't want a jumpy
162 // debug experience.
163 assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
164 (MI.getParent()->isEntryBlock() && !MI.getDebugLoc()) ||
165 (MI.isDebugInstr())) &&
166 "Line info was not transferred to all instructions");
167 }
168};
169} // namespace
170#endif // ifndef NDEBUG
171
172
187
188IRTranslator::ValueToVRegInfo::VRegListT &
189IRTranslator::allocateVRegs(const Value &Val) {
190 auto VRegsIt = VMap.findVRegs(Val);
191 if (VRegsIt != VMap.vregs_end())
192 return *VRegsIt->second;
193 auto *Regs = VMap.getVRegs(Val);
194 auto *Offsets = VMap.getOffsets(Val);
195 SmallVector<LLT, 4> SplitTys;
196 computeValueLLTs(*DL, *Val.getType(), SplitTys,
197 Offsets->empty() ? Offsets : nullptr);
198 for (unsigned i = 0; i < SplitTys.size(); ++i)
199 Regs->push_back(0);
200 return *Regs;
201}
202
203ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
204 auto VRegsIt = VMap.findVRegs(Val);
205 if (VRegsIt != VMap.vregs_end())
206 return *VRegsIt->second;
207
208 if (Val.getType()->isVoidTy())
209 return *VMap.getVRegs(Val);
210
211 // Create entry for this type.
212 auto *VRegs = VMap.getVRegs(Val);
213 auto *Offsets = VMap.getOffsets(Val);
214
215 if (!Val.getType()->isTokenTy())
216 assert(Val.getType()->isSized() &&
217 "Don't know how to create an empty vreg");
218
219 SmallVector<LLT, 4> SplitTys;
220 computeValueLLTs(*DL, *Val.getType(), SplitTys,
221 Offsets->empty() ? Offsets : nullptr);
222
223 if (!isa<Constant>(Val)) {
224 for (auto Ty : SplitTys)
225 VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
226 return *VRegs;
227 }
228
229 if (Val.getType()->isAggregateType()) {
230 // UndefValue, ConstantAggregateZero
231 auto &C = cast<Constant>(Val);
232 unsigned Idx = 0;
233 while (auto Elt = C.getAggregateElement(Idx++)) {
234 auto EltRegs = getOrCreateVRegs(*Elt);
235 llvm::append_range(*VRegs, EltRegs);
236 }
237 } else {
238 assert(SplitTys.size() == 1 && "unexpectedly split LLT");
239 VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
240 bool Success = translate(cast<Constant>(Val), VRegs->front());
241 if (!Success) {
242 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
243 MF->getFunction().getSubprogram(),
244 &MF->getFunction().getEntryBlock());
245 R << "unable to translate constant: " << ore::NV("Type", Val.getType());
246 reportTranslationError(*MF, *ORE, R);
247 return *VRegs;
248 }
249 }
250
251 return *VRegs;
252}
253
254int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
255 auto [MapEntry, Inserted] = FrameIndices.try_emplace(&AI);
256 if (!Inserted)
257 return MapEntry->second;
258
259 uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
260 uint64_t Size =
261 ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
262
263 // Always allocate at least one byte.
264 Size = std::max<uint64_t>(Size, 1u);
265
266 int &FI = MapEntry->second;
267 FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
268 return FI;
269}
270
271Align IRTranslator::getMemOpAlign(const Instruction &I) {
272 if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
273 return SI->getAlign();
274 if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
275 return LI->getAlign();
276 if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
277 return AI->getAlign();
278 if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
279 return AI->getAlign();
280
281 OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
282 R << "unable to translate memop: " << ore::NV("Opcode", &I);
283 reportTranslationError(*MF, *ORE, R);
284 return Align(1);
285}
286
287MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
288 MachineBasicBlock *MBB = FuncInfo.getMBB(&BB);
289 assert(MBB && "BasicBlock was not encountered before");
290 return *MBB;
291}
292
293void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
294 assert(NewPred && "new predecessor must be a real MachineBasicBlock");
295 MachinePreds[Edge].push_back(NewPred);
296}
297
298static bool targetSupportsBF16Type(const MachineFunction *MF) {
 299 return MF->getTarget().getTargetTriple().isSPIRV();
300}
301
302static bool containsBF16Type(const User &U) {
 303 // BF16 cannot currently be represented by LLT; to avoid miscompiles we
 304 // prevent any instructions from using it. FIXME: This can be removed once
 305 // LLT supports bfloat.
306 return U.getType()->getScalarType()->isBFloatTy() ||
307 any_of(U.operands(), [](Value *V) {
308 return V->getType()->getScalarType()->isBFloatTy();
309 });
310}
311
312bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
313 MachineIRBuilder &MIRBuilder) {
 314 if (containsBF16Type(U) && !targetSupportsBF16Type(MF))
 315 return false;
316
317 // Get or create a virtual register for each value.
318 // Unless the value is a Constant => loadimm cst?
319 // or inline constant each time?
320 // Creation of a virtual register needs to have a size.
321 Register Op0 = getOrCreateVReg(*U.getOperand(0));
322 Register Op1 = getOrCreateVReg(*U.getOperand(1));
323 Register Res = getOrCreateVReg(U);
324 uint32_t Flags = 0;
325 if (isa<Instruction>(U)) {
326 const Instruction &I = cast<Instruction>(U);
 327 Flags = MachineInstr::copyFlagsFromInstruction(I);
 328 }
329
330 MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
331 return true;
332}
333
334bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
335 MachineIRBuilder &MIRBuilder) {
 336 if (containsBF16Type(U) && !targetSupportsBF16Type(MF))
 337 return false;
338
339 Register Op0 = getOrCreateVReg(*U.getOperand(0));
340 Register Res = getOrCreateVReg(U);
341 uint32_t Flags = 0;
342 if (isa<Instruction>(U)) {
343 const Instruction &I = cast<Instruction>(U);
 344 Flags = MachineInstr::copyFlagsFromInstruction(I);
 345 }
346 MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
347 return true;
348}
349
350bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
351 return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
352}
353
354bool IRTranslator::translateCompare(const User &U,
355 MachineIRBuilder &MIRBuilder) {
 356 if (containsBF16Type(U) && !targetSupportsBF16Type(MF))
 357 return false;
358
359 auto *CI = cast<CmpInst>(&U);
360 Register Op0 = getOrCreateVReg(*U.getOperand(0));
361 Register Op1 = getOrCreateVReg(*U.getOperand(1));
362 Register Res = getOrCreateVReg(U);
363 CmpInst::Predicate Pred = CI->getPredicate();
 364 uint32_t Flags = MachineInstr::copyFlagsFromInstruction(*CI);
 365 if (CmpInst::isIntPredicate(Pred))
366 MIRBuilder.buildICmp(Pred, Res, Op0, Op1, Flags);
367 else if (Pred == CmpInst::FCMP_FALSE)
368 MIRBuilder.buildCopy(
369 Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
370 else if (Pred == CmpInst::FCMP_TRUE)
371 MIRBuilder.buildCopy(
372 Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
373 else
374 MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);
375
376 return true;
377}
378
379bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
380 const ReturnInst &RI = cast<ReturnInst>(U);
381 const Value *Ret = RI.getReturnValue();
382 if (Ret && DL->getTypeStoreSize(Ret->getType()).isZero())
383 Ret = nullptr;
384
385 ArrayRef<Register> VRegs;
386 if (Ret)
387 VRegs = getOrCreateVRegs(*Ret);
388
389 Register SwiftErrorVReg = 0;
390 if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
391 SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
392 &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
393 }
394
 395 // The target may move the insertion point, but that is not a problem:
 396 // a return is always the last instruction
 397 // of the block anyway.
398 return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
399}
400
401void IRTranslator::emitBranchForMergedCondition(
 402 const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
 403 MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
404 BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
405 // If the leaf of the tree is a comparison, merge the condition into
406 // the caseblock.
407 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
408 CmpInst::Predicate Condition;
409 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
410 Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
411 } else {
412 const FCmpInst *FC = cast<FCmpInst>(Cond);
413 Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
414 }
415
416 SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
417 BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
418 CurBuilder->getDebugLoc(), TProb, FProb);
419 SL->SwitchCases.push_back(CB);
420 return;
421 }
422
423 // Create a CaseBlock record representing this branch.
 424 CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
 425 SwitchCG::CaseBlock CB(
426 Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
427 nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
428 SL->SwitchCases.push_back(CB);
429}
430
431static bool isValInBlock(const Value *V, const BasicBlock *BB) {
432 if (const Instruction *I = dyn_cast<Instruction>(V))
433 return I->getParent() == BB;
434 return true;
435}
436
437void IRTranslator::findMergedConditions(
 438 const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
 439 MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
 440 Instruction::BinaryOps Opc, BranchProbability TProb,
 441 BranchProbability FProb, bool InvertCond) {
442 using namespace PatternMatch;
443 assert((Opc == Instruction::And || Opc == Instruction::Or) &&
444 "Expected Opc to be AND/OR");
 445 // Skip over nodes that are not part of the tree and remember to invert the
 446 // op and operands at the next level.
447 Value *NotCond;
448 if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
449 isValInBlock(NotCond, CurBB->getBasicBlock())) {
450 findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
451 !InvertCond);
452 return;
453 }
454
 455 const Instruction *BOp = dyn_cast<Instruction>(Cond);
 456 const Value *BOpOp0, *BOpOp1;
457 // Compute the effective opcode for Cond, taking into account whether it needs
458 // to be inverted, e.g.
459 // and (not (or A, B)), C
460 // gets lowered as
461 // and (and (not A, not B), C)
 462 Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
 463 if (BOp) {
464 BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
465 ? Instruction::And
466 : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
467 ? Instruction::Or
 468 : (Instruction::BinaryOps)0);
 469 if (InvertCond) {
470 if (BOpc == Instruction::And)
471 BOpc = Instruction::Or;
472 else if (BOpc == Instruction::Or)
473 BOpc = Instruction::And;
474 }
475 }
476
477 // If this node is not part of the or/and tree, emit it as a branch.
478 // Note that all nodes in the tree should have same opcode.
479 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
480 if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
481 !isValInBlock(BOpOp0, CurBB->getBasicBlock()) ||
482 !isValInBlock(BOpOp1, CurBB->getBasicBlock())) {
483 emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
484 InvertCond);
485 return;
486 }
487
488 // Create TmpBB after CurBB.
489 MachineFunction::iterator BBI(CurBB);
490 MachineBasicBlock *TmpBB =
491 MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
492 CurBB->getParent()->insert(++BBI, TmpBB);
493
494 if (Opc == Instruction::Or) {
495 // Codegen X | Y as:
496 // BB1:
497 // jmp_if_X TBB
498 // jmp TmpBB
499 // TmpBB:
500 // jmp_if_Y TBB
501 // jmp FBB
502 //
503
504 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
505 // The requirement is that
506 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
507 // = TrueProb for original BB.
508 // Assuming the original probabilities are A and B, one choice is to set
509 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
510 // A/(1+B) and 2B/(1+B). This choice assumes that
511 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
512 // Another choice is to assume TrueProb for BB1 equals to TrueProb for
513 // TmpBB, but the math is more complicated.
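 // For example, with original probabilities A = 0.6 (true) and B = 0.4
 // (false): BB1 gets 0.3/0.7, and after normalizing {0.3, 0.4} TmpBB gets
 // 3/7 (true) and 4/7 (false), so the total probability of reaching TBB is
 // 0.3 + 0.7 * 3/7 = 0.6 = A, as required.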
514
515 auto NewTrueProb = TProb / 2;
516 auto NewFalseProb = TProb / 2 + FProb;
517 // Emit the LHS condition.
518 findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
519 NewFalseProb, InvertCond);
520
521 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
522 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
524 // Emit the RHS condition into TmpBB.
525 findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
526 Probs[1], InvertCond);
527 } else {
528 assert(Opc == Instruction::And && "Unknown merge op!");
529 // Codegen X & Y as:
530 // BB1:
531 // jmp_if_X TmpBB
532 // jmp FBB
533 // TmpBB:
534 // jmp_if_Y TBB
535 // jmp FBB
536 //
537 // This requires creation of TmpBB after CurBB.
538
539 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
540 // The requirement is that
541 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
542 // = FalseProb for original BB.
543 // Assuming the original probabilities are A and B, one choice is to set
544 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
545 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
546 // TrueProb for BB1 * FalseProb for TmpBB.
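 // For example, with A = 0.6 (true) and B = 0.4 (false): BB1 gets 0.8/0.2,
 // and after normalizing {0.6, 0.2} TmpBB gets 0.75 (true) and 0.25 (false),
 // so the total probability of reaching FBB is 0.2 + 0.8 * 0.25 = 0.4 = B.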
547
548 auto NewTrueProb = TProb + FProb / 2;
549 auto NewFalseProb = FProb / 2;
550 // Emit the LHS condition.
551 findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
552 NewFalseProb, InvertCond);
553
554 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
555 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
557 // Emit the RHS condition into TmpBB.
558 findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
559 Probs[1], InvertCond);
560 }
561}
562
563bool IRTranslator::shouldEmitAsBranches(
564 const std::vector<SwitchCG::CaseBlock> &Cases) {
565 // For multiple cases, it's better to emit as branches.
566 if (Cases.size() != 2)
567 return true;
568
569 // If this is two comparisons of the same values or'd or and'd together, they
570 // will get folded into a single comparison, so don't emit two blocks.
571 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
572 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
573 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
574 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
575 return false;
576 }
577
578 // Handle: (X != null) | (Y != null) --> (X|Y) != 0
579 // Handle: (X == null) & (Y == null) --> (X|Y) == 0
580 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
581 Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
582 isa<Constant>(Cases[0].CmpRHS) &&
583 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
584 if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
585 Cases[0].TrueBB == Cases[1].ThisBB)
586 return false;
587 if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
588 Cases[0].FalseBB == Cases[1].ThisBB)
589 return false;
590 }
591
592 return true;
593}
594
595bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
596 const BranchInst &BrInst = cast<BranchInst>(U);
597 auto &CurMBB = MIRBuilder.getMBB();
598 auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));
599
600 if (BrInst.isUnconditional()) {
601 // If the unconditional target is the layout successor, fallthrough.
602 if (OptLevel == CodeGenOptLevel::None ||
603 !CurMBB.isLayoutSuccessor(Succ0MBB))
604 MIRBuilder.buildBr(*Succ0MBB);
605
606 // Link successors.
607 for (const BasicBlock *Succ : successors(&BrInst))
608 CurMBB.addSuccessor(&getMBB(*Succ));
609 return true;
610 }
611
612 // If this condition is one of the special cases we handle, do special stuff
613 // now.
614 const Value *CondVal = BrInst.getCondition();
615 MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));
616
617 // If this is a series of conditions that are or'd or and'd together, emit
618 // this as a sequence of branches instead of setcc's with and/or operations.
619 // As long as jumps are not expensive (exceptions for multi-use logic ops,
620 // unpredictable branches, and vector extracts because those jumps are likely
621 // expensive for any target), this should improve performance.
622 // For example, instead of something like:
623 // cmp A, B
624 // C = seteq
625 // cmp D, E
626 // F = setle
627 // or C, F
628 // jnz foo
629 // Emit:
630 // cmp A, B
631 // je foo
632 // cmp D, E
633 // jle foo
634 using namespace PatternMatch;
635 const Instruction *CondI = dyn_cast<Instruction>(CondVal);
636 if (!TLI->isJumpExpensive() && CondI && CondI->hasOneUse() &&
637 !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
 638 Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
 639 Value *Vec;
640 const Value *BOp0, *BOp1;
641 if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
642 Opcode = Instruction::And;
643 else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
644 Opcode = Instruction::Or;
645
646 if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
647 match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
648 findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
649 getEdgeProbability(&CurMBB, Succ0MBB),
650 getEdgeProbability(&CurMBB, Succ1MBB),
651 /*InvertCond=*/false);
652 assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");
653
654 // Allow some cases to be rejected.
655 if (shouldEmitAsBranches(SL->SwitchCases)) {
656 // Emit the branch for this block.
657 emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
658 SL->SwitchCases.erase(SL->SwitchCases.begin());
659 return true;
660 }
661
662 // Okay, we decided not to do this, remove any inserted MBB's and clear
663 // SwitchCases.
664 for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
665 MF->erase(SL->SwitchCases[I].ThisBB);
666
667 SL->SwitchCases.clear();
668 }
669 }
670
671 // Create a CaseBlock record representing this branch.
672 SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
673 ConstantInt::getTrue(MF->getFunction().getContext()),
674 nullptr, Succ0MBB, Succ1MBB, &CurMBB,
675 CurBuilder->getDebugLoc());
676
677 // Use emitSwitchCase to actually insert the fast branch sequence for this
678 // cond branch.
679 emitSwitchCase(CB, &CurMBB, *CurBuilder);
680 return true;
681}
682
683void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
 684 MachineBasicBlock *Dst,
 685 BranchProbability Prob) {
686 if (!FuncInfo.BPI) {
687 Src->addSuccessorWithoutProb(Dst);
688 return;
689 }
690 if (Prob.isUnknown())
691 Prob = getEdgeProbability(Src, Dst);
692 Src->addSuccessor(Dst, Prob);
693}
694
695BranchProbability
696IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
697 const MachineBasicBlock *Dst) const {
698 const BasicBlock *SrcBB = Src->getBasicBlock();
699 const BasicBlock *DstBB = Dst->getBasicBlock();
700 if (!FuncInfo.BPI) {
701 // If BPI is not available, set the default probability as 1 / N, where N is
702 // the number of successors.
703 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
704 return BranchProbability(1, SuccSize);
705 }
706 return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
707}
708
709bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
710 using namespace SwitchCG;
711 // Extract cases from the switch.
712 const SwitchInst &SI = cast<SwitchInst>(U);
713 BranchProbabilityInfo *BPI = FuncInfo.BPI;
714 CaseClusterVector Clusters;
715 Clusters.reserve(SI.getNumCases());
716 for (const auto &I : SI.cases()) {
717 MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
718 assert(Succ && "Could not find successor mbb in mapping");
719 const ConstantInt *CaseVal = I.getCaseValue();
720 BranchProbability Prob =
721 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
722 : BranchProbability(1, SI.getNumCases() + 1);
723 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
724 }
725
726 MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());
727
728 // Cluster adjacent cases with the same destination. We do this at all
729 // optimization levels because it's cheap to do and will make codegen faster
730 // if there are many clusters.
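 // For example, cases 1, 2 and 3 that all branch to the same block are
 // merged into a single [1, 3] range cluster here, before jump tables and
 // bit tests are formed from the clusters.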
731 sortAndRangeify(Clusters);
732
733 MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());
734
735 // If there is only the default destination, jump there directly.
736 if (Clusters.empty()) {
737 SwitchMBB->addSuccessor(DefaultMBB);
738 if (DefaultMBB != SwitchMBB->getNextNode())
739 MIB.buildBr(*DefaultMBB);
740 return true;
741 }
742
743 SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB, nullptr, nullptr);
744 SL->findBitTestClusters(Clusters, &SI);
745
746 LLVM_DEBUG({
747 dbgs() << "Case clusters: ";
748 for (const CaseCluster &C : Clusters) {
749 if (C.Kind == CC_JumpTable)
750 dbgs() << "JT:";
751 if (C.Kind == CC_BitTests)
752 dbgs() << "BT:";
753
754 C.Low->getValue().print(dbgs(), true);
755 if (C.Low != C.High) {
756 dbgs() << '-';
757 C.High->getValue().print(dbgs(), true);
758 }
759 dbgs() << ' ';
760 }
761 dbgs() << '\n';
762 });
763
764 assert(!Clusters.empty());
765 SwitchWorkList WorkList;
766 CaseClusterIt First = Clusters.begin();
767 CaseClusterIt Last = Clusters.end() - 1;
768 auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
769 WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
770
771 while (!WorkList.empty()) {
772 SwitchWorkListItem W = WorkList.pop_back_val();
773
774 unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
775 // For optimized builds, lower large range as a balanced binary tree.
776 if (NumClusters > 3 &&
777 MF->getTarget().getOptLevel() != CodeGenOptLevel::None &&
778 !DefaultMBB->getParent()->getFunction().hasMinSize()) {
779 splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB, MIB);
780 continue;
781 }
782
783 if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
784 return false;
785 }
786 return true;
787}
788
789void IRTranslator::splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
 790 const SwitchCG::SwitchWorkListItem &W,
 791 Value *Cond, MachineBasicBlock *SwitchMBB,
792 MachineIRBuilder &MIB) {
793 using namespace SwitchCG;
794 assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
795 "Clusters not sorted?");
796 assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
797
798 auto [LastLeft, FirstRight, LeftProb, RightProb] =
799 SL->computeSplitWorkItemInfo(W);
800
801 // Use the first element on the right as pivot since we will make less-than
802 // comparisons against it.
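 // For example, if the right-hand half of the work item starts with a
 // cluster whose low value is 10, Pivot is 10 and the CaseBlock created
 // below performs a signed "Cond < 10" compare to pick the left or right
 // subtree.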
803 CaseClusterIt PivotCluster = FirstRight;
804 assert(PivotCluster > W.FirstCluster);
805 assert(PivotCluster <= W.LastCluster);
806
807 CaseClusterIt FirstLeft = W.FirstCluster;
808 CaseClusterIt LastRight = W.LastCluster;
809
810 const ConstantInt *Pivot = PivotCluster->Low;
811
812 // New blocks will be inserted immediately after the current one.
 813 MachineFunction::iterator BBI(W.MBB);
 814 ++BBI;
815
816 // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
817 // we can branch to its destination directly if it's squeezed exactly in
818 // between the known lower bound and Pivot - 1.
819 MachineBasicBlock *LeftMBB;
820 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
821 FirstLeft->Low == W.GE &&
822 (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
823 LeftMBB = FirstLeft->MBB;
824 } else {
825 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
826 FuncInfo.MF->insert(BBI, LeftMBB);
827 WorkList.push_back(
828 {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
829 }
830
831 // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
832 // single cluster, RHS.Low == Pivot, and we can branch to its destination
833 // directly if RHS.High equals the current upper bound.
834 MachineBasicBlock *RightMBB;
835 if (FirstRight == LastRight && FirstRight->Kind == CC_Range && W.LT &&
836 (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
837 RightMBB = FirstRight->MBB;
838 } else {
839 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
840 FuncInfo.MF->insert(BBI, RightMBB);
841 WorkList.push_back(
842 {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
843 }
844
845 // Create the CaseBlock record that will be used to lower the branch.
846 CaseBlock CB(ICmpInst::Predicate::ICMP_SLT, false, Cond, Pivot, nullptr,
847 LeftMBB, RightMBB, W.MBB, MIB.getDebugLoc(), LeftProb,
848 RightProb);
849
850 if (W.MBB == SwitchMBB)
851 emitSwitchCase(CB, SwitchMBB, MIB);
852 else
853 SL->SwitchCases.push_back(CB);
854}
855
856void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
 857 MachineBasicBlock *MBB) {
 858 // Emit the code for the jump table
859 assert(JT.Reg && "Should lower JT Header first!");
860 MachineIRBuilder MIB(*MBB->getParent());
861 MIB.setMBB(*MBB);
862 MIB.setDebugLoc(CurBuilder->getDebugLoc());
863
864 Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
865 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
866
867 auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
868 MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
869}
870
871bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
 872 SwitchCG::JumpTableHeader &JTH,
 873 MachineBasicBlock *HeaderBB) {
874 MachineIRBuilder MIB(*HeaderBB->getParent());
875 MIB.setMBB(*HeaderBB);
876 MIB.setDebugLoc(CurBuilder->getDebugLoc());
877
878 const Value &SValue = *JTH.SValue;
879 // Subtract the lowest switch case value from the value being switched on.
880 const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
881 Register SwitchOpReg = getOrCreateVReg(SValue);
882 auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
883 auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);
884
885 // This value may be smaller or larger than the target's pointer type, and
 886 // therefore may require extension or truncation.
887 auto *PtrIRTy = PointerType::getUnqual(SValue.getContext());
888 const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
889 Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);
890
891 JT.Reg = Sub.getReg(0);
892
893 if (JTH.FallthroughUnreachable) {
894 if (JT.MBB != HeaderBB->getNextNode())
895 MIB.buildBr(*JT.MBB);
896 return true;
897 }
898
899 // Emit the range check for the jump table, and branch to the default block
900 // for the switch statement if the value being switched on exceeds the
901 // largest case in the switch.
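 // For example, for cases 10..13 the header has already computed
 // Sub = Value - 10, so it branches to the default block when the unsigned
 // comparison Sub > 3 is true and otherwise continues to the jump table
 // block.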
902 auto Cst = getOrCreateVReg(
903 *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
904 Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
905 auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);
906
907 auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);
908
909 // Avoid emitting unnecessary branches to the next block.
910 if (JT.MBB != HeaderBB->getNextNode())
911 BrCond = MIB.buildBr(*JT.MBB);
912 return true;
913}
914
915void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
916 MachineBasicBlock *SwitchBB,
917 MachineIRBuilder &MIB) {
918 Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
 919 Register Cond;
 920 DebugLoc OldDbgLoc = MIB.getDebugLoc();
921 MIB.setDebugLoc(CB.DbgLoc);
922 MIB.setMBB(*CB.ThisBB);
923
924 if (CB.PredInfo.NoCmp) {
925 // Branch or fall through to TrueBB.
926 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
927 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
928 CB.ThisBB);
930 if (CB.TrueBB != CB.ThisBB->getNextNode())
931 MIB.buildBr(*CB.TrueBB);
932 MIB.setDebugLoc(OldDbgLoc);
933 return;
934 }
935
936 const LLT i1Ty = LLT::scalar(1);
937 // Build the compare.
938 if (!CB.CmpMHS) {
939 const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
940 // For conditional branch lowering, we might try to do something silly like
 941 // emit a G_ICMP to compare an existing G_ICMP i1 result with true. If so,
942 // just re-use the existing condition vreg.
943 if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && CI->isOne() &&
 944 CB.PredInfo.Pred == CmpInst::ICMP_EQ)
 945 Cond = CondLHS;
946 } else {
947 Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
 948 if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
 949 Cond =
950 MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
951 else
952 Cond =
953 MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
954 }
 955 } else {
 956 assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
 957 "Can only handle SLE ranges");
958
959 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
960 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
961
962 Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
963 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
964 Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
965 Cond =
966 MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
967 } else {
968 const LLT CmpTy = MRI->getType(CmpOpReg);
969 auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
970 auto Diff = MIB.buildConstant(CmpTy, High - Low);
971 Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
972 }
973 }
974
975 // Update successor info
976 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
977
978 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
979 CB.ThisBB);
980
981 // TrueBB and FalseBB are always different unless the incoming IR is
982 // degenerate. This only happens when running llc on weird IR.
983 if (CB.TrueBB != CB.FalseBB)
984 addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
 985 CB.ThisBB->normalizeSuccProbs();
 986
987 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
988 CB.ThisBB);
989
990 MIB.buildBrCond(Cond, *CB.TrueBB);
991 MIB.buildBr(*CB.FalseBB);
992 MIB.setDebugLoc(OldDbgLoc);
993}
994
995bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
996 MachineBasicBlock *SwitchMBB,
997 MachineBasicBlock *CurMBB,
998 MachineBasicBlock *DefaultMBB,
999 MachineIRBuilder &MIB,
 1000 MachineFunction::iterator BBI,
 1001 BranchProbability UnhandledProbs,
 1002 SwitchCG::CaseClusterIt I,
 1003 MachineBasicBlock *Fallthrough,
1004 bool FallthroughUnreachable) {
1005 using namespace SwitchCG;
1006 MachineFunction *CurMF = SwitchMBB->getParent();
1007 // FIXME: Optimize away range check based on pivot comparisons.
1008 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
1009 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
1010 BranchProbability DefaultProb = W.DefaultProb;
1011
1012 // The jump block hasn't been inserted yet; insert it here.
1013 MachineBasicBlock *JumpMBB = JT->MBB;
1014 CurMF->insert(BBI, JumpMBB);
1015
1016 // Since the jump table block is separate from the switch block, we need
1017 // to keep track of it as a machine predecessor to the default block,
1018 // otherwise we lose the phi edges.
1019 addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
1020 CurMBB);
1021 addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
1022 JumpMBB);
1023
1024 auto JumpProb = I->Prob;
1025 auto FallthroughProb = UnhandledProbs;
1026
1027 // If the default statement is a target of the jump table, we evenly
1028 // distribute the default probability to successors of CurMBB. Also
1029 // update the probability on the edge from JumpMBB to Fallthrough.
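 // In other words, half of DefaultProb is moved from the fall-through edge
 // (which performs the range check) onto the edge into the jump table,
 // since the default destination is also reachable through the table.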
1030 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
1031 SE = JumpMBB->succ_end();
1032 SI != SE; ++SI) {
1033 if (*SI == DefaultMBB) {
1034 JumpProb += DefaultProb / 2;
1035 FallthroughProb -= DefaultProb / 2;
1036 JumpMBB->setSuccProbability(SI, DefaultProb / 2);
1037 JumpMBB->normalizeSuccProbs();
1038 } else {
 1039 // Also record edges from the jump table block to its successors.
1040 addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
1041 JumpMBB);
1042 }
1043 }
1044
1045 if (FallthroughUnreachable)
1046 JTH->FallthroughUnreachable = true;
1047
1048 if (!JTH->FallthroughUnreachable)
1049 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
1050 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
1051 CurMBB->normalizeSuccProbs();
1052
 1053 // The jump table header will be inserted in our current block; it will do
 1054 // the range check and fall through to our fallthrough block.
1055 JTH->HeaderBB = CurMBB;
1056 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
1057
1058 // If we're in the right place, emit the jump table header right now.
1059 if (CurMBB == SwitchMBB) {
1060 if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
1061 return false;
1062 JTH->Emitted = true;
1063 }
1064 return true;
1065}
1066bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
1067 Value *Cond,
1068 MachineBasicBlock *Fallthrough,
1069 bool FallthroughUnreachable,
1070 BranchProbability UnhandledProbs,
1071 MachineBasicBlock *CurMBB,
1072 MachineIRBuilder &MIB,
1073 MachineBasicBlock *SwitchMBB) {
1074 using namespace SwitchCG;
1075 const Value *RHS, *LHS, *MHS;
1076 CmpInst::Predicate Pred;
1077 if (I->Low == I->High) {
1078 // Check Cond == I->Low.
1079 Pred = CmpInst::ICMP_EQ;
1080 LHS = Cond;
1081 RHS = I->Low;
1082 MHS = nullptr;
1083 } else {
1084 // Check I->Low <= Cond <= I->High.
1085 Pred = CmpInst::ICMP_SLE;
1086 LHS = I->Low;
1087 MHS = Cond;
1088 RHS = I->High;
1089 }
1090
1091 // If Fallthrough is unreachable, fold away the comparison.
1092 // The false probability is the sum of all unhandled cases.
1093 CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
1094 CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);
1095
1096 emitSwitchCase(CB, SwitchMBB, MIB);
1097 return true;
1098}
1099
1100void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
1101 MachineBasicBlock *SwitchBB) {
1102 MachineIRBuilder &MIB = *CurBuilder;
1103 MIB.setMBB(*SwitchBB);
1104
1105 // Subtract the minimum value.
1106 Register SwitchOpReg = getOrCreateVReg(*B.SValue);
1107
1108 LLT SwitchOpTy = MRI->getType(SwitchOpReg);
1109 Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
1110 auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);
1111
1112 Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
1113 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1114
1115 LLT MaskTy = SwitchOpTy;
1116 if (MaskTy.getSizeInBits() > PtrTy.getSizeInBits() ||
 1117 !llvm::has_single_bit<uint32_t>(MaskTy.getSizeInBits()))
 1118 MaskTy = LLT::scalar(PtrTy.getSizeInBits());
1119 else {
1120 // Ensure that the type will fit the mask value.
1121 for (const SwitchCG::BitTestCase &Case : B.Cases) {
1122 if (!isUIntN(SwitchOpTy.getSizeInBits(), Case.Mask)) {
 1123 // Switch table case ranges are encoded into a series of masks.
 1124 // Just use the pointer type; it's guaranteed to fit.
1125 MaskTy = LLT::scalar(PtrTy.getSizeInBits());
1126 break;
1127 }
1128 }
1129 }
1130 Register SubReg = RangeSub.getReg(0);
1131 if (SwitchOpTy != MaskTy)
1132 SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);
1133
1134 B.RegVT = getMVTForLLT(MaskTy);
1135 B.Reg = SubReg;
1136
1137 MachineBasicBlock *MBB = B.Cases[0].ThisBB;
1138
1139 if (!B.FallthroughUnreachable)
1140 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
1141 addSuccessorWithProb(SwitchBB, MBB, B.Prob);
1142
1143 SwitchBB->normalizeSuccProbs();
1144
1145 if (!B.FallthroughUnreachable) {
1146 // Conditional branch to the default block.
1147 auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
1148 auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
1149 RangeSub, RangeCst);
1150 MIB.buildBrCond(RangeCmp, *B.Default);
1151 }
1152
1153 // Avoid emitting unnecessary branches to the next block.
1154 if (MBB != SwitchBB->getNextNode())
1155 MIB.buildBr(*MBB);
1156}
1157
1158void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
1159 MachineBasicBlock *NextMBB,
1160 BranchProbability BranchProbToNext,
 1161 Register Reg, SwitchCG::BitTestCase &B,
 1162 MachineBasicBlock *SwitchBB) {
1163 MachineIRBuilder &MIB = *CurBuilder;
1164 MIB.setMBB(*SwitchBB);
1165
1166 LLT SwitchTy = getLLTForMVT(BB.RegVT);
1167 Register Cmp;
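 // B.Mask has one bit set for each value handled by this bit test case,
 // counted from the block's first value. For example, values {2, 5, 7}
 // (after subtracting the minimum) give Mask = 0b10100100, and the general
 // form below checks ((1 << Reg) & Mask) != 0.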
1168 unsigned PopCount = llvm::popcount(B.Mask);
1169 if (PopCount == 1) {
1170 // Testing for a single bit; just compare the shift count with what it
1171 // would need to be to shift a 1 bit in that position.
1172 auto MaskTrailingZeros =
1173 MIB.buildConstant(SwitchTy, llvm::countr_zero(B.Mask));
1174 Cmp =
1175 MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
1176 .getReg(0);
1177 } else if (PopCount == BB.Range) {
1178 // There is only one zero bit in the range, test for it directly.
1179 auto MaskTrailingOnes =
1180 MIB.buildConstant(SwitchTy, llvm::countr_one(B.Mask));
1181 Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
1182 .getReg(0);
1183 } else {
1184 // Make desired shift.
1185 auto CstOne = MIB.buildConstant(SwitchTy, 1);
1186 auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);
1187
1188 // Emit bit tests and jumps.
1189 auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
1190 auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
1191 auto CstZero = MIB.buildConstant(SwitchTy, 0);
1192 Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
1193 .getReg(0);
1194 }
1195
1196 // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
1197 addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
1198 // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
1199 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
 1200 // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
 1201 // one, as they are relative probabilities (and thus work more like weights),
 1202 // so we need to normalize them so that they sum to one.
1203 SwitchBB->normalizeSuccProbs();
1204
1205 // Record the fact that the IR edge from the header to the bit test target
 1206 // will go through our new block. Needed for PHIs to have nodes added.
1207 addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
1208 SwitchBB);
1209
1210 MIB.buildBrCond(Cmp, *B.TargetBB);
1211
1212 // Avoid emitting unnecessary branches to the next block.
1213 if (NextMBB != SwitchBB->getNextNode())
1214 MIB.buildBr(*NextMBB);
1215}
1216
1217bool IRTranslator::lowerBitTestWorkItem(
 1218 SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
 1219 MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
 1220 MachineIRBuilder &MIB, MachineFunction::iterator BBI,
 1221 BranchProbability DefaultProb, BranchProbability UnhandledProbs,
 1222 SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
 1223 bool FallthroughUnreachable) {
1224 using namespace SwitchCG;
1225 MachineFunction *CurMF = SwitchMBB->getParent();
1226 // FIXME: Optimize away range check based on pivot comparisons.
1227 BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
1228 // The bit test blocks haven't been inserted yet; insert them here.
1229 for (BitTestCase &BTC : BTB->Cases)
1230 CurMF->insert(BBI, BTC.ThisBB);
1231
1232 // Fill in fields of the BitTestBlock.
1233 BTB->Parent = CurMBB;
1234 BTB->Default = Fallthrough;
1235
1236 BTB->DefaultProb = UnhandledProbs;
 1237 // If the cases in the bit test don't form a contiguous range, we evenly
 1238 // distribute the probability on the edge to Fallthrough between the two
 1239 // successors of CurMBB.
1240 if (!BTB->ContiguousRange) {
1241 BTB->Prob += DefaultProb / 2;
1242 BTB->DefaultProb -= DefaultProb / 2;
1243 }
1244
1245 if (FallthroughUnreachable)
1246 BTB->FallthroughUnreachable = true;
1247
1248 // If we're in the right place, emit the bit test header right now.
1249 if (CurMBB == SwitchMBB) {
1250 emitBitTestHeader(*BTB, SwitchMBB);
1251 BTB->Emitted = true;
1252 }
1253 return true;
1254}
1255
1256bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
1257 Value *Cond,
1258 MachineBasicBlock *SwitchMBB,
1259 MachineBasicBlock *DefaultMBB,
1260 MachineIRBuilder &MIB) {
1261 using namespace SwitchCG;
1262 MachineFunction *CurMF = FuncInfo.MF;
1263 MachineBasicBlock *NextMBB = nullptr;
 1264 MachineFunction::iterator BBI(W.MBB);
 1265 if (++BBI != FuncInfo.MF->end())
1266 NextMBB = &*BBI;
1267
1268 if (EnableOpts) {
1269 // Here, we order cases by probability so the most likely case will be
1270 // checked first. However, two clusters can have the same probability in
1271 // which case their relative ordering is non-deterministic. So we use Low
1272 // as a tie-breaker as clusters are guaranteed to never overlap.
1273 llvm::sort(W.FirstCluster, W.LastCluster + 1,
1274 [](const CaseCluster &a, const CaseCluster &b) {
1275 return a.Prob != b.Prob
1276 ? a.Prob > b.Prob
1277 : a.Low->getValue().slt(b.Low->getValue());
1278 });
1279
1280 // Rearrange the case blocks so that the last one falls through if possible
1281 // without changing the order of probabilities.
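 // For example, if an earlier cluster has the same probability as the last
 // one but targets the block that is laid out next (NextMBB), the two are
 // swapped so the final comparison can fall through instead of needing an
 // extra unconditional branch.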
1282 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
1283 --I;
1284 if (I->Prob > W.LastCluster->Prob)
1285 break;
1286 if (I->Kind == CC_Range && I->MBB == NextMBB) {
1287 std::swap(*I, *W.LastCluster);
1288 break;
1289 }
1290 }
1291 }
1292
1293 // Compute total probability.
1294 BranchProbability DefaultProb = W.DefaultProb;
1295 BranchProbability UnhandledProbs = DefaultProb;
1296 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
1297 UnhandledProbs += I->Prob;
1298
1299 MachineBasicBlock *CurMBB = W.MBB;
1300 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
1301 bool FallthroughUnreachable = false;
1302 MachineBasicBlock *Fallthrough;
1303 if (I == W.LastCluster) {
1304 // For the last cluster, fall through to the default destination.
1305 Fallthrough = DefaultMBB;
1306 FallthroughUnreachable = isa<UnreachableInst>(
1307 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
1308 } else {
1309 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
1310 CurMF->insert(BBI, Fallthrough);
1311 }
1312 UnhandledProbs -= I->Prob;
1313
1314 switch (I->Kind) {
1315 case CC_BitTests: {
1316 if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1317 DefaultProb, UnhandledProbs, I, Fallthrough,
1318 FallthroughUnreachable)) {
1319 LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");
1320 return false;
1321 }
1322 break;
1323 }
1324
1325 case CC_JumpTable: {
1326 if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1327 UnhandledProbs, I, Fallthrough,
1328 FallthroughUnreachable)) {
1329 LLVM_DEBUG(dbgs() << "Failed to lower jump table");
1330 return false;
1331 }
1332 break;
1333 }
1334 case CC_Range: {
1335 if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
1336 FallthroughUnreachable, UnhandledProbs,
1337 CurMBB, MIB, SwitchMBB)) {
1338 LLVM_DEBUG(dbgs() << "Failed to lower switch range");
1339 return false;
1340 }
1341 break;
1342 }
1343 }
1344 CurMBB = Fallthrough;
1345 }
1346
1347 return true;
1348}
1349
1350bool IRTranslator::translateIndirectBr(const User &U,
1351 MachineIRBuilder &MIRBuilder) {
1352 const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
1353
1354 const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
1355 MIRBuilder.buildBrIndirect(Tgt);
1356
1357 // Link successors.
1358 SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
1359 MachineBasicBlock &CurBB = MIRBuilder.getMBB();
1360 for (const BasicBlock *Succ : successors(&BrInst)) {
1361 // It's legal for indirectbr instructions to have duplicate blocks in the
1362 // destination list. We don't allow this in MIR. Skip anything that's
1363 // already a successor.
1364 if (!AddedSuccessors.insert(Succ).second)
1365 continue;
1366 CurBB.addSuccessor(&getMBB(*Succ));
1367 }
1368
1369 return true;
1370}
1371
1372static bool isSwiftError(const Value *V) {
1373 if (auto Arg = dyn_cast<Argument>(V))
1374 return Arg->hasSwiftErrorAttr();
1375 if (auto AI = dyn_cast<AllocaInst>(V))
1376 return AI->isSwiftError();
1377 return false;
1378}
1379
1380bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
1381 const LoadInst &LI = cast<LoadInst>(U);
1382 TypeSize StoreSize = DL->getTypeStoreSize(LI.getType());
1383 if (StoreSize.isZero())
1384 return true;
1385
1386 ArrayRef<Register> Regs = getOrCreateVRegs(LI);
1387 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
1388 Register Base = getOrCreateVReg(*LI.getPointerOperand());
1389 AAMDNodes AAInfo = LI.getAAMetadata();
1390
1391 const Value *Ptr = LI.getPointerOperand();
1392 Type *OffsetIRTy = DL->getIndexType(Ptr->getType());
1393 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1394
1395 if (CLI->supportSwiftError() && isSwiftError(Ptr)) {
1396 assert(Regs.size() == 1 && "swifterror should be single pointer");
1397 Register VReg =
1398 SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), Ptr);
1399 MIRBuilder.buildCopy(Regs[0], VReg);
1400 return true;
1401 }
1402
 1403 MachineMemOperand::Flags Flags =
 1404 TLI->getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
1405 if (AA && !(Flags & MachineMemOperand::MOInvariant)) {
1406 if (AA->pointsToConstantMemory(
1407 MemoryLocation(Ptr, LocationSize::precise(StoreSize), AAInfo))) {
 1408 Flags |= MachineMemOperand::MOInvariant;
 1409 }
1410 }
1411
1412 const MDNode *Ranges =
1413 Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
1414 for (unsigned i = 0; i < Regs.size(); ++i) {
1415 Register Addr;
1416 MIRBuilder.materializeObjectPtrOffset(Addr, Base, OffsetTy, Offsets[i]);
1417
1418 MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i]);
1419 Align BaseAlign = getMemOpAlign(LI);
1420 auto MMO =
1421 MF->getMachineMemOperand(Ptr, Flags, MRI->getType(Regs[i]),
1422 commonAlignment(BaseAlign, Offsets[i]), AAInfo,
1423 Ranges, LI.getSyncScopeID(), LI.getOrdering());
1424 MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
1425 }
1426
1427 return true;
1428}
1429
1430bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
1431 const StoreInst &SI = cast<StoreInst>(U);
1432 if (DL->getTypeStoreSize(SI.getValueOperand()->getType()).isZero())
1433 return true;
1434
1435 ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
1436 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
1437 Register Base = getOrCreateVReg(*SI.getPointerOperand());
1438
1439 Type *OffsetIRTy = DL->getIndexType(SI.getPointerOperandType());
1440 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1441
1442 if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
1443 assert(Vals.size() == 1 && "swifterror should be single pointer");
1444
1445 Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
1446 SI.getPointerOperand());
1447 MIRBuilder.buildCopy(VReg, Vals[0]);
1448 return true;
1449 }
1450
1451 MachineMemOperand::Flags Flags = TLI->getStoreMemOperandFlags(SI, *DL);
1452
1453 for (unsigned i = 0; i < Vals.size(); ++i) {
1454 Register Addr;
1455 MIRBuilder.materializeObjectPtrOffset(Addr, Base, OffsetTy, Offsets[i]);
1456
1457 MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i]);
1458 Align BaseAlign = getMemOpAlign(SI);
1459 auto MMO = MF->getMachineMemOperand(Ptr, Flags, MRI->getType(Vals[i]),
1460 commonAlignment(BaseAlign, Offsets[i]),
1461 SI.getAAMetadata(), nullptr,
1462 SI.getSyncScopeID(), SI.getOrdering());
1463 MIRBuilder.buildStore(Vals[i], Addr, *MMO);
1464 }
1465 return true;
1466}
1467
1468static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
 1469 const Value *Src = U.getOperand(0);
1470 Type *Int32Ty = Type::getInt32Ty(U.getContext());
1471
1472 // getIndexedOffsetInType is designed for GEPs, so the first index is the
1473 // usual array element rather than looking into the actual aggregate.
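 // For example, extractvalue {i32, {i64, i8}} %v, 1, 1 yields the index
 // list {0, 1, 1}; with a typical 64-bit data layout the computed byte
 // offset is 16, because the nested struct is aligned to 8 bytes.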
 1474 SmallVector<Value *, 8> Indices;
 1475 Indices.push_back(ConstantInt::get(Int32Ty, 0));
1476
1477 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
1478 for (auto Idx : EVI->indices())
1479 Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1480 } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
1481 for (auto Idx : IVI->indices())
1482 Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1483 } else {
1484 llvm::append_range(Indices, drop_begin(U.operands()));
1485 }
1486
1487 return static_cast<uint64_t>(
1488 DL.getIndexedOffsetInType(Src->getType(), Indices));
1489}
1490
1491bool IRTranslator::translateExtractValue(const User &U,
1492 MachineIRBuilder &MIRBuilder) {
1493 const Value *Src = U.getOperand(0);
1494 uint64_t Offset = getOffsetFromIndices(U, *DL);
1495 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1496 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
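 // Find the first leaf whose offset is not less than the requested offset;
 // e.g. with leaf offsets {0, 8, 12} and Offset == 8, Idx becomes 1 and the
 // extracted value simply reuses the source vregs starting at that leaf.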
1497 unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
1498 auto &DstRegs = allocateVRegs(U);
1499
1500 for (unsigned i = 0; i < DstRegs.size(); ++i)
1501 DstRegs[i] = SrcRegs[Idx++];
1502
1503 return true;
1504}
1505
1506bool IRTranslator::translateInsertValue(const User &U,
1507 MachineIRBuilder &MIRBuilder) {
1508 const Value *Src = U.getOperand(0);
1509 uint64_t Offset = getOffsetFromIndices(U, *DL);
1510 auto &DstRegs = allocateVRegs(U);
1511 ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
1512 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1513 ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
1514 auto *InsertedIt = InsertedRegs.begin();
1515
1516 for (unsigned i = 0; i < DstRegs.size(); ++i) {
1517 if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
1518 DstRegs[i] = *InsertedIt++;
1519 else
1520 DstRegs[i] = SrcRegs[i];
1521 }
1522
1523 return true;
1524}
1525
1526bool IRTranslator::translateSelect(const User &U,
1527 MachineIRBuilder &MIRBuilder) {
1528 Register Tst = getOrCreateVReg(*U.getOperand(0));
1529 ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
1530 ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
1531 ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
1532
1533 uint32_t Flags = 0;
1534 if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
 1535 Flags = MachineInstr::copyFlagsFromInstruction(*SI);
 1536
1537 for (unsigned i = 0; i < ResRegs.size(); ++i) {
1538 MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
1539 }
1540
1541 return true;
1542}
1543
1544bool IRTranslator::translateCopy(const User &U, const Value &V,
1545 MachineIRBuilder &MIRBuilder) {
1546 Register Src = getOrCreateVReg(V);
1547 auto &Regs = *VMap.getVRegs(U);
1548 if (Regs.empty()) {
1549 Regs.push_back(Src);
1550 VMap.getOffsets(U)->push_back(0);
1551 } else {
1552 // If we already assigned a vreg for this instruction, we can't change that.
1553 // Emit a copy to satisfy the users we already emitted.
1554 MIRBuilder.buildCopy(Regs[0], Src);
1555 }
1556 return true;
1557}
1558
1559bool IRTranslator::translateBitCast(const User &U,
1560 MachineIRBuilder &MIRBuilder) {
1561 // If we're bitcasting to the source type, we can reuse the source vreg.
1562 if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
1563 getLLTForType(*U.getType(), *DL)) {
1564 // If the source is a ConstantInt then it was probably created by
1565 // ConstantHoisting and we should leave it alone.
1566 if (isa<ConstantInt>(U.getOperand(0)))
1567 return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
1568 MIRBuilder);
1569 return translateCopy(U, *U.getOperand(0), MIRBuilder);
1570 }
1571
1572 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
1573}
1574
1575bool IRTranslator::translateCast(unsigned Opcode, const User &U,
1576 MachineIRBuilder &MIRBuilder) {
1578 return false;
1579
1580 uint32_t Flags = 0;
1581 if (const Instruction *I = dyn_cast<Instruction>(&U))
1583
1584 Register Op = getOrCreateVReg(*U.getOperand(0));
1585 Register Res = getOrCreateVReg(U);
1586 MIRBuilder.buildInstr(Opcode, {Res}, {Op}, Flags);
1587 return true;
1588}
1589
1590bool IRTranslator::translateGetElementPtr(const User &U,
1591 MachineIRBuilder &MIRBuilder) {
1592 Value &Op0 = *U.getOperand(0);
1593 Register BaseReg = getOrCreateVReg(Op0);
1594 Type *PtrIRTy = Op0.getType();
1595 LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1596 Type *OffsetIRTy = DL->getIndexType(PtrIRTy);
1597 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1598
1599 uint32_t PtrAddFlags = 0;
1600 // Each PtrAdd generated to implement the GEP inherits its nuw, nusw, inbounds
1601 // flags.
1602 if (const Instruction *I = dyn_cast<Instruction>(&U))
 1603 PtrAddFlags = MachineInstr::copyFlagsFromInstruction(*I);
 1604
1605 auto PtrAddFlagsWithConst = [&](int64_t Offset) {
1606 // For nusw/inbounds GEP with an offset that is nonnegative when interpreted
1607 // as signed, assume there is no unsigned overflow.
1608 if (Offset >= 0 && (PtrAddFlags & MachineInstr::MIFlag::NoUSWrap))
1609 return PtrAddFlags | MachineInstr::MIFlag::NoUWrap;
1610 return PtrAddFlags;
1611 };
1612
 1613 // Normalize a vector GEP: all scalar operands should be converted to
 1614 // splat vectors.
1615 unsigned VectorWidth = 0;
1616
1617 // True if we should use a splat vector; using VectorWidth alone is not
1618 // sufficient.
1619 bool WantSplatVector = false;
1620 if (auto *VT = dyn_cast<VectorType>(U.getType())) {
1621 VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
1622 // We don't produce 1 x N vectors; those are treated as scalars.
1623 WantSplatVector = VectorWidth > 1;
1624 }
1625
1626 // We might need to splat the base pointer into a vector if the offsets
1627 // are vectors.
1628 if (WantSplatVector && !PtrTy.isVector()) {
1629 BaseReg = MIRBuilder
1630 .buildSplatBuildVector(LLT::fixed_vector(VectorWidth, PtrTy),
1631 BaseReg)
1632 .getReg(0);
1633 PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
1634 PtrTy = getLLTForType(*PtrIRTy, *DL);
1635 OffsetIRTy = DL->getIndexType(PtrIRTy);
1636 OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1637 }
1638
1639 int64_t Offset = 0;
1640 for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
1641 GTI != E; ++GTI) {
1642 const Value *Idx = GTI.getOperand();
1643 if (StructType *StTy = GTI.getStructTypeOrNull()) {
1644 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
1645 Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
1646 continue;
1647 } else {
1648 uint64_t ElementSize = GTI.getSequentialElementStride(*DL);
1649
1650 // If this is a scalar constant or a splat vector of constants,
1651 // handle it quickly.
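 // For example, for "gep i32, ptr %p, i64 3" the constant index folds into
 // Offset (3 * 4 bytes here), so a single G_PTR_ADD is emitted at the end
 // instead of a G_MUL plus G_PTR_ADD per index.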
1652 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
1653 if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {
1654 Offset += ElementSize * *Val;
1655 continue;
1656 }
1657 }
1658
1659 if (Offset != 0) {
1660 auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
1661 BaseReg = MIRBuilder
1662 .buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0),
1663 PtrAddFlagsWithConst(Offset))
1664 .getReg(0);
1665 Offset = 0;
1666 }
1667
1668 Register IdxReg = getOrCreateVReg(*Idx);
1669 LLT IdxTy = MRI->getType(IdxReg);
1670 if (IdxTy != OffsetTy) {
1671 if (!IdxTy.isVector() && WantSplatVector) {
1672 IdxReg = MIRBuilder
1673                     .buildSplatBuildVector(OffsetTy,
1674                                            IdxReg)
1675 .getReg(0);
1676 }
1677
1678 IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
1679 }
1680
1681 // N = N + Idx * ElementSize;
1682 // Avoid doing it for ElementSize of 1.
1683 Register GepOffsetReg;
1684 if (ElementSize != 1) {
1685 auto ElementSizeMIB = MIRBuilder.buildConstant(
1686 getLLTForType(*OffsetIRTy, *DL), ElementSize);
1687
1688 // The multiplication is NUW if the GEP is NUW and NSW if the GEP is
1689 // NUSW.
1690 uint32_t ScaleFlags = PtrAddFlags & MachineInstr::MIFlag::NoUWrap;
1691 if (PtrAddFlags & MachineInstr::MIFlag::NoUSWrap)
1692 ScaleFlags |= MachineInstr::MIFlag::NoSWrap;
1693
1694 GepOffsetReg =
1695 MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB, ScaleFlags)
1696 .getReg(0);
1697 } else {
1698 GepOffsetReg = IdxReg;
1699 }
1700
1701 BaseReg =
1702 MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg, PtrAddFlags)
1703 .getReg(0);
1704 }
1705 }
1706
1707 if (Offset != 0) {
1708 auto OffsetMIB =
1709 MIRBuilder.buildConstant(OffsetTy, Offset);
1710
1711 MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),
1712 PtrAddFlagsWithConst(Offset));
1713 return true;
1714 }
1715
1716 MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
1717 return true;
1718}
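// For illustration (editorial sketch, not upstream code): given the struct
// type {i32, i64} (field 1 at byte offset 8, allocation size 16), a GEP like
//   %p = getelementptr inbounds {i32, i64}, ptr %base, i64 %i, i32 1
// would be lowered by the loop above to roughly
//   %scale:_(s64) = G_CONSTANT i64 16
//   %idx:_(s64)   = G_MUL %i, %scale          ; nuw/nsw derived from GEP flags
//   %tmp:_(p0)    = G_PTR_ADD %base, %idx
//   %off:_(s64)   = G_CONSTANT i64 8
//   %p:_(p0)      = G_PTR_ADD %tmp, %off      ; accumulated constant offset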
1719
1720bool IRTranslator::translateMemFunc(const CallInst &CI,
1721 MachineIRBuilder &MIRBuilder,
1722 unsigned Opcode) {
1723 const Value *SrcPtr = CI.getArgOperand(1);
1724 // If the source is undef, then just emit a nop.
1725 if (isa<UndefValue>(SrcPtr))
1726 return true;
1727
1728  SmallVector<Register, 3> SrcRegs;
1729
1730 unsigned MinPtrSize = UINT_MAX;
1731 for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
1732 Register SrcReg = getOrCreateVReg(**AI);
1733 LLT SrcTy = MRI->getType(SrcReg);
1734 if (SrcTy.isPointer())
1735 MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);
1736 SrcRegs.push_back(SrcReg);
1737 }
1738
1739 LLT SizeTy = LLT::scalar(MinPtrSize);
1740
1741 // The size operand should be the minimum of the pointer sizes.
1742 Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
1743 if (MRI->getType(SizeOpReg) != SizeTy)
1744 SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0);
1745
1746 auto ICall = MIRBuilder.buildInstr(Opcode);
1747 for (Register SrcReg : SrcRegs)
1748 ICall.addUse(SrcReg);
1749
1750 Align DstAlign;
1751 Align SrcAlign;
1752 unsigned IsVol =
1753 cast<ConstantInt>(CI.getArgOperand(CI.arg_size() - 1))->getZExtValue();
1754
1755 ConstantInt *CopySize = nullptr;
1756
1757 if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
1758 DstAlign = MCI->getDestAlign().valueOrOne();
1759 SrcAlign = MCI->getSourceAlign().valueOrOne();
1760 CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
1761 } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
1762 DstAlign = MMI->getDestAlign().valueOrOne();
1763 SrcAlign = MMI->getSourceAlign().valueOrOne();
1764 CopySize = dyn_cast<ConstantInt>(MMI->getArgOperand(2));
1765 } else {
1766 auto *MSI = cast<MemSetInst>(&CI);
1767 DstAlign = MSI->getDestAlign().valueOrOne();
1768 }
1769
1770 if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
1771 // We need to propagate the tail call flag from the IR inst as an argument.
1772 // Otherwise, we have to pessimize and assume later that we cannot tail call
1773 // any memory intrinsics.
1774 ICall.addImm(CI.isTailCall() ? 1 : 0);
1775 }
1776
1777  // Create mem operands to store the alignment and volatile info.
1778  MachineMemOperand::Flags LoadFlags = MachineMemOperand::MOLoad;
1779  MachineMemOperand::Flags StoreFlags = MachineMemOperand::MOStore;
1780 if (IsVol) {
1781 LoadFlags |= MachineMemOperand::MOVolatile;
1782 StoreFlags |= MachineMemOperand::MOVolatile;
1783 }
1784
1785 AAMDNodes AAInfo = CI.getAAMetadata();
1786 if (AA && CopySize &&
1787 AA->pointsToConstantMemory(MemoryLocation(
1788 SrcPtr, LocationSize::precise(CopySize->getZExtValue()), AAInfo))) {
1789 LoadFlags |= MachineMemOperand::MOInvariant;
1790
1791 // FIXME: pointsToConstantMemory probably does not imply dereferenceable,
1792 // but the previous usage implied it did. Probably should check
1793 // isDereferenceableAndAlignedPointer.
1794    LoadFlags |= MachineMemOperand::MODereferenceable;
1795  }
1796
1797 ICall.addMemOperand(
1798 MF->getMachineMemOperand(MachinePointerInfo(CI.getArgOperand(0)),
1799 StoreFlags, 1, DstAlign, AAInfo));
1800 if (Opcode != TargetOpcode::G_MEMSET)
1801 ICall.addMemOperand(MF->getMachineMemOperand(
1802 MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));
1803
1804 return true;
1805}
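// For illustration (editorial sketch): a call such as
//   call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %n, i1 false)
// is expected to come out roughly as
//   G_MEMCPY %dst(p0), %src(p0), %n(s64), 0   ; trailing imm = tail-call flag
// carrying a store memoperand for the destination and a load memoperand for
// the source, with the size operand zext/trunc'd to the narrowest pointer
// width among the pointer arguments.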
1806
1807bool IRTranslator::translateTrap(const CallInst &CI,
1808 MachineIRBuilder &MIRBuilder,
1809 unsigned Opcode) {
1810 StringRef TrapFuncName =
1811 CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
1812 if (TrapFuncName.empty()) {
1813 if (Opcode == TargetOpcode::G_UBSANTRAP) {
1814 uint64_t Code = cast<ConstantInt>(CI.getOperand(0))->getZExtValue();
1815 MIRBuilder.buildInstr(Opcode, {}, ArrayRef<llvm::SrcOp>{Code});
1816 } else {
1817 MIRBuilder.buildInstr(Opcode);
1818 }
1819 return true;
1820 }
1821
1822 CallLowering::CallLoweringInfo Info;
1823 if (Opcode == TargetOpcode::G_UBSANTRAP)
1824 Info.OrigArgs.push_back({getOrCreateVRegs(*CI.getArgOperand(0)),
1825 CI.getArgOperand(0)->getType(), 0});
1826
1827 Info.Callee = MachineOperand::CreateES(TrapFuncName.data());
1828 Info.CB = &CI;
1829 Info.OrigRet = {Register(), Type::getVoidTy(CI.getContext()), 0};
1830 return CLI->lowerCall(MIRBuilder, Info);
1831}
1832
1833bool IRTranslator::translateVectorInterleave2Intrinsic(
1834 const CallInst &CI, MachineIRBuilder &MIRBuilder) {
1835 assert(CI.getIntrinsicID() == Intrinsic::vector_interleave2 &&
1836 "This function can only be called on the interleave2 intrinsic!");
1837 // Canonicalize interleave2 to G_SHUFFLE_VECTOR (similar to SelectionDAG).
1838 Register Op0 = getOrCreateVReg(*CI.getOperand(0));
1839 Register Op1 = getOrCreateVReg(*CI.getOperand(1));
1840 Register Res = getOrCreateVReg(CI);
1841
1842 LLT OpTy = MRI->getType(Op0);
1843 MIRBuilder.buildShuffleVector(Res, Op0, Op1,
1844                                createInterleaveMask(OpTy.getNumElements(), 2));
1845
1846 return true;
1847}
1848
1849bool IRTranslator::translateVectorDeinterleave2Intrinsic(
1850 const CallInst &CI, MachineIRBuilder &MIRBuilder) {
1851 assert(CI.getIntrinsicID() == Intrinsic::vector_deinterleave2 &&
1852 "This function can only be called on the deinterleave2 intrinsic!");
1853 // Canonicalize deinterleave2 to shuffles that extract sub-vectors (similar to
1854 // SelectionDAG).
1855 Register Op = getOrCreateVReg(*CI.getOperand(0));
1856 auto Undef = MIRBuilder.buildUndef(MRI->getType(Op));
1857 ArrayRef<Register> Res = getOrCreateVRegs(CI);
1858
1859 LLT ResTy = MRI->getType(Res[0]);
1860 MIRBuilder.buildShuffleVector(Res[0], Op, Undef,
1861 createStrideMask(0, 2, ResTy.getNumElements()));
1862 MIRBuilder.buildShuffleVector(Res[1], Op, Undef,
1863 createStrideMask(1, 2, ResTy.getNumElements()));
1864
1865 return true;
1866}
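// For illustration (editorial sketch): with fixed <4 x i32> operands,
// interleave2 becomes a single G_SHUFFLE_VECTOR with mask <0,4,1,5,2,6,3,7>,
// while deinterleave2 of an <8 x i32> input becomes two G_SHUFFLE_VECTORs
// against G_IMPLICIT_DEF using the stride masks <0,2,4,6> and <1,3,5,7>.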
1867
1868void IRTranslator::getStackGuard(Register DstReg,
1869 MachineIRBuilder &MIRBuilder) {
1870 Value *Global = TLI->getSDagStackGuard(*MF->getFunction().getParent());
1871 if (!Global) {
1872 LLVMContext &Ctx = MIRBuilder.getContext();
1873 Ctx.diagnose(DiagnosticInfoGeneric("unable to lower stackguard"));
1874 MIRBuilder.buildUndef(DstReg);
1875 return;
1876 }
1877
1878 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1879 MRI->setRegClass(DstReg, TRI->getPointerRegClass());
1880 auto MIB =
1881 MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
1882
1883 unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
1884 LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));
1885
1886 MachinePointerInfo MPInfo(Global);
1887  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
1888               MachineMemOperand::MODereferenceable;
1889  MachineMemOperand *MemRef = MF->getMachineMemOperand(
1890 MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
1891 MIB.setMemRefs({MemRef});
1892}
1893
1894bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
1895 MachineIRBuilder &MIRBuilder) {
1896 ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
1897 MIRBuilder.buildInstr(
1898 Op, {ResRegs[0], ResRegs[1]},
1899 {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});
1900
1901 return true;
1902}
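// For illustration (editorial sketch): the struct result of an overflow
// intrinsic is split into two vregs by getOrCreateVRegs, so
//   %res = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
// becomes roughly
//   %sum:_(s32), %ovf:_(s1) = G_UADDO %a, %b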
1903
1904bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
1905 MachineIRBuilder &MIRBuilder) {
1906 Register Dst = getOrCreateVReg(CI);
1907 Register Src0 = getOrCreateVReg(*CI.getOperand(0));
1908 Register Src1 = getOrCreateVReg(*CI.getOperand(1));
1909 uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
1910 MIRBuilder.buildInstr(Op, {Dst}, { Src0, Src1, Scale });
1911 return true;
1912}
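// For illustration (editorial sketch): the scale is encoded as an immediate
// source operand, so
//   %r = call i32 @llvm.smul.fix.i32(i32 %a, i32 %b, i32 4)
// becomes roughly
//   %r:_(s32) = G_SMULFIX %a, %b, 4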
1913
1914unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
1915 switch (ID) {
1916 default:
1917 break;
1918 case Intrinsic::acos:
1919 return TargetOpcode::G_FACOS;
1920 case Intrinsic::asin:
1921 return TargetOpcode::G_FASIN;
1922 case Intrinsic::atan:
1923 return TargetOpcode::G_FATAN;
1924 case Intrinsic::atan2:
1925 return TargetOpcode::G_FATAN2;
1926 case Intrinsic::bswap:
1927 return TargetOpcode::G_BSWAP;
1928 case Intrinsic::bitreverse:
1929 return TargetOpcode::G_BITREVERSE;
1930 case Intrinsic::fshl:
1931 return TargetOpcode::G_FSHL;
1932 case Intrinsic::fshr:
1933 return TargetOpcode::G_FSHR;
1934 case Intrinsic::ceil:
1935 return TargetOpcode::G_FCEIL;
1936 case Intrinsic::cos:
1937 return TargetOpcode::G_FCOS;
1938 case Intrinsic::cosh:
1939 return TargetOpcode::G_FCOSH;
1940 case Intrinsic::ctpop:
1941 return TargetOpcode::G_CTPOP;
1942 case Intrinsic::exp:
1943 return TargetOpcode::G_FEXP;
1944 case Intrinsic::exp2:
1945 return TargetOpcode::G_FEXP2;
1946 case Intrinsic::exp10:
1947 return TargetOpcode::G_FEXP10;
1948 case Intrinsic::fabs:
1949 return TargetOpcode::G_FABS;
1950 case Intrinsic::copysign:
1951 return TargetOpcode::G_FCOPYSIGN;
1952 case Intrinsic::minnum:
1953 return TargetOpcode::G_FMINNUM;
1954 case Intrinsic::maxnum:
1955 return TargetOpcode::G_FMAXNUM;
1956 case Intrinsic::minimum:
1957 return TargetOpcode::G_FMINIMUM;
1958 case Intrinsic::maximum:
1959 return TargetOpcode::G_FMAXIMUM;
1960 case Intrinsic::minimumnum:
1961 return TargetOpcode::G_FMINIMUMNUM;
1962 case Intrinsic::maximumnum:
1963 return TargetOpcode::G_FMAXIMUMNUM;
1964 case Intrinsic::canonicalize:
1965 return TargetOpcode::G_FCANONICALIZE;
1966 case Intrinsic::floor:
1967 return TargetOpcode::G_FFLOOR;
1968 case Intrinsic::fma:
1969 return TargetOpcode::G_FMA;
1970 case Intrinsic::log:
1971 return TargetOpcode::G_FLOG;
1972 case Intrinsic::log2:
1973 return TargetOpcode::G_FLOG2;
1974 case Intrinsic::log10:
1975 return TargetOpcode::G_FLOG10;
1976 case Intrinsic::ldexp:
1977 return TargetOpcode::G_FLDEXP;
1978 case Intrinsic::nearbyint:
1979 return TargetOpcode::G_FNEARBYINT;
1980 case Intrinsic::pow:
1981 return TargetOpcode::G_FPOW;
1982 case Intrinsic::powi:
1983 return TargetOpcode::G_FPOWI;
1984 case Intrinsic::rint:
1985 return TargetOpcode::G_FRINT;
1986 case Intrinsic::round:
1987 return TargetOpcode::G_INTRINSIC_ROUND;
1988 case Intrinsic::roundeven:
1989 return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
1990 case Intrinsic::sin:
1991 return TargetOpcode::G_FSIN;
1992 case Intrinsic::sinh:
1993 return TargetOpcode::G_FSINH;
1994 case Intrinsic::sqrt:
1995 return TargetOpcode::G_FSQRT;
1996 case Intrinsic::tan:
1997 return TargetOpcode::G_FTAN;
1998 case Intrinsic::tanh:
1999 return TargetOpcode::G_FTANH;
2000 case Intrinsic::trunc:
2001 return TargetOpcode::G_INTRINSIC_TRUNC;
2002 case Intrinsic::readcyclecounter:
2003 return TargetOpcode::G_READCYCLECOUNTER;
2004 case Intrinsic::readsteadycounter:
2005 return TargetOpcode::G_READSTEADYCOUNTER;
2006 case Intrinsic::ptrmask:
2007 return TargetOpcode::G_PTRMASK;
2008 case Intrinsic::lrint:
2009 return TargetOpcode::G_INTRINSIC_LRINT;
2010 case Intrinsic::llrint:
2011 return TargetOpcode::G_INTRINSIC_LLRINT;
2012 // FADD/FMUL require checking the FMF, so are handled elsewhere.
2013 case Intrinsic::vector_reduce_fmin:
2014 return TargetOpcode::G_VECREDUCE_FMIN;
2015 case Intrinsic::vector_reduce_fmax:
2016 return TargetOpcode::G_VECREDUCE_FMAX;
2017 case Intrinsic::vector_reduce_fminimum:
2018 return TargetOpcode::G_VECREDUCE_FMINIMUM;
2019 case Intrinsic::vector_reduce_fmaximum:
2020 return TargetOpcode::G_VECREDUCE_FMAXIMUM;
2021 case Intrinsic::vector_reduce_add:
2022 return TargetOpcode::G_VECREDUCE_ADD;
2023 case Intrinsic::vector_reduce_mul:
2024 return TargetOpcode::G_VECREDUCE_MUL;
2025 case Intrinsic::vector_reduce_and:
2026 return TargetOpcode::G_VECREDUCE_AND;
2027 case Intrinsic::vector_reduce_or:
2028 return TargetOpcode::G_VECREDUCE_OR;
2029 case Intrinsic::vector_reduce_xor:
2030 return TargetOpcode::G_VECREDUCE_XOR;
2031 case Intrinsic::vector_reduce_smax:
2032 return TargetOpcode::G_VECREDUCE_SMAX;
2033 case Intrinsic::vector_reduce_smin:
2034 return TargetOpcode::G_VECREDUCE_SMIN;
2035 case Intrinsic::vector_reduce_umax:
2036 return TargetOpcode::G_VECREDUCE_UMAX;
2037 case Intrinsic::vector_reduce_umin:
2038 return TargetOpcode::G_VECREDUCE_UMIN;
2039 case Intrinsic::experimental_vector_compress:
2040 return TargetOpcode::G_VECTOR_COMPRESS;
2041 case Intrinsic::lround:
2042 return TargetOpcode::G_LROUND;
2043 case Intrinsic::llround:
2044 return TargetOpcode::G_LLROUND;
2045 case Intrinsic::get_fpenv:
2046 return TargetOpcode::G_GET_FPENV;
2047 case Intrinsic::get_fpmode:
2048 return TargetOpcode::G_GET_FPMODE;
2049 }
2050  return Intrinsic::not_intrinsic;
2051}
2052
2053bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
2054                                            Intrinsic::ID ID,
2055                                            MachineIRBuilder &MIRBuilder) {
2056
2057 unsigned Op = getSimpleIntrinsicOpcode(ID);
2058
2059 // Is this a simple intrinsic?
2060  if (Op == Intrinsic::not_intrinsic)
2061    return false;
2062
2063 // Yes. Let's translate it.
2064  SmallVector<llvm::SrcOp, 4> VRegs;
2065  for (const auto &Arg : CI.args())
2066 VRegs.push_back(getOrCreateVReg(*Arg));
2067
2068 MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
2069                        MachineInstr::copyFlagsFromInstruction(CI));
2070  return true;
2071}
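// For illustration (editorial sketch): "simple" intrinsics map one-to-one
// onto a generic opcode, with the call's FP flags carried over, e.g.
//   %r = call nnan float @llvm.sqrt.f32(float %x)
// becomes roughly
//   %r:_(s32) = nnan G_FSQRT %x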
2072
2073// TODO: Include ConstrainedOps.def when all strict instructions are defined.
2074unsigned IRTranslator::getConstrainedOpcode(Intrinsic::ID ID) const {
2075  switch (ID) {
2076 case Intrinsic::experimental_constrained_fadd:
2077 return TargetOpcode::G_STRICT_FADD;
2078 case Intrinsic::experimental_constrained_fsub:
2079 return TargetOpcode::G_STRICT_FSUB;
2080 case Intrinsic::experimental_constrained_fmul:
2081 return TargetOpcode::G_STRICT_FMUL;
2082 case Intrinsic::experimental_constrained_fdiv:
2083 return TargetOpcode::G_STRICT_FDIV;
2084 case Intrinsic::experimental_constrained_frem:
2085 return TargetOpcode::G_STRICT_FREM;
2086 case Intrinsic::experimental_constrained_fma:
2087 return TargetOpcode::G_STRICT_FMA;
2088 case Intrinsic::experimental_constrained_sqrt:
2089 return TargetOpcode::G_STRICT_FSQRT;
2090 case Intrinsic::experimental_constrained_ldexp:
2091 return TargetOpcode::G_STRICT_FLDEXP;
2092 default:
2093 return 0;
2094 }
2095}
2096
2097bool IRTranslator::translateConstrainedFPIntrinsic(
2098 const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
2099  fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();
2100
2101 unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
2102 if (!Opcode)
2103 return false;
2104
2105  uint32_t Flags = MachineInstr::copyFlagsFromInstruction(FPI);
2106  if (EB == fp::ExceptionBehavior::ebIgnore)
2107    Flags |= MachineInstr::NoFPExcept;
2108
2109  SmallVector<llvm::SrcOp, 4> VRegs;
2110 for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I)
2111 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(I)));
2112
2113 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
2114 return true;
2115}
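// For illustration (editorial sketch): constrained operations keep their
// strict semantics by using G_STRICT_* opcodes, e.g.
//   %r = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b,
//            metadata !"round.dynamic", metadata !"fpexcept.ignore")
// becomes roughly
//   %r:_(s32) = nofpexcept G_STRICT_FADD %a, %b
// where the nofpexcept flag reflects the "fpexcept.ignore" behaviour.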
2116
2117std::optional<MCRegister> IRTranslator::getArgPhysReg(Argument &Arg) {
2118 auto VRegs = getOrCreateVRegs(Arg);
2119 if (VRegs.size() != 1)
2120 return std::nullopt;
2121
2122 // Arguments are lowered as a copy of a livein physical register.
2123 auto *VRegDef = MF->getRegInfo().getVRegDef(VRegs[0]);
2124 if (!VRegDef || !VRegDef->isCopy())
2125 return std::nullopt;
2126 return VRegDef->getOperand(1).getReg().asMCReg();
2127}
2128
2129bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val,
2130 const DILocalVariable *Var,
2131 const DIExpression *Expr,
2132 const DebugLoc &DL,
2133 MachineIRBuilder &MIRBuilder) {
2134 auto *Arg = dyn_cast<Argument>(Val);
2135 if (!Arg)
2136 return false;
2137
2138 if (!Expr->isEntryValue())
2139 return false;
2140
2141 std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
2142 if (!PhysReg) {
2143 LLVM_DEBUG(dbgs() << "Dropping dbg." << (isDeclare ? "declare" : "value")
2144 << ": expression is entry_value but "
2145 << "couldn't find a physical register\n");
2146 LLVM_DEBUG(dbgs() << *Var << "\n");
2147 return true;
2148 }
2149
2150 if (isDeclare) {
2151 // Append an op deref to account for the fact that this is a dbg_declare.
2152 Expr = DIExpression::append(Expr, dwarf::DW_OP_deref);
2153 MF->setVariableDbgInfo(Var, Expr, *PhysReg, DL);
2154 } else {
2155 MIRBuilder.buildDirectDbgValue(*PhysReg, Var, Expr);
2156 }
2157
2158 return true;
2159}
2160
2161static unsigned getConvOpcode(Intrinsic::ID ID) {
2162  switch (ID) {
2163 default:
2164 llvm_unreachable("Unexpected intrinsic");
2165 case Intrinsic::experimental_convergence_anchor:
2166 return TargetOpcode::CONVERGENCECTRL_ANCHOR;
2167 case Intrinsic::experimental_convergence_entry:
2168 return TargetOpcode::CONVERGENCECTRL_ENTRY;
2169 case Intrinsic::experimental_convergence_loop:
2170 return TargetOpcode::CONVERGENCECTRL_LOOP;
2171 }
2172}
2173
2174bool IRTranslator::translateConvergenceControlIntrinsic(
2175 const CallInst &CI, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder) {
2176 MachineInstrBuilder MIB = MIRBuilder.buildInstr(getConvOpcode(ID));
2177 Register OutputReg = getOrCreateConvergenceTokenVReg(CI);
2178 MIB.addDef(OutputReg);
2179
2180 if (ID == Intrinsic::experimental_convergence_loop) {
2181    auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl);
2182    assert(Bundle && "Expected a convergence control token.");
2183 Register InputReg =
2184 getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());
2185 MIB.addUse(InputReg);
2186 }
2187
2188 return true;
2189}
2190
2191bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
2192 MachineIRBuilder &MIRBuilder) {
2193 if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
2194 if (ORE->enabled()) {
2195 if (MemoryOpRemark::canHandle(MI, *LibInfo)) {
2196 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
2197 R.visit(MI);
2198 }
2199 }
2200 }
2201
2202  // If this is a simple intrinsic (that is, we just need to add a def of
2203  // a vreg and uses for each arg operand), then translate it.
2204 if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
2205 return true;
2206
2207 switch (ID) {
2208 default:
2209 break;
2210 case Intrinsic::lifetime_start:
2211 case Intrinsic::lifetime_end: {
2212 // No stack colouring in O0, discard region information.
2213 if (MF->getTarget().getOptLevel() == CodeGenOptLevel::None ||
2214 MF->getFunction().hasOptNone())
2215 return true;
2216
2217 unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
2218 : TargetOpcode::LIFETIME_END;
2219
2220 const AllocaInst *AI = dyn_cast<AllocaInst>(CI.getArgOperand(0));
2221 if (!AI || !AI->isStaticAlloca())
2222 return true;
2223
2224 MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
2225 return true;
2226 }
2227 case Intrinsic::fake_use: {
2228    SmallVector<llvm::SrcOp, 4> VRegs;
2229    for (const auto &Arg : CI.args())
2230 llvm::append_range(VRegs, getOrCreateVRegs(*Arg));
2231 MIRBuilder.buildInstr(TargetOpcode::FAKE_USE, {}, VRegs);
2232 MF->setHasFakeUses(true);
2233 return true;
2234 }
2235 case Intrinsic::dbg_declare: {
2236 const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
2237 assert(DI.getVariable() && "Missing variable");
2238 translateDbgDeclareRecord(DI.getAddress(), DI.hasArgList(), DI.getVariable(),
2239 DI.getExpression(), DI.getDebugLoc(), MIRBuilder);
2240 return true;
2241 }
2242 case Intrinsic::dbg_label: {
2243 const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
2244 assert(DI.getLabel() && "Missing label");
2245
2246    assert(DI.getLabel()->isValidLocationForIntrinsic(
2247               MIRBuilder.getDebugLoc()) &&
2248 "Expected inlined-at fields to agree");
2249
2250 MIRBuilder.buildDbgLabel(DI.getLabel());
2251 return true;
2252 }
2253 case Intrinsic::vaend:
2254 // No target I know of cares about va_end. Certainly no in-tree target
2255 // does. Simplest intrinsic ever!
2256 return true;
2257 case Intrinsic::vastart: {
2258 Value *Ptr = CI.getArgOperand(0);
2259 unsigned ListSize = TLI->getVaListSizeInBits(*DL) / 8;
2260 Align Alignment = getKnownAlignment(Ptr, *DL);
2261
2262 MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
2263 .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
2264                                                  MachineMemOperand::MOStore,
2265                                                  ListSize, Alignment));
2266 return true;
2267 }
2268 case Intrinsic::dbg_assign:
2269 // A dbg.assign is a dbg.value with more information about stack locations,
2270 // typically produced during optimisation of variables with leaked
2271 // addresses. We can treat it like a normal dbg_value intrinsic here; to
2272 // benefit from the full analysis of stack/SSA locations, GlobalISel would
2273 // need to register for and use the AssignmentTrackingAnalysis pass.
2274 [[fallthrough]];
2275 case Intrinsic::dbg_value: {
2276 // This form of DBG_VALUE is target-independent.
2277 const DbgValueInst &DI = cast<DbgValueInst>(CI);
2278 translateDbgValueRecord(DI.getValue(), DI.hasArgList(), DI.getVariable(),
2279 DI.getExpression(), DI.getDebugLoc(), MIRBuilder);
2280 return true;
2281 }
2282 case Intrinsic::uadd_with_overflow:
2283 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
2284 case Intrinsic::sadd_with_overflow:
2285 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
2286 case Intrinsic::usub_with_overflow:
2287 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
2288 case Intrinsic::ssub_with_overflow:
2289 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
2290 case Intrinsic::umul_with_overflow:
2291 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
2292 case Intrinsic::smul_with_overflow:
2293 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
2294 case Intrinsic::uadd_sat:
2295 return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
2296 case Intrinsic::sadd_sat:
2297 return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
2298 case Intrinsic::usub_sat:
2299 return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
2300 case Intrinsic::ssub_sat:
2301 return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
2302 case Intrinsic::ushl_sat:
2303 return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
2304 case Intrinsic::sshl_sat:
2305 return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
2306 case Intrinsic::umin:
2307 return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
2308 case Intrinsic::umax:
2309 return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
2310 case Intrinsic::smin:
2311 return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
2312 case Intrinsic::smax:
2313 return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
2314 case Intrinsic::abs:
2315 // TODO: Preserve "int min is poison" arg in GMIR?
2316 return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
2317 case Intrinsic::smul_fix:
2318 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
2319 case Intrinsic::umul_fix:
2320 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
2321 case Intrinsic::smul_fix_sat:
2322 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
2323 case Intrinsic::umul_fix_sat:
2324 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
2325 case Intrinsic::sdiv_fix:
2326 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
2327 case Intrinsic::udiv_fix:
2328 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
2329 case Intrinsic::sdiv_fix_sat:
2330 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
2331 case Intrinsic::udiv_fix_sat:
2332 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
2333 case Intrinsic::fmuladd: {
2334 const TargetMachine &TM = MF->getTarget();
2335 Register Dst = getOrCreateVReg(CI);
2336 Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
2337 Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
2338 Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
2339    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
2340        TLI->isFMAFasterThanFMulAndFAdd(*MF,
2341 TLI->getValueType(*DL, CI.getType()))) {
2342 // TODO: Revisit this to see if we should move this part of the
2343 // lowering to the combiner.
2344 MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
2345                          MachineInstr::copyFlagsFromInstruction(CI));
2346    } else {
2347 LLT Ty = getLLTForType(*CI.getType(), *DL);
2348 auto FMul = MIRBuilder.buildFMul(
2349 Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
2350 MIRBuilder.buildFAdd(Dst, FMul, Op2,
2351                           MachineInstr::copyFlagsFromInstruction(CI));
2352    }
2353 return true;
2354 }
2355 case Intrinsic::convert_from_fp16:
2356 // FIXME: This intrinsic should probably be removed from the IR.
2357 MIRBuilder.buildFPExt(getOrCreateVReg(CI),
2358 getOrCreateVReg(*CI.getArgOperand(0)),
2359                          MachineInstr::copyFlagsFromInstruction(CI));
2360    return true;
2361 case Intrinsic::convert_to_fp16:
2362 // FIXME: This intrinsic should probably be removed from the IR.
2363 MIRBuilder.buildFPTrunc(getOrCreateVReg(CI),
2364 getOrCreateVReg(*CI.getArgOperand(0)),
2365                            MachineInstr::copyFlagsFromInstruction(CI));
2366    return true;
2367 case Intrinsic::frexp: {
2368 ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
2369 MIRBuilder.buildFFrexp(VRegs[0], VRegs[1],
2370 getOrCreateVReg(*CI.getArgOperand(0)),
2371                           MachineInstr::copyFlagsFromInstruction(CI));
2372    return true;
2373 }
2374 case Intrinsic::modf: {
2375 ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
2376 MIRBuilder.buildModf(VRegs[0], VRegs[1],
2377 getOrCreateVReg(*CI.getArgOperand(0)),
2378                         MachineInstr::copyFlagsFromInstruction(CI));
2379    return true;
2380 }
2381 case Intrinsic::sincos: {
2382 ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
2383 MIRBuilder.buildFSincos(VRegs[0], VRegs[1],
2384 getOrCreateVReg(*CI.getArgOperand(0)),
2385                            MachineInstr::copyFlagsFromInstruction(CI));
2386    return true;
2387 }
2388 case Intrinsic::fptosi_sat:
2389 MIRBuilder.buildFPTOSI_SAT(getOrCreateVReg(CI),
2390 getOrCreateVReg(*CI.getArgOperand(0)));
2391 return true;
2392 case Intrinsic::fptoui_sat:
2393 MIRBuilder.buildFPTOUI_SAT(getOrCreateVReg(CI),
2394 getOrCreateVReg(*CI.getArgOperand(0)));
2395 return true;
2396 case Intrinsic::memcpy_inline:
2397 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
2398 case Intrinsic::memcpy:
2399 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
2400 case Intrinsic::memmove:
2401 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
2402 case Intrinsic::memset:
2403 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
2404 case Intrinsic::eh_typeid_for: {
2405 GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
2406 Register Reg = getOrCreateVReg(CI);
2407 unsigned TypeID = MF->getTypeIDFor(GV);
2408 MIRBuilder.buildConstant(Reg, TypeID);
2409 return true;
2410 }
2411 case Intrinsic::objectsize:
2412 llvm_unreachable("llvm.objectsize.* should have been lowered already");
2413
2414 case Intrinsic::is_constant:
2415 llvm_unreachable("llvm.is.constant.* should have been lowered already");
2416
2417 case Intrinsic::stackguard:
2418 getStackGuard(getOrCreateVReg(CI), MIRBuilder);
2419 return true;
2420 case Intrinsic::stackprotector: {
2421 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2422 Register GuardVal;
2423 if (TLI->useLoadStackGuardNode(*CI.getModule())) {
2424 GuardVal = MRI->createGenericVirtualRegister(PtrTy);
2425 getStackGuard(GuardVal, MIRBuilder);
2426 } else
2427 GuardVal = getOrCreateVReg(*CI.getArgOperand(0)); // The guard's value.
2428
2429 AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
2430 int FI = getOrCreateFrameIndex(*Slot);
2431 MF->getFrameInfo().setStackProtectorIndex(FI);
2432
2433 MIRBuilder.buildStore(
2434 GuardVal, getOrCreateVReg(*Slot),
2435 *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
2436                                  MachineMemOperand::MOStore |
2437                                      MachineMemOperand::MOVolatile,
2438                                  PtrTy, Align(8)));
2439 return true;
2440 }
2441 case Intrinsic::stacksave: {
2442 MIRBuilder.buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});
2443 return true;
2444 }
2445 case Intrinsic::stackrestore: {
2446 MIRBuilder.buildInstr(TargetOpcode::G_STACKRESTORE, {},
2447 {getOrCreateVReg(*CI.getArgOperand(0))});
2448 return true;
2449 }
2450 case Intrinsic::cttz:
2451 case Intrinsic::ctlz: {
2452 ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
2453 bool isTrailing = ID == Intrinsic::cttz;
2454 unsigned Opcode = isTrailing
2455 ? Cst->isZero() ? TargetOpcode::G_CTTZ
2456 : TargetOpcode::G_CTTZ_ZERO_UNDEF
2457 : Cst->isZero() ? TargetOpcode::G_CTLZ
2458 : TargetOpcode::G_CTLZ_ZERO_UNDEF;
2459 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
2460 {getOrCreateVReg(*CI.getArgOperand(0))});
2461 return true;
2462 }
2463 case Intrinsic::invariant_start: {
2464 MIRBuilder.buildUndef(getOrCreateVReg(CI));
2465 return true;
2466 }
2467 case Intrinsic::invariant_end:
2468 return true;
2469 case Intrinsic::expect:
2470 case Intrinsic::expect_with_probability:
2471 case Intrinsic::annotation:
2472 case Intrinsic::ptr_annotation:
2473 case Intrinsic::launder_invariant_group:
2474 case Intrinsic::strip_invariant_group: {
2475 // Drop the intrinsic, but forward the value.
2476 MIRBuilder.buildCopy(getOrCreateVReg(CI),
2477 getOrCreateVReg(*CI.getArgOperand(0)));
2478 return true;
2479 }
2480 case Intrinsic::assume:
2481 case Intrinsic::experimental_noalias_scope_decl:
2482 case Intrinsic::var_annotation:
2483 case Intrinsic::sideeffect:
2484 // Discard annotate attributes, assumptions, and artificial side-effects.
2485 return true;
2486 case Intrinsic::read_volatile_register:
2487 case Intrinsic::read_register: {
2488 Value *Arg = CI.getArgOperand(0);
2489 MIRBuilder
2490 .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
2491 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
2492 return true;
2493 }
2494 case Intrinsic::write_register: {
2495 Value *Arg = CI.getArgOperand(0);
2496 MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
2497 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
2498 .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
2499 return true;
2500 }
2501 case Intrinsic::localescape: {
2502 MachineBasicBlock &EntryMBB = MF->front();
2503 StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName());
2504
2505 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
2506 // is the same on all targets.
2507 for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
2508 Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts();
2509 if (isa<ConstantPointerNull>(Arg))
2510 continue; // Skip null pointers. They represent a hole in index space.
2511
2512 int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
2513 MCSymbol *FrameAllocSym =
2514 MF->getContext().getOrCreateFrameAllocSymbol(EscapedName, Idx);
2515
2516 // This should be inserted at the start of the entry block.
2517 auto LocalEscape =
2518 MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE)
2519 .addSym(FrameAllocSym)
2520 .addFrameIndex(FI);
2521
2522 EntryMBB.insert(EntryMBB.begin(), LocalEscape);
2523 }
2524
2525 return true;
2526 }
2527 case Intrinsic::vector_reduce_fadd:
2528 case Intrinsic::vector_reduce_fmul: {
2529 // Need to check for the reassoc flag to decide whether we want a
2530 // sequential reduction opcode or not.
2531 Register Dst = getOrCreateVReg(CI);
2532 Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0));
2533 Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1));
2534 unsigned Opc = 0;
2535 if (!CI.hasAllowReassoc()) {
2536 // The sequential ordering case.
2537 Opc = ID == Intrinsic::vector_reduce_fadd
2538 ? TargetOpcode::G_VECREDUCE_SEQ_FADD
2539 : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
2540 if (!MRI->getType(VecSrc).isVector())
2541 Opc = ID == Intrinsic::vector_reduce_fadd ? TargetOpcode::G_FADD
2542 : TargetOpcode::G_FMUL;
2543 MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
2544                            MachineInstr::copyFlagsFromInstruction(CI));
2545      return true;
2546 }
2547 // We split the operation into a separate G_FADD/G_FMUL + the reduce,
2548 // since the associativity doesn't matter.
2549 unsigned ScalarOpc;
2550 if (ID == Intrinsic::vector_reduce_fadd) {
2551 Opc = TargetOpcode::G_VECREDUCE_FADD;
2552 ScalarOpc = TargetOpcode::G_FADD;
2553 } else {
2554 Opc = TargetOpcode::G_VECREDUCE_FMUL;
2555 ScalarOpc = TargetOpcode::G_FMUL;
2556 }
2557 LLT DstTy = MRI->getType(Dst);
2558 auto Rdx = MIRBuilder.buildInstr(
2559 Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI));
2560 MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
2561                          MachineInstr::copyFlagsFromInstruction(CI));
2562
2563 return true;
2564 }
2565 case Intrinsic::trap:
2566 return translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);
2567 case Intrinsic::debugtrap:
2568 return translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);
2569 case Intrinsic::ubsantrap:
2570 return translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);
2571 case Intrinsic::allow_runtime_check:
2572 case Intrinsic::allow_ubsan_check:
2573 MIRBuilder.buildCopy(getOrCreateVReg(CI),
2574 getOrCreateVReg(*ConstantInt::getTrue(CI.getType())));
2575 return true;
2576 case Intrinsic::amdgcn_cs_chain:
2577 case Intrinsic::amdgcn_call_whole_wave:
2578 return translateCallBase(CI, MIRBuilder);
2579 case Intrinsic::fptrunc_round: {
2580    uint32_t Flags = MachineInstr::copyFlagsFromInstruction(CI);
2581
2582 // Convert the metadata argument to a constant integer
2583 Metadata *MD = cast<MetadataAsValue>(CI.getArgOperand(1))->getMetadata();
2584 std::optional<RoundingMode> RoundMode =
2585 convertStrToRoundingMode(cast<MDString>(MD)->getString());
2586
2587 // Add the Rounding mode as an integer
2588 MIRBuilder
2589 .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
2590 {getOrCreateVReg(CI)},
2591 {getOrCreateVReg(*CI.getArgOperand(0))}, Flags)
2592 .addImm((int)*RoundMode);
2593
2594 return true;
2595 }
2596 case Intrinsic::is_fpclass: {
2597 Value *FpValue = CI.getOperand(0);
2598 ConstantInt *TestMaskValue = cast<ConstantInt>(CI.getOperand(1));
2599
2600 MIRBuilder
2601 .buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
2602 {getOrCreateVReg(*FpValue)})
2603 .addImm(TestMaskValue->getZExtValue());
2604
2605 return true;
2606 }
2607 case Intrinsic::set_fpenv: {
2608 Value *FPEnv = CI.getOperand(0);
2609 MIRBuilder.buildSetFPEnv(getOrCreateVReg(*FPEnv));
2610 return true;
2611 }
2612 case Intrinsic::reset_fpenv:
2613 MIRBuilder.buildResetFPEnv();
2614 return true;
2615 case Intrinsic::set_fpmode: {
2616 Value *FPState = CI.getOperand(0);
2617 MIRBuilder.buildSetFPMode(getOrCreateVReg(*FPState));
2618 return true;
2619 }
2620 case Intrinsic::reset_fpmode:
2621 MIRBuilder.buildResetFPMode();
2622 return true;
2623 case Intrinsic::get_rounding:
2624 MIRBuilder.buildGetRounding(getOrCreateVReg(CI));
2625 return true;
2626 case Intrinsic::set_rounding:
2627 MIRBuilder.buildSetRounding(getOrCreateVReg(*CI.getOperand(0)));
2628 return true;
2629 case Intrinsic::vscale: {
2630 MIRBuilder.buildVScale(getOrCreateVReg(CI), 1);
2631 return true;
2632 }
2633 case Intrinsic::scmp:
2634 MIRBuilder.buildSCmp(getOrCreateVReg(CI),
2635 getOrCreateVReg(*CI.getOperand(0)),
2636 getOrCreateVReg(*CI.getOperand(1)));
2637 return true;
2638 case Intrinsic::ucmp:
2639 MIRBuilder.buildUCmp(getOrCreateVReg(CI),
2640 getOrCreateVReg(*CI.getOperand(0)),
2641 getOrCreateVReg(*CI.getOperand(1)));
2642 return true;
2643 case Intrinsic::vector_extract:
2644 return translateExtractVector(CI, MIRBuilder);
2645 case Intrinsic::vector_insert:
2646 return translateInsertVector(CI, MIRBuilder);
2647 case Intrinsic::stepvector: {
2648 MIRBuilder.buildStepVector(getOrCreateVReg(CI), 1);
2649 return true;
2650 }
2651 case Intrinsic::prefetch: {
2652 Value *Addr = CI.getOperand(0);
2653 unsigned RW = cast<ConstantInt>(CI.getOperand(1))->getZExtValue();
2654 unsigned Locality = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
2655 unsigned CacheType = cast<ConstantInt>(CI.getOperand(3))->getZExtValue();
2656
2657    auto Flags = RW ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
2658    auto &MMO = *MF->getMachineMemOperand(MachinePointerInfo(Addr), Flags,
2659 LLT(), Align());
2660
2661 MIRBuilder.buildPrefetch(getOrCreateVReg(*Addr), RW, Locality, CacheType,
2662 MMO);
2663
2664 return true;
2665 }
2666
2667 case Intrinsic::vector_interleave2:
2668 case Intrinsic::vector_deinterleave2: {
2669 // Both intrinsics have at least one operand.
2670 Value *Op0 = CI.getOperand(0);
2671 LLT ResTy = getLLTForType(*Op0->getType(), MIRBuilder.getDataLayout());
2672 if (!ResTy.isFixedVector())
2673 return false;
2674
2675 if (CI.getIntrinsicID() == Intrinsic::vector_interleave2)
2676 return translateVectorInterleave2Intrinsic(CI, MIRBuilder);
2677
2678 return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);
2679 }
2680
2681#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
2682 case Intrinsic::INTRINSIC:
2683#include "llvm/IR/ConstrainedOps.def"
2684 return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
2685 MIRBuilder);
2686 case Intrinsic::experimental_convergence_anchor:
2687 case Intrinsic::experimental_convergence_entry:
2688 case Intrinsic::experimental_convergence_loop:
2689 return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder);
2690 case Intrinsic::reloc_none: {
2691 Metadata *MD = cast<MetadataAsValue>(CI.getArgOperand(0))->getMetadata();
2692 StringRef SymbolName = cast<MDString>(MD)->getString();
2693 MIRBuilder.buildInstr(TargetOpcode::RELOC_NONE)
2695 return true;
2696 }
2697 }
2698 return false;
2699}
2700
2701bool IRTranslator::translateInlineAsm(const CallBase &CB,
2702 MachineIRBuilder &MIRBuilder) {
2703  if (containsBF16Type(CB))
2704    return false;
2705
2706 const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
2707
2708 if (!ALI) {
2709 LLVM_DEBUG(
2710 dbgs() << "Inline asm lowering is not supported for this target yet\n");
2711 return false;
2712 }
2713
2714 return ALI->lowerInlineAsm(
2715 MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
2716}
2717
2718bool IRTranslator::translateCallBase(const CallBase &CB,
2719 MachineIRBuilder &MIRBuilder) {
2720 ArrayRef<Register> Res = getOrCreateVRegs(CB);
2721
2722  SmallVector<ArrayRef<Register>, 8> Args;
2723  Register SwiftInVReg = 0;
2724 Register SwiftErrorVReg = 0;
2725 for (const auto &Arg : CB.args()) {
2726 if (CLI->supportSwiftError() && isSwiftError(Arg)) {
2727 assert(SwiftInVReg == 0 && "Expected only one swift error argument");
2728 LLT Ty = getLLTForType(*Arg->getType(), *DL);
2729 SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
2730 MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
2731 &CB, &MIRBuilder.getMBB(), Arg));
2732 Args.emplace_back(ArrayRef(SwiftInVReg));
2733 SwiftErrorVReg =
2734 SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
2735 continue;
2736 }
2737 Args.push_back(getOrCreateVRegs(*Arg));
2738 }
2739
2740 if (auto *CI = dyn_cast<CallInst>(&CB)) {
2741 if (ORE->enabled()) {
2742 if (MemoryOpRemark::canHandle(CI, *LibInfo)) {
2743 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
2744 R.visit(CI);
2745 }
2746 }
2747 }
2748
2749 std::optional<CallLowering::PtrAuthInfo> PAI;
2750 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_ptrauth)) {
2751 // Functions should never be ptrauth-called directly.
2752 assert(!CB.getCalledFunction() && "invalid direct ptrauth call");
2753
2754 const Value *Key = Bundle->Inputs[0];
2755 const Value *Discriminator = Bundle->Inputs[1];
2756
2757 // Look through ptrauth constants to try to eliminate the matching bundle
2758 // and turn this into a direct call with no ptrauth.
2759 // CallLowering will use the raw pointer if it doesn't find the PAI.
2760 const auto *CalleeCPA = dyn_cast<ConstantPtrAuth>(CB.getCalledOperand());
2761 if (!CalleeCPA || !isa<Function>(CalleeCPA->getPointer()) ||
2762 !CalleeCPA->isKnownCompatibleWith(Key, Discriminator, *DL)) {
2763 // If we can't make it direct, package the bundle into PAI.
2764 Register DiscReg = getOrCreateVReg(*Discriminator);
2765 PAI = CallLowering::PtrAuthInfo{cast<ConstantInt>(Key)->getZExtValue(),
2766 DiscReg};
2767 }
2768 }
2769
2770 Register ConvergenceCtrlToken = 0;
2771 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
2772 const auto &Token = *Bundle->Inputs[0].get();
2773 ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);
2774 }
2775
2776 // We don't set HasCalls on MFI here yet because call lowering may decide to
2777 // optimize into tail calls. Instead, we defer that to selection where a final
2778 // scan is done to check if any instructions are calls.
2779 bool Success = CLI->lowerCall(
2780 MIRBuilder, CB, Res, Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,
2781 [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
2782
2783 // Check if we just inserted a tail call.
2784 if (Success) {
2785 assert(!HasTailCall && "Can't tail call return twice from block?");
2786 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
2787 HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
2788 }
2789
2790 return Success;
2791}
2792
2793bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
2794  if (containsBF16Type(U))
2795    return false;
2796
2797 const CallInst &CI = cast<CallInst>(U);
2798 const Function *F = CI.getCalledFunction();
2799
2800 // FIXME: support Windows dllimport function calls and calls through
2801 // weak symbols.
2802 if (F && (F->hasDLLImportStorageClass() ||
2803 (MF->getTarget().getTargetTriple().isOSWindows() &&
2804 F->hasExternalWeakLinkage())))
2805 return false;
2806
2807 // FIXME: support control flow guard targets.
2808  if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2809    return false;
2810
2811 // FIXME: support statepoints and related.
2812  if (isa<GCStatepointInst, GCRelocateInst, GCResultInst>(U))
2813    return false;
2814
2815 if (CI.isInlineAsm())
2816 return translateInlineAsm(CI, MIRBuilder);
2817
2818 Intrinsic::ID ID = F ? F->getIntrinsicID() : Intrinsic::not_intrinsic;
2819 if (!F || ID == Intrinsic::not_intrinsic) {
2820 if (translateCallBase(CI, MIRBuilder)) {
2821 diagnoseDontCall(CI);
2822 return true;
2823 }
2824 return false;
2825 }
2826
2827 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
2828
2829 if (translateKnownIntrinsic(CI, ID, MIRBuilder))
2830 return true;
2831
2832 TargetLowering::IntrinsicInfo Info;
2833 bool IsTgtMemIntrinsic = TLI->getTgtMemIntrinsic(Info, CI, *MF, ID);
2834
2835 return translateIntrinsic(CI, ID, MIRBuilder,
2836 IsTgtMemIntrinsic ? &Info : nullptr);
2837}
2838
2839/// Translate a call or callbr to an intrinsic.
2840/// Depending on whether TLI->getTgtMemIntrinsic() is true, TgtMemIntrinsicInfo
2841/// is a pointer to the correspondingly populated IntrinsicInfo object.
2842/// Otherwise, this pointer is null.
2843bool IRTranslator::translateIntrinsic(
2844 const CallBase &CB, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder,
2845 const TargetLowering::IntrinsicInfo *TgtMemIntrinsicInfo) {
2846 ArrayRef<Register> ResultRegs;
2847 if (!CB.getType()->isVoidTy())
2848 ResultRegs = getOrCreateVRegs(CB);
2849
2850 // Ignore the callsite attributes. Backend code is most likely not expecting
2851 // an intrinsic to sometimes have side effects and sometimes not.
2852 MachineInstrBuilder MIB = MIRBuilder.buildIntrinsic(ID, ResultRegs);
2853 if (isa<FPMathOperator>(CB))
2854 MIB->copyIRFlags(CB);
2855
2856 for (const auto &Arg : enumerate(CB.args())) {
2857 // If this is required to be an immediate, don't materialize it in a
2858 // register.
2859 if (CB.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
2860 if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
2861 // imm arguments are more convenient than cimm (and realistically
2862 // probably sufficient), so use them.
2863 assert(CI->getBitWidth() <= 64 &&
2864 "large intrinsic immediates not handled");
2865 MIB.addImm(CI->getSExtValue());
2866 } else {
2867 MIB.addFPImm(cast<ConstantFP>(Arg.value()));
2868 }
2869 } else if (auto *MDVal = dyn_cast<MetadataAsValue>(Arg.value())) {
2870 auto *MD = MDVal->getMetadata();
2871 auto *MDN = dyn_cast<MDNode>(MD);
2872 if (!MDN) {
2873 if (auto *ConstMD = dyn_cast<ConstantAsMetadata>(MD))
2874 MDN = MDNode::get(MF->getFunction().getContext(), ConstMD);
2875 else // This was probably an MDString.
2876 return false;
2877 }
2878 MIB.addMetadata(MDN);
2879 } else {
2880 ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
2881 if (VRegs.size() > 1)
2882 return false;
2883 MIB.addUse(VRegs[0]);
2884 }
2885 }
2886
2887 // Add a MachineMemOperand if it is a target mem intrinsic.
2888 if (TgtMemIntrinsicInfo) {
2889 const Function *F = CB.getCalledFunction();
2890
2891 Align Alignment = TgtMemIntrinsicInfo->align.value_or(DL->getABITypeAlign(
2892 TgtMemIntrinsicInfo->memVT.getTypeForEVT(F->getContext())));
2893 LLT MemTy =
2894 TgtMemIntrinsicInfo->memVT.isSimple()
2895 ? getLLTForMVT(TgtMemIntrinsicInfo->memVT.getSimpleVT())
2896 : LLT::scalar(TgtMemIntrinsicInfo->memVT.getStoreSizeInBits());
2897
2898 // TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic
2899 // didn't yield anything useful.
2900 MachinePointerInfo MPI;
2901 if (TgtMemIntrinsicInfo->ptrVal) {
2902 MPI = MachinePointerInfo(TgtMemIntrinsicInfo->ptrVal,
2903 TgtMemIntrinsicInfo->offset);
2904 } else if (TgtMemIntrinsicInfo->fallbackAddressSpace) {
2905 MPI = MachinePointerInfo(*TgtMemIntrinsicInfo->fallbackAddressSpace);
2906 }
2907 MIB.addMemOperand(MF->getMachineMemOperand(
2908 MPI, TgtMemIntrinsicInfo->flags, MemTy, Alignment, CB.getAAMetadata(),
2909 /*Ranges=*/nullptr, TgtMemIntrinsicInfo->ssid,
2910 TgtMemIntrinsicInfo->order, TgtMemIntrinsicInfo->failureOrder));
2911 }
2912
2913 if (CB.isConvergent()) {
2914 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
2915 auto *Token = Bundle->Inputs[0].get();
2916 Register TokenReg = getOrCreateVReg(*Token);
2917 MIB.addUse(TokenReg, RegState::Implicit);
2918 }
2919 }
2920
2921  if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_deactivation_symbol))
2922    MIB->setDeactivationSymbol(*MF, Bundle->Inputs[0].get());
2923
2924 return true;
2925}
2926
2927bool IRTranslator::findUnwindDestinations(
2928 const BasicBlock *EHPadBB,
2929 BranchProbability Prob,
2930 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2931 &UnwindDests) {
2932  EHPersonality Personality = classifyEHPersonality(
2933      EHPadBB->getParent()->getFunction().getPersonalityFn());
2934 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2935 bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2936 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2937 bool IsSEH = isAsynchronousEHPersonality(Personality);
2938
2939 if (IsWasmCXX) {
2940 // Ignore this for now.
2941 return false;
2942 }
2943
2944 while (EHPadBB) {
2945    BasicBlock::const_iterator Pad = EHPadBB->getFirstNonPHIIt();
2946    BasicBlock *NewEHPadBB = nullptr;
2947 if (isa<LandingPadInst>(Pad)) {
2948 // Stop on landingpads. They are not funclets.
2949 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2950 break;
2951 }
2952 if (isa<CleanupPadInst>(Pad)) {
2953 // Stop on cleanup pads. Cleanups are always funclet entries for all known
2954 // personalities.
2955 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2956 UnwindDests.back().first->setIsEHScopeEntry();
2957 UnwindDests.back().first->setIsEHFuncletEntry();
2958 break;
2959 }
2960 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2961 // Add the catchpad handlers to the possible destinations.
2962 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2963 UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
2964 // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2965 if (IsMSVCCXX || IsCoreCLR)
2966 UnwindDests.back().first->setIsEHFuncletEntry();
2967 if (!IsSEH)
2968 UnwindDests.back().first->setIsEHScopeEntry();
2969 }
2970 NewEHPadBB = CatchSwitch->getUnwindDest();
2971 } else {
2972 continue;
2973 }
2974
2975 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2976 if (BPI && NewEHPadBB)
2977 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2978 EHPadBB = NewEHPadBB;
2979 }
2980 return true;
2981}
2982
2983bool IRTranslator::translateInvoke(const User &U,
2984 MachineIRBuilder &MIRBuilder) {
2985 const InvokeInst &I = cast<InvokeInst>(U);
2986 MCContext &Context = MF->getContext();
2987
2988 const BasicBlock *ReturnBB = I.getSuccessor(0);
2989 const BasicBlock *EHPadBB = I.getSuccessor(1);
2990
2991 const Function *Fn = I.getCalledFunction();
2992
2993 // FIXME: support invoking patchpoint and statepoint intrinsics.
2994 if (Fn && Fn->isIntrinsic())
2995 return false;
2996
2997 // FIXME: support whatever these are.
2998 if (I.hasDeoptState())
2999 return false;
3000
3001 // FIXME: support control flow guard targets.
3002 if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
3003 return false;
3004
3005 // FIXME: support Windows exception handling.
3006 if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHIIt()))
3007 return false;
3008
3009 // FIXME: support Windows dllimport function calls and calls through
3010 // weak symbols.
3011 if (Fn && (Fn->hasDLLImportStorageClass() ||
3012 (MF->getTarget().getTargetTriple().isOSWindows() &&
3013 Fn->hasExternalWeakLinkage())))
3014 return false;
3015
3016 bool LowerInlineAsm = I.isInlineAsm();
3017 bool NeedEHLabel = true;
3018
3019 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
3020 // the region covered by the try.
3021 MCSymbol *BeginSymbol = nullptr;
3022 if (NeedEHLabel) {
3023 MIRBuilder.buildInstr(TargetOpcode::G_INVOKE_REGION_START);
3024 BeginSymbol = Context.createTempSymbol();
3025 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
3026 }
3027
3028 if (LowerInlineAsm) {
3029 if (!translateInlineAsm(I, MIRBuilder))
3030 return false;
3031 } else if (!translateCallBase(I, MIRBuilder))
3032 return false;
3033
3034 MCSymbol *EndSymbol = nullptr;
3035 if (NeedEHLabel) {
3036 EndSymbol = Context.createTempSymbol();
3037 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
3038 }
3039
3040  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
3041  BranchProbabilityInfo *BPI = FuncInfo.BPI;
3042 MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
3043 BranchProbability EHPadBBProb =
3044 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
3045          : BranchProbability::getZero();
3046
3047 if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
3048 return false;
3049
3050 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
3051 &ReturnMBB = getMBB(*ReturnBB);
3052 // Update successor info.
3053 addSuccessorWithProb(InvokeMBB, &ReturnMBB);
3054 for (auto &UnwindDest : UnwindDests) {
3055 UnwindDest.first->setIsEHPad();
3056 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3057 }
3058 InvokeMBB->normalizeSuccProbs();
3059
3060 if (NeedEHLabel) {
3061 assert(BeginSymbol && "Expected a begin symbol!");
3062 assert(EndSymbol && "Expected an end symbol!");
3063 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
3064 }
3065
3066 MIRBuilder.buildBr(ReturnMBB);
3067 return true;
3068}
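// For illustration (editorial sketch): a lowered invoke ends up shaped as
//   EH_LABEL <begin>
//   <lowered call or inline asm>
//   EH_LABEL <end>
//   G_BR %normal-dest
// with the label pair registered via MF->addInvoke() and the unwind
// destinations added as extra successors of the invoking block.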
3069
3070/// The intrinsics currently supported by callbr are implicit control flow
3071/// intrinsics such as amdgcn.kill.
3072bool IRTranslator::translateCallBr(const User &U,
3073 MachineIRBuilder &MIRBuilder) {
3074 if (containsBF16Type(U))
3075 return false; // see translateCall
3076
3077 const CallBrInst &I = cast<CallBrInst>(U);
3078 MachineBasicBlock *CallBrMBB = &MIRBuilder.getMBB();
3079
3080 Intrinsic::ID IID = I.getIntrinsicID();
3081 if (I.isInlineAsm()) {
3082 // FIXME: inline asm is not yet supported for callbr in GlobalISel. As soon
3083 // as we add support, we need to handle the indirect asm targets, see
3084 // SelectionDAGBuilder::visitCallBr().
3085 return false;
3086 }
3087 if (!translateIntrinsic(I, IID, MIRBuilder))
3088 return false;
3089
3090 // Retrieve successors.
3091 SmallPtrSet<BasicBlock *, 8> Dests = {I.getDefaultDest()};
3092 MachineBasicBlock *Return = &getMBB(*I.getDefaultDest());
3093
3094 // Update successor info.
3095 addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
3096
3097 // Add indirect targets as successors. For intrinsic callbr, these represent
3098 // implicit control flow (e.g., the "kill" path for amdgcn.kill). We mark them
3099 // with setIsInlineAsmBrIndirectTarget so the machine verifier accepts them as
3100 // valid successors, even though they're not from inline asm.
3101 for (BasicBlock *Dest : I.getIndirectDests()) {
3102 MachineBasicBlock &Target = getMBB(*Dest);
3103 Target.setIsInlineAsmBrIndirectTarget();
3104 Target.setLabelMustBeEmitted();
3105 // Don't add duplicate machine successors.
3106 if (Dests.insert(Dest).second)
3107 addSuccessorWithProb(CallBrMBB, &Target, BranchProbability::getZero());
3108 }
3109
3110 CallBrMBB->normalizeSuccProbs();
3111
3112 // Drop into default successor.
3113 MIRBuilder.buildBr(*Return);
3114
3115 return true;
3116}
3117
3118bool IRTranslator::translateLandingPad(const User &U,
3119 MachineIRBuilder &MIRBuilder) {
3120 const LandingPadInst &LP = cast<LandingPadInst>(U);
3121
3122 MachineBasicBlock &MBB = MIRBuilder.getMBB();
3123
3124 MBB.setIsEHPad();
3125
3126 // If there aren't registers to copy the values into (e.g., during SjLj
3127 // exceptions), then don't bother.
3128 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
3129 if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 &&
3130 TLI->getExceptionSelectorRegister(PersonalityFn) == 0)
3131 return true;
3132
3133 // If landingpad's return type is token type, we don't create DAG nodes
3134 // for its exception pointer and selector value. The extraction of exception
3135 // pointer or selector value from token type landingpads is not currently
3136 // supported.
3137 if (LP.getType()->isTokenTy())
3138 return true;
3139
3140 // Add a label to mark the beginning of the landing pad. Deletion of the
3141 // landing pad can thus be detected via the MachineModuleInfo.
3142 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
3143 .addSym(MF->addLandingPad(&MBB));
3144
3145 // If the unwinder does not preserve all registers, ensure that the
3146 // function marks the clobbered registers as used.
3147 const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
3148 if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
3149 MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
3150
3151 LLT Ty = getLLTForType(*LP.getType(), *DL);
3152 Register Undef = MRI->createGenericVirtualRegister(Ty);
3153 MIRBuilder.buildUndef(Undef);
3154
3155  SmallVector<LLT, 2> Tys;
3156  for (Type *Ty : cast<StructType>(LP.getType())->elements())
3157 Tys.push_back(getLLTForType(*Ty, *DL));
3158 assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
3159
3160 // Mark exception register as live in.
3161 Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn);
3162 if (!ExceptionReg)
3163 return false;
3164
3165 MBB.addLiveIn(ExceptionReg);
3166 ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
3167 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
3168
3169 Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn);
3170 if (!SelectorReg)
3171 return false;
3172
3173 MBB.addLiveIn(SelectorReg);
3174 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
3175 MIRBuilder.buildCopy(PtrVReg, SelectorReg);
3176 MIRBuilder.buildCast(ResRegs[1], PtrVReg);
3177
3178 return true;
3179}
3180
3181bool IRTranslator::translateAlloca(const User &U,
3182 MachineIRBuilder &MIRBuilder) {
3183 auto &AI = cast<AllocaInst>(U);
3184
3185 if (AI.isSwiftError())
3186 return true;
3187
3188 if (AI.isStaticAlloca()) {
3189 Register Res = getOrCreateVReg(AI);
3190 int FI = getOrCreateFrameIndex(AI);
3191 MIRBuilder.buildFrameIndex(Res, FI);
3192 return true;
3193 }
3194
3195 // FIXME: support stack probing for Windows.
3196 if (MF->getTarget().getTargetTriple().isOSWindows())
3197 return false;
3198
3199 // Now we're in the harder dynamic case.
3200 Register NumElts = getOrCreateVReg(*AI.getArraySize());
3201 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
3202 LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
3203 if (MRI->getType(NumElts) != IntPtrTy) {
3204 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
3205 MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
3206 NumElts = ExtElts;
3207 }
3208
3209 Type *Ty = AI.getAllocatedType();
3210
3211 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
3212 Register TySize =
3213 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
3214 MIRBuilder.buildMul(AllocSize, NumElts, TySize);
3215
3216 // Round the size of the allocation up to the stack alignment size
3217 // by add SA-1 to the size. This doesn't overflow because we're computing
3218 // an address inside an alloca.
3219 Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
3220 auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
3221 auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
3222                                      MachineInstr::NoUWrap);
3223  auto AlignCst =
3224 MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
3225 auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
3226
3227 Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
3228 if (Alignment <= StackAlign)
3229 Alignment = Align(1);
3230 MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);
3231
3232 MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
3233 assert(MF->getFrameInfo().hasVarSizedObjects());
3234 return true;
3235}
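// For illustration (editorial sketch): for a dynamic
//   %p = alloca i64, i32 %n
// with a 16-byte stack alignment, the code above produces roughly
//   %n64:_(s64) = G_ZEXT %n
//   %sz:_(s64)  = G_MUL %n64, 8
//   %sz1:_(s64) = nuw G_ADD %sz, 15
//   %aln:_(s64) = G_AND %sz1, -16
//   %p:_(p0)    = G_DYN_STACKALLOC %aln, <align>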
3236
3237bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
3238 // FIXME: We may need more info about the type. Because of how LLT works,
3239 // we're completely discarding the i64/double distinction here (amongst
3240 // others). Fortunately the ABIs I know of where that matters don't use va_arg
3241 // anyway but that's not guaranteed.
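// (Editorial illustration, not part of the source.) For example,
// `%x = va_arg ptr %ap, double` is emitted as a G_VAARG whose result is a
// plain 64-bit scalar, with the ABI alignment of double as its second
// operand, so the integer/floating-point distinction is already gone here.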
3242 MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
3243 {getOrCreateVReg(*U.getOperand(0)),
3244 DL->getABITypeAlign(U.getType()).value()});
3245 return true;
3246}
3247
3248bool IRTranslator::translateUnreachable(const User &U,
3249 MachineIRBuilder &MIRBuilder) {
3250 auto &UI = cast<UnreachableInst>(U);
3251 if (!UI.shouldLowerToTrap(MF->getTarget().Options.TrapUnreachable,
3252 MF->getTarget().Options.NoTrapAfterNoreturn))
3253 return true;
3254
3255 MIRBuilder.buildTrap();
3256 return true;
3257}
3258
3259bool IRTranslator::translateInsertElement(const User &U,
3260 MachineIRBuilder &MIRBuilder) {
3261 // If it is a <1 x Ty> vector, use the scalar as it is
3262 // not a legal vector type in LLT.
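// (Editorial example, not part of the source.) For instance,
// `insertelement <1 x i64> poison, i64 %v, i64 0` is translated as a plain
// copy of %v, because single-element vectors are treated as scalars here.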
3263 if (auto *FVT = dyn_cast<FixedVectorType>(U.getType());
3264 FVT && FVT->getNumElements() == 1)
3265 return translateCopy(U, *U.getOperand(1), MIRBuilder);
3266
3267 Register Res = getOrCreateVReg(U);
3268 Register Val = getOrCreateVReg(*U.getOperand(0));
3269 Register Elt = getOrCreateVReg(*U.getOperand(1));
3270 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3271 Register Idx;
3272 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(2))) {
3273 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3274 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3275 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
3276 Idx = getOrCreateVReg(*NewIdxCI);
3277 }
3278 }
3279 if (!Idx)
3280 Idx = getOrCreateVReg(*U.getOperand(2));
3281 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
3282 const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
3283 Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
3284 }
3285 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
3286 return true;
3287}
3288
3289bool IRTranslator::translateInsertVector(const User &U,
3290 MachineIRBuilder &MIRBuilder) {
3291 Register Dst = getOrCreateVReg(U);
3292 Register Vec = getOrCreateVReg(*U.getOperand(0));
3293 Register Elt = getOrCreateVReg(*U.getOperand(1));
3294
3295 ConstantInt *CI = cast<ConstantInt>(U.getOperand(2));
3296 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3297
3298 // Resize Index to preferred index width.
3299 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3300 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3301 CI = ConstantInt::get(CI->getContext(), NewIdx);
3302 }
3303
3304 // If it is a <1 x Ty> vector, we have to use other means.
3305 if (auto *ResultType = dyn_cast<FixedVectorType>(U.getOperand(1)->getType());
3306 ResultType && ResultType->getNumElements() == 1) {
3307 if (auto *InputType = dyn_cast<FixedVectorType>(U.getOperand(0)->getType());
3308 InputType && InputType->getNumElements() == 1) {
3309 // We are inserting an illegal fixed vector into an illegal
3310 // fixed vector, use the scalar as it is not a legal vector type
3311 // in LLT.
3312 return translateCopy(U, *U.getOperand(0), MIRBuilder);
3313 }
3314 if (isa<FixedVectorType>(U.getOperand(0)->getType())) {
3315 // We are inserting an illegal fixed vector into a legal fixed
3316 // vector, use the scalar as it is not a legal vector type in
3317 // LLT.
3318 Register Idx = getOrCreateVReg(*CI);
3319 MIRBuilder.buildInsertVectorElement(Dst, Vec, Elt, Idx);
3320 return true;
3321 }
3322 if (isa<ScalableVectorType>(U.getOperand(0)->getType())) {
3323 // We are inserting an illegal fixed vector into a scalable
3324 // vector, use a scalar element insert.
3325 LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
3326 Register Idx = getOrCreateVReg(*CI);
3327 auto ScaledIndex = MIRBuilder.buildMul(
3328 VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);
3329 MIRBuilder.buildInsertVectorElement(Dst, Vec, Elt, ScaledIndex);
3330 return true;
3331 }
3332 }
3333
3334 MIRBuilder.buildInsertSubvector(
3335 getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
3336 getOrCreateVReg(*U.getOperand(1)), CI->getZExtValue());
3337 return true;
3338}
3339
3340bool IRTranslator::translateExtractElement(const User &U,
3341 MachineIRBuilder &MIRBuilder) {
3342 // If it is a <1 x Ty> vector, use the scalar as it is
3343 // not a legal vector type in LLT.
3344 if (const FixedVectorType *FVT =
3345 dyn_cast<FixedVectorType>(U.getOperand(0)->getType()))
3346 if (FVT->getNumElements() == 1)
3347 return translateCopy(U, *U.getOperand(0), MIRBuilder);
3348
3349 Register Res = getOrCreateVReg(U);
3350 Register Val = getOrCreateVReg(*U.getOperand(0));
3351 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3352 Register Idx;
3353 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
3354 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3355 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3356 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
3357 Idx = getOrCreateVReg(*NewIdxCI);
3358 }
3359 }
3360 if (!Idx)
3361 Idx = getOrCreateVReg(*U.getOperand(1));
3362 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
3363 const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
3364 Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
3365 }
3366 MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
3367 return true;
3368}
3369
3370bool IRTranslator::translateExtractVector(const User &U,
3371 MachineIRBuilder &MIRBuilder) {
3372 Register Res = getOrCreateVReg(U);
3373 Register Vec = getOrCreateVReg(*U.getOperand(0));
3374 ConstantInt *CI = cast<ConstantInt>(U.getOperand(1));
3375 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3376
3377 // Resize Index to preferred index width.
3378 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3379 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3380 CI = ConstantInt::get(CI->getContext(), NewIdx);
3381 }
3382
3383 // If it is a <1 x Ty> vector, we have to use other means.
3384 if (auto *ResultType = dyn_cast<FixedVectorType>(U.getType());
3385 ResultType && ResultType->getNumElements() == 1) {
3386 if (auto *InputType = dyn_cast<FixedVectorType>(U.getOperand(0)->getType());
3387 InputType && InputType->getNumElements() == 1) {
3388 // We are extracting an illegal fixed vector from an illegal fixed vector,
3389 // use the scalar as it is not a legal vector type in LLT.
3390 return translateCopy(U, *U.getOperand(0), MIRBuilder);
3391 }
3392 if (isa<FixedVectorType>(U.getOperand(0)->getType())) {
3393 // We are extracting an illegal fixed vector from a legal fixed
3394 // vector, use the scalar as it is not a legal vector type in
3395 // LLT.
3396 Register Idx = getOrCreateVReg(*CI);
3397 MIRBuilder.buildExtractVectorElement(Res, Vec, Idx);
3398 return true;
3399 }
3400 if (isa<ScalableVectorType>(U.getOperand(0)->getType())) {
3401 // We are extracting an illegal fixed vector from a scalable
3402 // vector, use a scalar element extract.
3403 LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
3404 Register Idx = getOrCreateVReg(*CI);
3405 auto ScaledIndex = MIRBuilder.buildMul(
3406 VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);
3407 MIRBuilder.buildExtractVectorElement(Res, Vec, ScaledIndex);
3408 return true;
3409 }
3410 }
3411
3412 MIRBuilder.buildExtractSubvector(getOrCreateVReg(U),
3413 getOrCreateVReg(*U.getOperand(0)),
3414 CI->getZExtValue());
3415 return true;
3416}
3417
3418bool IRTranslator::translateShuffleVector(const User &U,
3419 MachineIRBuilder &MIRBuilder) {
3420 // A ShuffleVector that operates on scalable vectors is a splat vector where
3421 // the value of the splat vector is the 0th element of the first operand,
3422 // since the index mask operand is the zeroinitializer (undef and
3423 // poison are treated as zeroinitializer here).
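// (Editorial example, not part of the source.)
//   shufflevector <vscale x 4 x i32> %a, <vscale x 4 x i32> poison,
//                 <vscale x 4 x i32> zeroinitializer
// extracts element 0 of %a and splats it via G_SPLAT_VECTOR below.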
3424 if (U.getOperand(0)->getType()->isScalableTy()) {
3425 Register Val = getOrCreateVReg(*U.getOperand(0));
3426 auto SplatVal = MIRBuilder.buildExtractVectorElementConstant(
3427 MRI->getType(Val).getElementType(), Val, 0);
3428 MIRBuilder.buildSplatVector(getOrCreateVReg(U), SplatVal);
3429 return true;
3430 }
3431
3432 ArrayRef<int> Mask;
3433 if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
3434 Mask = SVI->getShuffleMask();
3435 else
3436 Mask = cast<ConstantExpr>(U).getShuffleMask();
3437
3438 // As GISel does not represent <1 x > vectors as a separate type from scalars,
3439 // we transform shuffle_vector with a scalar output to an
3440 // ExtractVectorElement. If the input type is also scalar it becomes a Copy.
3441 unsigned DstElts = cast<FixedVectorType>(U.getType())->getNumElements();
3442 unsigned SrcElts =
3443 cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements();
3444 if (DstElts == 1) {
3445 unsigned M = Mask[0];
3446 if (SrcElts == 1) {
3447 if (M == 0 || M == 1)
3448 return translateCopy(U, *U.getOperand(M), MIRBuilder);
3449 MIRBuilder.buildUndef(getOrCreateVReg(U));
3450 } else {
3451 Register Dst = getOrCreateVReg(U);
3452 if (M < SrcElts) {
3453 MIRBuilder.buildExtractVectorElementConstant(
3454 Dst, getOrCreateVReg(*U.getOperand(0)), M);
3455 } else if (M < SrcElts * 2) {
3456 MIRBuilder.buildExtractVectorElementConstant(
3457 Dst, getOrCreateVReg(*U.getOperand(1)), M - SrcElts);
3458 } else {
3459 MIRBuilder.buildUndef(Dst);
3460 }
3461 }
3462 return true;
3463 }
3464
3465 // A single element src is transformed to a build_vector.
3466 if (SrcElts == 1) {
3467 SmallVector<Register, 8> Ops;
3468 Register Undef;
3469 for (int M : Mask) {
3470 LLT SrcTy = getLLTForType(*U.getOperand(0)->getType(), *DL);
3471 if (M == 0 || M == 1) {
3472 Ops.push_back(getOrCreateVReg(*U.getOperand(M)));
3473 } else {
3474 if (!Undef.isValid()) {
3475 Undef = MRI->createGenericVirtualRegister(SrcTy);
3476 MIRBuilder.buildUndef(Undef);
3477 }
3478 Ops.push_back(Undef);
3479 }
3480 }
3481 MIRBuilder.buildBuildVector(getOrCreateVReg(U), Ops);
3482 return true;
3483 }
3484
3485 ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
3486 MIRBuilder
3487 .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
3488 {getOrCreateVReg(*U.getOperand(0)),
3489 getOrCreateVReg(*U.getOperand(1))})
3490 .addShuffleMask(MaskAlloc);
3491 return true;
3492}
3493
3494bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
3495 const PHINode &PI = cast<PHINode>(U);
3496
3497 SmallVector<MachineInstr *, 4> Insts;
3498 for (auto Reg : getOrCreateVRegs(PI)) {
3499 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
3500 Insts.push_back(MIB.getInstr());
3501 }
3502
3503 PendingPHIs.emplace_back(&PI, std::move(Insts));
3504 return true;
3505}
3506
3507bool IRTranslator::translateAtomicCmpXchg(const User &U,
3508 MachineIRBuilder &MIRBuilder) {
3509 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
3510
3511 auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
3512
3513 auto Res = getOrCreateVRegs(I);
3514 Register OldValRes = Res[0];
3515 Register SuccessRes = Res[1];
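// (Editorial note, not part of the source.) For
//   %r = cmpxchg ptr %p, i32 %cmp, i32 %new seq_cst seq_cst
// Res[0] holds the loaded value and Res[1] the i1 success flag, mirroring
// the {i32, i1} aggregate the IR instruction returns.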
3516 Register Addr = getOrCreateVReg(*I.getPointerOperand());
3517 Register Cmp = getOrCreateVReg(*I.getCompareOperand());
3518 Register NewVal = getOrCreateVReg(*I.getNewValOperand());
3519
3520 MIRBuilder.buildAtomicCmpXchgWithSuccess(
3521 OldValRes, SuccessRes, Addr, Cmp, NewVal,
3522 *MF->getMachineMemOperand(
3523 MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
3524 getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
3525 I.getSuccessOrdering(), I.getFailureOrdering()));
3526 return true;
3527}
3528
3529bool IRTranslator::translateAtomicRMW(const User &U,
3530 MachineIRBuilder &MIRBuilder) {
3531 if (containsBF16Type(U) && !targetSupportsBF16Type(MF))
3532 return false;
3533
3534 const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
3535 auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
3536
3537 Register Res = getOrCreateVReg(I);
3538 Register Addr = getOrCreateVReg(*I.getPointerOperand());
3539 Register Val = getOrCreateVReg(*I.getValOperand());
3540
3541 unsigned Opcode = 0;
3542 switch (I.getOperation()) {
3543 default:
3544 return false;
3545 case AtomicRMWInst::Xchg:
3546 Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
3547 break;
3548 case AtomicRMWInst::Add:
3549 Opcode = TargetOpcode::G_ATOMICRMW_ADD;
3550 break;
3551 case AtomicRMWInst::Sub:
3552 Opcode = TargetOpcode::G_ATOMICRMW_SUB;
3553 break;
3554 case AtomicRMWInst::And:
3555 Opcode = TargetOpcode::G_ATOMICRMW_AND;
3556 break;
3557 case AtomicRMWInst::Nand:
3558 Opcode = TargetOpcode::G_ATOMICRMW_NAND;
3559 break;
3560 case AtomicRMWInst::Or:
3561 Opcode = TargetOpcode::G_ATOMICRMW_OR;
3562 break;
3563 case AtomicRMWInst::Xor:
3564 Opcode = TargetOpcode::G_ATOMICRMW_XOR;
3565 break;
3566 case AtomicRMWInst::Max:
3567 Opcode = TargetOpcode::G_ATOMICRMW_MAX;
3568 break;
3569 case AtomicRMWInst::Min:
3570 Opcode = TargetOpcode::G_ATOMICRMW_MIN;
3571 break;
3572 case AtomicRMWInst::UMax:
3573 Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
3574 break;
3575 case AtomicRMWInst::UMin:
3576 Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
3577 break;
3578 case AtomicRMWInst::FAdd:
3579 Opcode = TargetOpcode::G_ATOMICRMW_FADD;
3580 break;
3581 case AtomicRMWInst::FSub:
3582 Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
3583 break;
3584 case AtomicRMWInst::FMax:
3585 Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
3586 break;
3587 case AtomicRMWInst::FMin:
3588 Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
3589 break;
3590 case AtomicRMWInst::FMaximum:
3591 Opcode = TargetOpcode::G_ATOMICRMW_FMAXIMUM;
3592 break;
3593 case AtomicRMWInst::FMinimum:
3594 Opcode = TargetOpcode::G_ATOMICRMW_FMINIMUM;
3595 break;
3596 case AtomicRMWInst::UIncWrap:
3597 Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
3598 break;
3599 case AtomicRMWInst::UDecWrap:
3600 Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
3601 break;
3602 case AtomicRMWInst::USubCond:
3603 Opcode = TargetOpcode::G_ATOMICRMW_USUB_COND;
3604 break;
3605 case AtomicRMWInst::USubSat:
3606 Opcode = TargetOpcode::G_ATOMICRMW_USUB_SAT;
3607 break;
3608 }
3609
3610 MIRBuilder.buildAtomicRMW(
3611 Opcode, Res, Addr, Val,
3612 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
3613 Flags, MRI->getType(Val), getMemOpAlign(I),
3614 I.getAAMetadata(), nullptr, I.getSyncScopeID(),
3615 I.getOrdering()));
3616 return true;
3617}
3618
3619bool IRTranslator::translateFence(const User &U,
3620 MachineIRBuilder &MIRBuilder) {
3621 const FenceInst &Fence = cast<FenceInst>(U);
3622 MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
3623 Fence.getSyncScopeID());
3624 return true;
3625}
3626
3627bool IRTranslator::translateFreeze(const User &U,
3628 MachineIRBuilder &MIRBuilder) {
3629 const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
3630 const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));
3631
3632 assert(DstRegs.size() == SrcRegs.size() &&
3633 "Freeze with different source and destination type?");
3634
3635 for (unsigned I = 0; I < DstRegs.size(); ++I) {
3636 MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
3637 }
3638
3639 return true;
3640}
3641
3642void IRTranslator::finishPendingPhis() {
3643#ifndef NDEBUG
3644 DILocationVerifier Verifier;
3645 GISelObserverWrapper WrapperObserver(&Verifier);
3646 RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
3647#endif // ifndef NDEBUG
3648 for (auto &Phi : PendingPHIs) {
3649 const PHINode *PI = Phi.first;
3650 if (PI->getType()->isEmptyTy())
3651 continue;
3652 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
3653 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
3654 EntryBuilder->setDebugLoc(PI->getDebugLoc());
3655#ifndef NDEBUG
3656 Verifier.setCurrentInst(PI);
3657#endif // ifndef NDEBUG
3658
3659 SmallPtrSet<const MachineBasicBlock *, 16> SeenPreds;
3660 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
3661 auto IRPred = PI->getIncomingBlock(i);
3662 ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
3663 for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
3664 if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
3665 continue;
3666 SeenPreds.insert(Pred);
3667 for (unsigned j = 0; j < ValRegs.size(); ++j) {
3668 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
3669 MIB.addUse(ValRegs[j]);
3670 MIB.addMBB(Pred);
3671 }
3672 }
3673 }
3674 }
3675}
3676
3677void IRTranslator::translateDbgValueRecord(Value *V, bool HasArgList,
3678 const DILocalVariable *Variable,
3679 const DIExpression *Expression,
3680 const DebugLoc &DL,
3681 MachineIRBuilder &MIRBuilder) {
3682 assert(Variable->isValidLocationForIntrinsic(DL) &&
3683 "Expected inlined-at fields to agree");
3684 // Act as if we're handling a debug intrinsic.
3685 MIRBuilder.setDebugLoc(DL);
3686
3687 if (!V || HasArgList) {
3688 // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to
3689 // terminate any prior location.
3690 MIRBuilder.buildIndirectDbgValue(0, Variable, Expression);
3691 return;
3692 }
3693
3694 if (const auto *CI = dyn_cast<Constant>(V)) {
3695 MIRBuilder.buildConstDbgValue(*CI, Variable, Expression);
3696 return;
3697 }
3698
3699 if (auto *AI = dyn_cast<AllocaInst>(V);
3700 AI && AI->isStaticAlloca() && Expression->startsWithDeref()) {
3701 // If the value is an alloca and the expression starts with a
3702 // dereference, track a stack slot instead of a register, as registers
3703 // may be clobbered.
3704 auto ExprOperands = Expression->getElements();
3705 auto *ExprDerefRemoved =
3706 DIExpression::get(AI->getContext(), ExprOperands.drop_front());
3707 MIRBuilder.buildFIDbgValue(getOrCreateFrameIndex(*AI), Variable,
3708 ExprDerefRemoved);
3709 return;
3710 }
3711 if (translateIfEntryValueArgument(false, V, Variable, Expression, DL,
3712 MIRBuilder))
3713 return;
3714 for (Register Reg : getOrCreateVRegs(*V)) {
3715 // FIXME: This does not handle register-indirect values at offset 0. The
3716 // direct/indirect thing shouldn't really be handled by something as
3717 // implicit as reg+noreg vs reg+imm in the first place, but it seems
3718 // pretty baked in right now.
3719 MIRBuilder.buildDirectDbgValue(Reg, Variable, Expression);
3720 }
3721}
3722
3723void IRTranslator::translateDbgDeclareRecord(Value *Address, bool HasArgList,
3724 const DILocalVariable *Variable,
3725 const DIExpression *Expression,
3726 const DebugLoc &DL,
3727 MachineIRBuilder &MIRBuilder) {
3728 if (!Address || isa<UndefValue>(Address)) {
3729 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *Variable << "\n");
3730 return;
3731 }
3732
3733 assert(Variable->isValidLocationForIntrinsic(DL) &&
3734 "Expected inlined-at fields to agree");
3735 auto AI = dyn_cast<AllocaInst>(Address);
3736 if (AI && AI->isStaticAlloca()) {
3737 // Static allocas are tracked at the MF level, no need for DBG_VALUE
3738 // instructions (in fact, they get ignored if they *do* exist).
3739 MF->setVariableDbgInfo(Variable, Expression,
3740 getOrCreateFrameIndex(*AI), DL);
3741 return;
3742 }
3743
3744 if (translateIfEntryValueArgument(true, Address, Variable,
3745 Expression, DL,
3746 MIRBuilder))
3747 return;
3748
3749 // A dbg.declare describes the address of a source variable, so lower it
3750 // into an indirect DBG_VALUE.
3751 MIRBuilder.setDebugLoc(DL);
3752 MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address), Variable,
3753 Expression);
3754}
3755
3756void IRTranslator::translateDbgInfo(const Instruction &Inst,
3757 MachineIRBuilder &MIRBuilder) {
3758 for (DbgRecord &DR : Inst.getDbgRecordRange()) {
3759 if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
3760 MIRBuilder.setDebugLoc(DLR->getDebugLoc());
3761 assert(DLR->getLabel() && "Missing label");
3762 assert(DLR->getLabel()->isValidLocationForIntrinsic(
3763 MIRBuilder.getDebugLoc()) &&
3764 "Expected inlined-at fields to agree");
3765 MIRBuilder.buildDbgLabel(DLR->getLabel());
3766 continue;
3767 }
3768 DbgVariableRecord &DVR = cast<DbgVariableRecord>(DR);
3769 const DILocalVariable *Variable = DVR.getVariable();
3770 const DIExpression *Expression = DVR.getExpression();
3771 Value *V = DVR.getVariableLocationOp(0);
3772 if (DVR.isDbgDeclare())
3773 translateDbgDeclareRecord(V, DVR.hasArgList(), Variable, Expression,
3774 DVR.getDebugLoc(), MIRBuilder);
3775 else
3776 translateDbgValueRecord(V, DVR.hasArgList(), Variable, Expression,
3777 DVR.getDebugLoc(), MIRBuilder);
3778 }
3779}
3780
3781bool IRTranslator::translate(const Instruction &Inst) {
3782 CurBuilder->setDebugLoc(Inst.getDebugLoc());
3783 CurBuilder->setPCSections(Inst.getMetadata(LLVMContext::MD_pcsections));
3784 CurBuilder->setMMRAMetadata(Inst.getMetadata(LLVMContext::MD_mmra));
3785
3786 if (TLI->fallBackToDAGISel(Inst))
3787 return false;
3788
3789 switch (Inst.getOpcode()) {
3790#define HANDLE_INST(NUM, OPCODE, CLASS) \
3791 case Instruction::OPCODE: \
3792 return translate##OPCODE(Inst, *CurBuilder.get());
3793#include "llvm/IR/Instruction.def"
3794 default:
3795 return false;
3796 }
3797}
3798
3799bool IRTranslator::translate(const Constant &C, Register Reg) {
3800 // We only emit constants into the entry block from here. To prevent jumpy
3801 // debug behaviour, remove the debug line.
3802 if (auto CurrInstDL = CurBuilder->getDL())
3803 EntryBuilder->setDebugLoc(DebugLoc());
3804
3805 if (auto CI = dyn_cast<ConstantInt>(&C)) {
3806 // buildConstant expects a to-be-splatted scalar ConstantInt.
3807 if (isa<VectorType>(CI->getType()))
3808 CI = ConstantInt::get(CI->getContext(), CI->getValue());
3809 EntryBuilder->buildConstant(Reg, *CI);
3810 } else if (auto CF = dyn_cast<ConstantFP>(&C)) {
3811 // buildFConstant expects a to-be-splatted scalar ConstantFP.
3812 if (isa<VectorType>(CF->getType()))
3813 CF = ConstantFP::get(CF->getContext(), CF->getValue());
3814 EntryBuilder->buildFConstant(Reg, *CF);
3815 } else if (isa<UndefValue>(C))
3816 EntryBuilder->buildUndef(Reg);
3817 else if (isa<ConstantPointerNull>(C))
3818 EntryBuilder->buildConstant(Reg, 0);
3819 else if (auto GV = dyn_cast<GlobalValue>(&C))
3820 EntryBuilder->buildGlobalValue(Reg, GV);
3821 else if (auto CPA = dyn_cast<ConstantPtrAuth>(&C)) {
3822 Register Addr = getOrCreateVReg(*CPA->getPointer());
3823 Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());
3824 EntryBuilder->buildConstantPtrAuth(Reg, CPA, Addr, AddrDisc);
3825 } else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
3826 Constant &Elt = *CAZ->getElementValue(0u);
3827 if (isa<ScalableVectorType>(CAZ->getType())) {
3828 EntryBuilder->buildSplatVector(Reg, getOrCreateVReg(Elt));
3829 return true;
3830 }
3831 // Return the scalar if it is a <1 x Ty> vector.
3832 unsigned NumElts = CAZ->getElementCount().getFixedValue();
3833 if (NumElts == 1)
3834 return translateCopy(C, Elt, *EntryBuilder);
3835 // All elements are zero so we can just use the first one.
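// (Editorial example, not part of the source.) A fixed-width
// `<4 x i32> zeroinitializer` becomes a G_BUILD_VECTOR whose four sources
// are the same G_CONSTANT 0 register.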
3836 EntryBuilder->buildSplatBuildVector(Reg, getOrCreateVReg(Elt));
3837 } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
3838 // Return the scalar if it is a <1 x Ty> vector.
3839 if (CV->getNumElements() == 1)
3840 return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);
3841 SmallVector<Register, 4> Ops;
3842 for (unsigned i = 0; i < CV->getNumElements(); ++i) {
3843 Constant &Elt = *CV->getElementAsConstant(i);
3844 Ops.push_back(getOrCreateVReg(Elt));
3845 }
3846 EntryBuilder->buildBuildVector(Reg, Ops);
3847 } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
3848 switch(CE->getOpcode()) {
3849#define HANDLE_INST(NUM, OPCODE, CLASS) \
3850 case Instruction::OPCODE: \
3851 return translate##OPCODE(*CE, *EntryBuilder.get());
3852#include "llvm/IR/Instruction.def"
3853 default:
3854 return false;
3855 }
3856 } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
3857 if (CV->getNumOperands() == 1)
3858 return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
3859 SmallVector<Register, 4> Ops;
3860 for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
3861 Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
3862 }
3863 EntryBuilder->buildBuildVector(Reg, Ops);
3864 } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
3865 EntryBuilder->buildBlockAddress(Reg, BA);
3866 } else
3867 return false;
3868
3869 return true;
3870}
3871
3872bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
3873 MachineBasicBlock &MBB) {
3874 for (auto &BTB : SL->BitTestCases) {
3875 // Emit header first, if it wasn't already emitted.
3876 if (!BTB.Emitted)
3877 emitBitTestHeader(BTB, BTB.Parent);
3878
3879 BranchProbability UnhandledProb = BTB.Prob;
3880 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
3881 UnhandledProb -= BTB.Cases[j].ExtraProb;
3882 // Set the current basic block to the mbb we wish to insert the code into
3883 MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
3884 // If all cases cover a contiguous range, it is not necessary to jump to
3885 // the default block after the last bit test fails. This is because the
3886 // range check during bit test header creation has guaranteed that every
3887 // case here doesn't go outside the range. In this case, there is no need
3888 // to perform the last bit test, as it will always be true. Instead, make
3889 // the second-to-last bit-test fall through to the target of the last bit
3890 // test, and delete the last bit test.
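// (Editorial example, not part of the source.) If the switch cases densely
// cover the tested range, control can only reach the final bit test with a
// value that test is guaranteed to match, so the preceding test branches
// straight to the final test's target and the last test is dropped.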
3891
3892 MachineBasicBlock *NextMBB;
3893 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3894 // Second-to-last bit-test with contiguous range: fall through to the
3895 // target of the final bit test.
3896 NextMBB = BTB.Cases[j + 1].TargetBB;
3897 } else if (j + 1 == ej) {
3898 // For the last bit test, fall through to Default.
3899 NextMBB = BTB.Default;
3900 } else {
3901 // Otherwise, fall through to the next bit test.
3902 NextMBB = BTB.Cases[j + 1].ThisBB;
3903 }
3904
3905 emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);
3906
3907 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3908 // We need to record the replacement phi edge here that normally
3909 // happens in emitBitTestCase before we delete the case, otherwise the
3910 // phi edge will be lost.
3911 addMachineCFGPred({BTB.Parent->getBasicBlock(),
3912 BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
3913 MBB);
3914 // Since we're not going to use the final bit test, remove it.
3915 BTB.Cases.pop_back();
3916 break;
3917 }
3918 }
3919 // This is "default" BB. We have two jumps to it. From "header" BB and from
3920 // last "case" BB, unless the latter was skipped.
3921 CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
3922 BTB.Default->getBasicBlock()};
3923 addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
3924 if (!BTB.ContiguousRange) {
3925 addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
3926 }
3927 }
3928 SL->BitTestCases.clear();
3929
3930 for (auto &JTCase : SL->JTCases) {
3931 // Emit header first, if it wasn't already emitted.
3932 if (!JTCase.first.Emitted)
3933 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
3934
3935 emitJumpTable(JTCase.second, JTCase.second.MBB);
3936 }
3937 SL->JTCases.clear();
3938
3939 for (auto &SwCase : SL->SwitchCases)
3940 emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
3941 SL->SwitchCases.clear();
3942
3943 // Check if we need to generate stack-protector guard checks.
3944 StackProtector &SP = getAnalysis<StackProtector>();
3945 if (SP.shouldEmitSDCheck(BB)) {
3946 bool FunctionBasedInstrumentation =
3947 TLI->getSSPStackGuardCheck(*MF->getFunction().getParent());
3948 SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
3949 }
3950 // Handle stack protector.
3951 if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
3952 LLVM_DEBUG(dbgs() << "Unimplemented stack protector case\n");
3953 return false;
3954 } else if (SPDescriptor.shouldEmitStackProtector()) {
3955 MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
3956 MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();
3957
3958 // Find the split point to split the parent mbb. At the same time copy all
3959 // physical registers used in the tail of parent mbb into virtual registers
3960 // before the split point and back into physical registers after the split
3961 // point. This prevents us needing to deal with Live-ins and many other
3962 // register allocation issues caused by us splitting the parent mbb. The
3963 // register allocator will clean up said virtual copies later on.
3964 MachineBasicBlock::iterator SplitPoint = findSplitPointForStackProtector(
3965 ParentMBB, *MF->getSubtarget().getInstrInfo());
3966
3967 // Splice the terminator of ParentMBB into SuccessMBB.
3968 SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
3969 ParentMBB->end());
3970
3971 // Add compare/jump on neq/jump to the parent BB.
3972 if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
3973 return false;
3974
3975 // CodeGen Failure MBB if we have not codegened it yet.
3976 MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
3977 if (FailureMBB->empty()) {
3978 if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
3979 return false;
3980 }
3981
3982 // Clear the Per-BB State.
3983 SPDescriptor.resetPerBBState();
3984 }
3985 return true;
3986}
3987
3988bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
3989 MachineBasicBlock *ParentBB) {
3990 CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
3991 // First create the loads to the guard/stack slot for the comparison.
3992 Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
3993 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
3994 LLT PtrMemTy = getLLTForMVT(TLI->getPointerMemTy(*DL));
3995
3996 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
3997 int FI = MFI.getStackProtectorIndex();
3998
3999 Register Guard;
4000 Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
4001 const Module &M = *ParentBB->getParent()->getFunction().getParent();
4002 Align Align = DL->getPrefTypeAlign(PointerType::getUnqual(M.getContext()));
4003
4004 // Generate code to load the content of the guard slot.
4005 Register GuardVal =
4006 CurBuilder
4007 ->buildLoad(PtrMemTy, StackSlotPtr,
4008 MachinePointerInfo::getFixedStack(*MF, FI), Align,
4009 MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile)
4010 .getReg(0);
4011
4012 if (TLI->useStackGuardXorFP()) {
4013 LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
4014 return false;
4015 }
4016
4017 // Retrieve guard check function, nullptr if instrumentation is inlined.
4018 if (const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M)) {
4019 // This path is currently untestable on GlobalISel, since the only platform
4020 // that needs this seems to be Windows, and we fall back on that currently.
4021 // The code still lives here in case that changes.
4022 // Silence warning about unused variable until the code below that uses
4023 // 'GuardCheckFn' is enabled.
4024 (void)GuardCheckFn;
4025 return false;
4026#if 0
4027 // The target provides a guard check function to validate the guard value.
4028 // Generate a call to that function with the content of the guard slot as
4029 // argument.
4030 FunctionType *FnTy = GuardCheckFn->getFunctionType();
4031 assert(FnTy->getNumParams() == 1 && "Invalid function signature");
4032 ISD::ArgFlagsTy Flags;
4033 if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
4034 Flags.setInReg();
4035 CallLowering::ArgInfo GuardArgInfo(
4036 {GuardVal, FnTy->getParamType(0), {Flags}});
4037
4038 CallLowering::CallLoweringInfo Info;
4039 Info.OrigArgs.push_back(GuardArgInfo);
4040 Info.CallConv = GuardCheckFn->getCallingConv();
4041 Info.Callee = MachineOperand::CreateGA(GuardCheckFn, 0);
4042 Info.OrigRet = {Register(), FnTy->getReturnType()};
4043 if (!CLI->lowerCall(MIRBuilder, Info)) {
4044 LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
4045 return false;
4046 }
4047 return true;
4048#endif
4049 }
4050
4051 // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
4052 // Otherwise, emit a volatile load to retrieve the stack guard value.
4053 if (TLI->useLoadStackGuardNode(*ParentBB->getBasicBlock()->getModule())) {
4054 Guard =
4055 MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
4056 getStackGuard(Guard, *CurBuilder);
4057 } else {
4058 // TODO: test using android subtarget when we support @llvm.thread.pointer.
4059 const Value *IRGuard = TLI->getSDagStackGuard(M);
4060 Register GuardPtr = getOrCreateVReg(*IRGuard);
4061
4062 Guard = CurBuilder
4063 ->buildLoad(PtrMemTy, GuardPtr,
4064 MachinePointerInfo::getFixedStack(*MF, FI), Align,
4065 MachineMemOperand::MOLoad |
4066 MachineMemOperand::MOVolatile)
4067 .getReg(0);
4068 }
4069
4070 // Perform the comparison.
4071 auto Cmp =
4072 CurBuilder->buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Guard, GuardVal);
4073 // If the guard/stackslot do not equal, branch to failure MBB.
4074 CurBuilder->buildBrCond(Cmp, *SPD.getFailureMBB());
4075 // Otherwise branch to success MBB.
4076 CurBuilder->buildBr(*SPD.getSuccessMBB());
4077 return true;
4078}
4079
4080bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
4081 MachineBasicBlock *FailureBB) {
4082 CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
4083
4084 const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
4085 const char *Name = TLI->getLibcallName(Libcall);
4086
4087 CallLowering::CallLoweringInfo Info;
4088 Info.CallConv = TLI->getLibcallCallingConv(Libcall);
4089 Info.Callee = MachineOperand::CreateES(Name);
4090 Info.OrigRet = {Register(), Type::getVoidTy(MF->getFunction().getContext()),
4091 0};
4092 if (!CLI->lowerCall(*CurBuilder, Info)) {
4093 LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
4094 return false;
4095 }
4096
4097 // Emit a trap instruction if we are required to do so.
4098 const TargetOptions &TargetOpts = TLI->getTargetMachine().Options;
4099 if (TargetOpts.TrapUnreachable && !TargetOpts.NoTrapAfterNoreturn)
4100 CurBuilder->buildInstr(TargetOpcode::G_TRAP);
4101
4102 return true;
4103}
4104
4105void IRTranslator::finalizeFunction() {
4106 // Release the memory used by the different maps we
4107 // needed during the translation.
4108 PendingPHIs.clear();
4109 VMap.reset();
4110 FrameIndices.clear();
4111 MachinePreds.clear();
4112 // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
4113 // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid
4114 // destroying it twice (in ~IRTranslator() and ~LLVMContext())
4115 EntryBuilder.reset();
4116 CurBuilder.reset();
4117 FuncInfo.clear();
4118 SPDescriptor.resetPerFunctionState();
4119}
4120
4121/// Returns true if a BasicBlock \p BB within a variadic function contains a
4122/// variadic musttail call.
4123static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
4124 if (!IsVarArg)
4125 return false;
4126
4127 // Walk the block backwards, because tail calls usually only appear at the end
4128 // of a block.
4129 return llvm::any_of(llvm::reverse(BB), [](const Instruction &I) {
4130 const auto *CI = dyn_cast<CallInst>(&I);
4131 return CI && CI->isMustTailCall();
4132 });
4133}
4134
4135bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
4136 MF = &CurMF;
4137 const Function &F = MF->getFunction();
4138 GISelCSEAnalysisWrapper &Wrapper =
4139 getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
4140 // Set the CSEConfig and run the analysis.
4141 GISelCSEInfo *CSEInfo = nullptr;
4142 TPC = &getAnalysis<TargetPassConfig>();
4143 bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
4144 ? EnableCSEInIRTranslator
4145 : TPC->isGISelCSEEnabled();
4146 TLI = MF->getSubtarget().getTargetLowering();
4147
4148 if (EnableCSE) {
4149 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
4150 CSEInfo = &Wrapper.get(TPC->getCSEConfig());
4151 EntryBuilder->setCSEInfo(CSEInfo);
4152 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
4153 CurBuilder->setCSEInfo(CSEInfo);
4154 } else {
4155 EntryBuilder = std::make_unique<MachineIRBuilder>();
4156 CurBuilder = std::make_unique<MachineIRBuilder>();
4157 }
4158 CLI = MF->getSubtarget().getCallLowering();
4159 CurBuilder->setMF(*MF);
4160 EntryBuilder->setMF(*MF);
4161 MRI = &MF->getRegInfo();
4162 DL = &F.getDataLayout();
4163 ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
4164 const TargetMachine &TM = MF->getTarget();
4165 TM.resetTargetOptions(F);
4166 EnableOpts = OptLevel != CodeGenOptLevel::None && !skipFunction(F);
4167 FuncInfo.MF = MF;
4168 if (EnableOpts) {
4169 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
4170 FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
4171 } else {
4172 AA = nullptr;
4173 FuncInfo.BPI = nullptr;
4174 }
4175
4176 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
4177 MF->getFunction());
4178 LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
4179 FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
4180
4181 SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
4182 SL->init(*TLI, TM, *DL);
4183
4184 assert(PendingPHIs.empty() && "stale PHIs");
4185
4186 // Targets which want to use big endian can enable it using
4187 // enableBigEndian()
4188 if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
4189 // Currently we don't properly handle big endian code.
4190 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
4191 F.getSubprogram(), &F.getEntryBlock());
4192 R << "unable to translate in big endian mode";
4193 reportTranslationError(*MF, *ORE, R);
4194 return false;
4195 }
4196
4197 // Release the per-function state when we return, whether we succeeded or not.
4198 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
4199
4200 // Setup a separate basic-block for the arguments and constants
4201 MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
4202 MF->push_back(EntryBB);
4203 EntryBuilder->setMBB(*EntryBB);
4204
4205 DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHIIt()->getDebugLoc();
4206 SwiftError.setFunction(CurMF);
4207 SwiftError.createEntriesInEntryBlock(DbgLoc);
4208
4209 bool IsVarArg = F.isVarArg();
4210 bool HasMustTailInVarArgFn = false;
4211
4212 // Create all blocks, in IR order, to preserve the layout.
4213 FuncInfo.MBBMap.resize(F.getMaxBlockNumber());
4214 for (const BasicBlock &BB: F) {
4215 auto *&MBB = FuncInfo.MBBMap[BB.getNumber()];
4216
4217 MBB = MF->CreateMachineBasicBlock(&BB);
4218 MF->push_back(MBB);
4219
4220 if (BB.hasAddressTaken())
4221 MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));
4222
4223 if (!HasMustTailInVarArgFn)
4224 HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
4225 }
4226
4227 MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
4228
4229 // Make our arguments/constants entry block fallthrough to the IR entry block.
4230 EntryBB->addSuccessor(&getMBB(F.front()));
4231
4232 if (CLI->fallBackToDAGISel(*MF)) {
4233 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
4234 F.getSubprogram(), &F.getEntryBlock());
4235 R << "unable to lower function: "
4236 << ore::NV("Prototype", F.getFunctionType());
4237 reportTranslationError(*MF, *ORE, R);
4238 return false;
4239 }
4240
4241 // Lower the actual args into this basic block.
4242 SmallVector<ArrayRef<Register>, 8> VRegArgs;
4243 for (const Argument &Arg: F.args()) {
4244 if (DL->getTypeStoreSize(Arg.getType()).isZero())
4245 continue; // Don't handle zero sized types.
4246 ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
4247 VRegArgs.push_back(VRegs);
4248
4249 if (Arg.hasSwiftErrorAttr()) {
4250 assert(VRegs.size() == 1 && "Too many vregs for Swift error");
4251 SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
4252 }
4253 }
4254
4255 if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
4256 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
4257 F.getSubprogram(), &F.getEntryBlock());
4258 R << "unable to lower arguments: "
4259 << ore::NV("Prototype", F.getFunctionType());
4260 reportTranslationError(*MF, *ORE, R);
4261 return false;
4262 }
4263
4264 // Need to visit defs before uses when translating instructions.
4265 GISelObserverWrapper WrapperObserver;
4266 if (EnableCSE && CSEInfo)
4267 WrapperObserver.addObserver(CSEInfo);
4268 {
4269 ReversePostOrderTraversal<const Function *> RPOT(&F);
4270#ifndef NDEBUG
4271 DILocationVerifier Verifier;
4272 WrapperObserver.addObserver(&Verifier);
4273#endif // ifndef NDEBUG
4274 RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
4275 for (const BasicBlock *BB : RPOT) {
4276 MachineBasicBlock &MBB = getMBB(*BB);
4277 // Set the insertion point of all the following translations to
4278 // the end of this basic block.
4279 CurBuilder->setMBB(MBB);
4280 HasTailCall = false;
4281 for (const Instruction &Inst : *BB) {
4282 // If we translated a tail call in the last step, then we know
4283 // everything after the call is either a return, or something that is
4284 // handled by the call itself. (E.g. a lifetime marker or assume
4285 // intrinsic.) In this case, we should stop translating the block and
4286 // move on.
4287 if (HasTailCall)
4288 break;
4289#ifndef NDEBUG
4290 Verifier.setCurrentInst(&Inst);
4291#endif // ifndef NDEBUG
4292
4293 // Translate any debug-info attached to the instruction.
4294 translateDbgInfo(Inst, *CurBuilder);
4295
4296 if (translate(Inst))
4297 continue;
4298
4299 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
4300 Inst.getDebugLoc(), BB);
4301 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
4302
4303 if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
4304 std::string InstStrStorage;
4305 raw_string_ostream InstStr(InstStrStorage);
4306 InstStr << Inst;
4307
4308 R << ": '" << InstStrStorage << "'";
4309 }
4310
4311 reportTranslationError(*MF, *ORE, R);
4312 return false;
4313 }
4314
4315 if (!finalizeBasicBlock(*BB, MBB)) {
4316 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
4317 BB->getTerminator()->getDebugLoc(), BB);
4318 R << "unable to translate basic block";
4319 reportTranslationError(*MF, *ORE, R);
4320 return false;
4321 }
4322 }
4323#ifndef NDEBUG
4324 WrapperObserver.removeObserver(&Verifier);
4325#endif
4326 }
4327
4328 finishPendingPhis();
4329
4330 SwiftError.propagateVRegs();
4331
4332 // Merge the argument lowering and constants block with its single
4333 // successor, the LLVM-IR entry block. We want the basic block to
4334 // be maximal.
4335 assert(EntryBB->succ_size() == 1 &&
4336 "Custom BB used for lowering should have only one successor");
4337 // Get the successor of the current entry block.
4338 MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
4339 assert(NewEntryBB.pred_size() == 1 &&
4340 "LLVM-IR entry block has a predecessor!?");
4341 // Move all the instruction from the current entry block to the
4342 // new entry block.
4343 NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
4344 EntryBB->end());
4345
4346 // Update the live-in information for the new entry block.
4347 for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
4348 NewEntryBB.addLiveIn(LiveIn);
4349 NewEntryBB.sortUniqueLiveIns();
4350
4351 // Get rid of the now empty basic block.
4352 EntryBB->removeSuccessor(&NewEntryBB);
4353 MF->remove(EntryBB);
4354 MF->deleteMachineBasicBlock(EntryBB);
4355
4356 assert(&MF->front() == &NewEntryBB &&
4357 "New entry wasn't next in the list of basic block!");
4358
4359 // Initialize stack protector information.
4360 StackProtector &SP = getAnalysis<StackProtector>();
4361 SP.copyToMachineFrameInfo(MF->getFrameInfo());
4362
4363 return false;
4364}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
const TargetInstrInfo & TII
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
Provides analysis for continuously CSEing during GISel passes.
This file implements a version of MachineIRBuilder which CSEs insts within a MachineBasicBlock.
This file describes how to lower LLVM calls to machine code calls.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
dxil translate DXIL Translate Metadata
This contains common code to allow clients to notify changes to machine instr.
#define DEBUG_TYPE
static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB)
Returns true if a BasicBlock BB within a variadic function contains a variadic musttail call.
static bool targetSupportsBF16Type(const MachineFunction *MF)
static bool containsBF16Type(const User &U)
static unsigned getConvOpcode(Intrinsic::ID ID)
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL)
static unsigned getConstrainedOpcode(Intrinsic::ID ID)
IRTranslator LLVM IR MI
IRTranslator LLVM IR static false void reportTranslationError(MachineFunction &MF, OptimizationRemarkEmitter &ORE, OptimizationRemarkMissed &R)
static cl::opt< bool > EnableCSEInIRTranslator("enable-cse-in-irtranslator", cl::desc("Should enable CSE in irtranslator"), cl::Optional, cl::init(false))
static bool isValInBlock(const Value *V, const BasicBlock *BB)
static bool isSwiftError(const Value *V)
This file declares the IRTranslator pass.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This file describes how to lower LLVM inline asm to machine code INLINEASM.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static LVOptions Options
Definition LVOptions.cpp:25
Implement a low-level type suitable for MachineInstr level instruction selection.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
This file declares the MachineIRBuilder class.
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
This file contains the declarations for metadata subclasses.
Type::TypeID TypeID
uint64_t High
OptimizedStructLayoutField Field
if(PassOpts->AAPipeline)
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
std::pair< BasicBlock *, BasicBlock * > Edge
This file contains some templates that are useful if you are working with the STL at all.
verify safepoint Safepoint IR Verifier
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
Value * RHS
Value * LHS
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition APInt.cpp:1033
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
const Value * getArraySize() const
Get the number of elements allocated.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:131
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
iterator begin() const
Definition ArrayRef.h:130
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
An immutable pass that tracks lazily created AssumptionCache objects.
@ Add
*p = old + v
@ FAdd
*p = old + v
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ Sub
*p = old - v
@ And
*p = old & v
@ Xor
*p = old ^ v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ FSub
*p = old - v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
@ Nand
*p = ~(old & v)
LLVM Basic Block Representation.
Definition BasicBlock.h:62
unsigned getNumber() const
Definition BasicBlock.h:95
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition BasicBlock.h:690
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
Definition BasicBlock.h:171
LLVM_ABI InstListType::const_iterator getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
LLVM_ABI const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Legacy analysis pass which computes BlockFrequencyInfo.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Legacy analysis pass which computes BranchProbabilityInfo.
LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
static BranchProbability getOne()
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
bool isTailCall() const
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition InstrTypes.h:693
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition InstrTypes.h:678
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:214
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:157
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:154
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This is the common base class for constrained floating point intrinsics.
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI unsigned getNonMetadataArgCount() const
DWARF expression.
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static LLVM_ABI DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
LLVM_ABI bool startsWithDeref() const
Return whether the first element a DW_OP_deref.
ArrayRef< uint64_t > getElements() const
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this label.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Value * getAddress() const
DILabel * getLabel() const
DebugLoc getDebugLoc() const
Value * getValue(unsigned OpIdx=0) const
DILocalVariable * getVariable() const
DIExpression * getExpression() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
A debug info location.
Definition DebugLoc.h:123
Class representing an expression and its matching format.
This instruction extracts a struct member or array element value from an aggregate value.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition Type.cpp:802
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition Pass.cpp:188
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition Function.h:703
Constant * getPersonalityFn() const
Get the personality function associated with this function.
const Function & getFunction() const
Definition Function.h:164
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition Function.h:249
The actual analysis pass wrapper.
Definition CSEInfo.h:229
Simple wrapper that does the following.
Definition CSEInfo.h:211
The CSE Analysis object.
Definition CSEInfo.h:71
Abstract class that contains various methods for clients to notify about changes.
Simple wrapper observer that takes several observers, and calls each one for each event.
void removeObserver(GISelChangeObserver *O)
void addObserver(GISelChangeObserver *O)
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
IRTranslator(CodeGenOptLevel OptLevel=CodeGenOptLevel::None)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
bool lowerInlineAsm(MachineIRBuilder &MIRBuilder, const CallBase &CB, std::function< ArrayRef< Register >(const Value &Val)> GetOrCreateVRegs) const
Lower the given inline asm call instruction GetOrCreateVRegs is a callback to materialize a register ...
This instruction inserts a struct field of array element value into an aggregate value.
iterator_range< simple_ilist< DbgRecord >::iterator > getDbgRecordRange() const
Return a range over the DbgRecords attached to this instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
LLVM_ABI bool hasAllowReassoc() const LLVM_READONLY
Determine whether the allow-reassociation flag is set.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
constexpr LLT changeElementType(LLT NewEltTy) const
If this type is a vector, return a vector with the same number of elements but the new element type.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
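The LLT helpers above are the low-level type vocabulary the translator emits in. As a rough, non-authoritative sketch (the header path and the concrete sizes are assumptions), they compose like this:

#include "llvm/CodeGenTypes/LowLevelType.h"

static void lltSketch() {
  llvm::LLT S32 = llvm::LLT::scalar(32);             // 32-bit scalar
  llvm::LLT P0 = llvm::LLT::pointer(0, 64);          // 64-bit pointer in AS 0
  llvm::LLT V4S32 = llvm::LLT::fixed_vector(4, 32);  // <4 x s32>

  (void)S32.getSizeInBits();     // TypeSize of 32 bits
  (void)V4S32.getNumElements();  // 4
  (void)V4S32.isFixedVector();   // true
  (void)P0.isPointer();          // true

  // Same element count, new element type: <4 x s16>.
  llvm::LLT V4S16 = V4S32.changeElementType(llvm::LLT::scalar(16));
  (void)V4S16;
}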
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Value * getPointerOperand()
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
static LocationSize precise(uint64_t Value)
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
void push_back(MachineInstr *MI)
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
LLVM_ABI void sortUniqueLiveIns()
Sorts and uniques the LiveIns vector.
LLVM_ABI bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
void setIsEHPad(bool V=true)
Indicates the block is a landing pad.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Helper class to build MachineInstr.
MachineInstrBuilder buildFPTOUI_SAT(const DstOp &Dst, const SrcOp &Src0)
Build and insert Res = G_FPTOUI_SAT Src0.
MachineInstrBuilder buildFMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildFreeze(const DstOp &Dst, const SrcOp &Src)
Build and insert Dst = G_FREEZE Src.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
MachineInstrBuilder buildModf(const DstOp &Fract, const DstOp &Int, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)
Build and insert Fract, Int = G_FMODF Src.
LLVMContext & getContext() const
MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ADD Op0, Op1.
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildResetFPMode()
Build and insert G_RESET_FPMODE.
MachineInstrBuilder buildFPExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPEXT Op.
MachineInstrBuilder buildFPTOSI_SAT(const DstOp &Dst, const SrcOp &Src0)
Build and insert Res = G_FPTOSI_SAT Src0.
MachineInstrBuilder buildUCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_UCMP Op0, Op1.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildGetRounding(const DstOp &Dst)
Build and insert Dst = G_GET_ROUNDING.
MachineInstrBuilder buildSCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_SCMP Op0, Op1.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, const SrcOp &Src2, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FMA Op0, Op1, Op2.
MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_MUL Op0, Op1.
MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, const SrcOp &Src1, unsigned Index)
Build and insert Res = G_INSERT_SUBVECTOR Src0, Src1, Idx.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_SUB Op0, Op1.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildSetFPMode(const SrcOp &Src)
Build and insert G_SET_FPMODE Src.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction specifying that Variable is given by C (suitably modified b...
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
std::optional< MachineInstrBuilder > materializeObjectPtrOffset(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert an instruction with appropriate flags for addressing some offset of an object,...
MachineInstrBuilder buildSetRounding(const SrcOp &Src)
Build and insert G_SET_ROUNDING.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildExtractVectorElementConstant(const DstOp &Res, const SrcOp &Val, const int Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instruction specifying that Label is given.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in th...
MachineInstrBuilder buildResetFPEnv()
Build and insert G_RESET_FPENV.
void setDebugLoc(const DebugLoc &DL)
Set the debug location to DL for all the next build instructions.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr,...
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
const DebugLoc & getDebugLoc()
Get the current instruction's debug location.
MachineInstrBuilder buildTrap(bool Debug=false)
Build and insert G_TRAP or G_DEBUGTRAP.
MachineInstrBuilder buildFFrexp(const DstOp &Fract, const DstOp &Exp, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)
Build and insert Fract, Exp = G_FFREXP Src.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildFSincos(const DstOp &Sin, const DstOp &Cos, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)
Build and insert Sin, Cos = G_FSINCOS Src.
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)
Build and insert G_PREFETCH Addr, RW, Locality, CacheType.
MachineInstrBuilder buildExtractSubvector(const DstOp &Res, const SrcOp &Src, unsigned Index)
Build and insert Res = G_EXTRACT_SUBVECTOR Src, Idx0.
const DataLayout & getDataLayout() const
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)
Build and insert Res = G_SPLAT_VECTOR Val.
MachineInstrBuilder buildStepVector(const DstOp &Res, unsigned Step)
Build and insert Res = G_STEP_VECTOR Step.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
MachineInstrBuilder buildFAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FADD Op0, Op1.
MachineInstrBuilder buildSetFPEnv(const SrcOp &Src)
Build and insert G_SET_FPENV Src.
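Taken together, these MachineIRBuilder entry points are how the translator emits generic MI. A minimal sketch, assuming the builder, block, registers, and memory operands have already been set up by the caller (as IRTranslator does internally):

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

// Load a 32-bit value, add 1, and store it back. LoadMMO/StoreMMO are
// assumed to carry the matching MOLoad/MOStore flags.
static void incrementInMemory(MachineIRBuilder &MIRBuilder,
                              MachineBasicBlock &MBB, Register AddrReg,
                              MachineMemOperand &LoadMMO,
                              MachineMemOperand &StoreMMO) {
  MIRBuilder.setMBB(MBB);                                    // insert at end of MBB
  LLT S32 = LLT::scalar(32);
  auto Loaded = MIRBuilder.buildLoad(S32, AddrReg, LoadMMO); // %v = G_LOAD %addr
  auto One = MIRBuilder.buildConstant(S32, 1);               // %c = G_CONSTANT 1
  auto Sum = MIRBuilder.buildAdd(S32, Loaded, One);          // %s = G_ADD %v, %c
  MIRBuilder.buildStore(Sum, AddrReg, StoreMMO);             // G_STORE %s, %addr
}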
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
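The addDef/addUse/addImm style above is the raw operand-chaining interface behind the typed build* helpers. A hedged sketch (the opcode choice is purely illustrative):

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/TargetOpcodes.h"

using namespace llvm;

// Spell out a generic shift operand by operand; this is what
// buildShl(Dst, Src, Amt) does for you.
static void chainOperands(MachineIRBuilder &MIRBuilder, Register Dst,
                          Register Src, Register Amt) {
  MIRBuilder.buildInstr(TargetOpcode::G_SHL)
      .addDef(Dst)  // result register
      .addUse(Src)  // value to shift
      .addUse(Amt); // shift amount
}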
LLVM_ABI void copyIRFlags(const Instruction &I)
Copy all flags to MachineInstr MIFlags.
static LLVM_ABI uint32_t copyFlagsFromInstruction(const Instruction &I)
LLVM_ABI void setDeactivationSymbol(MachineFunction &MF, Value *DS)
void setDebugLoc(DebugLoc DL)
Replace the current source information with the given debug location.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
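The MachineMemOperand::Flags values above are or'd together when the memory operand is created. A small sketch assuming a MachineFunction and MachinePointerInfo are already at hand; the memory type and alignment are illustrative:

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"

using namespace llvm;

static MachineMemOperand *makeVolatileLoadMMO(MachineFunction &MF,
                                              MachinePointerInfo PtrInfo) {
  MachineMemOperand::Flags Flags =
      MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
  // 32-bit memory type with 4-byte alignment, purely for illustration.
  return MF.getMachineMemOperand(PtrInfo, Flags, LLT::scalar(32), Align(4));
}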
static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
The optimization diagnostic interface.
Diagnostic information for missed-optimization remarks.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
Class to install both of the above.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:140
virtual bool isTailCall(const MachineInstr &Inst) const
Determines whether Inst is a tail call instruction.
Primary interface to the complete machine description for the target machine.
const Triple & getTargetTriple() const
TargetOptions Options
const Target & getTarget() const
void resetTargetOptions(const Function &F) const
Reset the target options based on the function's attributes.
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
Target-Independent Code Generator Pass Configuration Options.
bool isSPIRV() const
Tests whether the target is SPIR-V (32/64-bit/Logical).
Definition Triple.h:908
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
Definition Type.cpp:180
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:280
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:304
bool isTokenTy() const
Return true if this is 'token'.
Definition Type.h:234
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
Value * getOperand(unsigned i) const
Definition User.h:232
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
constexpr bool isZero() const
Definition TypeSize.h:153
const ParentTy * getParent() const
Definition ilist_node.h:34
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char SymbolName[]
Key for Kernel::Metadata::mSymbolName.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
@ Libcall
The operation should be implemented as a call to some kind of runtime support library.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
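These matchers are the PatternMatch-style helpers used to recognise IR shapes during translation. A non-authoritative sketch against the IR-level matchers listed above (the shape being matched is just an example):

#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

using namespace llvm;
using namespace llvm::PatternMatch;

// Returns true if V is "(extractelement Vec, Idx) && RHS" (including the
// select-based logical-and form), capturing the right-hand side in RHS.
static bool isLogicalAndOfExtract(Value *V, Value *Vec, Value *Idx,
                                  Value *&RHS) {
  return match(V, m_LogicalAnd(m_OneUse(m_ExtractElt(m_Specific(Vec),
                                                     m_Specific(Idx))),
                               m_Value(RHS)));
}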
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Undef
Value of the register doesn't matter.
Offsets
Offsets in bytes from the start of the input buffer.
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
std::vector< CaseCluster > CaseClusterVector
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
CaseClusterVector::iterator CaseClusterIt
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
initializer< Ty > init(const Ty &Val)
ExceptionBehavior
Exception behavior used for floating point operations.
Definition FPEnv.h:39
@ ebIgnore
This corresponds to "fpexcept.ignore".
Definition FPEnv.h:40
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
NodeAddr< CodeNode * > Code
Definition RDFGraph.h:388
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
BaseReg
Stack frame base register. Bit 0 of FREInfo.Info.
Definition SFrame.h:77
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
Definition ScopeExit.h:59
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2472
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
Definition bit.h:293
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
LLVM_ABI MVT getMVTForLLT(LLT Ty)
Get a rough equivalent of an MVT for a given LLT.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2136
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
Definition MathExtras.h:243
gep_type_iterator gep_type_end(const User *GEP)
MachineBasicBlock::iterator findSplitPointForStackProtector(MachineBasicBlock *BB, const TargetInstrInfo &TII)
Find the split point at which to splice the end of BB into its success stack protector check machine ...
LLVM_ABI LLT getLLTForMVT(MVT Ty)
Get a rough equivalent of an LLT for a given MVT.
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
Definition bit.h:154
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:202
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
Definition Local.h:252
constexpr bool has_single_bit(T Value) noexcept
Definition bit.h:147
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
void computeValueLLTs(const DataLayout &DL, Type &Ty, SmallVectorImpl< LLT > &ValueTys, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
computeValueLLTs - Given an LLVM IR type, compute a sequence of LLTs that represent all the individua...
Definition Analysis.cpp:149
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1622
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
generic_gep_type_iterator<> gep_type_iterator
auto succ_size(const MachineBasicBlock *BB)
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
@ Success
The lock was released successfully.
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
@ Global
Append to llvm.global_dtors.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
Definition Utils.cpp:1189
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Definition STLExtras.h:1994
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ FMul
Product of floats.
@ Sub
Subtraction of integers.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Definition FPEnv.cpp:25
gep_type_iterator gep_type_begin(const User *GEP)
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
Definition Analysis.cpp:185
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
LLVM_ABI LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
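getLLTForType and computeValueLLTs (listed above) are the two ways IR types become LLTs during translation: one LLT for a simple value, or a flattened list plus byte offsets for an aggregate. A sketch under the assumption that a DataLayout and a type are available:

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"

using namespace llvm;

static void typeToLLTs(Type &Ty, const DataLayout &DL) {
  if (!Ty.isAggregateType()) {
    LLT Single = getLLTForType(Ty, DL); // one LLT for a scalar/pointer/vector
    (void)Single;
    return;
  }
  // Aggregates flatten to several LLTs plus their byte offsets.
  SmallVector<LLT, 4> ValueTys;
  SmallVector<uint64_t, 4> Offsets;
  computeValueLLTs(DL, Ty, ValueTys, &Offsets);
}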
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:869
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:137
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:316
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Pair of physical register and lane mask.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
static bool canHandle(const Instruction *I, const TargetLibraryInfo &TLI)
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
std::optional< unsigned > fallbackAddressSpace
PointerUnion< const Value *, const PseudoSourceValue * > ptrVal