Bug Summary

File: llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
Warning: line 341, column 26
Forming reference to null pointer

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name IRTranslator.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/CodeGen/GlobalISel -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/CodeGen/GlobalISel -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/CodeGen/GlobalISel -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/CodeGen/GlobalISel -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp

1//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the IRTranslator class.
10//===----------------------------------------------------------------------===//
11
12#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
13#include "llvm/ADT/PostOrderIterator.h"
14#include "llvm/ADT/STLExtras.h"
15#include "llvm/ADT/ScopeExit.h"
16#include "llvm/ADT/SmallSet.h"
17#include "llvm/ADT/SmallVector.h"
18#include "llvm/Analysis/BranchProbabilityInfo.h"
19#include "llvm/Analysis/Loads.h"
20#include "llvm/Analysis/OptimizationRemarkEmitter.h"
21#include "llvm/Analysis/ValueTracking.h"
22#include "llvm/CodeGen/Analysis.h"
23#include "llvm/CodeGen/GlobalISel/CallLowering.h"
24#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
25#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
26#include "llvm/CodeGen/LowLevelType.h"
27#include "llvm/CodeGen/MachineBasicBlock.h"
28#include "llvm/CodeGen/MachineFrameInfo.h"
29#include "llvm/CodeGen/MachineFunction.h"
30#include "llvm/CodeGen/MachineInstrBuilder.h"
31#include "llvm/CodeGen/MachineMemOperand.h"
32#include "llvm/CodeGen/MachineModuleInfo.h"
33#include "llvm/CodeGen/MachineOperand.h"
34#include "llvm/CodeGen/MachineRegisterInfo.h"
35#include "llvm/CodeGen/StackProtector.h"
36#include "llvm/CodeGen/SwitchLoweringUtils.h"
37#include "llvm/CodeGen/TargetFrameLowering.h"
38#include "llvm/CodeGen/TargetInstrInfo.h"
39#include "llvm/CodeGen/TargetLowering.h"
40#include "llvm/CodeGen/TargetPassConfig.h"
41#include "llvm/CodeGen/TargetRegisterInfo.h"
42#include "llvm/CodeGen/TargetSubtargetInfo.h"
43#include "llvm/IR/BasicBlock.h"
44#include "llvm/IR/CFG.h"
45#include "llvm/IR/Constant.h"
46#include "llvm/IR/Constants.h"
47#include "llvm/IR/DataLayout.h"
48#include "llvm/IR/DebugInfo.h"
49#include "llvm/IR/DerivedTypes.h"
50#include "llvm/IR/DiagnosticInfo.h"
51#include "llvm/IR/Function.h"
52#include "llvm/IR/GetElementPtrTypeIterator.h"
53#include "llvm/IR/InlineAsm.h"
54#include "llvm/IR/InstrTypes.h"
55#include "llvm/IR/Instructions.h"
56#include "llvm/IR/IntrinsicInst.h"
57#include "llvm/IR/Intrinsics.h"
58#include "llvm/IR/LLVMContext.h"
59#include "llvm/IR/Metadata.h"
60#include "llvm/IR/PatternMatch.h"
61#include "llvm/IR/Type.h"
62#include "llvm/IR/User.h"
63#include "llvm/IR/Value.h"
64#include "llvm/InitializePasses.h"
65#include "llvm/MC/MCContext.h"
66#include "llvm/Pass.h"
67#include "llvm/Support/Casting.h"
68#include "llvm/Support/CodeGen.h"
69#include "llvm/Support/Debug.h"
70#include "llvm/Support/ErrorHandling.h"
71#include "llvm/Support/LowLevelTypeImpl.h"
72#include "llvm/Support/MathExtras.h"
73#include "llvm/Support/raw_ostream.h"
74#include "llvm/Target/TargetIntrinsicInfo.h"
75#include "llvm/Target/TargetMachine.h"
76#include "llvm/Transforms/Utils/MemoryOpRemark.h"
77#include <algorithm>
78#include <cassert>
79#include <cstddef>
80#include <cstdint>
81#include <iterator>
82#include <string>
83#include <utility>
84#include <vector>
85
86#define DEBUG_TYPE "irtranslator"
87
88using namespace llvm;
89
90static cl::opt<bool>
91 EnableCSEInIRTranslator("enable-cse-in-irtranslator",
92 cl::desc("Should enable CSE in irtranslator"),
93 cl::Optional, cl::init(false));
94char IRTranslator::ID = 0;
95
96INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
97 false, false)
98INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
99INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
100INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
101INITIALIZE_PASS_DEPENDENCY(StackProtector)
102INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
103INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
104 false, false)
105
106static void reportTranslationError(MachineFunction &MF,
107 const TargetPassConfig &TPC,
108 OptimizationRemarkEmitter &ORE,
109 OptimizationRemarkMissed &R) {
110 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
111
112 // Print the function name explicitly if we don't have a debug location (which
113 // makes the diagnostic less useful) or if we're going to emit a raw error.
114 if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
115 R << (" (in function: " + MF.getName() + ")").str();
116
117 if (TPC.isGlobalISelAbortEnabled())
118 report_fatal_error(R.getMsg());
119 else
120 ORE.emit(R);
121}
122
123IRTranslator::IRTranslator(CodeGenOpt::Level optlevel)
124 : MachineFunctionPass(ID), OptLevel(optlevel) {}
125
126#ifndef NDEBUG
127namespace {
128/// Verify that every instruction created has the same DILocation as the
129/// instruction being translated.
130class DILocationVerifier : public GISelChangeObserver {
131 const Instruction *CurrInst = nullptr;
132
133public:
134 DILocationVerifier() = default;
135 ~DILocationVerifier() = default;
136
137 const Instruction *getCurrentInst() const { return CurrInst; }
138 void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }
139
140 void erasingInstr(MachineInstr &MI) override {}
141 void changingInstr(MachineInstr &MI) override {}
142 void changedInstr(MachineInstr &MI) override {}
143
144 void createdInstr(MachineInstr &MI) override {
145 assert(getCurrentInst() && "Inserted instruction without a current MI");
146
147 // Only print the check message if we're actually checking it.
148#ifndef NDEBUG
149 LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
150 << " was copied to " << MI);
151#endif
152 // We allow insts in the entry block to have a debug loc line of 0 because
153 // they could have originated from constants, and we don't want a jumpy
154 // debug experience.
155 assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
156 MI.getDebugLoc().getLine() == 0) &&
157 "Line info was not transferred to all instructions");
158 }
159};
160} // namespace
161#endif // ifndef NDEBUG
162
163
164void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
165 AU.addRequired<StackProtector>();
166 AU.addRequired<TargetPassConfig>();
167 AU.addRequired<GISelCSEAnalysisWrapperPass>();
168 if (OptLevel != CodeGenOpt::None)
169 AU.addRequired<BranchProbabilityInfoWrapperPass>();
170 AU.addRequired<TargetLibraryInfoWrapperPass>();
171 AU.addPreserved<TargetLibraryInfoWrapperPass>();
172 getSelectionDAGFallbackAnalysisUsage(AU);
173 MachineFunctionPass::getAnalysisUsage(AU);
174}
175
176IRTranslator::ValueToVRegInfo::VRegListT &
177IRTranslator::allocateVRegs(const Value &Val) {
178 auto VRegsIt = VMap.findVRegs(Val);
179 if (VRegsIt != VMap.vregs_end())
180 return *VRegsIt->second;
181 auto *Regs = VMap.getVRegs(Val);
182 auto *Offsets = VMap.getOffsets(Val);
183 SmallVector<LLT, 4> SplitTys;
184 computeValueLLTs(*DL, *Val.getType(), SplitTys,
185 Offsets->empty() ? Offsets : nullptr);
186 for (unsigned i = 0; i < SplitTys.size(); ++i)
187 Regs->push_back(0);
188 return *Regs;
189}
190
191ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
192 auto VRegsIt = VMap.findVRegs(Val);
193 if (VRegsIt != VMap.vregs_end())
194 return *VRegsIt->second;
195
196 if (Val.getType()->isVoidTy())
197 return *VMap.getVRegs(Val);
198
199 // Create entry for this type.
200 auto *VRegs = VMap.getVRegs(Val);
201 auto *Offsets = VMap.getOffsets(Val);
202
203 assert(Val.getType()->isSized() &&
204 "Don't know how to create an empty vreg");
205
206 SmallVector<LLT, 4> SplitTys;
207 computeValueLLTs(*DL, *Val.getType(), SplitTys,
208 Offsets->empty() ? Offsets : nullptr);
209
210 if (!isa<Constant>(Val)) {
211 for (auto Ty : SplitTys)
212 VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
213 return *VRegs;
214 }
215
216 if (Val.getType()->isAggregateType()) {
217 // UndefValue, ConstantAggregateZero
218 auto &C = cast<Constant>(Val);
219 unsigned Idx = 0;
220 while (auto Elt = C.getAggregateElement(Idx++)) {
221 auto EltRegs = getOrCreateVRegs(*Elt);
222 llvm::copy(EltRegs, std::back_inserter(*VRegs));
223 }
224 } else {
225 assert(SplitTys.size() == 1 && "unexpectedly split LLT");
226 VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
227 bool Success = translate(cast<Constant>(Val), VRegs->front());
228 if (!Success) {
229 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
230 MF->getFunction().getSubprogram(),
231 &MF->getFunction().getEntryBlock());
232 R << "unable to translate constant: " << ore::NV("Type", Val.getType());
233 reportTranslationError(*MF, *TPC, *ORE, R);
234 return *VRegs;
235 }
236 }
237
238 return *VRegs;
239}
240
241int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
242 auto MapEntry = FrameIndices.find(&AI);
243 if (MapEntry != FrameIndices.end())
244 return MapEntry->second;
245
246 uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
247 uint64_t Size =
248 ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
249
250 // Always allocate at least one byte.
251 Size = std::max<uint64_t>(Size, 1u);
252
253 int &FI = FrameIndices[&AI];
254 FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
255 return FI;
256}
257
258Align IRTranslator::getMemOpAlign(const Instruction &I) {
259 if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
260 return SI->getAlign();
261 if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
262 return LI->getAlign();
263 if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
264 return AI->getAlign();
265 if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
266 return AI->getAlign();
267
268 OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
269 R << "unable to translate memop: " << ore::NV("Opcode", &I);
270 reportTranslationError(*MF, *TPC, *ORE, R);
271 return Align(1);
272}
273
274MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
275 MachineBasicBlock *&MBB = BBToMBB[&BB];
276 assert(MBB && "BasicBlock was not encountered before");
277 return *MBB;
278}
279
280void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
281 assert(NewPred && "new predecessor must be a real MachineBasicBlock");
282 MachinePreds[Edge].push_back(NewPred);
283}
284
285bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
286 MachineIRBuilder &MIRBuilder) {
287 // Get or create a virtual register for each value.
288 // Unless the value is a Constant => loadimm cst?
289 // or inline constant each time?
290 // Creation of a virtual register needs to have a size.
291 Register Op0 = getOrCreateVReg(*U.getOperand(0));
292 Register Op1 = getOrCreateVReg(*U.getOperand(1));
293 Register Res = getOrCreateVReg(U);
294 uint16_t Flags = 0;
295 if (isa<Instruction>(U)) {
296 const Instruction &I = cast<Instruction>(U);
297 Flags = MachineInstr::copyFlagsFromInstruction(I);
298 }
299
300 MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
301 return true;
302}
303
304bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
305 MachineIRBuilder &MIRBuilder) {
306 Register Op0 = getOrCreateVReg(*U.getOperand(0));
307 Register Res = getOrCreateVReg(U);
308 uint16_t Flags = 0;
309 if (isa<Instruction>(U)) {
310 const Instruction &I = cast<Instruction>(U);
311 Flags = MachineInstr::copyFlagsFromInstruction(I);
312 }
313 MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
314 return true;
315}
316
317bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
318 return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
319}
320
321bool IRTranslator::translateCompare(const User &U,
322 MachineIRBuilder &MIRBuilder) {
323 auto *CI = dyn_cast<CmpInst>(&U);
21
Assuming the object is not a 'CmpInst'
22
'CI' initialized to a null pointer value
324 Register Op0 = getOrCreateVReg(*U.getOperand(0));
325 Register Op1 = getOrCreateVReg(*U.getOperand(1));
326 Register Res = getOrCreateVReg(U);
327 CmpInst::Predicate Pred =
328 CI
22.1
'CI' is null
? CI->getPredicate() : static_cast<CmpInst::Predicate>(
23
'?' condition is false
329 cast<ConstantExpr>(U).getPredicate());
24
'U' is a 'ConstantExpr'
330 if (CmpInst::isIntPredicate(Pred))
25
Calling 'CmpInst::isIntPredicate'
28
Returning from 'CmpInst::isIntPredicate'
29
Taking false branch
331 MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
332 else if (Pred == CmpInst::FCMP_FALSE)
30
Assuming 'Pred' is not equal to FCMP_FALSE
31
Taking false branch
333 MIRBuilder.buildCopy(
334 Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
335 else if (Pred == CmpInst::FCMP_TRUE)
32
Assuming 'Pred' is not equal to FCMP_TRUE
33
Taking false branch
336 MIRBuilder.buildCopy(
337 Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
338 else {
339 assert(CI && "Instruction should be CmpInst");
340 MIRBuilder.buildFCmp(Pred, Res, Op0, Op1,
341 MachineInstr::copyFlagsFromInstruction(*CI));
34
Forming reference to null pointer
342 }
343
344 return true;
345}
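
Editor's note on the diagnostic: along the path above, U is a ConstantExpr compare whose predicate is a floating-point predicate other than FCMP_FALSE or FCMP_TRUE, so CI (the result of dyn_cast<CmpInst> at line 323) is still null when *CI is dereferenced at line 341, and the assert at line 339 is compiled away because this run defines NDEBUG (see the invocation above). A minimal sketch of one possible guard follows; it simply drops the instruction flags when no CmpInst is available. This is an illustrative assumption, not necessarily the upstream fix, and the local name FCmpFlags is hypothetical.

    // Sketch only: copy instruction flags when a real CmpInst exists,
    // otherwise pass no flags for a constant-expression fcmp.
    uint16_t FCmpFlags = 0;
    if (CI)
      FCmpFlags = MachineInstr::copyFlagsFromInstruction(*CI);
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, FCmpFlags);

An equivalent alternative is to select the flags inline, e.g. CI ? MachineInstr::copyFlagsFromInstruction(*CI) : 0, which likewise removes the unconditional dereference the analyzer flags.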
346
347bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
348 const ReturnInst &RI = cast<ReturnInst>(U);
349 const Value *Ret = RI.getReturnValue();
350 if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
351 Ret = nullptr;
352
353 ArrayRef<Register> VRegs;
354 if (Ret)
355 VRegs = getOrCreateVRegs(*Ret);
356
357 Register SwiftErrorVReg = 0;
358 if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
359 SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
360 &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
361 }
362
363 // The target may mess up with the insertion point, but
364 // this is not important as a return is the last instruction
365 // of the block anyway.
366 return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
367}
368
369void IRTranslator::emitBranchForMergedCondition(
370 const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
371 MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
372 BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
373 // If the leaf of the tree is a comparison, merge the condition into
374 // the caseblock.
375 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
376 CmpInst::Predicate Condition;
377 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
378 Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
379 } else {
380 const FCmpInst *FC = cast<FCmpInst>(Cond);
381 Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
382 }
383
384 SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
385 BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
386 CurBuilder->getDebugLoc(), TProb, FProb);
387 SL->SwitchCases.push_back(CB);
388 return;
389 }
390
391 // Create a CaseBlock record representing this branch.
392 CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
393 SwitchCG::CaseBlock CB(
394 Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
395 nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
396 SL->SwitchCases.push_back(CB);
397}
398
399static bool isValInBlock(const Value *V, const BasicBlock *BB) {
400 if (const Instruction *I = dyn_cast<Instruction>(V))
401 return I->getParent() == BB;
402 return true;
403}
404
405void IRTranslator::findMergedConditions(
406 const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
407 MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
408 Instruction::BinaryOps Opc, BranchProbability TProb,
409 BranchProbability FProb, bool InvertCond) {
410 using namespace PatternMatch;
411 assert((Opc == Instruction::And || Opc == Instruction::Or) &&
412 "Expected Opc to be AND/OR");
413 // Skip over not part of the tree and remember to invert op and operands at
414 // next level.
415 Value *NotCond;
416 if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
417 isValInBlock(NotCond, CurBB->getBasicBlock())) {
418 findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
419 !InvertCond);
420 return;
421 }
422
423 const Instruction *BOp = dyn_cast<Instruction>(Cond);
424 const Value *BOpOp0, *BOpOp1;
425 // Compute the effective opcode for Cond, taking into account whether it needs
426 // to be inverted, e.g.
427 // and (not (or A, B)), C
428 // gets lowered as
429 // and (and (not A, not B), C)
430 Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
431 if (BOp) {
432 BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
433 ? Instruction::And
434 : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
435 ? Instruction::Or
436 : (Instruction::BinaryOps)0);
437 if (InvertCond) {
438 if (BOpc == Instruction::And)
439 BOpc = Instruction::Or;
440 else if (BOpc == Instruction::Or)
441 BOpc = Instruction::And;
442 }
443 }
444
445 // If this node is not part of the or/and tree, emit it as a branch.
446 // Note that all nodes in the tree should have same opcode.
447 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
448 if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
449 !isValInBlock(BOpOp0, CurBB->getBasicBlock()) ||
450 !isValInBlock(BOpOp1, CurBB->getBasicBlock())) {
451 emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
452 InvertCond);
453 return;
454 }
455
456 // Create TmpBB after CurBB.
457 MachineFunction::iterator BBI(CurBB);
458 MachineBasicBlock *TmpBB =
459 MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
460 CurBB->getParent()->insert(++BBI, TmpBB);
461
462 if (Opc == Instruction::Or) {
463 // Codegen X | Y as:
464 // BB1:
465 // jmp_if_X TBB
466 // jmp TmpBB
467 // TmpBB:
468 // jmp_if_Y TBB
469 // jmp FBB
470 //
471
472 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
473 // The requirement is that
474 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
475 // = TrueProb for original BB.
476 // Assuming the original probabilities are A and B, one choice is to set
477 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
478 // A/(1+B) and 2B/(1+B). This choice assumes that
479 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
480 // Another choice is to assume TrueProb for BB1 equals to TrueProb for
481 // TmpBB, but the math is more complicated.
482
483 auto NewTrueProb = TProb / 2;
484 auto NewFalseProb = TProb / 2 + FProb;
485 // Emit the LHS condition.
486 findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
487 NewFalseProb, InvertCond);
488
489 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
490 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
491 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
492 // Emit the RHS condition into TmpBB.
493 findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
494 Probs[1], InvertCond);
495 } else {
496 assert(Opc == Instruction::And && "Unknown merge op!");
497 // Codegen X & Y as:
498 // BB1:
499 // jmp_if_X TmpBB
500 // jmp FBB
501 // TmpBB:
502 // jmp_if_Y TBB
503 // jmp FBB
504 //
505 // This requires creation of TmpBB after CurBB.
506
507 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
508 // The requirement is that
509 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
510 // = FalseProb for original BB.
511 // Assuming the original probabilities are A and B, one choice is to set
512 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
513 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
514 // TrueProb for BB1 * FalseProb for TmpBB.
515
516 auto NewTrueProb = TProb + FProb / 2;
517 auto NewFalseProb = FProb / 2;
518 // Emit the LHS condition.
519 findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
520 NewFalseProb, InvertCond);
521
522 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
523 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
524 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
525 // Emit the RHS condition into TmpBB.
526 findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
527 Probs[1], InvertCond);
528 }
529}
530
531bool IRTranslator::shouldEmitAsBranches(
532 const std::vector<SwitchCG::CaseBlock> &Cases) {
533 // For multiple cases, it's better to emit as branches.
534 if (Cases.size() != 2)
535 return true;
536
537 // If this is two comparisons of the same values or'd or and'd together, they
538 // will get folded into a single comparison, so don't emit two blocks.
539 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
540 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
541 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
542 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
543 return false;
544 }
545
546 // Handle: (X != null) | (Y != null) --> (X|Y) != 0
547 // Handle: (X == null) & (Y == null) --> (X|Y) == 0
548 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
549 Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
550 isa<Constant>(Cases[0].CmpRHS) &&
551 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
552 if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
553 Cases[0].TrueBB == Cases[1].ThisBB)
554 return false;
555 if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
556 Cases[0].FalseBB == Cases[1].ThisBB)
557 return false;
558 }
559
560 return true;
561}
562
563bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
564 const BranchInst &BrInst = cast<BranchInst>(U);
565 auto &CurMBB = MIRBuilder.getMBB();
566 auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));
567
568 if (BrInst.isUnconditional()) {
569 // If the unconditional target is the layout successor, fallthrough.
570 if (OptLevel == CodeGenOpt::None || !CurMBB.isLayoutSuccessor(Succ0MBB))
571 MIRBuilder.buildBr(*Succ0MBB);
572
573 // Link successors.
574 for (const BasicBlock *Succ : successors(&BrInst))
575 CurMBB.addSuccessor(&getMBB(*Succ));
576 return true;
577 }
578
579 // If this condition is one of the special cases we handle, do special stuff
580 // now.
581 const Value *CondVal = BrInst.getCondition();
582 MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));
583
584 const auto &TLI = *MF->getSubtarget().getTargetLowering();
585
586 // If this is a series of conditions that are or'd or and'd together, emit
587 // this as a sequence of branches instead of setcc's with and/or operations.
588 // As long as jumps are not expensive (exceptions for multi-use logic ops,
589 // unpredictable branches, and vector extracts because those jumps are likely
590 // expensive for any target), this should improve performance.
591 // For example, instead of something like:
592 // cmp A, B
593 // C = seteq
594 // cmp D, E
595 // F = setle
596 // or C, F
597 // jnz foo
598 // Emit:
599 // cmp A, B
600 // je foo
601 // cmp D, E
602 // jle foo
603 using namespace PatternMatch;
604 const Instruction *CondI = dyn_cast<Instruction>(CondVal);
605 if (!TLI.isJumpExpensive() && CondI && CondI->hasOneUse() &&
606 !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
607 Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
608 Value *Vec;
609 const Value *BOp0, *BOp1;
610 if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
611 Opcode = Instruction::And;
612 else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
613 Opcode = Instruction::Or;
614
615 if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
616 match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
617 findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
618 getEdgeProbability(&CurMBB, Succ0MBB),
619 getEdgeProbability(&CurMBB, Succ1MBB),
620 /*InvertCond=*/false);
621 assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");
622
623 // Allow some cases to be rejected.
624 if (shouldEmitAsBranches(SL->SwitchCases)) {
625 // Emit the branch for this block.
626 emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
627 SL->SwitchCases.erase(SL->SwitchCases.begin());
628 return true;
629 }
630
631 // Okay, we decided not to do this, remove any inserted MBB's and clear
632 // SwitchCases.
633 for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
634 MF->erase(SL->SwitchCases[I].ThisBB);
635
636 SL->SwitchCases.clear();
637 }
638 }
639
640 // Create a CaseBlock record representing this branch.
641 SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
642 ConstantInt::getTrue(MF->getFunction().getContext()),
643 nullptr, Succ0MBB, Succ1MBB, &CurMBB,
644 CurBuilder->getDebugLoc());
645
646 // Use emitSwitchCase to actually insert the fast branch sequence for this
647 // cond branch.
648 emitSwitchCase(CB, &CurMBB, *CurBuilder);
649 return true;
650}
651
652void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
653 MachineBasicBlock *Dst,
654 BranchProbability Prob) {
655 if (!FuncInfo.BPI) {
656 Src->addSuccessorWithoutProb(Dst);
657 return;
658 }
659 if (Prob.isUnknown())
660 Prob = getEdgeProbability(Src, Dst);
661 Src->addSuccessor(Dst, Prob);
662}
663
664BranchProbability
665IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
666 const MachineBasicBlock *Dst) const {
667 const BasicBlock *SrcBB = Src->getBasicBlock();
668 const BasicBlock *DstBB = Dst->getBasicBlock();
669 if (!FuncInfo.BPI) {
670 // If BPI is not available, set the default probability as 1 / N, where N is
671 // the number of successors.
672 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
673 return BranchProbability(1, SuccSize);
674 }
675 return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
676}
677
678bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
679 using namespace SwitchCG;
680 // Extract cases from the switch.
681 const SwitchInst &SI = cast<SwitchInst>(U);
682 BranchProbabilityInfo *BPI = FuncInfo.BPI;
683 CaseClusterVector Clusters;
684 Clusters.reserve(SI.getNumCases());
685 for (auto &I : SI.cases()) {
686 MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
687 assert(Succ && "Could not find successor mbb in mapping");
688 const ConstantInt *CaseVal = I.getCaseValue();
689 BranchProbability Prob =
690 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
691 : BranchProbability(1, SI.getNumCases() + 1);
692 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
693 }
694
695 MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());
696
697 // Cluster adjacent cases with the same destination. We do this at all
698 // optimization levels because it's cheap to do and will make codegen faster
699 // if there are many clusters.
700 sortAndRangeify(Clusters);
701
702 MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());
703
704 // If there is only the default destination, jump there directly.
705 if (Clusters.empty()) {
706 SwitchMBB->addSuccessor(DefaultMBB);
707 if (DefaultMBB != SwitchMBB->getNextNode())
708 MIB.buildBr(*DefaultMBB);
709 return true;
710 }
711
712 SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr);
713 SL->findBitTestClusters(Clusters, &SI);
714
715 LLVM_DEBUG({
716 dbgs() << "Case clusters: ";
717 for (const CaseCluster &C : Clusters) {
718 if (C.Kind == CC_JumpTable)
719 dbgs() << "JT:";
720 if (C.Kind == CC_BitTests)
721 dbgs() << "BT:";
722
723 C.Low->getValue().print(dbgs(), true);
724 if (C.Low != C.High) {
725 dbgs() << '-';
726 C.High->getValue().print(dbgs(), true);
727 }
728 dbgs() << ' ';
729 }
730 dbgs() << '\n';
731 });
732
733 assert(!Clusters.empty());
734 SwitchWorkList WorkList;
735 CaseClusterIt First = Clusters.begin();
736 CaseClusterIt Last = Clusters.end() - 1;
737 auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
738 WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
739
740 // FIXME: At the moment we don't do any splitting optimizations here like
741 // SelectionDAG does, so this worklist only has one entry.
742 while (!WorkList.empty()) {
743 SwitchWorkListItem W = WorkList.back();
744 WorkList.pop_back();
745 if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
746 return false;
747 }
748 return true;
749}
750
751void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
752 MachineBasicBlock *MBB) {
753 // Emit the code for the jump table
754 assert(JT.Reg != -1U && "Should lower JT Header first!");
755 MachineIRBuilder MIB(*MBB->getParent());
756 MIB.setMBB(*MBB);
757 MIB.setDebugLoc(CurBuilder->getDebugLoc());
758
759 Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
760 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
761
762 auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
763 MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
764}
765
766bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
767 SwitchCG::JumpTableHeader &JTH,
768 MachineBasicBlock *HeaderBB) {
769 MachineIRBuilder MIB(*HeaderBB->getParent());
770 MIB.setMBB(*HeaderBB);
771 MIB.setDebugLoc(CurBuilder->getDebugLoc());
772
773 const Value &SValue = *JTH.SValue;
774 // Subtract the lowest switch case value from the value being switched on.
775 const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
776 Register SwitchOpReg = getOrCreateVReg(SValue);
777 auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
778 auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);
779
780 // This value may be smaller or larger than the target's pointer type, and
781 // therefore require extension or truncating.
782 Type *PtrIRTy = SValue.getType()->getPointerTo();
783 const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
784 Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);
785
786 JT.Reg = Sub.getReg(0);
787
788 if (JTH.OmitRangeCheck) {
789 if (JT.MBB != HeaderBB->getNextNode())
790 MIB.buildBr(*JT.MBB);
791 return true;
792 }
793
794 // Emit the range check for the jump table, and branch to the default block
795 // for the switch statement if the value being switched on exceeds the
796 // largest case in the switch.
797 auto Cst = getOrCreateVReg(
798 *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
799 Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
800 auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);
801
802 auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);
803
804 // Avoid emitting unnecessary branches to the next block.
805 if (JT.MBB != HeaderBB->getNextNode())
806 BrCond = MIB.buildBr(*JT.MBB);
807 return true;
808}
809
810void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
811 MachineBasicBlock *SwitchBB,
812 MachineIRBuilder &MIB) {
813 Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
814 Register Cond;
815 DebugLoc OldDbgLoc = MIB.getDebugLoc();
816 MIB.setDebugLoc(CB.DbgLoc);
817 MIB.setMBB(*CB.ThisBB);
818
819 if (CB.PredInfo.NoCmp) {
820 // Branch or fall through to TrueBB.
821 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
822 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
823 CB.ThisBB);
824 CB.ThisBB->normalizeSuccProbs();
825 if (CB.TrueBB != CB.ThisBB->getNextNode())
826 MIB.buildBr(*CB.TrueBB);
827 MIB.setDebugLoc(OldDbgLoc);
828 return;
829 }
830
831 const LLT i1Ty = LLT::scalar(1);
832 // Build the compare.
833 if (!CB.CmpMHS) {
834 const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
835 // For conditional branch lowering, we might try to do something silly like
836 // emit an G_ICMP to compare an existing G_ICMP i1 result with true. If so,
837 // just re-use the existing condition vreg.
838 if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI &&
839 CI->getZExtValue() == 1 && CB.PredInfo.Pred == CmpInst::ICMP_EQ) {
840 Cond = CondLHS;
841 } else {
842 Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
843 if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
844 Cond =
845 MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
846 else
847 Cond =
848 MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
849 }
850 } else {
851 assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
852 "Can only handle SLE ranges");
853
854 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
855 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
856
857 Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
858 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
859 Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
860 Cond =
861 MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
862 } else {
863 const LLT CmpTy = MRI->getType(CmpOpReg);
864 auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
865 auto Diff = MIB.buildConstant(CmpTy, High - Low);
866 Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
867 }
868 }
869
870 // Update successor info
871 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
872
873 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
874 CB.ThisBB);
875
876 // TrueBB and FalseBB are always different unless the incoming IR is
877 // degenerate. This only happens when running llc on weird IR.
878 if (CB.TrueBB != CB.FalseBB)
879 addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
880 CB.ThisBB->normalizeSuccProbs();
881
882 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
883 CB.ThisBB);
884
885 MIB.buildBrCond(Cond, *CB.TrueBB);
886 MIB.buildBr(*CB.FalseBB);
887 MIB.setDebugLoc(OldDbgLoc);
888}
889
890bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
891 MachineBasicBlock *SwitchMBB,
892 MachineBasicBlock *CurMBB,
893 MachineBasicBlock *DefaultMBB,
894 MachineIRBuilder &MIB,
895 MachineFunction::iterator BBI,
896 BranchProbability UnhandledProbs,
897 SwitchCG::CaseClusterIt I,
898 MachineBasicBlock *Fallthrough,
899 bool FallthroughUnreachable) {
900 using namespace SwitchCG;
901 MachineFunction *CurMF = SwitchMBB->getParent();
902 // FIXME: Optimize away range check based on pivot comparisons.
903 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
904 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
905 BranchProbability DefaultProb = W.DefaultProb;
906
907 // The jump block hasn't been inserted yet; insert it here.
908 MachineBasicBlock *JumpMBB = JT->MBB;
909 CurMF->insert(BBI, JumpMBB);
910
911 // Since the jump table block is separate from the switch block, we need
912 // to keep track of it as a machine predecessor to the default block,
913 // otherwise we lose the phi edges.
914 addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
915 CurMBB);
916 addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
917 JumpMBB);
918
919 auto JumpProb = I->Prob;
920 auto FallthroughProb = UnhandledProbs;
921
922 // If the default statement is a target of the jump table, we evenly
923 // distribute the default probability to successors of CurMBB. Also
924 // update the probability on the edge from JumpMBB to Fallthrough.
925 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
926 SE = JumpMBB->succ_end();
927 SI != SE; ++SI) {
928 if (*SI == DefaultMBB) {
929 JumpProb += DefaultProb / 2;
930 FallthroughProb -= DefaultProb / 2;
931 JumpMBB->setSuccProbability(SI, DefaultProb / 2);
932 JumpMBB->normalizeSuccProbs();
933 } else {
934 // Also record edges from the jump table block to it's successors.
935 addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
936 JumpMBB);
937 }
938 }
939
940 // Skip the range check if the fallthrough block is unreachable.
941 if (FallthroughUnreachable)
942 JTH->OmitRangeCheck = true;
943
944 if (!JTH->OmitRangeCheck)
945 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
946 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
947 CurMBB->normalizeSuccProbs();
948
949 // The jump table header will be inserted in our current block, do the
950 // range check, and fall through to our fallthrough block.
951 JTH->HeaderBB = CurMBB;
952 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
953
954 // If we're in the right place, emit the jump table header right now.
955 if (CurMBB == SwitchMBB) {
956 if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
957 return false;
958 JTH->Emitted = true;
959 }
960 return true;
961}
962bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
963 Value *Cond,
964 MachineBasicBlock *Fallthrough,
965 bool FallthroughUnreachable,
966 BranchProbability UnhandledProbs,
967 MachineBasicBlock *CurMBB,
968 MachineIRBuilder &MIB,
969 MachineBasicBlock *SwitchMBB) {
970 using namespace SwitchCG;
971 const Value *RHS, *LHS, *MHS;
972 CmpInst::Predicate Pred;
973 if (I->Low == I->High) {
974 // Check Cond == I->Low.
975 Pred = CmpInst::ICMP_EQ;
976 LHS = Cond;
977 RHS = I->Low;
978 MHS = nullptr;
979 } else {
980 // Check I->Low <= Cond <= I->High.
981 Pred = CmpInst::ICMP_SLE;
982 LHS = I->Low;
983 MHS = Cond;
984 RHS = I->High;
985 }
986
987 // If Fallthrough is unreachable, fold away the comparison.
988 // The false probability is the sum of all unhandled cases.
989 CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
990 CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);
991
992 emitSwitchCase(CB, SwitchMBB, MIB);
993 return true;
994}
995
996void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
997 MachineBasicBlock *SwitchBB) {
998 MachineIRBuilder &MIB = *CurBuilder;
999 MIB.setMBB(*SwitchBB);
1000
1001 // Subtract the minimum value.
1002 Register SwitchOpReg = getOrCreateVReg(*B.SValue);
1003
1004 LLT SwitchOpTy = MRI->getType(SwitchOpReg);
1005 Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
1006 auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);
1007
1008 // Ensure that the type will fit the mask value.
1009 LLT MaskTy = SwitchOpTy;
1010 for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
1011 if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
1012 // Switch table case range are encoded into series of masks.
1013 // Just use pointer type, it's guaranteed to fit.
1014 MaskTy = LLT::scalar(64);
1015 break;
1016 }
1017 }
1018 Register SubReg = RangeSub.getReg(0);
1019 if (SwitchOpTy != MaskTy)
1020 SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);
1021
1022 B.RegVT = getMVTForLLT(MaskTy);
1023 B.Reg = SubReg;
1024
1025 MachineBasicBlock *MBB = B.Cases[0].ThisBB;
1026
1027 if (!B.OmitRangeCheck)
1028 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
1029 addSuccessorWithProb(SwitchBB, MBB, B.Prob);
1030
1031 SwitchBB->normalizeSuccProbs();
1032
1033 if (!B.OmitRangeCheck) {
1034 // Conditional branch to the default block.
1035 auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
1036 auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
1037 RangeSub, RangeCst);
1038 MIB.buildBrCond(RangeCmp, *B.Default);
1039 }
1040
1041 // Avoid emitting unnecessary branches to the next block.
1042 if (MBB != SwitchBB->getNextNode())
1043 MIB.buildBr(*MBB);
1044}
1045
1046void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
1047 MachineBasicBlock *NextMBB,
1048 BranchProbability BranchProbToNext,
1049 Register Reg, SwitchCG::BitTestCase &B,
1050 MachineBasicBlock *SwitchBB) {
1051 MachineIRBuilder &MIB = *CurBuilder;
1052 MIB.setMBB(*SwitchBB);
1053
1054 LLT SwitchTy = getLLTForMVT(BB.RegVT);
1055 Register Cmp;
1056 unsigned PopCount = countPopulation(B.Mask);
1057 if (PopCount == 1) {
1058 // Testing for a single bit; just compare the shift count with what it
1059 // would need to be to shift a 1 bit in that position.
1060 auto MaskTrailingZeros =
1061 MIB.buildConstant(SwitchTy, countTrailingZeros(B.Mask));
1062 Cmp =
1063 MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
1064 .getReg(0);
1065 } else if (PopCount == BB.Range) {
1066 // There is only one zero bit in the range, test for it directly.
1067 auto MaskTrailingOnes =
1068 MIB.buildConstant(SwitchTy, countTrailingOnes(B.Mask));
1069 Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
1070 .getReg(0);
1071 } else {
1072 // Make desired shift.
1073 auto CstOne = MIB.buildConstant(SwitchTy, 1);
1074 auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);
1075
1076 // Emit bit tests and jumps.
1077 auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
1078 auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
1079 auto CstZero = MIB.buildConstant(SwitchTy, 0);
1080 Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
1081 .getReg(0);
1082 }
1083
1084 // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
1085 addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
1086 // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
1087 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
1088 // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
1089 // one as they are relative probabilities (and thus work more like weights),
1090 // and hence we need to normalize them to let the sum of them become one.
1091 SwitchBB->normalizeSuccProbs();
1092
1093 // Record the fact that the IR edge from the header to the bit test target
1094 // will go through our new block. Neeeded for PHIs to have nodes added.
1095 addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
1096 SwitchBB);
1097
1098 MIB.buildBrCond(Cmp, *B.TargetBB);
1099
1100 // Avoid emitting unnecessary branches to the next block.
1101 if (NextMBB != SwitchBB->getNextNode())
1102 MIB.buildBr(*NextMBB);
1103}
1104
1105bool IRTranslator::lowerBitTestWorkItem(
1106 SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
1107 MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
1108 MachineIRBuilder &MIB, MachineFunction::iterator BBI,
1109 BranchProbability DefaultProb, BranchProbability UnhandledProbs,
1110 SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
1111 bool FallthroughUnreachable) {
1112 using namespace SwitchCG;
1113 MachineFunction *CurMF = SwitchMBB->getParent();
1114 // FIXME: Optimize away range check based on pivot comparisons.
1115 BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
1116 // The bit test blocks haven't been inserted yet; insert them here.
1117 for (BitTestCase &BTC : BTB->Cases)
1118 CurMF->insert(BBI, BTC.ThisBB);
1119
1120 // Fill in fields of the BitTestBlock.
1121 BTB->Parent = CurMBB;
1122 BTB->Default = Fallthrough;
1123
1124 BTB->DefaultProb = UnhandledProbs;
1125 // If the cases in bit test don't form a contiguous range, we evenly
1126 // distribute the probability on the edge to Fallthrough to two
1127 // successors of CurMBB.
1128 if (!BTB->ContiguousRange) {
1129 BTB->Prob += DefaultProb / 2;
1130 BTB->DefaultProb -= DefaultProb / 2;
1131 }
1132
1133 if (FallthroughUnreachable) {
1134 // Skip the range check if the fallthrough block is unreachable.
1135 BTB->OmitRangeCheck = true;
1136 }
1137
1138 // If we're in the right place, emit the bit test header right now.
1139 if (CurMBB == SwitchMBB) {
1140 emitBitTestHeader(*BTB, SwitchMBB);
1141 BTB->Emitted = true;
1142 }
1143 return true;
1144}
1145
1146bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
1147 Value *Cond,
1148 MachineBasicBlock *SwitchMBB,
1149 MachineBasicBlock *DefaultMBB,
1150 MachineIRBuilder &MIB) {
1151 using namespace SwitchCG;
1152 MachineFunction *CurMF = FuncInfo.MF;
1153 MachineBasicBlock *NextMBB = nullptr;
1154 MachineFunction::iterator BBI(W.MBB);
1155 if (++BBI != FuncInfo.MF->end())
1156 NextMBB = &*BBI;
1157
1158 if (EnableOpts) {
1159 // Here, we order cases by probability so the most likely case will be
1160 // checked first. However, two clusters can have the same probability in
1161 // which case their relative ordering is non-deterministic. So we use Low
1162 // as a tie-breaker as clusters are guaranteed to never overlap.
1163 llvm::sort(W.FirstCluster, W.LastCluster + 1,
1164 [](const CaseCluster &a, const CaseCluster &b) {
1165 return a.Prob != b.Prob
1166 ? a.Prob > b.Prob
1167 : a.Low->getValue().slt(b.Low->getValue());
1168 });
1169
1170 // Rearrange the case blocks so that the last one falls through if possible
1171 // without changing the order of probabilities.
1172 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
1173 --I;
1174 if (I->Prob > W.LastCluster->Prob)
1175 break;
1176 if (I->Kind == CC_Range && I->MBB == NextMBB) {
1177 std::swap(*I, *W.LastCluster);
1178 break;
1179 }
1180 }
1181 }
1182
1183 // Compute total probability.
1184 BranchProbability DefaultProb = W.DefaultProb;
1185 BranchProbability UnhandledProbs = DefaultProb;
1186 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
1187 UnhandledProbs += I->Prob;
1188
1189 MachineBasicBlock *CurMBB = W.MBB;
1190 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
1191 bool FallthroughUnreachable = false;
1192 MachineBasicBlock *Fallthrough;
1193 if (I == W.LastCluster) {
1194 // For the last cluster, fall through to the default destination.
1195 Fallthrough = DefaultMBB;
1196 FallthroughUnreachable = isa<UnreachableInst>(
1197 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
1198 } else {
1199 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
1200 CurMF->insert(BBI, Fallthrough);
1201 }
1202 UnhandledProbs -= I->Prob;
1203
1204 switch (I->Kind) {
1205 case CC_BitTests: {
1206 if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1207 DefaultProb, UnhandledProbs, I, Fallthrough,
1208 FallthroughUnreachable)) {
1209 LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");
1210 return false;
1211 }
1212 break;
1213 }
1214
1215 case CC_JumpTable: {
1216 if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1217 UnhandledProbs, I, Fallthrough,
1218 FallthroughUnreachable)) {
1219 LLVM_DEBUG(dbgs() << "Failed to lower jump table");
1220 return false;
1221 }
1222 break;
1223 }
1224 case CC_Range: {
1225 if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
1226 FallthroughUnreachable, UnhandledProbs,
1227 CurMBB, MIB, SwitchMBB)) {
1228 LLVM_DEBUG(dbgs() << "Failed to lower switch range");
1229 return false;
1230 }
1231 break;
1232 }
1233 }
1234 CurMBB = Fallthrough;
1235 }
1236
1237 return true;
1238}
1239
1240bool IRTranslator::translateIndirectBr(const User &U,
1241 MachineIRBuilder &MIRBuilder) {
1242 const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
1243
1244 const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
1245 MIRBuilder.buildBrIndirect(Tgt);
1246
1247 // Link successors.
1248 SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
1249 MachineBasicBlock &CurBB = MIRBuilder.getMBB();
1250 for (const BasicBlock *Succ : successors(&BrInst)) {
1251 // It's legal for indirectbr instructions to have duplicate blocks in the
1252 // destination list. We don't allow this in MIR. Skip anything that's
1253 // already a successor.
1254 if (!AddedSuccessors.insert(Succ).second)
1255 continue;
1256 CurBB.addSuccessor(&getMBB(*Succ));
1257 }
1258
1259 return true;
1260}
1261
1262static bool isSwiftError(const Value *V) {
1263 if (auto Arg = dyn_cast<Argument>(V))
1264 return Arg->hasSwiftErrorAttr();
1265 if (auto AI = dyn_cast<AllocaInst>(V))
1266 return AI->isSwiftError();
1267 return false;
1268}
1269
1270bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
1271 const LoadInst &LI = cast<LoadInst>(U);
1272 if (DL->getTypeStoreSize(LI.getType()) == 0)
1273 return true;
1274
1275 ArrayRef<Register> Regs = getOrCreateVRegs(LI);
1276 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
1277 Register Base = getOrCreateVReg(*LI.getPointerOperand());
1278
1279 Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
1280 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1281
1282 if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
1283 assert(Regs.size() == 1 && "swifterror should be single pointer");
1284 Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
1285 LI.getPointerOperand());
1286 MIRBuilder.buildCopy(Regs[0], VReg);
1287 return true;
1288 }
1289
1290 auto &TLI = *MF->getSubtarget().getTargetLowering();
1291 MachineMemOperand::Flags Flags = TLI.getLoadMemOperandFlags(LI, *DL);
1292
1293 const MDNode *Ranges =
1294 Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
1295 for (unsigned i = 0; i < Regs.size(); ++i) {
1296 Register Addr;
1297 MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
1298
1299 MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
1300 Align BaseAlign = getMemOpAlign(LI);
1301 AAMDNodes AAMetadata;
1302 LI.getAAMetadata(AAMetadata);
1303 auto MMO = MF->getMachineMemOperand(
1304 Ptr, Flags, MRI->getType(Regs[i]),
1305 commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges,
1306 LI.getSyncScopeID(), LI.getOrdering());
1307 MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
1308 }
1309
1310 return true;
1311}
1312
1313bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
1314 const StoreInst &SI = cast<StoreInst>(U);
1315 if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
1316 return true;
1317
1318 ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
1319 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
1320 Register Base = getOrCreateVReg(*SI.getPointerOperand());
1321
1322 Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
1323 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1324
1325 if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
1326 assert(Vals.size() == 1 && "swifterror should be single pointer");
1327
1328 Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
1329 SI.getPointerOperand());
1330 MIRBuilder.buildCopy(VReg, Vals[0]);
1331 return true;
1332 }
1333
1334 auto &TLI = *MF->getSubtarget().getTargetLowering();
1335 MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL);
1336
1337 for (unsigned i = 0; i < Vals.size(); ++i) {
1338 Register Addr;
1339 MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
1340
1341 MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
1342 Align BaseAlign = getMemOpAlign(SI);
1343 AAMDNodes AAMetadata;
1344 SI.getAAMetadata(AAMetadata);
1345 auto MMO = MF->getMachineMemOperand(
1346 Ptr, Flags, MRI->getType(Vals[i]),
1347 commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr,
1348 SI.getSyncScopeID(), SI.getOrdering());
1349 MIRBuilder.buildStore(Vals[i], Addr, *MMO);
1350 }
1351 return true;
1352}
1353
1354static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
1355 const Value *Src = U.getOperand(0);
1356 Type *Int32Ty = Type::getInt32Ty(U.getContext());
1357
1358 // getIndexedOffsetInType is designed for GEPs, so the first index is the
1359 // usual array element rather than looking into the actual aggregate.
1360 SmallVector<Value *, 1> Indices;
1361 Indices.push_back(ConstantInt::get(Int32Ty, 0));
1362
1363 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
1364 for (auto Idx : EVI->indices())
1365 Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1366 } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
1367 for (auto Idx : IVI->indices())
1368 Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1369 } else {
1370 for (unsigned i = 1; i < U.getNumOperands(); ++i)
1371 Indices.push_back(U.getOperand(i));
1372 }
1373
1374 return 8 * static_cast<uint64_t>(
1375 DL.getIndexedOffsetInType(Src->getType(), Indices));
1376}
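// Illustrative example (not part of the original source; assumes a typical
// 64-bit DataLayout): for
//   %f = extractvalue {i32, i64} %agg, 1
// the indices become {0, 1}, getIndexedOffsetInType reports a byte offset of
// 8 (the i64 field follows 4 bytes of padding), and this helper returns 64,
// i.e. the offset expressed in bits.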
1377
1378bool IRTranslator::translateExtractValue(const User &U,
1379 MachineIRBuilder &MIRBuilder) {
1380 const Value *Src = U.getOperand(0);
1381 uint64_t Offset = getOffsetFromIndices(U, *DL);
1382 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1383 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
1384 unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
1385 auto &DstRegs = allocateVRegs(U);
1386
1387 for (unsigned i = 0; i < DstRegs.size(); ++i)
1388 DstRegs[i] = SrcRegs[Idx++];
1389
1390 return true;
1391}
1392
1393bool IRTranslator::translateInsertValue(const User &U,
1394 MachineIRBuilder &MIRBuilder) {
1395 const Value *Src = U.getOperand(0);
1396 uint64_t Offset = getOffsetFromIndices(U, *DL);
1397 auto &DstRegs = allocateVRegs(U);
1398 ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
1399 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1400 ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
1401 auto InsertedIt = InsertedRegs.begin();
1402
1403 for (unsigned i = 0; i < DstRegs.size(); ++i) {
1404 if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
1405 DstRegs[i] = *InsertedIt++;
1406 else
1407 DstRegs[i] = SrcRegs[i];
1408 }
1409
1410 return true;
1411}
1412
1413bool IRTranslator::translateSelect(const User &U,
1414 MachineIRBuilder &MIRBuilder) {
1415 Register Tst = getOrCreateVReg(*U.getOperand(0));
1416 ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
1417 ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
1418 ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
1419
1420 uint16_t Flags = 0;
1421 if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
1422 Flags = MachineInstr::copyFlagsFromInstruction(*SI);
1423
1424 for (unsigned i = 0; i < ResRegs.size(); ++i) {
1425 MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
1426 }
1427
1428 return true;
1429}
1430
1431bool IRTranslator::translateCopy(const User &U, const Value &V,
1432 MachineIRBuilder &MIRBuilder) {
1433 Register Src = getOrCreateVReg(V);
1434 auto &Regs = *VMap.getVRegs(U);
1435 if (Regs.empty()) {
1436 Regs.push_back(Src);
1437 VMap.getOffsets(U)->push_back(0);
1438 } else {
1439 // If we already assigned a vreg for this instruction, we can't change that.
1440 // Emit a copy to satisfy the users we already emitted.
1441 MIRBuilder.buildCopy(Regs[0], Src);
1442 }
1443 return true;
1444}
1445
1446bool IRTranslator::translateBitCast(const User &U,
1447 MachineIRBuilder &MIRBuilder) {
1448 // If we're bitcasting to the source type, we can reuse the source vreg.
1449 if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
1450 getLLTForType(*U.getType(), *DL))
1451 return translateCopy(U, *U.getOperand(0), MIRBuilder);
1452
1453 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
1454}
1455
1456bool IRTranslator::translateCast(unsigned Opcode, const User &U,
1457 MachineIRBuilder &MIRBuilder) {
1458 Register Op = getOrCreateVReg(*U.getOperand(0));
1459 Register Res = getOrCreateVReg(U);
1460 MIRBuilder.buildInstr(Opcode, {Res}, {Op});
1461 return true;
1462}
1463
1464bool IRTranslator::translateGetElementPtr(const User &U,
1465 MachineIRBuilder &MIRBuilder) {
1466 Value &Op0 = *U.getOperand(0);
1467 Register BaseReg = getOrCreateVReg(Op0);
1468 Type *PtrIRTy = Op0.getType();
1469 LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1470 Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
1471 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1472
1473 // Normalize vector GEPs: any scalar operands must be splatted into
1474 // vectors of the same width.
1475 unsigned VectorWidth = 0;
1476
1477 // True if we should use a splat vector; using VectorWidth alone is not
1478 // sufficient.
1479 bool WantSplatVector = false;
1480 if (auto *VT = dyn_cast<VectorType>(U.getType())) {
1481 VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
1482 // We don't produce 1 x N vectors; those are treated as scalars.
1483 WantSplatVector = VectorWidth > 1;
1484 }
1485
1486 // We might need to splat the base pointer into a vector if the offsets
1487 // are vectors.
1488 if (WantSplatVector && !PtrTy.isVector()) {
1489 BaseReg =
1490 MIRBuilder
1491 .buildSplatVector(LLT::fixed_vector(VectorWidth, PtrTy), BaseReg)
1492 .getReg(0);
1493 PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
1494 PtrTy = getLLTForType(*PtrIRTy, *DL);
1495 OffsetIRTy = DL->getIntPtrType(PtrIRTy);
1496 OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1497 }
1498
1499 int64_t Offset = 0;
1500 for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
1501 GTI != E; ++GTI) {
1502 const Value *Idx = GTI.getOperand();
1503 if (StructType *StTy = GTI.getStructTypeOrNull()) {
1504 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
1505 Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
1506 continue;
1507 } else {
1508 uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
1509
1510 // If this is a scalar constant or a splat vector of constants,
1511 // handle it quickly.
1512 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
1513 Offset += ElementSize * CI->getSExtValue();
1514 continue;
1515 }
1516
1517 if (Offset != 0) {
1518 auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
1519 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
1520 .getReg(0);
1521 Offset = 0;
1522 }
1523
1524 Register IdxReg = getOrCreateVReg(*Idx);
1525 LLT IdxTy = MRI->getType(IdxReg);
1526 if (IdxTy != OffsetTy) {
1527 if (!IdxTy.isVector() && WantSplatVector) {
1528 IdxReg = MIRBuilder.buildSplatVector(
1529 OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0);
1530 }
1531
1532 IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
1533 }
1534
1535 // N = N + Idx * ElementSize;
1536 // Avoid doing it for ElementSize of 1.
1537 Register GepOffsetReg;
1538 if (ElementSize != 1) {
1539 auto ElementSizeMIB = MIRBuilder.buildConstant(
1540 getLLTForType(*OffsetIRTy, *DL), ElementSize);
1541 GepOffsetReg =
1542 MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
1543 } else
1544 GepOffsetReg = IdxReg;
1545
1546 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
1547 }
1548 }
1549
1550 if (Offset != 0) {
1551 auto OffsetMIB =
1552 MIRBuilder.buildConstant(OffsetTy, Offset);
1553 MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
1554 return true;
1555 }
1556
1557 MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
1558 return true;
1559}
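// Sketch of the expected lowering (illustrative only, 64-bit target assumed):
// a GEP with one dynamic index, such as
//   %q = getelementptr i32, i32* %p, i64 %i
// becomes roughly
//   %c4  = G_CONSTANT i64 4
//   %off = G_MUL %i, %c4
//   %q   = G_PTR_ADD %p, %off
// while an all-constant GEP folds into a single G_PTR_ADD of the accumulated
// byte offset.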
1560
1561bool IRTranslator::translateMemFunc(const CallInst &CI,
1562 MachineIRBuilder &MIRBuilder,
1563 unsigned Opcode) {
1564
1565 // If the source is undef, then just emit a nop.
1566 if (isa<UndefValue>(CI.getArgOperand(1)))
1567 return true;
1568
1569 SmallVector<Register, 3> SrcRegs;
1570
1571 unsigned MinPtrSize = UINT_MAX;
1572 for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
1573 Register SrcReg = getOrCreateVReg(**AI);
1574 LLT SrcTy = MRI->getType(SrcReg);
1575 if (SrcTy.isPointer())
1576 MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);
1577 SrcRegs.push_back(SrcReg);
1578 }
1579
1580 LLT SizeTy = LLT::scalar(MinPtrSize);
1581
1582 // The size operand should be the minimum of the pointer sizes.
1583 Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
1584 if (MRI->getType(SizeOpReg) != SizeTy)
1585 SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0);
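  // Illustrative note (not in the original source): for a memcpy between a
  // 64-bit and a 32-bit address space, MinPtrSize is 32, so an i64 length
  // operand is truncated here to s32 before it is attached to the G_MEMCPY.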
1586
1587 auto ICall = MIRBuilder.buildInstr(Opcode);
1588 for (Register SrcReg : SrcRegs)
1589 ICall.addUse(SrcReg);
1590
1591 Align DstAlign;
1592 Align SrcAlign;
1593 unsigned IsVol =
1594 cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1))
1595 ->getZExtValue();
1596
1597 if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
1598 DstAlign = MCI->getDestAlign().valueOrOne();
1599 SrcAlign = MCI->getSourceAlign().valueOrOne();
1600 } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
1601 DstAlign = MCI->getDestAlign().valueOrOne();
1602 SrcAlign = MCI->getSourceAlign().valueOrOne();
1603 } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
1604 DstAlign = MMI->getDestAlign().valueOrOne();
1605 SrcAlign = MMI->getSourceAlign().valueOrOne();
1606 } else {
1607 auto *MSI = cast<MemSetInst>(&CI);
1608 DstAlign = MSI->getDestAlign().valueOrOne();
1609 }
1610
1611 if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
1612 // We need to propagate the tail call flag from the IR inst as an argument.
1613 // Otherwise, we have to pessimize and assume later that we cannot tail call
1614 // any memory intrinsics.
1615 ICall.addImm(CI.isTailCall() ? 1 : 0);
1616 }
1617
1618 // Create mem operands to store the alignment and volatile info.
1619 auto VolFlag = IsVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
1620 ICall.addMemOperand(MF->getMachineMemOperand(
1621 MachinePointerInfo(CI.getArgOperand(0)),
1622 MachineMemOperand::MOStore | VolFlag, 1, DstAlign));
1623 if (Opcode != TargetOpcode::G_MEMSET)
1624 ICall.addMemOperand(MF->getMachineMemOperand(
1625 MachinePointerInfo(CI.getArgOperand(1)),
1626 MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign));
1627
1628 return true;
1629}
1630
1631void IRTranslator::getStackGuard(Register DstReg,
1632 MachineIRBuilder &MIRBuilder) {
1633 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1634 MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
1635 auto MIB =
1636 MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
1637
1638 auto &TLI = *MF->getSubtarget().getTargetLowering();
1639 Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
1640 if (!Global)
1641 return;
1642
1643 unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
1644 LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));
1645
1646 MachinePointerInfo MPInfo(Global);
1647 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
1648 MachineMemOperand::MODereferenceable;
1649 MachineMemOperand *MemRef = MF->getMachineMemOperand(
1650 MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
1651 MIB.setMemRefs({MemRef});
1652}
1653
1654bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
1655 MachineIRBuilder &MIRBuilder) {
1656 ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
1657 MIRBuilder.buildInstr(
1658 Op, {ResRegs[0], ResRegs[1]},
1659 {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});
1660
1661 return true;
1662}
1663
1664bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
1665 MachineIRBuilder &MIRBuilder) {
1666 Register Dst = getOrCreateVReg(CI);
1667 Register Src0 = getOrCreateVReg(*CI.getOperand(0));
1668 Register Src1 = getOrCreateVReg(*CI.getOperand(1));
1669 uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
1670 MIRBuilder.buildInstr(Op, {Dst}, { Src0, Src1, Scale });
1671 return true;
1672}
1673
1674unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
1675 switch (ID) {
1676 default:
1677 break;
1678 case Intrinsic::bswap:
1679 return TargetOpcode::G_BSWAP;
1680 case Intrinsic::bitreverse:
1681 return TargetOpcode::G_BITREVERSE;
1682 case Intrinsic::fshl:
1683 return TargetOpcode::G_FSHL;
1684 case Intrinsic::fshr:
1685 return TargetOpcode::G_FSHR;
1686 case Intrinsic::ceil:
1687 return TargetOpcode::G_FCEIL;
1688 case Intrinsic::cos:
1689 return TargetOpcode::G_FCOS;
1690 case Intrinsic::ctpop:
1691 return TargetOpcode::G_CTPOP;
1692 case Intrinsic::exp:
1693 return TargetOpcode::G_FEXP;
1694 case Intrinsic::exp2:
1695 return TargetOpcode::G_FEXP2;
1696 case Intrinsic::fabs:
1697 return TargetOpcode::G_FABS;
1698 case Intrinsic::copysign:
1699 return TargetOpcode::G_FCOPYSIGN;
1700 case Intrinsic::minnum:
1701 return TargetOpcode::G_FMINNUM;
1702 case Intrinsic::maxnum:
1703 return TargetOpcode::G_FMAXNUM;
1704 case Intrinsic::minimum:
1705 return TargetOpcode::G_FMINIMUM;
1706 case Intrinsic::maximum:
1707 return TargetOpcode::G_FMAXIMUM;
1708 case Intrinsic::canonicalize:
1709 return TargetOpcode::G_FCANONICALIZE;
1710 case Intrinsic::floor:
1711 return TargetOpcode::G_FFLOOR;
1712 case Intrinsic::fma:
1713 return TargetOpcode::G_FMA;
1714 case Intrinsic::log:
1715 return TargetOpcode::G_FLOG;
1716 case Intrinsic::log2:
1717 return TargetOpcode::G_FLOG2;
1718 case Intrinsic::log10:
1719 return TargetOpcode::G_FLOG10;
1720 case Intrinsic::nearbyint:
1721 return TargetOpcode::G_FNEARBYINT;
1722 case Intrinsic::pow:
1723 return TargetOpcode::G_FPOW;
1724 case Intrinsic::powi:
1725 return TargetOpcode::G_FPOWI;
1726 case Intrinsic::rint:
1727 return TargetOpcode::G_FRINT;
1728 case Intrinsic::round:
1729 return TargetOpcode::G_INTRINSIC_ROUND;
1730 case Intrinsic::roundeven:
1731 return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
1732 case Intrinsic::sin:
1733 return TargetOpcode::G_FSIN;
1734 case Intrinsic::sqrt:
1735 return TargetOpcode::G_FSQRT;
1736 case Intrinsic::trunc:
1737 return TargetOpcode::G_INTRINSIC_TRUNC;
1738 case Intrinsic::readcyclecounter:
1739 return TargetOpcode::G_READCYCLECOUNTER;
1740 case Intrinsic::ptrmask:
1741 return TargetOpcode::G_PTRMASK;
1742 case Intrinsic::lrint:
1743 return TargetOpcode::G_INTRINSIC_LRINT;
1744 // FADD/FMUL require checking the FMF, so are handled elsewhere.
1745 case Intrinsic::vector_reduce_fmin:
1746 return TargetOpcode::G_VECREDUCE_FMIN;
1747 case Intrinsic::vector_reduce_fmax:
1748 return TargetOpcode::G_VECREDUCE_FMAX;
1749 case Intrinsic::vector_reduce_add:
1750 return TargetOpcode::G_VECREDUCE_ADD;
1751 case Intrinsic::vector_reduce_mul:
1752 return TargetOpcode::G_VECREDUCE_MUL;
1753 case Intrinsic::vector_reduce_and:
1754 return TargetOpcode::G_VECREDUCE_AND;
1755 case Intrinsic::vector_reduce_or:
1756 return TargetOpcode::G_VECREDUCE_OR;
1757 case Intrinsic::vector_reduce_xor:
1758 return TargetOpcode::G_VECREDUCE_XOR;
1759 case Intrinsic::vector_reduce_smax:
1760 return TargetOpcode::G_VECREDUCE_SMAX;
1761 case Intrinsic::vector_reduce_smin:
1762 return TargetOpcode::G_VECREDUCE_SMIN;
1763 case Intrinsic::vector_reduce_umax:
1764 return TargetOpcode::G_VECREDUCE_UMAX;
1765 case Intrinsic::vector_reduce_umin:
1766 return TargetOpcode::G_VECREDUCE_UMIN;
1767 case Intrinsic::lround:
1768 return TargetOpcode::G_LROUND;
1769 case Intrinsic::llround:
1770 return TargetOpcode::G_LLROUND;
1771 }
1772 return Intrinsic::not_intrinsic;
1773}
1774
1775bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
1776 Intrinsic::ID ID,
1777 MachineIRBuilder &MIRBuilder) {
1778
1779 unsigned Op = getSimpleIntrinsicOpcode(ID);
1780
1781 // Is this a simple intrinsic?
1782 if (Op == Intrinsic::not_intrinsic)
1783 return false;
1784
1785 // Yes. Let's translate it.
1786 SmallVector<llvm::SrcOp, 4> VRegs;
1787 for (auto &Arg : CI.arg_operands())
1788 VRegs.push_back(getOrCreateVReg(*Arg));
1789
1790 MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
1791 MachineInstr::copyFlagsFromInstruction(CI));
1792 return true;
1793}
1794
1795// TODO: Include ConstrainedOps.def when all strict instructions are defined.
1796static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
1797 switch (ID) {
1798 case Intrinsic::experimental_constrained_fadd:
1799 return TargetOpcode::G_STRICT_FADD;
1800 case Intrinsic::experimental_constrained_fsub:
1801 return TargetOpcode::G_STRICT_FSUB;
1802 case Intrinsic::experimental_constrained_fmul:
1803 return TargetOpcode::G_STRICT_FMUL;
1804 case Intrinsic::experimental_constrained_fdiv:
1805 return TargetOpcode::G_STRICT_FDIV;
1806 case Intrinsic::experimental_constrained_frem:
1807 return TargetOpcode::G_STRICT_FREM;
1808 case Intrinsic::experimental_constrained_fma:
1809 return TargetOpcode::G_STRICT_FMA;
1810 case Intrinsic::experimental_constrained_sqrt:
1811 return TargetOpcode::G_STRICT_FSQRT;
1812 default:
1813 return 0;
1814 }
1815}
1816
1817bool IRTranslator::translateConstrainedFPIntrinsic(
1818 const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
1819 fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue();
1820
1821 unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
1822 if (!Opcode)
1823 return false;
1824
1825 unsigned Flags = MachineInstr::copyFlagsFromInstruction(FPI);
1826 if (EB == fp::ExceptionBehavior::ebIgnore)
1827 Flags |= MachineInstr::NoFPExcept;
1828
1829 SmallVector<llvm::SrcOp, 4> VRegs;
1830 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(0)));
1831 if (!FPI.isUnaryOp())
1832 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(1)));
1833 if (FPI.isTernaryOp())
1834 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(2)));
1835
1836 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
1837 return true;
1838}
1839
1840bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
1841 MachineIRBuilder &MIRBuilder) {
1842 if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
1843 if (ORE->enabled()) {
1844 const Function &F = *MI->getParent()->getParent();
1845 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1846 if (MemoryOpRemark::canHandle(MI, TLI)) {
1847 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, TLI);
1848 R.visit(MI);
1849 }
1850 }
1851 }
1852
1853 // If this is a simple intrinsic (that is, we just need to add a def of
1854 // a vreg and uses for each arg operand), then translate it.
1855 if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
1856 return true;
1857
1858 switch (ID) {
1859 default:
1860 break;
1861 case Intrinsic::lifetime_start:
1862 case Intrinsic::lifetime_end: {
1863 // No stack colouring in O0; discard region information.
1864 if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
1865 return true;
1866
1867 unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
1868 : TargetOpcode::LIFETIME_END;
1869
1870 // Get the underlying objects for the location passed on the lifetime
1871 // marker.
1872 SmallVector<const Value *, 4> Allocas;
1873 getUnderlyingObjects(CI.getArgOperand(1), Allocas);
1874
1875 // Iterate over each underlying object, creating lifetime markers for each
1876 // static alloca. Quit if we find a non-static alloca.
1877 for (const Value *V : Allocas) {
1878 const AllocaInst *AI = dyn_cast<AllocaInst>(V);
1879 if (!AI)
1880 continue;
1881
1882 if (!AI->isStaticAlloca())
1883 return true;
1884
1885 MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
1886 }
1887 return true;
1888 }
1889 case Intrinsic::dbg_declare: {
1890 const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
1891 assert(DI.getVariable() && "Missing variable");
1892
1893 const Value *Address = DI.getAddress();
1894 if (!Address || isa<UndefValue>(Address)) {
1895 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
1896 return true;
1897 }
1898
1899 assert(DI.getVariable()->isValidLocationForIntrinsic(
1900 MIRBuilder.getDebugLoc()) &&
1901 "Expected inlined-at fields to agree");
1902 auto AI = dyn_cast<AllocaInst>(Address);
1903 if (AI && AI->isStaticAlloca()) {
1904 // Static allocas are tracked at the MF level, no need for DBG_VALUE
1905 // instructions (in fact, they get ignored if they *do* exist).
1906 MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
1907 getOrCreateFrameIndex(*AI), DI.getDebugLoc());
1908 } else {
1909 // A dbg.declare describes the address of a source variable, so lower it
1910 // into an indirect DBG_VALUE.
1911 MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
1912 DI.getVariable(), DI.getExpression());
1913 }
1914 return true;
1915 }
1916 case Intrinsic::dbg_label: {
1917 const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
1918 assert(DI.getLabel() && "Missing label");
1919
1920 assert(DI.getLabel()->isValidLocationForIntrinsic(
1921 MIRBuilder.getDebugLoc()) &&
1922 "Expected inlined-at fields to agree");
1923
1924 MIRBuilder.buildDbgLabel(DI.getLabel());
1925 return true;
1926 }
1927 case Intrinsic::vaend:
1928 // No target I know of cares about va_end. Certainly no in-tree target
1929 // does. Simplest intrinsic ever!
1930 return true;
1931 case Intrinsic::vastart: {
1932 auto &TLI = *MF->getSubtarget().getTargetLowering();
1933 Value *Ptr = CI.getArgOperand(0);
1934 unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
1935
1936 // FIXME: Get alignment
1937 MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
1938 .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
1939 MachineMemOperand::MOStore,
1940 ListSize, Align(1)));
1941 return true;
1942 }
1943 case Intrinsic::dbg_value: {
1944 // This form of DBG_VALUE is target-independent.
1945 const DbgValueInst &DI = cast<DbgValueInst>(CI);
1946 const Value *V = DI.getValue();
1947 assert(DI.getVariable()->isValidLocationForIntrinsic(
1948 MIRBuilder.getDebugLoc()) &&
1949 "Expected inlined-at fields to agree");
1950 if (!V || DI.hasArgList()) {
1951 // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to
1952 // terminate any prior location.
1953 MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
1954 } else if (const auto *CI = dyn_cast<Constant>(V)) {
1955 MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
1956 } else {
1957 for (Register Reg : getOrCreateVRegs(*V)) {
1958 // FIXME: This does not handle register-indirect values at offset 0. The
1959 // direct/indirect thing shouldn't really be handled by something as
1960 // implicit as reg+noreg vs reg+imm in the first place, but it seems
1961 // pretty baked in right now.
1962 MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
1963 }
1964 }
1965 return true;
1966 }
1967 case Intrinsic::uadd_with_overflow:
1968 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
1969 case Intrinsic::sadd_with_overflow:
1970 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
1971 case Intrinsic::usub_with_overflow:
1972 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
1973 case Intrinsic::ssub_with_overflow:
1974 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
1975 case Intrinsic::umul_with_overflow:
1976 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
1977 case Intrinsic::smul_with_overflow:
1978 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
1979 case Intrinsic::uadd_sat:
1980 return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
1981 case Intrinsic::sadd_sat:
1982 return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
1983 case Intrinsic::usub_sat:
1984 return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
1985 case Intrinsic::ssub_sat:
1986 return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
1987 case Intrinsic::ushl_sat:
1988 return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
1989 case Intrinsic::sshl_sat:
1990 return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
1991 case Intrinsic::umin:
1992 return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
1993 case Intrinsic::umax:
1994 return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
1995 case Intrinsic::smin:
1996 return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
1997 case Intrinsic::smax:
1998 return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
1999 case Intrinsic::abs:
2000 // TODO: Preserve "int min is poison" arg in GMIR?
2001 return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
2002 case Intrinsic::smul_fix:
2003 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
2004 case Intrinsic::umul_fix:
2005 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
2006 case Intrinsic::smul_fix_sat:
2007 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
2008 case Intrinsic::umul_fix_sat:
2009 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
2010 case Intrinsic::sdiv_fix:
2011 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
2012 case Intrinsic::udiv_fix:
2013 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
2014 case Intrinsic::sdiv_fix_sat:
2015 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
2016 case Intrinsic::udiv_fix_sat:
2017 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
2018 case Intrinsic::fmuladd: {
2019 const TargetMachine &TM = MF->getTarget();
2020 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
2021 Register Dst = getOrCreateVReg(CI);
2022 Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
2023 Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
2024 Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
2025 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
2026 TLI.isFMAFasterThanFMulAndFAdd(*MF,
2027 TLI.getValueType(*DL, CI.getType()))) {
2028 // TODO: Revisit this to see if we should move this part of the
2029 // lowering to the combiner.
2030 MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
2031 MachineInstr::copyFlagsFromInstruction(CI));
2032 } else {
2033 LLT Ty = getLLTForType(*CI.getType(), *DL);
2034 auto FMul = MIRBuilder.buildFMul(
2035 Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
2036 MIRBuilder.buildFAdd(Dst, FMul, Op2,
2037 MachineInstr::copyFlagsFromInstruction(CI));
2038 }
2039 return true;
2040 }
2041 case Intrinsic::convert_from_fp16:
2042 // FIXME: This intrinsic should probably be removed from the IR.
2043 MIRBuilder.buildFPExt(getOrCreateVReg(CI),
2044 getOrCreateVReg(*CI.getArgOperand(0)),
2045 MachineInstr::copyFlagsFromInstruction(CI));
2046 return true;
2047 case Intrinsic::convert_to_fp16:
2048 // FIXME: This intrinsic should probably be removed from the IR.
2049 MIRBuilder.buildFPTrunc(getOrCreateVReg(CI),
2050 getOrCreateVReg(*CI.getArgOperand(0)),
2051 MachineInstr::copyFlagsFromInstruction(CI));
2052 return true;
2053 case Intrinsic::memcpy_inline:
2054 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
2055 case Intrinsic::memcpy:
2056 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
2057 case Intrinsic::memmove:
2058 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
2059 case Intrinsic::memset:
2060 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
2061 case Intrinsic::eh_typeid_for: {
2062 GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
2063 Register Reg = getOrCreateVReg(CI);
2064 unsigned TypeID = MF->getTypeIDFor(GV);
2065 MIRBuilder.buildConstant(Reg, TypeID);
2066 return true;
2067 }
2068 case Intrinsic::objectsize:
2069 llvm_unreachable("llvm.objectsize.* should have been lowered already");
2070
2071 case Intrinsic::is_constant:
2072 llvm_unreachable("llvm.is.constant.* should have been lowered already");
2073
2074 case Intrinsic::stackguard:
2075 getStackGuard(getOrCreateVReg(CI), MIRBuilder);
2076 return true;
2077 case Intrinsic::stackprotector: {
2078 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2079 Register GuardVal = MRI->createGenericVirtualRegister(PtrTy);
2080 getStackGuard(GuardVal, MIRBuilder);
2081
2082 AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
2083 int FI = getOrCreateFrameIndex(*Slot);
2084 MF->getFrameInfo().setStackProtectorIndex(FI);
2085
2086 MIRBuilder.buildStore(
2087 GuardVal, getOrCreateVReg(*Slot),
2088 *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
2089 MachineMemOperand::MOStore |
2090 MachineMemOperand::MOVolatile,
2091 PtrTy, Align(8)));
2092 return true;
2093 }
2094 case Intrinsic::stacksave: {
2095 // Save the stack pointer to the location provided by the intrinsic.
2096 Register Reg = getOrCreateVReg(CI);
2097 Register StackPtr = MF->getSubtarget()
2098 .getTargetLowering()
2099 ->getStackPointerRegisterToSaveRestore();
2100
2101 // If the target doesn't specify a stack pointer, then fall back.
2102 if (!StackPtr)
2103 return false;
2104
2105 MIRBuilder.buildCopy(Reg, StackPtr);
2106 return true;
2107 }
2108 case Intrinsic::stackrestore: {
2109 // Restore the stack pointer from the location provided by the intrinsic.
2110 Register Reg = getOrCreateVReg(*CI.getArgOperand(0));
2111 Register StackPtr = MF->getSubtarget()
2112 .getTargetLowering()
2113 ->getStackPointerRegisterToSaveRestore();
2114
2115 // If the target doesn't specify a stack pointer, then fall back.
2116 if (!StackPtr)
2117 return false;
2118
2119 MIRBuilder.buildCopy(StackPtr, Reg);
2120 return true;
2121 }
2122 case Intrinsic::cttz:
2123 case Intrinsic::ctlz: {
2124 ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
2125 bool isTrailing = ID == Intrinsic::cttz;
2126 unsigned Opcode = isTrailing
2127 ? Cst->isZero() ? TargetOpcode::G_CTTZ
2128 : TargetOpcode::G_CTTZ_ZERO_UNDEF
2129 : Cst->isZero() ? TargetOpcode::G_CTLZ
2130 : TargetOpcode::G_CTLZ_ZERO_UNDEF;
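    // For example (illustrative): llvm.cttz(%x, i1 false) maps to G_CTTZ,
    // while llvm.cttz(%x, i1 true), where a zero input is poison, maps to
    // G_CTTZ_ZERO_UNDEF; the ctlz cases follow the same pattern.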
2131 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
2132 {getOrCreateVReg(*CI.getArgOperand(0))});
2133 return true;
2134 }
2135 case Intrinsic::invariant_start: {
2136 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2137 Register Undef = MRI->createGenericVirtualRegister(PtrTy);
2138 MIRBuilder.buildUndef(Undef);
2139 return true;
2140 }
2141 case Intrinsic::invariant_end:
2142 return true;
2143 case Intrinsic::expect:
2144 case Intrinsic::annotation:
2145 case Intrinsic::ptr_annotation:
2146 case Intrinsic::launder_invariant_group:
2147 case Intrinsic::strip_invariant_group: {
2148 // Drop the intrinsic, but forward the value.
2149 MIRBuilder.buildCopy(getOrCreateVReg(CI),
2150 getOrCreateVReg(*CI.getArgOperand(0)));
2151 return true;
2152 }
2153 case Intrinsic::assume:
2154 case Intrinsic::experimental_noalias_scope_decl:
2155 case Intrinsic::var_annotation:
2156 case Intrinsic::sideeffect:
2157 // Discard annotate attributes, assumptions, and artificial side-effects.
2158 return true;
2159 case Intrinsic::read_volatile_register:
2160 case Intrinsic::read_register: {
2161 Value *Arg = CI.getArgOperand(0);
2162 MIRBuilder
2163 .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
2164 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
2165 return true;
2166 }
2167 case Intrinsic::write_register: {
2168 Value *Arg = CI.getArgOperand(0);
2169 MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
2170 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
2171 .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
2172 return true;
2173 }
2174 case Intrinsic::localescape: {
2175 MachineBasicBlock &EntryMBB = MF->front();
2176 StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName());
2177
2178 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
2179 // is the same on all targets.
2180 for (unsigned Idx = 0, E = CI.getNumArgOperands(); Idx < E; ++Idx) {
2181 Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts();
2182 if (isa<ConstantPointerNull>(Arg))
2183 continue; // Skip null pointers. They represent a hole in index space.
2184
2185 int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
2186 MCSymbol *FrameAllocSym =
2187 MF->getMMI().getContext().getOrCreateFrameAllocSymbol(EscapedName,
2188 Idx);
2189
2190 // This should be inserted at the start of the entry block.
2191 auto LocalEscape =
2192 MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE)
2193 .addSym(FrameAllocSym)
2194 .addFrameIndex(FI);
2195
2196 EntryMBB.insert(EntryMBB.begin(), LocalEscape);
2197 }
2198
2199 return true;
2200 }
2201 case Intrinsic::vector_reduce_fadd:
2202 case Intrinsic::vector_reduce_fmul: {
2203 // Need to check for the reassoc flag to decide whether we want a
2204 // sequential reduction opcode or not.
2205 Register Dst = getOrCreateVReg(CI);
2206 Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0));
2207 Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1));
2208 unsigned Opc = 0;
2209 if (!CI.hasAllowReassoc()) {
2210 // The sequential ordering case.
2211 Opc = ID == Intrinsic::vector_reduce_fadd
2212 ? TargetOpcode::G_VECREDUCE_SEQ_FADD
2213 : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
2214 MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
2215 MachineInstr::copyFlagsFromInstruction(CI));
2216 return true;
2217 }
2218 // We split the operation into a separate G_FADD/G_FMUL + the reduce,
2219 // since the associativity doesn't matter.
2220 unsigned ScalarOpc;
2221 if (ID == Intrinsic::vector_reduce_fadd) {
2222 Opc = TargetOpcode::G_VECREDUCE_FADD;
2223 ScalarOpc = TargetOpcode::G_FADD;
2224 } else {
2225 Opc = TargetOpcode::G_VECREDUCE_FMUL;
2226 ScalarOpc = TargetOpcode::G_FMUL;
2227 }
2228 LLT DstTy = MRI->getType(Dst);
2229 auto Rdx = MIRBuilder.buildInstr(
2230 Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI));
2231 MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
2232 MachineInstr::copyFlagsFromInstruction(CI));
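    // Illustrative example (not from the source): with the reassoc flag,
    //   %r = call reassoc float @llvm.vector.reduce.fadd(float %acc, <4 x float> %v)
    // becomes %t = G_VECREDUCE_FADD %v followed by %r = G_FADD %acc, %t.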
2233
2234 return true;
2235 }
2236#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
2237 case Intrinsic::INTRINSIC:
2238#include "llvm/IR/ConstrainedOps.def"
2239 return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
2240 MIRBuilder);
2241
2242 }
2243 return false;
2244}
2245
2246bool IRTranslator::translateInlineAsm(const CallBase &CB,
2247 MachineIRBuilder &MIRBuilder) {
2248
2249 const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
2250
2251 if (!ALI) {
2252 LLVM_DEBUG(
2253 dbgs() << "Inline asm lowering is not supported for this target yet\n");
2254 return false;
2255 }
2256
2257 return ALI->lowerInlineAsm(
2258 MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
2259}
2260
2261bool IRTranslator::translateCallBase(const CallBase &CB,
2262 MachineIRBuilder &MIRBuilder) {
2263 ArrayRef<Register> Res = getOrCreateVRegs(CB);
2264
2265 SmallVector<ArrayRef<Register>, 8> Args;
2266 Register SwiftInVReg = 0;
2267 Register SwiftErrorVReg = 0;
2268 for (auto &Arg : CB.args()) {
2269 if (CLI->supportSwiftError() && isSwiftError(Arg)) {
2270 assert(SwiftInVReg == 0 && "Expected only one swift error argument");
2271 LLT Ty = getLLTForType(*Arg->getType(), *DL);
2272 SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
2273 MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
2274 &CB, &MIRBuilder.getMBB(), Arg));
2275 Args.emplace_back(makeArrayRef(SwiftInVReg));
2276 SwiftErrorVReg =
2277 SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
2278 continue;
2279 }
2280 Args.push_back(getOrCreateVRegs(*Arg));
2281 }
2282
2283 if (auto *CI = dyn_cast<CallInst>(&CB)) {
2284 if (ORE->enabled()) {
2285 const Function &F = *CI->getParent()->getParent();
2286 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
2287 if (MemoryOpRemark::canHandle(CI, TLI)) {
2288 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, TLI);
2289 R.visit(CI);
2290 }
2291 }
2292 }
2293
2294 // We don't set HasCalls on MFI here yet because call lowering may decide to
2295 // optimize into tail calls. Instead, we defer that to selection where a final
2296 // scan is done to check if any instructions are calls.
2297 bool Success =
2298 CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
2299 [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
2300
2301 // Check if we just inserted a tail call.
2302 if (Success) {
2303 assert(!HasTailCall && "Can't tail call return twice from block?");
2304 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
2305 HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
2306 }
2307
2308 return Success;
2309}
2310
2311bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
2312 const CallInst &CI = cast<CallInst>(U);
2313 auto TII = MF->getTarget().getIntrinsicInfo();
2314 const Function *F = CI.getCalledFunction();
2315
2316 // FIXME: support Windows dllimport function calls.
2317 if (F && (F->hasDLLImportStorageClass() ||
2318 (MF->getTarget().getTargetTriple().isOSWindows() &&
2319 F->hasExternalWeakLinkage())))
2320 return false;
2321
2322 // FIXME: support control flow guard targets.
2323 if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2324 return false;
2325
2326 if (CI.isInlineAsm())
2327 return translateInlineAsm(CI, MIRBuilder);
2328
2329 if (F && F->hasFnAttribute("dontcall")) {
2330 unsigned LocCookie = 0;
2331 if (MDNode *MD = CI.getMetadata("srcloc"))
2332 LocCookie =
2333 mdconst::extract<ConstantInt>(MD->getOperand(0))->getZExtValue();
2334 DiagnosticInfoDontCall D(F->getName(), LocCookie);
2335 F->getContext().diagnose(D);
2336 }
2337
2338 Intrinsic::ID ID = Intrinsic::not_intrinsic;
2339 if (F && F->isIntrinsic()) {
2340 ID = F->getIntrinsicID();
2341 if (TII && ID == Intrinsic::not_intrinsic)
2342 ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
2343 }
2344
2345 if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
2346 return translateCallBase(CI, MIRBuilder);
2347
2348 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
2349
2350 if (translateKnownIntrinsic(CI, ID, MIRBuilder))
2351 return true;
2352
2353 ArrayRef<Register> ResultRegs;
2354 if (!CI.getType()->isVoidTy())
2355 ResultRegs = getOrCreateVRegs(CI);
2356
2357 // Ignore the callsite attributes. Backend code is most likely not expecting
2358 // an intrinsic to sometimes have side effects and sometimes not.
2359 MachineInstrBuilder MIB =
2360 MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory());
2361 if (isa<FPMathOperator>(CI))
2362 MIB->copyIRFlags(CI);
2363
2364 for (auto &Arg : enumerate(CI.arg_operands())) {
2365 // If this is required to be an immediate, don't materialize it in a
2366 // register.
2367 if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
2368 if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
2369 // imm arguments are more convenient than cimm (and realistically
2370 // probably sufficient), so use them.
2371 assert(CI->getBitWidth() <= 64 &&
2372 "large intrinsic immediates not handled");
2373 MIB.addImm(CI->getSExtValue());
2374 } else {
2375 MIB.addFPImm(cast<ConstantFP>(Arg.value()));
2376 }
2377 } else if (auto MD = dyn_cast<MetadataAsValue>(Arg.value())) {
2378 auto *MDN = dyn_cast<MDNode>(MD->getMetadata());
2379 if (!MDN) // This was probably an MDString.
2380 return false;
2381 MIB.addMetadata(MDN);
2382 } else {
2383 ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
2384 if (VRegs.size() > 1)
2385 return false;
2386 MIB.addUse(VRegs[0]);
2387 }
2388 }
2389
2390 // Add a MachineMemOperand if it is a target mem intrinsic.
2391 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
2392 TargetLowering::IntrinsicInfo Info;
2393 // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
2394 if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
2395 Align Alignment = Info.align.getValueOr(
2396 DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
2397 LLT MemTy = Info.memVT.isSimple()
2398 ? getLLTForMVT(Info.memVT.getSimpleVT())
2399 : LLT::scalar(Info.memVT.getStoreSizeInBits());
2400 MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
2401 Info.flags, MemTy, Alignment));
2402 }
2403
2404 return true;
2405}
2406
2407bool IRTranslator::findUnwindDestinations(
2408 const BasicBlock *EHPadBB,
2409 BranchProbability Prob,
2410 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2411 &UnwindDests) {
2412 EHPersonality Personality = classifyEHPersonality(
2413 EHPadBB->getParent()->getFunction().getPersonalityFn());
2414 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2415 bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2416 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2417 bool IsSEH = isAsynchronousEHPersonality(Personality);
2418
2419 if (IsWasmCXX) {
2420 // Ignore this for now.
2421 return false;
2422 }
2423
2424 while (EHPadBB) {
2425 const Instruction *Pad = EHPadBB->getFirstNonPHI();
2426 BasicBlock *NewEHPadBB = nullptr;
2427 if (isa<LandingPadInst>(Pad)) {
2428 // Stop on landingpads. They are not funclets.
2429 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2430 break;
2431 }
2432 if (isa<CleanupPadInst>(Pad)) {
2433 // Stop on cleanup pads. Cleanups are always funclet entries for all known
2434 // personalities.
2435 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2436 UnwindDests.back().first->setIsEHScopeEntry();
2437 UnwindDests.back().first->setIsEHFuncletEntry();
2438 break;
2439 }
2440 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2441 // Add the catchpad handlers to the possible destinations.
2442 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2443 UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
2444 // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2445 if (IsMSVCCXX || IsCoreCLR)
2446 UnwindDests.back().first->setIsEHFuncletEntry();
2447 if (!IsSEH)
2448 UnwindDests.back().first->setIsEHScopeEntry();
2449 }
2450 NewEHPadBB = CatchSwitch->getUnwindDest();
2451 } else {
2452 continue;
2453 }
2454
2455 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2456 if (BPI && NewEHPadBB)
2457 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2458 EHPadBB = NewEHPadBB;
2459 }
2460 return true;
2461}
2462
2463bool IRTranslator::translateInvoke(const User &U,
2464 MachineIRBuilder &MIRBuilder) {
2465 const InvokeInst &I = cast<InvokeInst>(U);
2466 MCContext &Context = MF->getContext();
2467
2468 const BasicBlock *ReturnBB = I.getSuccessor(0);
2469 const BasicBlock *EHPadBB = I.getSuccessor(1);
2470
2471 const Function *Fn = I.getCalledFunction();
2472
2473 // FIXME: support invoking patchpoint and statepoint intrinsics.
2474 if (Fn && Fn->isIntrinsic())
2475 return false;
2476
2477 // FIXME: support whatever these are.
2478 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
2479 return false;
2480
2481 // FIXME: support control flow guard targets.
2482 if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2483 return false;
2484
2485 // FIXME: support Windows exception handling.
2486 if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI()))
2487 return false;
2488
2489 bool LowerInlineAsm = false;
2490 if (I.isInlineAsm()) {
2491 const InlineAsm *IA = cast<InlineAsm>(I.getCalledOperand());
2492 if (!IA->canThrow()) {
2493 // Fast path without emitting EH_LABELs.
2494
2495 if (!translateInlineAsm(I, MIRBuilder))
2496 return false;
2497
2498 MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB(),
2499 *ReturnMBB = &getMBB(*ReturnBB);
2500
2501 // Update successor info.
2502 addSuccessorWithProb(InvokeMBB, ReturnMBB, BranchProbability::getOne());
2503
2504 MIRBuilder.buildBr(*ReturnMBB);
2505 return true;
2506 } else {
2507 LowerInlineAsm = true;
2508 }
2509 }
2510
2511 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
2512 // the region covered by the try.
2513 MCSymbol *BeginSymbol = Context.createTempSymbol();
2514 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
2515
2516 if (LowerInlineAsm) {
2517 if (!translateInlineAsm(I, MIRBuilder))
2518 return false;
2519 } else if (!translateCallBase(I, MIRBuilder))
2520 return false;
2521
2522 MCSymbol *EndSymbol = Context.createTempSymbol();
2523 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
2524
2525 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2526 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2527 MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
2528 BranchProbability EHPadBBProb =
2529 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2530 : BranchProbability::getZero();
2531
2532 if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
2533 return false;
2534
2535 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
2536 &ReturnMBB = getMBB(*ReturnBB);
2537 // Update successor info.
2538 addSuccessorWithProb(InvokeMBB, &ReturnMBB);
2539 for (auto &UnwindDest : UnwindDests) {
2540 UnwindDest.first->setIsEHPad();
2541 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2542 }
2543 InvokeMBB->normalizeSuccProbs();
2544
2545 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
2546 MIRBuilder.buildBr(ReturnMBB);
2547 return true;
2548}
2549
2550bool IRTranslator::translateCallBr(const User &U,
2551 MachineIRBuilder &MIRBuilder) {
2552 // FIXME: Implement this.
2553 return false;
2554}
2555
2556bool IRTranslator::translateLandingPad(const User &U,
2557 MachineIRBuilder &MIRBuilder) {
2558 const LandingPadInst &LP = cast<LandingPadInst>(U);
2559
2560 MachineBasicBlock &MBB = MIRBuilder.getMBB();
2561
2562 MBB.setIsEHPad();
2563
2564 // If there aren't registers to copy the values into (e.g., during SjLj
2565 // exceptions), then don't bother.
2566 auto &TLI = *MF->getSubtarget().getTargetLowering();
2567 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
2568 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2569 TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2570 return true;
2571
2572 // If landingpad's return type is token type, we don't create DAG nodes
2573 // for its exception pointer and selector value. The extraction of exception
2574 // pointer or selector value from token type landingpads is not currently
2575 // supported.
2576 if (LP.getType()->isTokenTy())
2577 return true;
2578
2579 // Add a label to mark the beginning of the landing pad. Deletion of the
2580 // landing pad can thus be detected via the MachineModuleInfo.
2581 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
2582 .addSym(MF->addLandingPad(&MBB));
2583
2584 // If the unwinder does not preserve all registers, ensure that the
2585 // function marks the clobbered registers as used.
2586 const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
2587 if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
2588 MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
2589
2590 LLT Ty = getLLTForType(*LP.getType(), *DL);
2591 Register Undef = MRI->createGenericVirtualRegister(Ty);
2592 MIRBuilder.buildUndef(Undef);
2593
2594 SmallVector<LLT, 2> Tys;
2595 for (Type *Ty : cast<StructType>(LP.getType())->elements())
2596 Tys.push_back(getLLTForType(*Ty, *DL));
2597 assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
2598
2599 // Mark exception register as live in.
2600 Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
2601 if (!ExceptionReg)
2602 return false;
2603
2604 MBB.addLiveIn(ExceptionReg);
2605 ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
2606 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
2607
2608 Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
2609 if (!SelectorReg)
2610 return false;
2611
2612 MBB.addLiveIn(SelectorReg);
2613 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
2614 MIRBuilder.buildCopy(PtrVReg, SelectorReg);
2615 MIRBuilder.buildCast(ResRegs[1], PtrVReg);
2616
2617 return true;
2618}
2619
2620bool IRTranslator::translateAlloca(const User &U,
2621 MachineIRBuilder &MIRBuilder) {
2622 auto &AI = cast<AllocaInst>(U);
2623
2624 if (AI.isSwiftError())
2625 return true;
2626
2627 if (AI.isStaticAlloca()) {
2628 Register Res = getOrCreateVReg(AI);
2629 int FI = getOrCreateFrameIndex(AI);
2630 MIRBuilder.buildFrameIndex(Res, FI);
2631 return true;
2632 }
2633
2634 // FIXME: support stack probing for Windows.
2635 if (MF->getTarget().getTargetTriple().isOSWindows())
2636 return false;
2637
2638 // Now we're in the harder dynamic case.
2639 Register NumElts = getOrCreateVReg(*AI.getArraySize());
2640 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
2641 LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
2642 if (MRI->getType(NumElts) != IntPtrTy) {
2643 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
2644 MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
2645 NumElts = ExtElts;
2646 }
2647
2648 Type *Ty = AI.getAllocatedType();
2649
2650 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
2651 Register TySize =
2652 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
2653 MIRBuilder.buildMul(AllocSize, NumElts, TySize);
2654
2655 // Round the size of the allocation up to the stack alignment size
2656 // by adding SA-1 to the size. This doesn't overflow because we're
2657 // computing an address inside an alloca.
2658 Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
2659 auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
2660 auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
2661 MachineInstr::NoUWrap);
2662 auto AlignCst =
2663 MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
2664 auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
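  // Worked example (illustrative): with a 16-byte stack alignment and an
  // AllocSize of 20, this computes (20 + 15) & ~15 == 32, rounding the size
  // up to the next multiple of the stack alignment.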
2665
2666 Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
2667 if (Alignment <= StackAlign)
2668 Alignment = Align(1);
2669 MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);
2670
2671 MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
2672 assert(MF->getFrameInfo().hasVarSizedObjects());
2673 return true;
2674}
2675
2676bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
2677 // FIXME: We may need more info about the type. Because of how LLT works,
2678 // we're completely discarding the i64/double distinction here (amongst
2679 // others). Fortunately the ABIs I know of where that matters don't use va_arg
2680 // anyway but that's not guaranteed.
2681 MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
2682 {getOrCreateVReg(*U.getOperand(0)),
2683 DL->getABITypeAlign(U.getType()).value()});
2684 return true;
2685}
2686
2687bool IRTranslator::translateInsertElement(const User &U,
2688 MachineIRBuilder &MIRBuilder) {
2689 // If it is a <1 x Ty> vector, use the scalar directly, since <1 x Ty>
2690 // is not a legal vector type in LLT.
2691 if (cast<FixedVectorType>(U.getType())->getNumElements() == 1)
2692 return translateCopy(U, *U.getOperand(1), MIRBuilder);
2693
2694 Register Res = getOrCreateVReg(U);
2695 Register Val = getOrCreateVReg(*U.getOperand(0));
2696 Register Elt = getOrCreateVReg(*U.getOperand(1));
2697 Register Idx = getOrCreateVReg(*U.getOperand(2));
2698 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
2699 return true;
2700}
2701
2702bool IRTranslator::translateExtractElement(const User &U,
2703 MachineIRBuilder &MIRBuilder) {
2704 // If it is a <1 x Ty> vector, use the scalar directly, since <1 x Ty>
2705 // is not a legal vector type in LLT.
2706 if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
2707 return translateCopy(U, *U.getOperand(0), MIRBuilder);
2708
2709 Register Res = getOrCreateVReg(U);
2710 Register Val = getOrCreateVReg(*U.getOperand(0));
2711 const auto &TLI = *MF->getSubtarget().getTargetLowering();
2712 unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
2713 Register Idx;
2714 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
2715 if (CI->getBitWidth() != PreferredVecIdxWidth) {
2716 APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
2717 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
2718 Idx = getOrCreateVReg(*NewIdxCI);
2719 }
2720 }
2721 if (!Idx)
2722 Idx = getOrCreateVReg(*U.getOperand(1));
2723 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
2724 const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
2725 Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx).getReg(0);
2726 }
2727 MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
2728 return true;
2729}
2730
2731bool IRTranslator::translateShuffleVector(const User &U,
2732 MachineIRBuilder &MIRBuilder) {
2733 ArrayRef<int> Mask;
2734 if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
2735 Mask = SVI->getShuffleMask();
2736 else
2737 Mask = cast<ConstantExpr>(U).getShuffleMask();
2738 ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
2739 MIRBuilder
2740 .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
2741 {getOrCreateVReg(*U.getOperand(0)),
2742 getOrCreateVReg(*U.getOperand(1))})
2743 .addShuffleMask(MaskAlloc);
2744 return true;
2745}
2746
2747bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
2748 const PHINode &PI = cast<PHINode>(U);
2749
2750 SmallVector<MachineInstr *, 4> Insts;
2751 for (auto Reg : getOrCreateVRegs(PI)) {
2752 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
2753 Insts.push_back(MIB.getInstr());
2754 }
2755
2756 PendingPHIs.emplace_back(&PI, std::move(Insts));
2757 return true;
2758}
2759
2760bool IRTranslator::translateAtomicCmpXchg(const User &U,
2761 MachineIRBuilder &MIRBuilder) {
2762 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
2763
2764 auto &TLI = *MF->getSubtarget().getTargetLowering();
2765 auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
2766
2767 auto Res = getOrCreateVRegs(I);
2768 Register OldValRes = Res[0];
2769 Register SuccessRes = Res[1];
2770 Register Addr = getOrCreateVReg(*I.getPointerOperand());
2771 Register Cmp = getOrCreateVReg(*I.getCompareOperand());
2772 Register NewVal = getOrCreateVReg(*I.getNewValOperand());
2773
2774 AAMDNodes AAMetadata;
2775 I.getAAMetadata(AAMetadata);
2776
2777 MIRBuilder.buildAtomicCmpXchgWithSuccess(
2778 OldValRes, SuccessRes, Addr, Cmp, NewVal,
2779 *MF->getMachineMemOperand(
2780 MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
2781 getMemOpAlign(I), AAMetadata, nullptr, I.getSyncScopeID(),
2782 I.getSuccessOrdering(), I.getFailureOrdering()));
2783 return true;
2784}
2785
2786bool IRTranslator::translateAtomicRMW(const User &U,
2787 MachineIRBuilder &MIRBuilder) {
2788 const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
2789 auto &TLI = *MF->getSubtarget().getTargetLowering();
2790 auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
2791
2792 Register Res = getOrCreateVReg(I);
2793 Register Addr = getOrCreateVReg(*I.getPointerOperand());
2794 Register Val = getOrCreateVReg(*I.getValOperand());
2795
2796 unsigned Opcode = 0;
2797 switch (I.getOperation()) {
2798 default:
2799 return false;
2800 case AtomicRMWInst::Xchg:
2801 Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
2802 break;
2803 case AtomicRMWInst::Add:
2804 Opcode = TargetOpcode::G_ATOMICRMW_ADD;
2805 break;
2806 case AtomicRMWInst::Sub:
2807 Opcode = TargetOpcode::G_ATOMICRMW_SUB;
2808 break;
2809 case AtomicRMWInst::And:
2810 Opcode = TargetOpcode::G_ATOMICRMW_AND;
2811 break;
2812 case AtomicRMWInst::Nand:
2813 Opcode = TargetOpcode::G_ATOMICRMW_NAND;
2814 break;
2815 case AtomicRMWInst::Or:
2816 Opcode = TargetOpcode::G_ATOMICRMW_OR;
2817 break;
2818 case AtomicRMWInst::Xor:
2819 Opcode = TargetOpcode::G_ATOMICRMW_XOR;
2820 break;
2821 case AtomicRMWInst::Max:
2822 Opcode = TargetOpcode::G_ATOMICRMW_MAX;
2823 break;
2824 case AtomicRMWInst::Min:
2825 Opcode = TargetOpcode::G_ATOMICRMW_MIN;
2826 break;
2827 case AtomicRMWInst::UMax:
2828 Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
2829 break;
2830 case AtomicRMWInst::UMin:
2831 Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
2832 break;
2833 case AtomicRMWInst::FAdd:
2834 Opcode = TargetOpcode::G_ATOMICRMW_FADD;
2835 break;
2836 case AtomicRMWInst::FSub:
2837 Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
2838 break;
2839 }
2840
2841 AAMDNodes AAMetadata;
2842 I.getAAMetadata(AAMetadata);
2843
2844 MIRBuilder.buildAtomicRMW(
2845 Opcode, Res, Addr, Val,
2846 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
2847 Flags, MRI->getType(Val), getMemOpAlign(I),
2848 AAMetadata, nullptr, I.getSyncScopeID(),
2849 I.getOrdering()));
2850 return true;
2851}
2852
2853bool IRTranslator::translateFence(const User &U,
2854 MachineIRBuilder &MIRBuilder) {
2855 const FenceInst &Fence = cast<FenceInst>(U);
2856 MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
2857 Fence.getSyncScopeID());
2858 return true;
2859}
2860
2861bool IRTranslator::translateFreeze(const User &U,
2862 MachineIRBuilder &MIRBuilder) {
2863 const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
2864 const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));
2865
2866 assert(DstRegs.size() == SrcRegs.size() &&
2867 "Freeze with different source and destination type?");
2868
2869 for (unsigned I = 0; I < DstRegs.size(); ++I) {
2870 MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
2871 }
2872
2873 return true;
2874}
2875
2876void IRTranslator::finishPendingPhis() {
2877#ifndef NDEBUG
2878 DILocationVerifier Verifier;
2879 GISelObserverWrapper WrapperObserver(&Verifier);
2880 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
2881#endif // ifndef NDEBUG
2882 for (auto &Phi : PendingPHIs) {
2883 const PHINode *PI = Phi.first;
2884 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
2885 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
2886 EntryBuilder->setDebugLoc(PI->getDebugLoc());
2887#ifndef NDEBUG
2888 Verifier.setCurrentInst(PI);
2889#endif // ifndef NDEBUG
2890
2891 SmallSet<const MachineBasicBlock *, 16> SeenPreds;
2892 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
2893 auto IRPred = PI->getIncomingBlock(i);
2894 ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
2895 for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
2896 if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
2897 continue;
2898 SeenPreds.insert(Pred);
2899 for (unsigned j = 0; j < ValRegs.size(); ++j) {
2900 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
2901 MIB.addUse(ValRegs[j]);
2902 MIB.addMBB(Pred);
2903 }
2904 }
2905 }
2906 }
2907}
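
For illustration: finishPendingPhis runs only after every MachineBasicBlock for the function has been created, because a PHI's incoming operands name predecessor blocks and values that may not exist yet when the PHI itself is translated. A minimal standalone sketch of the same stub-then-fill pattern follows (hypothetical names, not LLVM code):

// Simplified, self-contained illustration of the deferred PHI-filling pattern:
// create empty PHI stubs while blocks are still being built, then add
// (value, predecessor) pairs once every block exists.
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

struct PhiStub {
  std::string Block;                                  // block owning the PHI
  std::vector<std::pair<int, std::string>> Incoming;  // (value, pred) pairs
};

int main() {
  std::vector<PhiStub> Pending;
  // Phase 1: translation creates stubs with no operands yet.
  Pending.push_back({"loop.header", {}});
  // Phase 2: after all blocks are known, fill in the incoming edges.
  for (PhiStub &P : Pending) {
    P.Incoming.push_back({0, "entry"});
    P.Incoming.push_back({1, "loop.latch"});
  }
  for (const PhiStub &P : Pending)
    for (const auto &In : P.Incoming)
      std::printf("%s: value %d from %s\n", P.Block.c_str(), In.first,
                  In.second.c_str());
  return 0;
}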
2908
2909bool IRTranslator::valueIsSplit(const Value &V,
2910 SmallVectorImpl<uint64_t> *Offsets) {
2911 SmallVector<LLT, 4> SplitTys;
2912 if (Offsets && !Offsets->empty())
2913 Offsets->clear();
2914 computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
2915 return SplitTys.size() > 1;
2916}
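
For illustration: valueIsSplit asks computeValueLLTs how many low-level pieces the value's type flattens into; aggregates split into several pieces and, when requested, the offset of each piece is recorded in Offsets. A standalone sketch of flattening a struct into member offsets, assuming natural alignment as in a typical 64-bit data layout (hypothetical helper, not the LLVM implementation):

// Standalone illustration of flattening an aggregate into member offsets,
// assuming naturally aligned members. For { i32, i64 } it prints 0 and 8;
// a value with more than one piece is "split", a plain i32 is not.
#include <cstdint>
#include <cstdio>
#include <vector>

struct Member { uint64_t Size, Align; };

static std::vector<uint64_t> memberOffsets(const std::vector<Member> &Members) {
  std::vector<uint64_t> Offsets;
  uint64_t Offset = 0;
  for (const Member &M : Members) {
    Offset = (Offset + M.Align - 1) / M.Align * M.Align;  // round up to alignment
    Offsets.push_back(Offset);
    Offset += M.Size;
  }
  return Offsets;
}

int main() {
  std::vector<Member> StructI32I64 = {{4, 4}, {8, 8}};
  for (uint64_t O : memberOffsets(StructI32I64))
    std::printf("offset %llu\n", (unsigned long long)O);
  return 0;
}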
2917
2918bool IRTranslator::translate(const Instruction &Inst) {
2919 CurBuilder->setDebugLoc(Inst.getDebugLoc());
2920
2921 auto &TLI = *MF->getSubtarget().getTargetLowering();
2922 if (TLI.fallBackToDAGISel(Inst))
2923 return false;
2924
2925 switch (Inst.getOpcode()) {
2926#define HANDLE_INST(NUM, OPCODE, CLASS) \
2927 case Instruction::OPCODE: \
2928 return translate##OPCODE(Inst, *CurBuilder.get());
2929#include "llvm/IR/Instruction.def"
2930 default:
2931 return false;
2932 }
2933}
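
For illustration: the switch body above is produced by the HANDLE_INST X-macro; defining HANDLE_INST and then including Instruction.def (listed later in this report) stamps out one case per IR opcode that forwards to the matching translate##OPCODE method. A self-contained sketch of the same X-macro dispatch technique, with a made-up opcode list inlined as a macro instead of a .def file:

// Self-contained sketch of X-macro dispatch (MY_OPCODES and the handler names
// are made up). In LLVM the list lives in Instruction.def and is pulled in
// with #include after defining HANDLE_INST.
#include <cstdio>

#define MY_OPCODES(HANDLE) \
  HANDLE(1, Add)           \
  HANDLE(2, Sub)           \
  HANDLE(3, Ret)

enum Opcode {
#define HANDLE_OP(NUM, NAME) NAME = NUM,
  MY_OPCODES(HANDLE_OP)
#undef HANDLE_OP
};

static bool translateAdd() { std::puts("translate Add"); return true; }
static bool translateSub() { std::puts("translate Sub"); return true; }
static bool translateRet() { std::puts("translate Ret"); return true; }

static bool translate(Opcode Op) {
  switch (Op) {
#define HANDLE_OP(NUM, NAME) \
  case NAME:                 \
    return translate##NAME();
    MY_OPCODES(HANDLE_OP)
#undef HANDLE_OP
  }
  return false;  // unknown opcode
}

int main() { return translate(Sub) ? 0 : 1; }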
2934
2935bool IRTranslator::translate(const Constant &C, Register Reg) {
2936 // We only emit constants into the entry block from here. To prevent jumpy
2937 // debug behaviour set the line to 0.
2938 if (auto CurrInstDL = CurBuilder->getDL())
1
Taking false branch
2939 EntryBuilder->setDebugLoc(DILocation::get(C.getContext(), 0, 0,
2940 CurrInstDL.getScope(),
2941 CurrInstDL.getInlinedAt()));
2942
2943 if (auto CI
2.1
'CI' is null
= dyn_cast<ConstantInt>(&C))
2
Assuming the object is not a 'ConstantInt'
3
Taking false branch
2944 EntryBuilder->buildConstant(Reg, *CI);
2945 else if (auto CF
4.1
'CF' is null
= dyn_cast<ConstantFP>(&C))
4
Assuming the object is not a 'ConstantFP'
5
Taking false branch
2946 EntryBuilder->buildFConstant(Reg, *CF);
2947 else if (isa<UndefValue>(C))
6
Assuming 'C' is not a 'UndefValue'
7
Taking false branch
2948 EntryBuilder->buildUndef(Reg);
2949 else if (isa<ConstantPointerNull>(C))
8
Assuming 'C' is not a 'ConstantPointerNull'
9
Taking false branch
2950 EntryBuilder->buildConstant(Reg, 0);
2951 else if (auto GV
10.1
'GV' is null
= dyn_cast<GlobalValue>(&C))
10
Assuming the object is not a 'GlobalValue'
11
Taking false branch
2952 EntryBuilder->buildGlobalValue(Reg, GV);
2953 else if (auto CAZ
12.1
'CAZ' is null
= dyn_cast<ConstantAggregateZero>(&C)) {
12
Assuming the object is not a 'ConstantAggregateZero'
13
Taking false branch
2954 if (!isa<FixedVectorType>(CAZ->getType()))
2955 return false;
2956 // Return the scalar if it is a <1 x Ty> vector.
2957 unsigned NumElts = CAZ->getElementCount().getFixedValue();
2958 if (NumElts == 1)
2959 return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder.get());
2960 SmallVector<Register, 4> Ops;
2961 for (unsigned I = 0; I < NumElts; ++I) {
2962 Constant &Elt = *CAZ->getElementValue(I);
2963 Ops.push_back(getOrCreateVReg(Elt));
2964 }
2965 EntryBuilder->buildBuildVector(Reg, Ops);
2966 } else if (auto CV
14.1
'CV' is null
= dyn_cast<ConstantDataVector>(&C)) {
14
Assuming the object is not a 'ConstantDataVector'
15
Taking false branch
2967 // Return the scalar if it is a <1 x Ty> vector.
2968 if (CV->getNumElements() == 1)
2969 return translateCopy(C, *CV->getElementAsConstant(0),
2970 *EntryBuilder.get());
2971 SmallVector<Register, 4> Ops;
2972 for (unsigned i = 0; i < CV->getNumElements(); ++i) {
2973 Constant &Elt = *CV->getElementAsConstant(i);
2974 Ops.push_back(getOrCreateVReg(Elt));
2975 }
2976 EntryBuilder->buildBuildVector(Reg, Ops);
2977 } else if (auto CE
16.1
'CE' is non-null
= dyn_cast<ConstantExpr>(&C)) {
16
Assuming the object is a 'ConstantExpr'
17
Taking true branch
2978 switch(CE->getOpcode()) {
18
Control jumps to 'case FCmp:' at line 207
2979#define HANDLE_INST(NUM, OPCODE, CLASS) \
2980 case Instruction::OPCODE: \
2981 return translate##OPCODE(*CE, *EntryBuilder.get());
2982#include "llvm/IR/Instruction.def"
2983 default:
2984 return false;
2985 }
2986 } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
2987 if (CV->getNumOperands() == 1)
2988 return translateCopy(C, *CV->getOperand(0), *EntryBuilder.get());
2989 SmallVector<Register, 4> Ops;
2990 for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
2991 Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
2992 }
2993 EntryBuilder->buildBuildVector(Reg, Ops);
2994 } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
2995 EntryBuilder->buildBlockAddress(Reg, BA);
2996 } else
2997 return false;
2998
2999 return true;
3000}
3001
3002void IRTranslator::finalizeBasicBlock() {
3003 for (auto &BTB : SL->BitTestCases) {
3004 // Emit header first, if it wasn't already emitted.
3005 if (!BTB.Emitted)
3006 emitBitTestHeader(BTB, BTB.Parent);
3007
3008 BranchProbability UnhandledProb = BTB.Prob;
3009 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
3010 UnhandledProb -= BTB.Cases[j].ExtraProb;
3011 // Set the current basic block to the mbb we wish to insert the code into
3012 MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
3013 // If all cases cover a contiguous range, it is not necessary to jump to
3014 // the default block after the last bit test fails. This is because the
3015 // range check during bit test header creation has guaranteed that every
3016 // case here doesn't go outside the range. In this case, there is no need
3017 // to perform the last bit test, as it will always be true. Instead, make
3018 // the second-to-last bit-test fall through to the target of the last bit
3019 // test, and delete the last bit test.
3020
3021 MachineBasicBlock *NextMBB;
3022 if (BTB.ContiguousRange && j + 2 == ej) {
3023 // Second-to-last bit-test with contiguous range: fall through to the
3024 // target of the final bit test.
3025 NextMBB = BTB.Cases[j + 1].TargetBB;
3026 } else if (j + 1 == ej) {
3027 // For the last bit test, fall through to Default.
3028 NextMBB = BTB.Default;
3029 } else {
3030 // Otherwise, fall through to the next bit test.
3031 NextMBB = BTB.Cases[j + 1].ThisBB;
3032 }
3033
3034 emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);
3035
3036 if (BTB.ContiguousRange && j + 2 == ej) {
3037 // We need to record the replacement phi edge here that normally
3038 // happens in emitBitTestCase before we delete the case, otherwise the
3039 // phi edge will be lost.
3040 addMachineCFGPred({BTB.Parent->getBasicBlock(),
3041 BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
3042 MBB);
3043 // Since we're not going to use the final bit test, remove it.
3044 BTB.Cases.pop_back();
3045 break;
3046 }
3047 }
3048 // This is "default" BB. We have two jumps to it. From "header" BB and from
3049 // last "case" BB, unless the latter was skipped.
3050 CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
3051 BTB.Default->getBasicBlock()};
3052 addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
3053 if (!BTB.ContiguousRange) {
3054 addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
3055 }
3056 }
3057 SL->BitTestCases.clear();
3058
3059 for (auto &JTCase : SL->JTCases) {
3060 // Emit header first, if it wasn't already emitted.
3061 if (!JTCase.first.Emitted)
3062 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
3063
3064 emitJumpTable(JTCase.second, JTCase.second.MBB);
3065 }
3066 SL->JTCases.clear();
3067
3068 for (auto &SwCase : SL->SwitchCases)
3069 emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
3070 SL->SwitchCases.clear();
3071}
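
For illustration: a bit-test cluster decides membership in a small set of case values with one shift-and-mask after the header's range check, and with a contiguous range the final test is always true once the range check passes, which is why the last BitTestCase is dropped above. A standalone sketch of the idea (made-up case values, not LLVM code):

// Membership of x in a small cluster of case values is decided with one
// shift-and-mask after the header block's range check.
#include <cstdint>
#include <cstdio>

static bool handlesCase(uint32_t X) {
  const uint32_t Low = 4;
  const uint32_t Mask = 0b1011;  // case values 4, 5 and 7, relative to Low
  uint32_t Diff = X - Low;
  if (Diff > 3)                  // range check, done once in the header block
    return false;
  return (uint32_t(1) << Diff) & Mask;  // the actual "bit test"
}

int main() {
  for (uint32_t X = 3; X <= 8; ++X)
    std::printf("%u -> %s\n", X, handlesCase(X) ? "case" : "default");
  // If the cases covered the whole range 4..7 (Mask == 0b1111), the final bit
  // test would always succeed once the range check passed, so it can be
  // removed and the previous test can fall through to its target instead.
  return 0;
}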
3072
3073void IRTranslator::finalizeFunction() {
3074 // Release the memory used by the different maps we
3075 // needed during the translation.
3076 PendingPHIs.clear();
3077 VMap.reset();
3078 FrameIndices.clear();
3079 MachinePreds.clear();
3080 // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
3081 // to avoid accessing free'd memory (in runOnMachineFunction) and to avoid
3082 // destroying it twice (in ~IRTranslator() and ~LLVMContext())
3083 EntryBuilder.reset();
3084 CurBuilder.reset();
3085 FuncInfo.clear();
3086}
3087
3088/// Returns true if a BasicBlock \p BB within a variadic function contains a
3089/// variadic musttail call.
3090static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
3091 if (!IsVarArg)
3092 return false;
3093
3094 // Walk the block backwards, because tail calls usually only appear at the end
3095 // of a block.
3096 return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) {
3097 const auto *CI = dyn_cast<CallInst>(&I);
3098 return CI && CI->isMustTailCall();
3099 });
3100}
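
For illustration: the helper scans the block back to front with std::any_of over the reverse iterators, since a musttail call, if present, sits near the terminator. A tiny standalone sketch of the same pattern (made-up instruction type, not LLVM code):

#include <algorithm>
#include <cstdio>
#include <vector>

struct Inst { bool MustTailCall; };
static bool isMustTailCall(const Inst &I) { return I.MustTailCall; }

int main() {
  std::vector<Inst> Block = {{false}, {true}, {false}};  // ..., musttail call, ret
  bool Found = std::any_of(Block.rbegin(), Block.rend(),
                           [](const Inst &I) { return isMustTailCall(I); });
  std::printf("musttail in block: %s\n", Found ? "yes" : "no");
  return 0;
}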
3101
3102bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
3103 MF = &CurMF;
3104 const Function &F = MF->getFunction();
3105 GISelCSEAnalysisWrapper &Wrapper =
3106 getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
3107 // Set the CSEConfig and run the analysis.
3108 GISelCSEInfo *CSEInfo = nullptr;
3109 TPC = &getAnalysis<TargetPassConfig>();
3110 bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
3111 ? EnableCSEInIRTranslator
3112 : TPC->isGISelCSEEnabled();
3113
3114 if (EnableCSE) {
3115 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3116 CSEInfo = &Wrapper.get(TPC->getCSEConfig());
3117 EntryBuilder->setCSEInfo(CSEInfo);
3118 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3119 CurBuilder->setCSEInfo(CSEInfo);
3120 } else {
3121 EntryBuilder = std::make_unique<MachineIRBuilder>();
3122 CurBuilder = std::make_unique<MachineIRBuilder>();
3123 }
3124 CLI = MF->getSubtarget().getCallLowering();
3125 CurBuilder->setMF(*MF);
3126 EntryBuilder->setMF(*MF);
3127 MRI = &MF->getRegInfo();
3128 DL = &F.getParent()->getDataLayout();
3129 ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
3130 const TargetMachine &TM = MF->getTarget();
3131 TM.resetTargetOptions(F);
3132 EnableOpts = OptLevel != CodeGenOpt::None && !skipFunction(F);
3133 FuncInfo.MF = MF;
3134 if (EnableOpts)
3135 FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
3136 else
3137 FuncInfo.BPI = nullptr;
3138
3139 FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
3140
3141 const auto &TLI = *MF->getSubtarget().getTargetLowering();
3142
3143 SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
3144 SL->init(TLI, TM, *DL);
3145
3146
3147
3148 assert(PendingPHIs.empty() && "stale PHIs");
3149
3150 // Targets which want to use big endian can enable it using
3151 // enableBigEndian()
3152 if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
3153 // Currently we don't properly handle big endian code.
3154 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3155 F.getSubprogram(), &F.getEntryBlock());
3156 R << "unable to translate in big endian mode";
3157 reportTranslationError(*MF, *TPC, *ORE, R);
3158 }
3159
3160 // Release the per-function state when we return, whether we succeeded or not.
3161 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
3162
3163 // Setup a separate basic-block for the arguments and constants
3164 MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
3165 MF->push_back(EntryBB);
3166 EntryBuilder->setMBB(*EntryBB);
3167
3168 DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
3169 SwiftError.setFunction(CurMF);
3170 SwiftError.createEntriesInEntryBlock(DbgLoc);
3171
3172 bool IsVarArg = F.isVarArg();
3173 bool HasMustTailInVarArgFn = false;
3174
3175 // Create all blocks, in IR order, to preserve the layout.
3176 for (const BasicBlock &BB: F) {
3177 auto *&MBB = BBToMBB[&BB];
3178
3179 MBB = MF->CreateMachineBasicBlock(&BB);
3180 MF->push_back(MBB);
3181
3182 if (BB.hasAddressTaken())
3183 MBB->setHasAddressTaken();
3184
3185 if (!HasMustTailInVarArgFn)
3186 HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
3187 }
3188
3189 MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
3190
3191 // Make our arguments/constants entry block fallthrough to the IR entry block.
3192 EntryBB->addSuccessor(&getMBB(F.front()));
3193
3194 if (CLI->fallBackToDAGISel(*MF)) {
3195 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3196 F.getSubprogram(), &F.getEntryBlock());
3197 R << "unable to lower function: " << ore::NV("Prototype", F.getType());
3198 reportTranslationError(*MF, *TPC, *ORE, R);
3199 return false;
3200 }
3201
3202 // Lower the actual args into this basic block.
3203 SmallVector<ArrayRef<Register>, 8> VRegArgs;
3204 for (const Argument &Arg: F.args()) {
3205 if (DL->getTypeStoreSize(Arg.getType()).isZero())
3206 continue; // Don't handle zero sized types.
3207 ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
3208 VRegArgs.push_back(VRegs);
3209
3210 if (Arg.hasSwiftErrorAttr()) {
3211 assert(VRegs.size() == 1 && "Too many vregs for Swift error");
3212 SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
3213 }
3214 }
3215
3216 if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs, FuncInfo)) {
3217 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3218 F.getSubprogram(), &F.getEntryBlock());
3219 R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
3220 reportTranslationError(*MF, *TPC, *ORE, R);
3221 return false;
3222 }
3223
3224 // Need to visit defs before uses when translating instructions.
3225 GISelObserverWrapper WrapperObserver;
3226 if (EnableCSE && CSEInfo)
3227 WrapperObserver.addObserver(CSEInfo);
3228 {
3229 ReversePostOrderTraversal<const Function *> RPOT(&F);
3230#ifndef NDEBUG
3231 DILocationVerifier Verifier;
3232 WrapperObserver.addObserver(&Verifier);
3233#endif // ifndef NDEBUG
3234 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
3235 RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
3236 for (const BasicBlock *BB : RPOT) {
3237 MachineBasicBlock &MBB = getMBB(*BB);
3238 // Set the insertion point of all the following translations to
3239 // the end of this basic block.
3240 CurBuilder->setMBB(MBB);
3241 HasTailCall = false;
3242 for (const Instruction &Inst : *BB) {
3243 // If we translated a tail call in the last step, then we know
3244 // everything after the call is either a return, or something that is
3245 // handled by the call itself. (E.g. a lifetime marker or assume
3246 // intrinsic.) In this case, we should stop translating the block and
3247 // move on.
3248 if (HasTailCall)
3249 break;
3250#ifndef NDEBUG
3251 Verifier.setCurrentInst(&Inst);
3252#endif // ifndef NDEBUG
3253 if (translate(Inst))
3254 continue;
3255
3256 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3257 Inst.getDebugLoc(), BB);
3258 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
3259
3260 if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
3261 std::string InstStrStorage;
3262 raw_string_ostream InstStr(InstStrStorage);
3263 InstStr << Inst;
3264
3265 R << ": '" << InstStr.str() << "'";
3266 }
3267
3268 reportTranslationError(*MF, *TPC, *ORE, R);
3269 return false;
3270 }
3271
3272 finalizeBasicBlock();
3273 }
3274#ifndef NDEBUG
3275 WrapperObserver.removeObserver(&Verifier);
3276#endif
3277 }
3278
3279 finishPendingPhis();
3280
3281 SwiftError.propagateVRegs();
3282
3283 // Merge the argument lowering and constants block with its single
3284 // successor, the LLVM-IR entry block. We want the basic block to
3285 // be maximal.
3286 assert(EntryBB->succ_size() == 1 &&
3287 "Custom BB used for lowering should have only one successor");
3288 // Get the successor of the current entry block.
3289 MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
3290 assert(NewEntryBB.pred_size() == 1 &&
3291 "LLVM-IR entry block has a predecessor!?");
3292 // Move all the instruction from the current entry block to the
3293 // new entry block.
3294 NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
3295 EntryBB->end());
3296
3297 // Update the live-in information for the new entry block.
3298 for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
3299 NewEntryBB.addLiveIn(LiveIn);
3300 NewEntryBB.sortUniqueLiveIns();
3301
3302 // Get rid of the now empty basic block.
3303 EntryBB->removeSuccessor(&NewEntryBB);
3304 MF->remove(EntryBB);
3305 MF->DeleteMachineBasicBlock(EntryBB);
3306
3307 assert(&MF->front() == &NewEntryBB &&
3308 "New entry wasn't next in the list of basic block!");
3309
3310 // Initialize stack protector information.
3311 StackProtector &SP = getAnalysis<StackProtector>();
3312 SP.copyToMachineFrameInfo(MF->getFrameInfo());
3313
3314 return false;
3315}
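
For illustration: the translation loop above visits blocks in reverse post-order so that, apart from PHI operands (which are handled separately through PendingPHIs), a value's defining block is visited before the blocks that use it. A standalone sketch of computing a reverse post-order over a small made-up CFG (not LLVM's ReversePostOrderTraversal):

// Post-order DFS, then reverse the result. For the diamond below the visit
// order is 0, then 1 and 2 in some order, then 3.
#include <algorithm>
#include <cstdio>
#include <vector>

static void postOrder(int N, const std::vector<std::vector<int>> &Succs,
                      std::vector<bool> &Seen, std::vector<int> &Out) {
  Seen[N] = true;
  for (int S : Succs[N])
    if (!Seen[S])
      postOrder(S, Succs, Seen, Out);
  Out.push_back(N);
}

int main() {
  // 0: entry -> 1, 2;  1 -> 3;  2 -> 3;  3: exit
  std::vector<std::vector<int>> Succs = {{1, 2}, {3}, {3}, {}};
  std::vector<bool> Seen(Succs.size(), false);
  std::vector<int> Order;
  postOrder(0, Succs, Seen, Order);
  std::reverse(Order.begin(), Order.end());
  for (int N : Order)
    std::printf("visit block %d\n", N);
  return 0;
}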

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/IR/Instruction.def

1//===-- llvm/Instruction.def - File that describes Instructions -*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains descriptions of the various LLVM instructions. This is
10// used as a central place for enumerating the different instructions and
11// should eventually be the place to put comments about the instructions.
12//
13//===----------------------------------------------------------------------===//
14
15// NOTE: NO INCLUDE GUARD DESIRED!
16
17// Provide definitions of macros so that users of this file do not have to
18// define everything to use it...
19//
20#ifndef FIRST_TERM_INST
21#define FIRST_TERM_INST(num)
22#endif
23#ifndef HANDLE_TERM_INST
24#ifndef HANDLE_INST
25#define HANDLE_TERM_INST(num, opcode, Class)
26#else
27#define HANDLE_TERM_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
28#endif
29#endif
30#ifndef LAST_TERM_INST
31#define LAST_TERM_INST(num)
32#endif
33
34#ifndef FIRST_UNARY_INST
35#define FIRST_UNARY_INST(num)
36#endif
37#ifndef HANDLE_UNARY_INST
38#ifndef HANDLE_INST
39#define HANDLE_UNARY_INST(num, opcode, instclass)
40#else
41#define HANDLE_UNARY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
42#endif
43#endif
44#ifndef LAST_UNARY_INST
45#define LAST_UNARY_INST(num)
46#endif
47
48#ifndef FIRST_BINARY_INST
49#define FIRST_BINARY_INST(num)
50#endif
51#ifndef HANDLE_BINARY_INST
52#ifndef HANDLE_INST
53#define HANDLE_BINARY_INST(num, opcode, instclass)
54#else
55#define HANDLE_BINARY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
56#endif
57#endif
58#ifndef LAST_BINARY_INST
59#define LAST_BINARY_INST(num)
60#endif
61
62#ifndef FIRST_MEMORY_INST
63#define FIRST_MEMORY_INST(num)
64#endif
65#ifndef HANDLE_MEMORY_INST
66#ifndef HANDLE_INST
67#define HANDLE_MEMORY_INST(num, opcode, Class)
68#else
69#define HANDLE_MEMORY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
70#endif
71#endif
72#ifndef LAST_MEMORY_INST
73#define LAST_MEMORY_INST(num)
74#endif
75
76#ifndef FIRST_CAST_INST
77#define FIRST_CAST_INST(num)
78#endif
79#ifndef HANDLE_CAST_INST
80#ifndef HANDLE_INST
81#define HANDLE_CAST_INST(num, opcode, Class)
82#else
83#define HANDLE_CAST_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
84#endif
85#endif
86#ifndef LAST_CAST_INST
87#define LAST_CAST_INST(num)
88#endif
89
90#ifndef FIRST_FUNCLETPAD_INST
91#define FIRST_FUNCLETPAD_INST(num)
92#endif
93#ifndef HANDLE_FUNCLETPAD_INST
94#ifndef HANDLE_INST
95#define HANDLE_FUNCLETPAD_INST(num, opcode, Class)
96#else
97#define HANDLE_FUNCLETPAD_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
98#endif
99#endif
100#ifndef LAST_FUNCLETPAD_INST
101#define LAST_FUNCLETPAD_INST(num)
102#endif
103
104#ifndef FIRST_OTHER_INST
105#define FIRST_OTHER_INST(num)
106#endif
107#ifndef HANDLE_OTHER_INST
108#ifndef HANDLE_INST
109#define HANDLE_OTHER_INST(num, opcode, Class)
110#else
111#define HANDLE_OTHER_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
112#endif
113#endif
114#ifndef LAST_OTHER_INST
115#define LAST_OTHER_INST(num)
116#endif
117
118#ifndef HANDLE_USER_INST
119#define HANDLE_USER_INST(num, opc, Class) HANDLE_OTHER_INST(num, opc, Class)
120#endif
121
122// Terminator Instructions - These instructions are used to terminate a basic
123// block of the program. Every basic block must end with one of these
124// instructions for it to be a well formed basic block.
125//
126 FIRST_TERM_INST ( 1)
127HANDLE_TERM_INST ( 1, Ret , ReturnInst)
128HANDLE_TERM_INST ( 2, Br , BranchInst)
129HANDLE_TERM_INST ( 3, Switch , SwitchInst)
130HANDLE_TERM_INST ( 4, IndirectBr , IndirectBrInst)
131HANDLE_TERM_INST ( 5, Invoke , InvokeInst)
132HANDLE_TERM_INST ( 6, Resume , ResumeInst)
133HANDLE_TERM_INST ( 7, Unreachable , UnreachableInst)
134HANDLE_TERM_INST ( 8, CleanupRet , CleanupReturnInst)
135HANDLE_TERM_INST ( 9, CatchRet , CatchReturnInst)
136HANDLE_TERM_INST (10, CatchSwitch , CatchSwitchInst)
137HANDLE_TERM_INST (11, CallBr , CallBrInst) // A call-site terminator
138 LAST_TERM_INST (11)
139
140// Standard unary operators...
141 FIRST_UNARY_INST(12)
142HANDLE_UNARY_INST(12, FNeg , UnaryOperator)
143 LAST_UNARY_INST(12)
144
145// Standard binary operators...
146 FIRST_BINARY_INST(13)
147HANDLE_BINARY_INST(13, Add , BinaryOperator)
148HANDLE_BINARY_INST(14, FAdd , BinaryOperator)
149HANDLE_BINARY_INST(15, Sub , BinaryOperator)
150HANDLE_BINARY_INST(16, FSub , BinaryOperator)
151HANDLE_BINARY_INST(17, Mul , BinaryOperator)
152HANDLE_BINARY_INST(18, FMul , BinaryOperator)
153HANDLE_BINARY_INST(19, UDiv , BinaryOperator)
154HANDLE_BINARY_INST(20, SDiv , BinaryOperator)
155HANDLE_BINARY_INST(21, FDiv , BinaryOperator)
156HANDLE_BINARY_INST(22, URem , BinaryOperator)
157HANDLE_BINARY_INST(23, SRem , BinaryOperator)
158HANDLE_BINARY_INST(24, FRem , BinaryOperator)
159
160// Logical operators (integer operands)
161HANDLE_BINARY_INST(25, Shl , BinaryOperator) // Shift left (logical)
162HANDLE_BINARY_INST(26, LShr , BinaryOperator) // Shift right (logical)
163HANDLE_BINARY_INST(27, AShr , BinaryOperator) // Shift right (arithmetic)
164HANDLE_BINARY_INST(28, And , BinaryOperator)
165HANDLE_BINARY_INST(29, Or , BinaryOperator)
166HANDLE_BINARY_INST(30, Xor , BinaryOperator)
167 LAST_BINARY_INST(30)
168
169// Memory operators...
170 FIRST_MEMORY_INST(31)
171HANDLE_MEMORY_INST(31, Alloca, AllocaInst) // Stack management
172HANDLE_MEMORY_INST(32, Load , LoadInst ) // Memory manipulation instrs
173HANDLE_MEMORY_INST(33, Store , StoreInst )
174HANDLE_MEMORY_INST(34, GetElementPtr, GetElementPtrInst)
175HANDLE_MEMORY_INST(35, Fence , FenceInst )
176HANDLE_MEMORY_INST(36, AtomicCmpXchg , AtomicCmpXchgInst )
177HANDLE_MEMORY_INST(37, AtomicRMW , AtomicRMWInst )
178 LAST_MEMORY_INST(37)
179
180// Cast operators ...
181// NOTE: The order matters here because CastInst::isEliminableCastPair
182// NOTE: (see Instructions.cpp) encodes a table based on this ordering.
183 FIRST_CAST_INST(38)
184HANDLE_CAST_INST(38, Trunc , TruncInst ) // Truncate integers
185HANDLE_CAST_INST(39, ZExt , ZExtInst ) // Zero extend integers
186HANDLE_CAST_INST(40, SExt , SExtInst ) // Sign extend integers
187HANDLE_CAST_INST(41, FPToUI , FPToUIInst ) // floating point -> UInt
188HANDLE_CAST_INST(42, FPToSI , FPToSIInst ) // floating point -> SInt
189HANDLE_CAST_INST(43, UIToFP , UIToFPInst ) // UInt -> floating point
190HANDLE_CAST_INST(44, SIToFP , SIToFPInst ) // SInt -> floating point
191HANDLE_CAST_INST(45, FPTrunc , FPTruncInst ) // Truncate floating point
192HANDLE_CAST_INST(46, FPExt , FPExtInst ) // Extend floating point
193HANDLE_CAST_INST(47, PtrToInt, PtrToIntInst) // Pointer -> Integer
194HANDLE_CAST_INST(48, IntToPtr, IntToPtrInst) // Integer -> Pointer
195HANDLE_CAST_INST(49, BitCast , BitCastInst ) // Type cast
196HANDLE_CAST_INST(50, AddrSpaceCast, AddrSpaceCastInst) // addrspace cast
197 LAST_CAST_INST(50)
198
199 FIRST_FUNCLETPAD_INST(51)
200HANDLE_FUNCLETPAD_INST(51, CleanupPad, CleanupPadInst)
201HANDLE_FUNCLETPAD_INST(52, CatchPad , CatchPadInst)
202 LAST_FUNCLETPAD_INST(52)
203
204// Other operators...
205 FIRST_OTHER_INST(53)
206HANDLE_OTHER_INST(53, ICmp , ICmpInst ) // Integer comparison instruction
207HANDLE_OTHER_INST(54, FCmp , FCmpInst ) // Floating point comparison instr.
19
Calling 'IRTranslator::translateFCmp'
208HANDLE_OTHER_INST(55, PHI , PHINode ) // PHI node instruction
209HANDLE_OTHER_INST(56, Call , CallInst ) // Call a function
210HANDLE_OTHER_INST(57, Select , SelectInst ) // select instruction
211HANDLE_USER_INST (58, UserOp1, Instruction) // May be used internally in a pass
212HANDLE_USER_INST (59, UserOp2, Instruction) // Internal to passes only
213HANDLE_OTHER_INST(60, VAArg , VAArgInst ) // vaarg instruction
214HANDLE_OTHER_INST(61, ExtractElement, ExtractElementInst)// extract from vector
215HANDLE_OTHER_INST(62, InsertElement, InsertElementInst) // insert into vector
216HANDLE_OTHER_INST(63, ShuffleVector, ShuffleVectorInst) // shuffle two vectors.
217HANDLE_OTHER_INST(64, ExtractValue, ExtractValueInst)// extract from aggregate
218HANDLE_OTHER_INST(65, InsertValue, InsertValueInst) // insert into aggregate
219HANDLE_OTHER_INST(66, LandingPad, LandingPadInst) // Landing pad instruction.
220HANDLE_OTHER_INST(67, Freeze, FreezeInst) // Freeze instruction.
221 LAST_OTHER_INST(67)
222
223#undef FIRST_TERM_INST
224#undef HANDLE_TERM_INST
225#undef LAST_TERM_INST
226
227#undef FIRST_UNARY_INST
228#undef HANDLE_UNARY_INST
229#undef LAST_UNARY_INST
230
231#undef FIRST_BINARY_INST
232#undef HANDLE_BINARY_INST
233#undef LAST_BINARY_INST
234
235#undef FIRST_MEMORY_INST
236#undef HANDLE_MEMORY_INST
237#undef LAST_MEMORY_INST
238
239#undef FIRST_CAST_INST
240#undef HANDLE_CAST_INST
241#undef LAST_CAST_INST
242
243#undef FIRST_FUNCLETPAD_INST
244#undef HANDLE_FUNCLETPAD_INST
245#undef LAST_FUNCLETPAD_INST
246
247#undef FIRST_OTHER_INST
248#undef HANDLE_OTHER_INST
249#undef LAST_OTHER_INST
250
251#undef HANDLE_USER_INST
252
253#ifdef HANDLE_INST
254#undef HANDLE_INST
255#endif

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h

1//===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file declares the IRTranslator pass.
10/// This pass is responsible for translating LLVM IR into MachineInstr.
11/// It uses target hooks to lower the ABI but aside from that, the pass
12/// generated code is generic. This is the default translator used for
13/// GlobalISel.
14///
15/// \todo Replace the comments with actual doxygen comments.
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
19#define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
20
21#include "llvm/ADT/DenseMap.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/CodeGen/FunctionLoweringInfo.h"
24#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
25#include "llvm/CodeGen/MachineFunctionPass.h"
26#include "llvm/CodeGen/SwiftErrorValueTracking.h"
27#include "llvm/CodeGen/SwitchLoweringUtils.h"
28#include "llvm/IR/Intrinsics.h"
29#include "llvm/Support/Allocator.h"
30#include "llvm/Support/CodeGen.h"
31#include <memory>
32#include <utility>
33
34namespace llvm {
35
36class AllocaInst;
37class BasicBlock;
38class CallInst;
39class CallLowering;
40class Constant;
41class ConstrainedFPIntrinsic;
42class DataLayout;
43class Instruction;
44class MachineBasicBlock;
45class MachineFunction;
46class MachineInstr;
47class MachineRegisterInfo;
48class OptimizationRemarkEmitter;
49class PHINode;
50class TargetPassConfig;
51class User;
52class Value;
53
54 // Technically the pass should run on a hypothetical MachineModule,
55// since it should translate Global into some sort of MachineGlobal.
56// The MachineGlobal should ultimately just be a transfer of ownership of
57// the interesting bits that are relevant to represent a global value.
58 // That being said, we could investigate what it would cost to just duplicate
59// the information from the LLVM IR.
60// The idea is that ultimately we would be able to free up the memory used
61// by the LLVM IR as soon as the translation is over.
62class IRTranslator : public MachineFunctionPass {
63public:
64 static char ID;
65
66private:
67 /// Interface used to lower everything related to calls.
68 const CallLowering *CLI;
69
70 /// This class contains the mapping between the Values to vreg related data.
71 class ValueToVRegInfo {
72 public:
73 ValueToVRegInfo() = default;
74
75 using VRegListT = SmallVector<Register, 1>;
76 using OffsetListT = SmallVector<uint64_t, 1>;
77
78 using const_vreg_iterator =
79 DenseMap<const Value *, VRegListT *>::const_iterator;
80 using const_offset_iterator =
81 DenseMap<const Value *, OffsetListT *>::const_iterator;
82
83 inline const_vreg_iterator vregs_end() const { return ValToVRegs.end(); }
84
85 VRegListT *getVRegs(const Value &V) {
86 auto It = ValToVRegs.find(&V);
87 if (It != ValToVRegs.end())
88 return It->second;
89
90 return insertVRegs(V);
91 }
92
93 OffsetListT *getOffsets(const Value &V) {
94 auto It = TypeToOffsets.find(V.getType());
95 if (It != TypeToOffsets.end())
96 return It->second;
97
98 return insertOffsets(V);
99 }
100
101 const_vreg_iterator findVRegs(const Value &V) const {
102 return ValToVRegs.find(&V);
103 }
104
105 bool contains(const Value &V) const {
106 return ValToVRegs.find(&V) != ValToVRegs.end();
107 }
108
109 void reset() {
110 ValToVRegs.clear();
111 TypeToOffsets.clear();
112 VRegAlloc.DestroyAll();
113 OffsetAlloc.DestroyAll();
114 }
115
116 private:
117 VRegListT *insertVRegs(const Value &V) {
118 assert(ValToVRegs.find(&V) == ValToVRegs.end() && "Value already exists");
119
120 // We placement new using our fast allocator since we never try to free
121 // the vectors until translation is finished.
122 auto *VRegList = new (VRegAlloc.Allocate()) VRegListT();
123 ValToVRegs[&V] = VRegList;
124 return VRegList;
125 }
126
127 OffsetListT *insertOffsets(const Value &V) {
128 assert(TypeToOffsets.find(V.getType()) == TypeToOffsets.end() &&
129 "Type already exists");
130
131 auto *OffsetList = new (OffsetAlloc.Allocate()) OffsetListT();
132 TypeToOffsets[V.getType()] = OffsetList;
133 return OffsetList;
134 }
135 SpecificBumpPtrAllocator<VRegListT> VRegAlloc;
136 SpecificBumpPtrAllocator<OffsetListT> OffsetAlloc;
137
138 // We store pointers to vectors here since references may be invalidated
139 // while we hold them if we stored the vectors directly.
140 DenseMap<const Value *, VRegListT*> ValToVRegs;
141 DenseMap<const Type *, OffsetListT*> TypeToOffsets;
142 };
143
144 /// Mapping of the values of the current LLVM IR function to the related
145 /// virtual registers and offsets.
146 ValueToVRegInfo VMap;
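
For illustration: ValueToVRegInfo stores pointers to bump-allocated vectors rather than the vectors themselves, so that a list handed out by getVRegs stays valid while later insertions grow the maps (see the comment inside the class). A standalone sketch of that pattern, with std::deque standing in for the bump-pointer allocator because it never relocates elements already constructed (hypothetical names, not LLVM code):

#include <cstdio>
#include <deque>
#include <map>
#include <string>
#include <vector>

using VRegList = std::vector<int>;

int main() {
  std::deque<VRegList> Arena;                  // stable storage for the lists
  std::map<std::string, VRegList *> ValToVRegs;

  Arena.push_back({1, 2});
  ValToVRegs["%x"] = &Arena.back();
  VRegList *Held = ValToVRegs["%x"];           // pointer kept across inserts

  for (int I = 0; I < 1000; ++I) {             // many more values get added
    Arena.push_back({I});
    ValToVRegs["%tmp" + std::to_string(I)] = &Arena.back();
  }

  // Held is still valid because the deque never relocated the first list.
  std::printf("%%x has %zu vregs\n", Held->size());
  return 0;
}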
147
148 // N.b. it's not completely obvious that this will be sufficient for every
149 // LLVM IR construct (with "invoke" being the obvious candidate to mess up
150 // our lives).
151 DenseMap<const BasicBlock *, MachineBasicBlock *> BBToMBB;
152
153 // One BasicBlock can be translated to multiple MachineBasicBlocks. For such
154 // BasicBlocks translated to multiple MachineBasicBlocks, MachinePreds retains
155 // a mapping between the edges arriving at the BasicBlock to the corresponding
156 // created MachineBasicBlocks. Some BasicBlocks that get translated to a
157 // single MachineBasicBlock may also end up in this Map.
158 using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>;
159 DenseMap<CFGEdge, SmallVector<MachineBasicBlock *, 1>> MachinePreds;
160
161 // List of stubbed PHI instructions, for values and basic blocks to be filled
162 // in once all MachineBasicBlocks have been created.
163 SmallVector<std::pair<const PHINode *, SmallVector<MachineInstr *, 1>>, 4>
164 PendingPHIs;
165
166 /// Record of what frame index has been allocated to specified allocas for
167 /// this function.
168 DenseMap<const AllocaInst *, int> FrameIndices;
169
170 SwiftErrorValueTracking SwiftError;
171
172 /// \name Methods for translating from LLVM IR to MachineInstr.
173 /// \see ::translate for general information on the translate methods.
174 /// @{
175
176 /// Translate \p Inst into its corresponding MachineInstr instruction(s).
177 /// Insert the newly translated instruction(s) right where the CurBuilder
178 /// is set.
179 ///
180 /// The general algorithm is:
181 /// 1. Look for a virtual register for each operand or
182 /// create one.
183 /// 2. Update the VMap accordingly.
184 /// 2.alt. For constant arguments, if they are compile time constants,
185 /// produce an immediate in the right operand and do not touch
186 /// ValToReg. Actually we will go with a virtual register for each
187 /// constant because it may be expensive to actually materialize the
188 /// constant. Moreover, if the constant spans several instructions,
189 /// CSE may not catch them.
190 /// => Update ValToVReg and remember that we saw a constant in Constants.
191 /// We will materialize all the constants in finalize.
192 /// Note: we would need to do something so that we can recognize such operand
193 /// as constants.
194 /// 3. Create the generic instruction.
195 ///
196 /// \return true if the translation succeeded.
197 bool translate(const Instruction &Inst);
198
199 /// Materialize \p C into virtual-register \p Reg. The generic instructions
200 /// performing this materialization will be inserted into the entry block of
201 /// the function.
202 ///
203 /// \return true if the materialization succeeded.
204 bool translate(const Constant &C, Register Reg);
205
206 // Translate U as a copy of V.
207 bool translateCopy(const User &U, const Value &V,
208 MachineIRBuilder &MIRBuilder);
209
210 /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
211 /// emitted.
212 bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);
213
214 /// Translate an LLVM load instruction into generic IR.
215 bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);
216
217 /// Translate an LLVM store instruction into generic IR.
218 bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);
219
220 /// Translate an LLVM string intrinsic (memcpy, memset, ...).
221 bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
222 unsigned Opcode);
223
224 void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);
225
226 bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
227 MachineIRBuilder &MIRBuilder);
228 bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
229 MachineIRBuilder &MIRBuilder);
230
231 /// Helper function for translateSimpleIntrinsic.
232 /// \return The generic opcode for \p IntrinsicID if \p IntrinsicID is a
233 /// simple intrinsic (ceil, fabs, etc.). Otherwise, returns
234 /// Intrinsic::not_intrinsic.
235 unsigned getSimpleIntrinsicOpcode(Intrinsic::ID ID);
236
237 /// Translates the intrinsics defined in getSimpleIntrinsicOpcode.
238 /// \return true if the translation succeeded.
239 bool translateSimpleIntrinsic(const CallInst &CI, Intrinsic::ID ID,
240 MachineIRBuilder &MIRBuilder);
241
242 bool translateConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI,
243 MachineIRBuilder &MIRBuilder);
244
245 bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
246 MachineIRBuilder &MIRBuilder);
247
248 bool translateInlineAsm(const CallBase &CB, MachineIRBuilder &MIRBuilder);
249
250 /// Returns true if the value should be split into multiple LLTs.
251 /// If \p Offsets is given then the split type's offsets will be stored in it.
252 /// If \p Offsets is not empty it will be cleared first.
253 bool valueIsSplit(const Value &V,
254 SmallVectorImpl<uint64_t> *Offsets = nullptr);
255
256 /// Common code for translating normal calls or invokes.
257 bool translateCallBase(const CallBase &CB, MachineIRBuilder &MIRBuilder);
258
259 /// Translate call instruction.
260 /// \pre \p U is a call instruction.
261 bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);
262
263 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
264 /// many places it could ultimately go. In the IR, we have a single unwind
265 /// destination, but in the machine CFG, we enumerate all the possible blocks.
266 /// This function skips over imaginary basic blocks that hold catchswitch
267 /// instructions, and finds all the "real" machine
268 /// basic block destinations. As those destinations may not be successors of
269 /// EHPadBB, here we also calculate the edge probability to those
270 /// destinations. The passed-in Prob is the edge probability to EHPadBB.
271 bool findUnwindDestinations(
272 const BasicBlock *EHPadBB, BranchProbability Prob,
273 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
274 &UnwindDests);
275
276 bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);
277
278 bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);
279
280 bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);
281
282 /// Translate one of LLVM's cast instructions into MachineInstrs, with the
283 /// given generic Opcode.
284 bool translateCast(unsigned Opcode, const User &U,
285 MachineIRBuilder &MIRBuilder);
286
287 /// Translate a phi instruction.
288 bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);
289
290 /// Translate a comparison (icmp or fcmp) instruction or constant.
291 bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);
292
293 /// Translate an integer compare instruction (or constant).
294 bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
295 return translateCompare(U, MIRBuilder);
296 }
297
298 /// Translate a floating-point compare instruction (or constant).
299 bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
300 return translateCompare(U, MIRBuilder);
20
Calling 'IRTranslator::translateCompare'
301 }
302
303 /// Add remaining operands onto phis we've translated. Executed after all
304 /// MachineBasicBlocks for the function have been created.
305 void finishPendingPhis();
306
307 /// Translate \p Inst into a unary operation \p Opcode.
308 /// \pre \p U is a unary operation.
309 bool translateUnaryOp(unsigned Opcode, const User &U,
310 MachineIRBuilder &MIRBuilder);
311
312 /// Translate \p Inst into a binary operation \p Opcode.
313 /// \pre \p U is a binary operation.
314 bool translateBinaryOp(unsigned Opcode, const User &U,
315 MachineIRBuilder &MIRBuilder);
316
317 /// If the set of cases should be emitted as a series of branches, return
318 /// true. If we should emit this as a bunch of and/or'd together conditions,
319 /// return false.
320 bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
321 /// Helper method for findMergedConditions.
322 /// This function emits a branch and is used at the leaves of an OR or an
323 /// AND operator tree.
324 void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
325 MachineBasicBlock *FBB,
326 MachineBasicBlock *CurBB,
327 MachineBasicBlock *SwitchBB,
328 BranchProbability TProb,
329 BranchProbability FProb, bool InvertCond);
330 /// Used during condbr translation to find trees of conditions that can be
331 /// optimized.
332 void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
333 MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
334 MachineBasicBlock *SwitchBB,
335 Instruction::BinaryOps Opc, BranchProbability TProb,
336 BranchProbability FProb, bool InvertCond);
337
338 /// Translate branch (br) instruction.
339 /// \pre \p U is a branch instruction.
340 bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);
341
342 // Begin switch lowering functions.
343 bool emitJumpTableHeader(SwitchCG::JumpTable &JT,
344 SwitchCG::JumpTableHeader &JTH,
345 MachineBasicBlock *HeaderBB);
346 void emitJumpTable(SwitchCG::JumpTable &JT, MachineBasicBlock *MBB);
347
348 void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
349 MachineIRBuilder &MIB);
350
351 /// Generate code for the BitTest header block, which precedes each sequence of
352 /// BitTestCases.
353 void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
354 MachineBasicBlock *SwitchMBB);
355 /// Generate code to produce one "bit test" for a given BitTestCase \p B.
356 void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
357 BranchProbability BranchProbToNext, Register Reg,
358 SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
359
360 bool lowerJumpTableWorkItem(
361 SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
362 MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
363 MachineIRBuilder &MIB, MachineFunction::iterator BBI,
364 BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
365 MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);
366
367 bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
368 MachineBasicBlock *Fallthrough,
369 bool FallthroughUnreachable,
370 BranchProbability UnhandledProbs,
371 MachineBasicBlock *CurMBB,
372 MachineIRBuilder &MIB,
373 MachineBasicBlock *SwitchMBB);
374
375 bool lowerBitTestWorkItem(
376 SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
377 MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
378 MachineIRBuilder &MIB, MachineFunction::iterator BBI,
379 BranchProbability DefaultProb, BranchProbability UnhandledProbs,
380 SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
381 bool FallthroughUnreachable);
382
383 bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
384 MachineBasicBlock *SwitchMBB,
385 MachineBasicBlock *DefaultMBB,
386 MachineIRBuilder &MIB);
387
388 bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
389 // End switch lowering section.
390
391 bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);
392
393 bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);
394
395 bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);
396
397 bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);
398
399 bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);
400
401 bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder);
402
403 /// Translate return (ret) instruction.
404 /// The target needs to implement CallLowering::lowerReturn for
405 /// this to succeed.
406 /// \pre \p U is a return instruction.
407 bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);
408
409 bool translateFNeg(const User &U, MachineIRBuilder &MIRBuilder);
410
411 bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
412 return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
413 }
414 bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
415 return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
416 }
417 bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
418 return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
419 }
420 bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
421 return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
422 }
423 bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
424 return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
425 }
426 bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
427 return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
428 }
429
430 bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
431 return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
432 }
433 bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
434 return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
435 }
436 bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
437 return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
438 }
439 bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
440 return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
441 }
442 bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
443 return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
444 }
445 bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
446 return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
447 }
448 bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
449 return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
450 }
451 bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
452 return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
453 }
454 bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
455 return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
456 }
457 bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
458 return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
459 }
460 bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
461 return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
462 }
463 bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
464 return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
465 }
466 bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
467 return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
468 }
469 bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder) {
470 return true;
471 }
472 bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
473 return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
474 }
475
476 bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
477 return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
478 }
479
480 bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
481 return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
482 }
483 bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
484 return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
485 }
486 bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
487 return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
488 }
489
490 bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
491 return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
492 }
493 bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
494 return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
495 }
496 bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
497 return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
498 }
499 bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
500 return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
501 }
502 bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
503 return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
504 }
505
506 bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder);
507
508 bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder);
509
510 bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder);
511
512 bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);
513
514 bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
515 bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
516 bool translateFence(const User &U, MachineIRBuilder &MIRBuilder);
517 bool translateFreeze(const User &U, MachineIRBuilder &MIRBuilder);
518
519 // Stubs to keep the compiler happy while we implement the rest of the
520 // translation.
521 bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
522 return false;
523 }
524 bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
525 return false;
526 }
527 bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
528 return false;
529 }
530 bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
531 return false;
532 }
533 bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
534 return translateCast(TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder);
535 }
536 bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
537 return false;
538 }
539 bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
540 return false;
541 }
542 bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
543 return false;
544 }
545 bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
546 return false;
547 }
548
549 /// @}
550
551 // Builder for machine instructions, a la IRBuilder.
552 // I.e., compared to a regular MIBuilder, this one also inserts the instruction
553 // in the current block, it can create blocks, etc., basically a kind of
554 // IRBuilder, but for Machine IR.
555 // CSEMIRBuilder CurBuilder;
556 std::unique_ptr<MachineIRBuilder> CurBuilder;
557
558 // Builder set to the entry block (just after ABI lowering instructions). Used
559 // as a convenient location for Constants.
560 // CSEMIRBuilder EntryBuilder;
561 std::unique_ptr<MachineIRBuilder> EntryBuilder;
562
563 // The MachineFunction currently being translated.
564 MachineFunction *MF;
565
566 /// MachineRegisterInfo used to create virtual registers.
567 MachineRegisterInfo *MRI = nullptr;
568
569 const DataLayout *DL;
570
571 /// Current target configuration. Controls how the pass handles errors.
572 const TargetPassConfig *TPC;
573
574 CodeGenOpt::Level OptLevel;
575
576 /// Current optimization remark emitter. Used to report failures.
577 std::unique_ptr<OptimizationRemarkEmitter> ORE;
578
579 FunctionLoweringInfo FuncInfo;
580
581 // False when either the Target Machine specifies no optimizations or the
582 // function has the optnone attribute; true when optimizations are enabled.
583 bool EnableOpts = false;
584
585 /// True when the block contains a tail call. This allows the IRTranslator to
586 /// stop translating such blocks early.
587 bool HasTailCall = false;
588
589 /// Switch analysis and optimization.
590 class GISelSwitchLowering : public SwitchCG::SwitchLowering {
591 public:
592 GISelSwitchLowering(IRTranslator *irt, FunctionLoweringInfo &funcinfo)
593 : SwitchLowering(funcinfo), IRT(irt) {
594 assert(irt && "irt is null!");
595 }
596
597 virtual void addSuccessorWithProb(
598 MachineBasicBlock *Src, MachineBasicBlock *Dst,
599 BranchProbability Prob = BranchProbability::getUnknown()) override {
600 IRT->addSuccessorWithProb(Src, Dst, Prob);
601 }
602
603 virtual ~GISelSwitchLowering() = default;
604
605 private:
606 IRTranslator *IRT;
607 };
608
609 std::unique_ptr<GISelSwitchLowering> SL;
610
611 // * Insert all the code needed to materialize the constants
612 // at the proper place. E.g., Entry block or dominator block
613 // of each constant depending on how fancy we want to be.
614 // * Clear the different maps.
615 void finalizeFunction();
616
617 // Handle emitting jump tables for each basic block.
618 void finalizeBasicBlock();
619
620 /// Get the VRegs that represent \p Val.
621 /// Non-aggregate types have just one corresponding VReg and the list can be
622 /// used as a single "unsigned". Aggregates get flattened. If such VRegs do
623 /// not exist, they are created.
624 ArrayRef<Register> getOrCreateVRegs(const Value &Val);
625
626 Register getOrCreateVReg(const Value &Val) {
627 auto Regs = getOrCreateVRegs(Val);
628 if (Regs.empty())
629 return 0;
630 assert(Regs.size() == 1 &&
631 "attempt to get single VReg for aggregate or void");
632 return Regs[0];
633 }
634
635 /// Allocate some vregs and offsets in the VMap. Then populate just the
636 /// offsets while leaving the vregs empty.
637 ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);
638
639 /// Get the frame index that represents \p Val.
640 /// If such VReg does not exist, it is created.
641 int getOrCreateFrameIndex(const AllocaInst &AI);
642
643 /// Get the alignment of the given memory operation instruction. This will
644 /// either be the explicitly specified value or the ABI-required alignment for
645 /// the type being accessed (according to the Module's DataLayout).
646 Align getMemOpAlign(const Instruction &I);
647
648 /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
649 /// returned will be the head of the translated block (suitable for branch
650 /// destinations).
651 MachineBasicBlock &getMBB(const BasicBlock &BB);
652
653 /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding
654 /// to `Edge.first` at the IR level. This is used when IRTranslation creates
655 /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer
656 /// represented simply by the IR-level CFG.
657 void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred);
658
659 /// Returns the Machine IR predecessors for the given IR CFG edge. Usually
660 /// this is just the single MachineBasicBlock corresponding to the predecessor
661 /// in the IR. More complex lowering can result in multiple MachineBasicBlocks
662 /// preceding the original though (e.g. switch instructions).
663 SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) {
664 auto RemappedEdge = MachinePreds.find(Edge);
665 if (RemappedEdge != MachinePreds.end())
666 return RemappedEdge->second;
667 return SmallVector<MachineBasicBlock *, 4>(1, &getMBB(*Edge.first));
668 }
669
670 /// Return branch probability calculated by BranchProbabilityInfo for IR
671 /// blocks.
672 BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
673 const MachineBasicBlock *Dst) const;
674
675 void addSuccessorWithProb(
676 MachineBasicBlock *Src, MachineBasicBlock *Dst,
677 BranchProbability Prob = BranchProbability::getUnknown());
678
679public:
680 IRTranslator(CodeGenOpt::Level OptLevel = CodeGenOpt::None);
681
682 StringRef getPassName() const override { return "IRTranslator"; }
683
684 void getAnalysisUsage(AnalysisUsage &AU) const override;
685
686 // Algo:
687 // CallLowering = MF.subtarget.getCallLowering()
688 // F = MF.getParent()
689 // MIRBuilder.reset(MF)
690 // getMBB(F.getEntryBB())
691 // CallLowering->translateArguments(MIRBuilder, F, ValToVReg)
692 // for each bb in F
693 // getMBB(bb)
694 // for each inst in bb
695 // if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence))
696 // report_fatal_error("Don't know how to translate input");
697 // finalize()
698 bool runOnMachineFunction(MachineFunction &MF) override;
699};
700
701} // end namespace llvm
702
703#endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/IR/InstrTypes.h

1//===- llvm/InstrTypes.h - Important Instruction subclasses -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines various meta classes of instructions that exist in the VM
10// representation. Specific concrete subclasses of these may be found in the
11// i*.h files...
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRTYPES_H
16#define LLVM_IR_INSTRTYPES_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/Optional.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/StringMap.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/IR/Attributes.h"
27#include "llvm/IR/CallingConv.h"
28#include "llvm/IR/Constants.h"
29#include "llvm/IR/DerivedTypes.h"
30#include "llvm/IR/Function.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/LLVMContext.h"
33#include "llvm/IR/OperandTraits.h"
34#include "llvm/IR/Type.h"
35#include "llvm/IR/User.h"
36#include "llvm/IR/Value.h"
37#include "llvm/Support/Casting.h"
38#include "llvm/Support/ErrorHandling.h"
39#include <algorithm>
40#include <cassert>
41#include <cstddef>
42#include <cstdint>
43#include <iterator>
44#include <string>
45#include <vector>
46
47namespace llvm {
48
49namespace Intrinsic {
50typedef unsigned ID;
51}
52
53//===----------------------------------------------------------------------===//
54// UnaryInstruction Class
55//===----------------------------------------------------------------------===//
56
57class UnaryInstruction : public Instruction {
58protected:
59 UnaryInstruction(Type *Ty, unsigned iType, Value *V,
60 Instruction *IB = nullptr)
61 : Instruction(Ty, iType, &Op<0>(), 1, IB) {
62 Op<0>() = V;
63 }
64 UnaryInstruction(Type *Ty, unsigned iType, Value *V, BasicBlock *IAE)
65 : Instruction(Ty, iType, &Op<0>(), 1, IAE) {
66 Op<0>() = V;
67 }
68
69public:
70 // allocate space for exactly one operand
71 void *operator new(size_t S) { return User::operator new(S, 1); }
72 void operator delete(void *Ptr) { User::operator delete(Ptr); }
73
74 /// Transparently provide more efficient getOperand methods.
75 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
   public: inline Value *getOperand(unsigned) const;
   inline void setOperand(unsigned, Value*);
   inline op_iterator op_begin(); inline const_op_iterator op_begin() const;
   inline op_iterator op_end(); inline const_op_iterator op_end() const;
   protected: template <int> inline Use &Op();
   template <int> inline const Use &Op() const;
   public: inline unsigned getNumOperands() const;
76
77 // Methods for support type inquiry through isa, cast, and dyn_cast:
78 static bool classof(const Instruction *I) {
79 return I->isUnaryOp() ||
80 I->getOpcode() == Instruction::Alloca ||
81 I->getOpcode() == Instruction::Load ||
82 I->getOpcode() == Instruction::VAArg ||
83 I->getOpcode() == Instruction::ExtractValue ||
84 (I->getOpcode() >= CastOpsBegin && I->getOpcode() < CastOpsEnd);
85 }
86 static bool classof(const Value *V) {
87 return isa<Instruction>(V) && classof(cast<Instruction>(V));
88 }
89};
90
91template <>
92struct OperandTraits<UnaryInstruction> :
93 public FixedNumOperandTraits<UnaryInstruction, 1> {
94};
95
96 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value)
   UnaryInstruction::op_iterator UnaryInstruction::op_begin() { return OperandTraits<UnaryInstruction>::op_begin(this); }
   UnaryInstruction::const_op_iterator UnaryInstruction::op_begin() const { return OperandTraits<UnaryInstruction>::op_begin(const_cast<UnaryInstruction*>(this)); }
   UnaryInstruction::op_iterator UnaryInstruction::op_end() { return OperandTraits<UnaryInstruction>::op_end(this); }
   UnaryInstruction::const_op_iterator UnaryInstruction::op_end() const { return OperandTraits<UnaryInstruction>::op_end(const_cast<UnaryInstruction*>(this)); }
   Value *UnaryInstruction::getOperand(unsigned i_nocapture) const { (static_cast<void> (0)); return cast_or_null<Value>( OperandTraits<UnaryInstruction>::op_begin(const_cast<UnaryInstruction*>(this))[i_nocapture].get()); }
   void UnaryInstruction::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast<void> (0)); OperandTraits<UnaryInstruction>::op_begin(this)[i_nocapture] = Val_nocapture; }
   unsigned UnaryInstruction::getNumOperands() const { return OperandTraits<UnaryInstruction>::operands(this); }
   template <int Idx_nocapture> Use &UnaryInstruction::Op() { return this->OpFrom<Idx_nocapture>(this); }
   template <int Idx_nocapture> const Use &UnaryInstruction::Op() const { return this->OpFrom<Idx_nocapture>(this); }
97
98//===----------------------------------------------------------------------===//
99// UnaryOperator Class
100//===----------------------------------------------------------------------===//
101
102class UnaryOperator : public UnaryInstruction {
103 void AssertOK();
104
105protected:
106 UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
107 const Twine &Name, Instruction *InsertBefore);
108 UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
109 const Twine &Name, BasicBlock *InsertAtEnd);
110
111 // Note: Instruction needs to be a friend here to call cloneImpl.
112 friend class Instruction;
113
114 UnaryOperator *cloneImpl() const;
115
116public:
117
118 /// Construct a unary instruction, given the opcode and an operand.
119 /// Optionally (if InstBefore is specified) insert the instruction
120 /// into a BasicBlock right before the specified instruction. The specified
121 /// Instruction is allowed to be a dereferenced end iterator.
122 ///
123 static UnaryOperator *Create(UnaryOps Op, Value *S,
124 const Twine &Name = Twine(),
125 Instruction *InsertBefore = nullptr);
126
127 /// Construct a unary instruction, given the opcode and an operand.
128 /// Also automatically insert this instruction to the end of the
129 /// BasicBlock specified.
130 ///
131 static UnaryOperator *Create(UnaryOps Op, Value *S,
132 const Twine &Name,
133 BasicBlock *InsertAtEnd);
134
135 /// These methods just forward to Create, and are useful when you
136 /// statically know what type of instruction you're going to create. These
137 /// helpers just save some typing.
138#define HANDLE_UNARY_INST(N, OPC, CLASS) \
139 static UnaryOperator *Create##OPC(Value *V, const Twine &Name = "") {\
140 return Create(Instruction::OPC, V, Name);\
141 }
142#include "llvm/IR/Instruction.def"
143#define HANDLE_UNARY_INST(N, OPC, CLASS) \
144 static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
145 BasicBlock *BB) {\
146 return Create(Instruction::OPC, V, Name, BB);\
147 }
148#include "llvm/IR/Instruction.def"
149#define HANDLE_UNARY_INST(N, OPC, CLASS) \
150 static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
151 Instruction *I) {\
152 return Create(Instruction::OPC, V, Name, I);\
153 }
154#include "llvm/IR/Instruction.def"
155
156 static UnaryOperator *
157 CreateWithCopiedFlags(UnaryOps Opc, Value *V, Instruction *CopyO,
158 const Twine &Name = "",
159 Instruction *InsertBefore = nullptr) {
160 UnaryOperator *UO = Create(Opc, V, Name, InsertBefore);
161 UO->copyIRFlags(CopyO);
162 return UO;
163 }
164
165 static UnaryOperator *CreateFNegFMF(Value *Op, Instruction *FMFSource,
166 const Twine &Name = "",
167 Instruction *InsertBefore = nullptr) {
168 return CreateWithCopiedFlags(Instruction::FNeg, Op, FMFSource, Name,
169 InsertBefore);
170 }
171
172 UnaryOps getOpcode() const {
173 return static_cast<UnaryOps>(Instruction::getOpcode());
174 }
175
176 // Methods for support type inquiry through isa, cast, and dyn_cast:
177 static bool classof(const Instruction *I) {
178 return I->isUnaryOp();
179 }
180 static bool classof(const Value *V) {
181 return isa<Instruction>(V) && classof(cast<Instruction>(V));
182 }
183};
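
The CreateFNegFMF helper declared above builds an fneg and copies fast-math flags from another instruction. A small usage sketch, assuming V is a floating-point value and FMFSource is an instruction whose flags should be reused (makeFNegLike itself is a hypothetical wrapper, not an LLVM API):

    #include "llvm/IR/InstrTypes.h"

    // Create an fneg of V that carries the same fast-math flags as FMFSource.
    // InsertBefore may be null, in which case the instruction is not inserted.
    llvm::UnaryOperator *makeFNegLike(llvm::Value *V, llvm::Instruction *FMFSource,
                                      llvm::Instruction *InsertBefore) {
      return llvm::UnaryOperator::CreateFNegFMF(V, FMFSource, "neg", InsertBefore);
    }
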
184
185//===----------------------------------------------------------------------===//
186// BinaryOperator Class
187//===----------------------------------------------------------------------===//
188
189class BinaryOperator : public Instruction {
190 void AssertOK();
191
192protected:
193 BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
194 const Twine &Name, Instruction *InsertBefore);
195 BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
196 const Twine &Name, BasicBlock *InsertAtEnd);
197
198 // Note: Instruction needs to be a friend here to call cloneImpl.
199 friend class Instruction;
200
201 BinaryOperator *cloneImpl() const;
202
203public:
204 // allocate space for exactly two operands
205 void *operator new(size_t S) { return User::operator new(S, 2); }
206 void operator delete(void *Ptr) { User::operator delete(Ptr); }
207
208 /// Transparently provide more efficient getOperand methods.
209 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
    public: inline Value *getOperand(unsigned) const;
    inline void setOperand(unsigned, Value*);
    inline op_iterator op_begin(); inline const_op_iterator op_begin() const;
    inline op_iterator op_end(); inline const_op_iterator op_end() const;
    protected: template <int> inline Use &Op();
    template <int> inline const Use &Op() const;
    public: inline unsigned getNumOperands() const;
210
211 /// Construct a binary instruction, given the opcode and the two
212 /// operands. Optionally (if InstBefore is specified) insert the instruction
213 /// into a BasicBlock right before the specified instruction. The specified
214 /// Instruction is allowed to be a dereferenced end iterator.
215 ///
216 static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
217 const Twine &Name = Twine(),
218 Instruction *InsertBefore = nullptr);
219
220 /// Construct a binary instruction, given the opcode and the two
221 /// operands. Also automatically insert this instruction to the end of the
222 /// BasicBlock specified.
223 ///
224 static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
225 const Twine &Name, BasicBlock *InsertAtEnd);
226
227 /// These methods just forward to Create, and are useful when you
228 /// statically know what type of instruction you're going to create. These
229 /// helpers just save some typing.
230#define HANDLE_BINARY_INST(N, OPC, CLASS) \
231 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
232 const Twine &Name = "") {\
233 return Create(Instruction::OPC, V1, V2, Name);\
234 }
235#include "llvm/IR/Instruction.def"
236#define HANDLE_BINARY_INST(N, OPC, CLASS) \
237 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
238 const Twine &Name, BasicBlock *BB) {\
239 return Create(Instruction::OPC, V1, V2, Name, BB);\
240 }
241#include "llvm/IR/Instruction.def"
242#define HANDLE_BINARY_INST(N, OPC, CLASS) \
243 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
244 const Twine &Name, Instruction *I) {\
245 return Create(Instruction::OPC, V1, V2, Name, I);\
246 }
247#include "llvm/IR/Instruction.def"
248
249 static BinaryOperator *
250 CreateWithCopiedFlags(BinaryOps Opc, Value *V1, Value *V2, Instruction *CopyO,
251 const Twine &Name = "",
252 Instruction *InsertBefore = nullptr) {
253 BinaryOperator *BO = Create(Opc, V1, V2, Name, InsertBefore);
254 BO->copyIRFlags(CopyO);
255 return BO;
256 }
257
258 static BinaryOperator *CreateFAddFMF(Value *V1, Value *V2,
259 Instruction *FMFSource,
260 const Twine &Name = "") {
261 return CreateWithCopiedFlags(Instruction::FAdd, V1, V2, FMFSource, Name);
262 }
263 static BinaryOperator *CreateFSubFMF(Value *V1, Value *V2,
264 Instruction *FMFSource,
265 const Twine &Name = "") {
266 return CreateWithCopiedFlags(Instruction::FSub, V1, V2, FMFSource, Name);
267 }
268 static BinaryOperator *CreateFMulFMF(Value *V1, Value *V2,
269 Instruction *FMFSource,
270 const Twine &Name = "") {
271 return CreateWithCopiedFlags(Instruction::FMul, V1, V2, FMFSource, Name);
272 }
273 static BinaryOperator *CreateFDivFMF(Value *V1, Value *V2,
274 Instruction *FMFSource,
275 const Twine &Name = "") {
276 return CreateWithCopiedFlags(Instruction::FDiv, V1, V2, FMFSource, Name);
277 }
278 static BinaryOperator *CreateFRemFMF(Value *V1, Value *V2,
279 Instruction *FMFSource,
280 const Twine &Name = "") {
281 return CreateWithCopiedFlags(Instruction::FRem, V1, V2, FMFSource, Name);
282 }
283
284 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
285 const Twine &Name = "") {
286 BinaryOperator *BO = Create(Opc, V1, V2, Name);
287 BO->setHasNoSignedWrap(true);
288 return BO;
289 }
290 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
291 const Twine &Name, BasicBlock *BB) {
292 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
293 BO->setHasNoSignedWrap(true);
294 return BO;
295 }
296 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
297 const Twine &Name, Instruction *I) {
298 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
299 BO->setHasNoSignedWrap(true);
300 return BO;
301 }
302
303 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
304 const Twine &Name = "") {
305 BinaryOperator *BO = Create(Opc, V1, V2, Name);
306 BO->setHasNoUnsignedWrap(true);
307 return BO;
308 }
309 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
310 const Twine &Name, BasicBlock *BB) {
311 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
312 BO->setHasNoUnsignedWrap(true);
313 return BO;
314 }
315 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
316 const Twine &Name, Instruction *I) {
317 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
318 BO->setHasNoUnsignedWrap(true);
319 return BO;
320 }
321
322 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
323 const Twine &Name = "") {
324 BinaryOperator *BO = Create(Opc, V1, V2, Name);
325 BO->setIsExact(true);
326 return BO;
327 }
328 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
329 const Twine &Name, BasicBlock *BB) {
330 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
331 BO->setIsExact(true);
332 return BO;
333 }
334 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
335 const Twine &Name, Instruction *I) {
336 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
337 BO->setIsExact(true);
338 return BO;
339 }
340
341#define DEFINE_HELPERS(OPC, NUWNSWEXACT) \
342 static BinaryOperator *Create##NUWNSWEXACT##OPC(Value *V1, Value *V2, \
343 const Twine &Name = "") { \
344 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name); \
345 } \
346 static BinaryOperator *Create##NUWNSWEXACT##OPC( \
347 Value *V1, Value *V2, const Twine &Name, BasicBlock *BB) { \
348 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, BB); \
349 } \
350 static BinaryOperator *Create##NUWNSWEXACT##OPC( \
351 Value *V1, Value *V2, const Twine &Name, Instruction *I) { \
352 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, I); \
353 }
354
355 DEFINE_HELPERS(Add, NSW) // CreateNSWAdd
356 DEFINE_HELPERS(Add, NUW) // CreateNUWAdd
357 DEFINE_HELPERS(Sub, NSW) // CreateNSWSub
358 DEFINE_HELPERS(Sub, NUW) // CreateNUWSub
359 DEFINE_HELPERS(Mul, NSW) // CreateNSWMul
360 DEFINE_HELPERS(Mul, NUW) // CreateNUWMul
361 DEFINE_HELPERS(Shl, NSW) // CreateNSWShl
362 DEFINE_HELPERS(Shl, NUW) // CreateNUWShl
363
364 DEFINE_HELPERS(SDiv, Exact) // CreateExactSDiv
365 DEFINE_HELPERS(UDiv, Exact) // CreateExactUDiv
366 DEFINE_HELPERS(AShr, Exact) // CreateExactAShr
367 DEFINE_HELPERS(LShr, Exact) // CreateExactLShr
368
369#undef DEFINE_HELPERS
370
371 /// Helper functions to construct and inspect unary operations (NEG and NOT)
372 /// via binary operators SUB and XOR:
373 ///
374 /// Create the NEG and NOT instructions out of SUB and XOR instructions.
375 ///
376 static BinaryOperator *CreateNeg(Value *Op, const Twine &Name = "",
377 Instruction *InsertBefore = nullptr);
378 static BinaryOperator *CreateNeg(Value *Op, const Twine &Name,
379 BasicBlock *InsertAtEnd);
380 static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name = "",
381 Instruction *InsertBefore = nullptr);
382 static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name,
383 BasicBlock *InsertAtEnd);
384 static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name = "",
385 Instruction *InsertBefore = nullptr);
386 static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name,
387 BasicBlock *InsertAtEnd);
388 static BinaryOperator *CreateNot(Value *Op, const Twine &Name = "",
389 Instruction *InsertBefore = nullptr);
390 static BinaryOperator *CreateNot(Value *Op, const Twine &Name,
391 BasicBlock *InsertAtEnd);
392
393 BinaryOps getOpcode() const {
394 return static_cast<BinaryOps>(Instruction::getOpcode());
395 }
396
397 /// Exchange the two operands to this instruction.
398 /// This instruction is safe to use on any binary instruction and
399 /// does not modify the semantics of the instruction. If the instruction
400 /// cannot be reversed (i.e., it's a Div), then return true.
401 ///
402 bool swapOperands();
403
404 // Methods for support type inquiry through isa, cast, and dyn_cast:
405 static bool classof(const Instruction *I) {
406 return I->isBinaryOp();
407 }
408 static bool classof(const Value *V) {
409 return isa<Instruction>(V) && classof(cast<Instruction>(V));
410 }
411};
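
The CreateNSW*/CreateNUW*/CreateExact* and CreateWithCopiedFlags helpers above differ only in which flags they set on the new instruction. A brief sketch, assuming A and B are integer values of the same type and FlagSource is an existing instruction whose flags should be propagated (the function name is hypothetical):

    #include "llvm/IR/InstrTypes.h"

    // Build one add marked nsw, and one add that copies its wrapping/exact
    // flags from FlagSource instead of setting them explicitly.
    void addWithFlagsSketch(llvm::Value *A, llvm::Value *B,
                            llvm::Instruction *FlagSource,
                            llvm::Instruction *InsertBefore) {
      llvm::BinaryOperator *NSWAdd =
          llvm::BinaryOperator::CreateNSWAdd(A, B, "sum", InsertBefore);
      llvm::BinaryOperator *FlaggedAdd =
          llvm::BinaryOperator::CreateWithCopiedFlags(
              llvm::Instruction::Add, A, B, FlagSource, "sum.copy", InsertBefore);
      (void)NSWAdd;
      (void)FlaggedAdd;
    }
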
412
413template <>
414struct OperandTraits<BinaryOperator> :
415 public FixedNumOperandTraits<BinaryOperator, 2> {
416};
417
418 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)
    BinaryOperator::op_iterator BinaryOperator::op_begin() { return OperandTraits<BinaryOperator>::op_begin(this); }
    BinaryOperator::const_op_iterator BinaryOperator::op_begin() const { return OperandTraits<BinaryOperator>::op_begin(const_cast<BinaryOperator*>(this)); }
    BinaryOperator::op_iterator BinaryOperator::op_end() { return OperandTraits<BinaryOperator>::op_end(this); }
    BinaryOperator::const_op_iterator BinaryOperator::op_end() const { return OperandTraits<BinaryOperator>::op_end(const_cast<BinaryOperator*>(this)); }
    Value *BinaryOperator::getOperand(unsigned i_nocapture) const { (static_cast<void> (0)); return cast_or_null<Value>( OperandTraits<BinaryOperator>::op_begin(const_cast<BinaryOperator*>(this))[i_nocapture].get()); }
    void BinaryOperator::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast<void> (0)); OperandTraits<BinaryOperator>::op_begin(this)[i_nocapture] = Val_nocapture; }
    unsigned BinaryOperator::getNumOperands() const { return OperandTraits<BinaryOperator>::operands(this); }
    template <int Idx_nocapture> Use &BinaryOperator::Op() { return this->OpFrom<Idx_nocapture>(this); }
    template <int Idx_nocapture> const Use &BinaryOperator::Op() const { return this->OpFrom<Idx_nocapture>(this); }
419
420//===----------------------------------------------------------------------===//
421// CastInst Class
422//===----------------------------------------------------------------------===//
423
424/// This is the base class for all instructions that perform data
425/// casts. It is simply provided so that instruction category testing
426/// can be performed with code like:
427///
428/// if (isa<CastInst>(Instr)) { ... }
429/// Base class of casting instructions.
430class CastInst : public UnaryInstruction {
431protected:
432 /// Constructor with insert-before-instruction semantics for subclasses
433 CastInst(Type *Ty, unsigned iType, Value *S,
434 const Twine &NameStr = "", Instruction *InsertBefore = nullptr)
435 : UnaryInstruction(Ty, iType, S, InsertBefore) {
436 setName(NameStr);
437 }
438 /// Constructor with insert-at-end-of-block semantics for subclasses
439 CastInst(Type *Ty, unsigned iType, Value *S,
440 const Twine &NameStr, BasicBlock *InsertAtEnd)
441 : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
442 setName(NameStr);
443 }
444
445public:
446 /// Provides a way to construct any of the CastInst subclasses using an
447 /// opcode instead of the subclass's constructor. The opcode must be in the
448 /// CastOps category (Instruction::isCast(opcode) returns true). This
449 /// constructor has insert-before-instruction semantics to automatically
450 /// insert the new CastInst before InsertBefore (if it is non-null).
451 /// Construct any of the CastInst subclasses
452 static CastInst *Create(
453 Instruction::CastOps, ///< The opcode of the cast instruction
454 Value *S, ///< The value to be casted (operand 0)
455 Type *Ty, ///< The type to which cast should be made
456 const Twine &Name = "", ///< Name for the instruction
457 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
458 );
459 /// Provides a way to construct any of the CastInst subclasses using an
460 /// opcode instead of the subclass's constructor. The opcode must be in the
461 /// CastOps category. This constructor has insert-at-end-of-block semantics
462 /// to automatically insert the new CastInst at the end of InsertAtEnd (if
463 /// it is non-null).
464 /// Construct any of the CastInst subclasses
465 static CastInst *Create(
466 Instruction::CastOps, ///< The opcode for the cast instruction
467 Value *S, ///< The value to be casted (operand 0)
468 Type *Ty, ///< The type to which operand is casted
469 const Twine &Name, ///< The name for the instruction
470 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
471 );
472
473 /// Create a ZExt or BitCast cast instruction
474 static CastInst *CreateZExtOrBitCast(
475 Value *S, ///< The value to be casted (operand 0)
476 Type *Ty, ///< The type to which cast should be made
477 const Twine &Name = "", ///< Name for the instruction
478 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
479 );
480
481 /// Create a ZExt or BitCast cast instruction
482 static CastInst *CreateZExtOrBitCast(
483 Value *S, ///< The value to be casted (operand 0)
484 Type *Ty, ///< The type to which operand is casted
485 const Twine &Name, ///< The name for the instruction
486 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
487 );
488
489 /// Create a SExt or BitCast cast instruction
490 static CastInst *CreateSExtOrBitCast(
491 Value *S, ///< The value to be casted (operand 0)
492 Type *Ty, ///< The type to which cast should be made
493 const Twine &Name = "", ///< Name for the instruction
494 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
495 );
496
497 /// Create a SExt or BitCast cast instruction
498 static CastInst *CreateSExtOrBitCast(
499 Value *S, ///< The value to be casted (operand 0)
500 Type *Ty, ///< The type to which operand is casted
501 const Twine &Name, ///< The name for the instruction
502 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
503 );
504
505 /// Create a BitCast, AddrSpaceCast, or a PtrToInt cast instruction.
506 static CastInst *CreatePointerCast(
507 Value *S, ///< The pointer value to be casted (operand 0)
508 Type *Ty, ///< The type to which operand is casted
509 const Twine &Name, ///< The name for the instruction
510 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
511 );
512
513 /// Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
514 static CastInst *CreatePointerCast(
515 Value *S, ///< The pointer value to be casted (operand 0)
516 Type *Ty, ///< The type to which cast should be made
517 const Twine &Name = "", ///< Name for the instruction
518 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
519 );
520
521 /// Create a BitCast or an AddrSpaceCast cast instruction.
522 static CastInst *CreatePointerBitCastOrAddrSpaceCast(
523 Value *S, ///< The pointer value to be casted (operand 0)
524 Type *Ty, ///< The type to which operand is casted
525 const Twine &Name, ///< The name for the instruction
526 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
527 );
528
529 /// Create a BitCast or an AddrSpaceCast cast instruction.
530 static CastInst *CreatePointerBitCastOrAddrSpaceCast(
531 Value *S, ///< The pointer value to be casted (operand 0)
532 Type *Ty, ///< The type to which cast should be made
533 const Twine &Name = "", ///< Name for the instruction
534 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
535 );
536
537 /// Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
538 ///
539 /// If the value is a pointer type and the destination an integer type,
540 /// creates a PtrToInt cast. If the value is an integer type and the
541 /// destination a pointer type, creates an IntToPtr cast. Otherwise, creates
542 /// a bitcast.
543 static CastInst *CreateBitOrPointerCast(
544 Value *S, ///< The pointer value to be casted (operand 0)
545 Type *Ty, ///< The type to which cast should be made
546 const Twine &Name = "", ///< Name for the instruction
547 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
548 );
549
550 /// Create a ZExt, BitCast, or Trunc for int -> int casts.
551 static CastInst *CreateIntegerCast(
552 Value *S, ///< The integer value to be casted (operand 0)
553 Type *Ty, ///< The type to which cast should be made
554 bool isSigned, ///< Whether to regard S as signed or not
555 const Twine &Name = "", ///< Name for the instruction
556 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
557 );
558
559 /// Create a ZExt, BitCast, or Trunc for int -> int casts.
560 static CastInst *CreateIntegerCast(
561 Value *S, ///< The integer value to be casted (operand 0)
562 Type *Ty, ///< The integer type to which operand is casted
563 bool isSigned, ///< Whether to regard S as signed or not
564 const Twine &Name, ///< The name for the instruction
565 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
566 );
567
568 /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
569 static CastInst *CreateFPCast(
570 Value *S, ///< The floating point value to be casted
571 Type *Ty, ///< The floating point type to cast to
572 const Twine &Name = "", ///< Name for the instruction
573 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
574 );
575
576 /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
577 static CastInst *CreateFPCast(
578 Value *S, ///< The floating point value to be casted
579 Type *Ty, ///< The floating point type to cast to
580 const Twine &Name, ///< The name for the instruction
581 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
582 );
583
584 /// Create a Trunc or BitCast cast instruction
585 static CastInst *CreateTruncOrBitCast(
586 Value *S, ///< The value to be casted (operand 0)
587 Type *Ty, ///< The type to which cast should be made
588 const Twine &Name = "", ///< Name for the instruction
589 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
590 );
591
592 /// Create a Trunc or BitCast cast instruction
593 static CastInst *CreateTruncOrBitCast(
594 Value *S, ///< The value to be casted (operand 0)
595 Type *Ty, ///< The type to which operand is casted
596 const Twine &Name, ///< The name for the instruction
597 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
598 );
599
600 /// Check whether a bitcast between these types is valid
601 static bool isBitCastable(
602 Type *SrcTy, ///< The Type from which the value should be cast.
603 Type *DestTy ///< The Type to which the value should be cast.
604 );
605
606 /// Check whether a bitcast, inttoptr, or ptrtoint cast between these
607 /// types is valid and a no-op.
608 ///
609 /// This ensures that any pointer<->integer cast has enough bits in the
610 /// integer and any other cast is a bitcast.
611 static bool isBitOrNoopPointerCastable(
612 Type *SrcTy, ///< The Type from which the value should be cast.
613 Type *DestTy, ///< The Type to which the value should be cast.
614 const DataLayout &DL);
615
616 /// Returns the opcode necessary to cast Val into Ty using usual casting
617 /// rules.
618 /// Infer the opcode for cast operand and type
619 static Instruction::CastOps getCastOpcode(
620 const Value *Val, ///< The value to cast
621 bool SrcIsSigned, ///< Whether to treat the source as signed
622 Type *Ty, ///< The Type to which the value should be casted
623 bool DstIsSigned ///< Whether to treat the dest. as signed
624 );
625
626 /// There are several places where we need to know if a cast instruction
627 /// only deals with integer source and destination types. To simplify that
628 /// logic, this method is provided.
629 /// @returns true iff the cast has only integral typed operand and dest type.
630 /// Determine if this is an integer-only cast.
631 bool isIntegerCast() const;
632
633 /// A lossless cast is one that does not alter the basic value. It implies
634 /// a no-op cast but is more stringent, preventing things like int->float,
635 /// long->double, or int->ptr.
636 /// @returns true iff the cast is lossless.
637 /// Determine if this is a lossless cast.
638 bool isLosslessCast() const;
639
640 /// A no-op cast is one that can be effected without changing any bits.
641 /// It implies that the source and destination types are the same size. The
642 /// DataLayout argument is to determine the pointer size when examining casts
643 /// involving Integer and Pointer types. They are no-op casts if the integer
644 /// is the same size as the pointer. However, pointer size varies with
645 /// platform. Note that a precondition of this method is that the cast is
646 /// legal - i.e. the instruction formed with these operands would verify.
647 static bool isNoopCast(
648 Instruction::CastOps Opcode, ///< Opcode of cast
649 Type *SrcTy, ///< SrcTy of cast
650 Type *DstTy, ///< DstTy of cast
651 const DataLayout &DL ///< DataLayout to get the Int Ptr type from.
652 );
653
654 /// Determine if this cast is a no-op cast.
655 ///
656 /// \param DL is the DataLayout to determine pointer size.
657 bool isNoopCast(const DataLayout &DL) const;
658
659 /// Determine how a pair of casts can be eliminated, if they can be at all.
660 /// This is a helper function for both CastInst and ConstantExpr.
661 /// @returns 0 if the CastInst pair can't be eliminated, otherwise
662 /// returns Instruction::CastOps value for a cast that can replace
663 /// the pair, casting SrcTy to DstTy.
664 /// Determine if a cast pair is eliminable
665 static unsigned isEliminableCastPair(
666 Instruction::CastOps firstOpcode, ///< Opcode of first cast
667 Instruction::CastOps secondOpcode, ///< Opcode of second cast
668 Type *SrcTy, ///< SrcTy of 1st cast
669 Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
670 Type *DstTy, ///< DstTy of 2nd cast
671 Type *SrcIntPtrTy, ///< Integer type corresponding to Ptr SrcTy, or null
672 Type *MidIntPtrTy, ///< Integer type corresponding to Ptr MidTy, or null
673 Type *DstIntPtrTy ///< Integer type corresponding to Ptr DstTy, or null
674 );
675
676 /// Return the opcode of this CastInst
677 Instruction::CastOps getOpcode() const {
678 return Instruction::CastOps(Instruction::getOpcode());
679 }
680
681 /// Return the source type, as a convenience
682 Type* getSrcTy() const { return getOperand(0)->getType(); }
683 /// Return the destination type, as a convenience
684 Type* getDestTy() const { return getType(); }
685
686 /// This method can be used to determine if a cast from SrcTy to DstTy using
687 /// Opcode op is valid or not.
688 /// @returns true iff the proposed cast is valid.
689 /// Determine if a cast is valid without creating one.
690 static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy);
691 static bool castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
692 return castIsValid(op, S->getType(), DstTy);
693 }
694
695 /// Methods for support type inquiry through isa, cast, and dyn_cast:
696 static bool classof(const Instruction *I) {
697 return I->isCast();
698 }
699 static bool classof(const Value *V) {
700 return isa<Instruction>(V) && classof(cast<Instruction>(V));
701 }
702};
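
CreateIntegerCast, declared above, picks ZExt, SExt, Trunc, or BitCast for an int-to-int conversion. A minimal sketch, assuming IntVal and DestIntTy are both integer-typed (the wrapper name is hypothetical):

    #include "llvm/IR/InstrTypes.h"

    // Convert IntVal to DestIntTy; IsSigned selects sign-extension when the
    // destination type is wider than the source type.
    llvm::CastInst *intCastSketch(llvm::Value *IntVal, llvm::Type *DestIntTy,
                                  bool IsSigned, llvm::Instruction *InsertBefore) {
      return llvm::CastInst::CreateIntegerCast(IntVal, DestIntTy, IsSigned,
                                               "cast", InsertBefore);
    }
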
703
704//===----------------------------------------------------------------------===//
705// CmpInst Class
706//===----------------------------------------------------------------------===//
707
708/// This class is the base class for the comparison instructions.
709/// Abstract base class of comparison instructions.
710class CmpInst : public Instruction {
711public:
712 /// This enumeration lists the possible predicates for CmpInst subclasses.
713 /// Values in the range 0-31 are reserved for FCmpInst, while values in the
714 /// range 32-64 are reserved for ICmpInst. This is necessary to ensure the
715 /// predicate values are not overlapping between the classes.
716 ///
717 /// Some passes (e.g. InstCombine) depend on the bit-wise characteristics of
718 /// FCMP_* values. Changing the bit patterns requires a potential change to
719 /// those passes.
720 enum Predicate : unsigned {
721 // Opcode U L G E Intuitive operation
722 FCMP_FALSE = 0, ///< 0 0 0 0 Always false (always folded)
723 FCMP_OEQ = 1, ///< 0 0 0 1 True if ordered and equal
724 FCMP_OGT = 2, ///< 0 0 1 0 True if ordered and greater than
725 FCMP_OGE = 3, ///< 0 0 1 1 True if ordered and greater than or equal
726 FCMP_OLT = 4, ///< 0 1 0 0 True if ordered and less than
727 FCMP_OLE = 5, ///< 0 1 0 1 True if ordered and less than or equal
728 FCMP_ONE = 6, ///< 0 1 1 0 True if ordered and operands are unequal
729 FCMP_ORD = 7, ///< 0 1 1 1 True if ordered (no nans)
730 FCMP_UNO = 8, ///< 1 0 0 0 True if unordered: isnan(X) | isnan(Y)
731 FCMP_UEQ = 9, ///< 1 0 0 1 True if unordered or equal
732 FCMP_UGT = 10, ///< 1 0 1 0 True if unordered or greater than
733 FCMP_UGE = 11, ///< 1 0 1 1 True if unordered, greater than, or equal
734 FCMP_ULT = 12, ///< 1 1 0 0 True if unordered or less than
735 FCMP_ULE = 13, ///< 1 1 0 1 True if unordered, less than, or equal
736 FCMP_UNE = 14, ///< 1 1 1 0 True if unordered or not equal
737 FCMP_TRUE = 15, ///< 1 1 1 1 Always true (always folded)
738 FIRST_FCMP_PREDICATE = FCMP_FALSE,
739 LAST_FCMP_PREDICATE = FCMP_TRUE,
740 BAD_FCMP_PREDICATE = FCMP_TRUE + 1,
741 ICMP_EQ = 32, ///< equal
742 ICMP_NE = 33, ///< not equal
743 ICMP_UGT = 34, ///< unsigned greater than
744 ICMP_UGE = 35, ///< unsigned greater or equal
745 ICMP_ULT = 36, ///< unsigned less than
746 ICMP_ULE = 37, ///< unsigned less or equal
747 ICMP_SGT = 38, ///< signed greater than
748 ICMP_SGE = 39, ///< signed greater or equal
749 ICMP_SLT = 40, ///< signed less than
750 ICMP_SLE = 41, ///< signed less or equal
751 FIRST_ICMP_PREDICATE = ICMP_EQ,
752 LAST_ICMP_PREDICATE = ICMP_SLE,
753 BAD_ICMP_PREDICATE = ICMP_SLE + 1
754 };
755 using PredicateField =
756 Bitfield::Element<Predicate, 0, 6, LAST_ICMP_PREDICATE>;
757
758protected:
759 CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
760 Value *LHS, Value *RHS, const Twine &Name = "",
761 Instruction *InsertBefore = nullptr,
762 Instruction *FlagsSource = nullptr);
763
764 CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
765 Value *LHS, Value *RHS, const Twine &Name,
766 BasicBlock *InsertAtEnd);
767
768public:
769 // allocate space for exactly two operands
770 void *operator new(size_t S) { return User::operator new(S, 2); }
771 void operator delete(void *Ptr) { User::operator delete(Ptr); }
772
773 /// Construct a compare instruction, given the opcode, the predicate and
774 /// the two operands. Optionally (if InstBefore is specified) insert the
775 /// instruction into a BasicBlock right before the specified instruction.
776 /// The specified Instruction is allowed to be a dereferenced end iterator.
777 /// Create a CmpInst
778 static CmpInst *Create(OtherOps Op,
779 Predicate predicate, Value *S1,
780 Value *S2, const Twine &Name = "",
781 Instruction *InsertBefore = nullptr);
782
783 /// Construct a compare instruction, given the opcode, the predicate and the
784 /// two operands. Also automatically insert this instruction to the end of
785 /// the BasicBlock specified.
786 /// Create a CmpInst
787 static CmpInst *Create(OtherOps Op, Predicate predicate, Value *S1,
788 Value *S2, const Twine &Name, BasicBlock *InsertAtEnd);
789
790 /// Get the opcode casted to the right type
791 OtherOps getOpcode() const {
792 return static_cast<OtherOps>(Instruction::getOpcode());
793 }
794
795 /// Return the predicate for this instruction.
796 Predicate getPredicate() const { return getSubclassData<PredicateField>(); }
797
798 /// Set the predicate for this instruction to the specified value.
799 void setPredicate(Predicate P) { setSubclassData<PredicateField>(P); }
800
801 static bool isFPPredicate(Predicate P) {
802 static_assert(FIRST_FCMP_PREDICATE == 0,
803 "FIRST_FCMP_PREDICATE is required to be 0");
804 return P <= LAST_FCMP_PREDICATE;
805 }
806
807 static bool isIntPredicate(Predicate P) {
808 return P >= FIRST_ICMP_PREDICATE && P <= LAST_ICMP_PREDICATE;
26  Assuming 'P' is < FIRST_ICMP_PREDICATE
27  Returning zero, which participates in a condition later
809 }
810
811 static StringRef getPredicateName(Predicate P);
812
813 bool isFPPredicate() const { return isFPPredicate(getPredicate()); }
814 bool isIntPredicate() const { return isIntPredicate(getPredicate()); }
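
The analyzer events 26/27 above record isIntPredicate(P) returning false and that result feeding a condition later on the bug path in IRTranslator.cpp. A minimal sketch of the defensive pattern this implies, using only the CmpInst interface quoted here (this is illustrative, not the code at the warning site):

    #include "llvm/IR/InstrTypes.h"

    // Dispatch on the predicate family first, so a floating-point predicate
    // never reaches integer-only handling and vice versa.
    void classifyPredicateSketch(const llvm::CmpInst &Cmp) {
      llvm::CmpInst::Predicate P = Cmp.getPredicate();
      if (llvm::CmpInst::isIntPredicate(P)) {
        // integer-compare handling would go here
      } else if (llvm::CmpInst::isFPPredicate(P)) {
        // floating-point-compare handling would go here
      }
    }
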
815
816 /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
817 /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
818 /// @returns the inverse predicate for the instruction's current predicate.
819 /// Return the inverse of the instruction's predicate.
820 Predicate getInversePredicate() const {
821 return getInversePredicate(getPredicate());
822 }
823
824 /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
825 /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
826 /// @returns the inverse predicate for predicate provided in \p pred.
827 /// Return the inverse of a given predicate
828 static Predicate getInversePredicate(Predicate pred);
829
830 /// For example, EQ->EQ, SLE->SGE, ULT->UGT,
831 /// OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
832 /// @returns the predicate that would be the result of exchanging the two
833 /// operands of the CmpInst instruction without changing the result
834 /// produced.
835 /// Return the predicate as if the operands were swapped
836 Predicate getSwappedPredicate() const {
837 return getSwappedPredicate(getPredicate());
838 }
839
840 /// This is a static version that you can use without an instruction
841 /// available.
842 /// Return the predicate as if the operands were swapped.
843 static Predicate getSwappedPredicate(Predicate pred);
844
845 /// This is a static version that you can use without an instruction
846 /// available.
847 /// @returns true if the comparison predicate is strict, false otherwise.
848 static bool isStrictPredicate(Predicate predicate);
849
850 /// @returns true if the comparison predicate is strict, false otherwise.
851 /// Determine if this instruction is using a strict comparison predicate.
852 bool isStrictPredicate() const { return isStrictPredicate(getPredicate()); }
853
854 /// This is a static version that you can use without an instruction
855 /// available.
856 /// @returns true if the comparison predicate is non-strict, false otherwise.
857 static bool isNonStrictPredicate(Predicate predicate);
858
859 /// @returns true if the comparison predicate is non-strict, false otherwise.
860 /// Determine if this instruction is using a non-strict comparison predicate.
861 bool isNonStrictPredicate() const {
862 return isNonStrictPredicate(getPredicate());
863 }
864
865 /// For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
866 /// Returns the strict version of non-strict comparisons.
867 Predicate getStrictPredicate() const {
868 return getStrictPredicate(getPredicate());
869 }
870
871 /// This is a static version that you can use without an instruction
872 /// available.
873 /// @returns the strict version of comparison provided in \p pred.
874 /// If \p pred is not a strict comparison predicate, returns \p pred.
875 /// Returns the strict version of non-strict comparisons.
876 static Predicate getStrictPredicate(Predicate pred);
877
878 /// For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
879 /// Returns the non-strict version of strict comparisons.
880 Predicate getNonStrictPredicate() const {
881 return getNonStrictPredicate(getPredicate());
882 }
883
884 /// This is a static version that you can use without an instruction
885 /// available.
886 /// @returns the non-strict version of comparison provided in \p pred.
887 /// If \p pred is not a strict comparison predicate, returns \p pred.
888 /// Returns the non-strict version of strict comparisons.
889 static Predicate getNonStrictPredicate(Predicate pred);
890
891 /// This is a static version that you can use without an instruction
892 /// available.
893 /// Return the flipped strictness of predicate
894 static Predicate getFlippedStrictnessPredicate(Predicate pred);
895
896 /// For a predicate of kind "is X or equal to 0" returns the predicate "is X".
897 /// For a predicate of kind "is X" returns the predicate "is X or equal to 0".
898 /// Does not support other kinds of predicates.
899 /// @returns the predicate with the "or equal to 0" part removed if it was
900 /// present, and added if it was not.
901 /// Return the flipped strictness of predicate
902 Predicate getFlippedStrictnessPredicate() const {
903 return getFlippedStrictnessPredicate(getPredicate());
904 }
905
906 /// Provide more efficient getOperand methods.
907 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
    public: inline Value *getOperand(unsigned) const;
    inline void setOperand(unsigned, Value*);
    inline op_iterator op_begin(); inline const_op_iterator op_begin() const;
    inline op_iterator op_end(); inline const_op_iterator op_end() const;
    protected: template <int> inline Use &Op();
    template <int> inline const Use &Op() const;
    public: inline unsigned getNumOperands() const;
908
909 /// This is just a convenience that dispatches to the subclasses.
910 /// Swap the operands and adjust predicate accordingly to retain
911 /// the same comparison.
912 void swapOperands();
913
914 /// This is just a convenience that dispatches to the subclasses.
915 /// Determine if this CmpInst is commutative.
916 bool isCommutative() const;
917
918 /// Determine if this is an equals/not equals predicate.
919 /// This is a static version that you can use without an instruction
920 /// available.
921 static bool isEquality(Predicate pred);
922
923 /// Determine if this is an equals/not equals predicate.
924 bool isEquality() const { return isEquality(getPredicate()); }
925
926 /// Return true if the predicate is relational (not EQ or NE).
927 static bool isRelational(Predicate P) { return !isEquality(P); }
928
929 /// Return true if the predicate is relational (not EQ or NE).
930 bool isRelational() const { return !isEquality(); }
931
932 /// @returns true if the comparison is signed, false otherwise.
933 /// Determine if this instruction is using a signed comparison.
934 bool isSigned() const {
935 return isSigned(getPredicate());
936 }
937
938 /// @returns true if the comparison is unsigned, false otherwise.
939 /// Determine if this instruction is using an unsigned comparison.
940 bool isUnsigned() const {
941 return isUnsigned(getPredicate());
942 }
943
944 /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert
945 /// @returns the signed version of the unsigned predicate pred.
946 /// return the signed version of a predicate
947 static Predicate getSignedPredicate(Predicate pred);
948
949 /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert
950 /// @returns the signed version of the predicate for this instruction (which
951 /// has to be an unsigned predicate).
952 /// return the signed version of a predicate
953 Predicate getSignedPredicate() {
954 return getSignedPredicate(getPredicate());
955 }
956
957 /// For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert
958 /// @returns the unsigned version of the signed predicate pred.
959 static Predicate getUnsignedPredicate(Predicate pred);
960
961 /// For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert
962 /// @returns the unsigned version of the predicate for this instruction (which
963 /// has to be a signed predicate).
964 /// return the unsigned version of a predicate
965 Predicate getUnsignedPredicate() {
966 return getUnsignedPredicate(getPredicate());
967 }
968
969 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert
970 /// @returns the unsigned version of the signed predicate pred or
971 /// the signed version of the unsigned predicate pred.
972 static Predicate getFlippedSignednessPredicate(Predicate pred);
973
974 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert
975 /// @returns the unsigned version of the signed predicate pred or
976 /// the signed version of the unsigned predicate pred.
977 Predicate getFlippedSignednessPredicate() {
978 return getFlippedSignednessPredicate(getPredicate());
979 }
980
981 /// This is just a convenience.
982 /// Determine if this is true when both operands are the same.
983 bool isTrueWhenEqual() const {
984 return isTrueWhenEqual(getPredicate());
985 }
986
987 /// This is just a convenience.
988 /// Determine if this is false when both operands are the same.
989 bool isFalseWhenEqual() const {
990 return isFalseWhenEqual(getPredicate());
991 }
992
993 /// @returns true if the predicate is unsigned, false otherwise.
994 /// Determine if the predicate is an unsigned operation.
995 static bool isUnsigned(Predicate predicate);
996
997 /// @returns true if the predicate is signed, false otherwise.
998 /// Determine if the predicate is a signed operation.
999 static bool isSigned(Predicate predicate);
1000
1001 /// Determine if the predicate is an ordered operation.
1002 static bool isOrdered(Predicate predicate);
1003
1004 /// Determine if the predicate is an unordered operation.
1005 static bool isUnordered(Predicate predicate);
1006
1007 /// Determine if the predicate is true when comparing a value with itself.
1008 static bool isTrueWhenEqual(Predicate predicate);
1009
1010 /// Determine if the predicate is false when comparing a value with itself.
1011 static bool isFalseWhenEqual(Predicate predicate);
1012
1013 /// Determine if Pred1 implies Pred2 is true when two compares have matching
1014 /// operands.
1015 static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2);
1016
1017 /// Determine if Pred1 implies Pred2 is false when two compares have matching
1018 /// operands.
1019 static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2);
1020
1021 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1022 static bool classof(const Instruction *I) {
1023 return I->getOpcode() == Instruction::ICmp ||
1024 I->getOpcode() == Instruction::FCmp;
1025 }
1026 static bool classof(const Value *V) {
1027 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1028 }
1029
1030 /// Create a result type for fcmp/icmp
1031 static Type* makeCmpResultType(Type* opnd_type) {
1032 if (VectorType* vt = dyn_cast<VectorType>(opnd_type)) {
1033 return VectorType::get(Type::getInt1Ty(opnd_type->getContext()),
1034 vt->getElementCount());
1035 }
1036 return Type::getInt1Ty(opnd_type->getContext());
1037 }
1038
1039private:
1040 // Shadow Value::setValueSubclassData with a private forwarding method so that
1041 // subclasses cannot accidentally use it.
1042 void setValueSubclassData(unsigned short D) {
1043 Value::setValueSubclassData(D);
1044 }
1045};
1046
1047// FIXME: these are redundant if CmpInst < BinaryOperator
1048template <>
1049struct OperandTraits<CmpInst> : public FixedNumOperandTraits<CmpInst, 2> {
1050};
1051
1052 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CmpInst, Value)
     CmpInst::op_iterator CmpInst::op_begin() { return OperandTraits<CmpInst>::op_begin(this); }
     CmpInst::const_op_iterator CmpInst::op_begin() const { return OperandTraits<CmpInst>::op_begin(const_cast<CmpInst*>(this)); }
     CmpInst::op_iterator CmpInst::op_end() { return OperandTraits<CmpInst>::op_end(this); }
     CmpInst::const_op_iterator CmpInst::op_end() const { return OperandTraits<CmpInst>::op_end(const_cast<CmpInst*>(this)); }
     Value *CmpInst::getOperand(unsigned i_nocapture) const { (static_cast<void> (0)); return cast_or_null<Value>( OperandTraits<CmpInst>::op_begin(const_cast<CmpInst*>(this))[i_nocapture].get()); }
     void CmpInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast<void> (0)); OperandTraits<CmpInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
     unsigned CmpInst::getNumOperands() const { return OperandTraits<CmpInst>::operands(this); }
     template <int Idx_nocapture> Use &CmpInst::Op() { return this->OpFrom<Idx_nocapture>(this); }
     template <int Idx_nocapture> const Use &CmpInst::Op() const { return this->OpFrom<Idx_nocapture>(this); }
1053
1054/// A lightweight accessor for an operand bundle meant to be passed
1055/// around by value.
1056struct OperandBundleUse {
1057 ArrayRef<Use> Inputs;
1058
1059 OperandBundleUse() = default;
1060 explicit OperandBundleUse(StringMapEntry<uint32_t> *Tag, ArrayRef<Use> Inputs)
1061 : Inputs(Inputs), Tag(Tag) {}
1062
1063 /// Return true if the operand at index \p Idx in this operand bundle
1064 /// has the attribute A.
1065 bool operandHasAttr(unsigned Idx, Attribute::AttrKind A) const {
1066 if (isDeoptOperandBundle())
1067 if (A == Attribute::ReadOnly || A == Attribute::NoCapture)
1068 return Inputs[Idx]->getType()->isPointerTy();
1069
1070 // Conservative answer: no operands have any attributes.
1071 return false;
1072 }
1073
1074 /// Return the tag of this operand bundle as a string.
1075 StringRef getTagName() const {
1076 return Tag->getKey();
1077 }
1078
1079 /// Return the tag of this operand bundle as an integer.
1080 ///
1081 /// Operand bundle tags are interned by LLVMContextImpl::getOrInsertBundleTag,
1082 /// and this function returns the unique integer that getOrInsertBundleTag
1083 /// associated with the tag of this operand bundle.
1084 uint32_t getTagID() const {
1085 return Tag->getValue();
1086 }
1087
1088 /// Return true if this is a "deopt" operand bundle.
1089 bool isDeoptOperandBundle() const {
1090 return getTagID() == LLVMContext::OB_deopt;
1091 }
1092
1093 /// Return true if this is a "funclet" operand bundle.
1094 bool isFuncletOperandBundle() const {
1095 return getTagID() == LLVMContext::OB_funclet;
1096 }
1097
1098 /// Return true if this is a "cfguardtarget" operand bundle.
1099 bool isCFGuardTargetOperandBundle() const {
1100 return getTagID() == LLVMContext::OB_cfguardtarget;
1101 }
1102
1103private:
1104 /// Pointer to an entry in LLVMContextImpl::getOrInsertBundleTag.
1105 StringMapEntry<uint32_t> *Tag;
1106};
1107
1108/// A container for an operand bundle being viewed as a set of values
1109/// rather than a set of uses.
1110///
1111/// Unlike OperandBundleUse, OperandBundleDefT owns the memory it carries, and
1112/// so it is possible to create and pass around "self-contained" instances of
1113/// OperandBundleDef and ConstOperandBundleDef.
1114template <typename InputTy> class OperandBundleDefT {
1115 std::string Tag;
1116 std::vector<InputTy> Inputs;
1117
1118public:
1119 explicit OperandBundleDefT(std::string Tag, std::vector<InputTy> Inputs)
1120 : Tag(std::move(Tag)), Inputs(std::move(Inputs)) {}
1121 explicit OperandBundleDefT(std::string Tag, ArrayRef<InputTy> Inputs)
1122 : Tag(std::move(Tag)), Inputs(Inputs) {}
1123
1124 explicit OperandBundleDefT(const OperandBundleUse &OBU) {
1125 Tag = std::string(OBU.getTagName());
1126 llvm::append_range(Inputs, OBU.Inputs);
1127 }
1128
1129 ArrayRef<InputTy> inputs() const { return Inputs; }
1130
1131 using input_iterator = typename std::vector<InputTy>::const_iterator;
1132
1133 size_t input_size() const { return Inputs.size(); }
1134 input_iterator input_begin() const { return Inputs.begin(); }
1135 input_iterator input_end() const { return Inputs.end(); }
1136
1137 StringRef getTag() const { return Tag; }
1138};
1139
1140using OperandBundleDef = OperandBundleDefT<Value *>;
1141using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
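
OperandBundleDef owns its tag and inputs, so it can be built up front and handed to a call-creation API later. A short sketch, assuming the values are to be attached as a "deopt" bundle (the helper name is hypothetical):

    #include "llvm/IR/InstrTypes.h"
    #include <utility>
    #include <vector>

    // Package Vals as a self-contained "deopt" operand bundle description.
    llvm::OperandBundleDef makeDeoptBundleSketch(std::vector<llvm::Value *> Vals) {
      return llvm::OperandBundleDef("deopt", std::move(Vals));
    }
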
1142
1143//===----------------------------------------------------------------------===//
1144// CallBase Class
1145//===----------------------------------------------------------------------===//
1146
1147/// Base class for all callable instructions (InvokeInst and CallInst)
1148/// Holds everything related to calling a function.
1149///
1150/// All call-like instructions are required to use a common operand layout:
1151/// - Zero or more arguments to the call,
1152/// - Zero or more operand bundles with zero or more operand inputs each
1153/// bundle,
1154/// - Zero or more subclass controlled operands
1155/// - The called function.
1156///
1157/// This allows this base class to easily access the called function and the
1158/// start of the arguments without knowing how many other operands a particular
1159/// subclass requires. Note that accessing the end of the argument list isn't
1160/// as cheap as most other operations on the base class.
1161class CallBase : public Instruction {
1162protected:
1163 // The first two bits are reserved by CallInst for fast retrieval,
1164 using CallInstReservedField = Bitfield::Element<unsigned, 0, 2>;
1165 using CallingConvField =
1166 Bitfield::Element<CallingConv::ID, CallInstReservedField::NextBit, 10,
1167 CallingConv::MaxID>;
1168 static_assert(
1169 Bitfield::areContiguous<CallInstReservedField, CallingConvField>(),
1170 "Bitfields must be contiguous");
1171
1172 /// The last operand is the called operand.
1173 static constexpr int CalledOperandOpEndIdx = -1;
1174
1175 AttributeList Attrs; ///< parameter attributes for callable
1176 FunctionType *FTy;
1177
1178 template <class... ArgsTy>
1179 CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
1180 : Instruction(std::forward<ArgsTy>(Args)...), Attrs(A), FTy(FT) {}
1181
1182 using Instruction::Instruction;
1183
1184 bool hasDescriptor() const { return Value::HasDescriptor; }
1185
1186 unsigned getNumSubclassExtraOperands() const {
1187 switch (getOpcode()) {
1188 case Instruction::Call:
1189 return 0;
1190 case Instruction::Invoke:
1191 return 2;
1192 case Instruction::CallBr:
1193 return getNumSubclassExtraOperandsDynamic();
1194 }
1195 llvm_unreachable("Invalid opcode!")__builtin_unreachable();
1196 }
1197
1198 /// Get the number of extra operands for instructions that don't have a fixed
1199 /// number of extra operands.
1200 unsigned getNumSubclassExtraOperandsDynamic() const;
1201
1202public:
1203 using Instruction::getContext;
1204
1205 /// Create a clone of \p CB with a different set of operand bundles and
1206 /// insert it before \p InsertPt.
1207 ///
1208 /// The returned call instruction is identical to \p CB in every way except that
1209 /// the operand bundles for the new instruction are set to the operand bundles
1210 /// in \p Bundles.
1211 static CallBase *Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
1212 Instruction *InsertPt = nullptr);
1213
1214 /// Create a clone of \p CB with the operand bundle with the tag matching
1215 /// \p Bundle's tag replaced with Bundle, and insert it before \p InsertPt.
1216 ///
1217 /// The returned call instruction is identical to \p CB in every way except that
1218 /// the specified operand bundle has been replaced.
1219 static CallBase *Create(CallBase *CB,
1220 OperandBundleDef Bundle,
1221 Instruction *InsertPt = nullptr);
1222
1223 /// Create a clone of \p CB with operand bundle \p OB added.
1224 static CallBase *addOperandBundle(CallBase *CB, uint32_t ID,
1225 OperandBundleDef OB,
1226 Instruction *InsertPt = nullptr);
1227
1228 /// Create a clone of \p CB with operand bundle \p ID removed.
1229 static CallBase *removeOperandBundle(CallBase *CB, uint32_t ID,
1230 Instruction *InsertPt = nullptr);
1231
1232 static bool classof(const Instruction *I) {
1233 return I->getOpcode() == Instruction::Call ||
1234 I->getOpcode() == Instruction::Invoke ||
1235 I->getOpcode() == Instruction::CallBr;
1236 }
1237 static bool classof(const Value *V) {
1238 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1239 }
1240
1241 FunctionType *getFunctionType() const { return FTy; }
1242
1243 void mutateFunctionType(FunctionType *FTy) {
1244 Value::mutateType(FTy->getReturnType());
1245 this->FTy = FTy;
1246 }
1247
1248 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
     public: inline Value *getOperand(unsigned) const;
     inline void setOperand(unsigned, Value*);
     inline op_iterator op_begin(); inline const_op_iterator op_begin() const;
     inline op_iterator op_end(); inline const_op_iterator op_end() const;
     protected: template <int> inline Use &Op();
     template <int> inline const Use &Op() const;
     public: inline unsigned getNumOperands() const;
1249
1250 /// data_operands_begin/data_operands_end - Return iterators iterating over
1251 /// the call / invoke argument list and bundle operands. For invokes, this is
1252 /// the set of instruction operands except the invoke target and the two
1253 /// successor blocks; and for calls this is the set of instruction operands
1254 /// except the call target.
1255 User::op_iterator data_operands_begin() { return op_begin(); }
1256 User::const_op_iterator data_operands_begin() const {
1257 return const_cast<CallBase *>(this)->data_operands_begin();
1258 }
1259 User::op_iterator data_operands_end() {
1260 // Walk from the end of the operands over the called operand and any
1261 // subclass operands.
1262 return op_end() - getNumSubclassExtraOperands() - 1;
1263 }
1264 User::const_op_iterator data_operands_end() const {
1265 return const_cast<CallBase *>(this)->data_operands_end();
1266 }
1267 iterator_range<User::op_iterator> data_ops() {
1268 return make_range(data_operands_begin(), data_operands_end());
1269 }
1270 iterator_range<User::const_op_iterator> data_ops() const {
1271 return make_range(data_operands_begin(), data_operands_end());
1272 }
1273 bool data_operands_empty() const {
1274 return data_operands_end() == data_operands_begin();
1275 }
1276 unsigned data_operands_size() const {
1277 return std::distance(data_operands_begin(), data_operands_end());
1278 }
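// Illustrative sketch, not part of the header above: the data-operand view
// covers call/invoke arguments plus operand-bundle operands but skips the
// callee (and, for invokes, the successor blocks). `printDataOperands` is a
// hypothetical helper; assumes "llvm/IR/InstrTypes.h" and
// "llvm/Support/raw_ostream.h" are included.
static void printDataOperands(const llvm::CallBase *CB) {
  for (const llvm::Use &U : CB->data_ops())   // arguments + bundle operands
    U.get()->print(llvm::errs());
}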
1279
1280 bool isDataOperand(const Use *U) const {
1281 assert(this == U->getUser() &&(static_cast<void> (0))
1282 "Only valid to query with a use of this instruction!")(static_cast<void> (0));
1283 return data_operands_begin() <= U && U < data_operands_end();
1284 }
1285 bool isDataOperand(Value::const_user_iterator UI) const {
1286 return isDataOperand(&UI.getUse());
1287 }
1288
1289 /// Given a value use iterator, return the data operand number corresponding to it.
1290 /// Iterator must actually correspond to a data operand.
1291 unsigned getDataOperandNo(Value::const_user_iterator UI) const {
1292 return getDataOperandNo(&UI.getUse());
1293 }
1294
1295 /// Given a use for a data operand, get the data operand number that
1296 /// corresponds to it.
1297 unsigned getDataOperandNo(const Use *U) const {
1298 assert(isDataOperand(U) && "Data operand # out of range!")(static_cast<void> (0));
1299 return U - data_operands_begin();
1300 }
1301
1302 /// Return the iterator pointing to the beginning of the argument list.
1303 User::op_iterator arg_begin() { return op_begin(); }
1304 User::const_op_iterator arg_begin() const {
1305 return const_cast<CallBase *>(this)->arg_begin();
1306 }
1307
1308 /// Return the iterator pointing to the end of the argument list.
1309 User::op_iterator arg_end() {
1310 // From the end of the data operands, walk backwards past the bundle
1311 // operands.
1312 return data_operands_end() - getNumTotalBundleOperands();
1313 }
1314 User::const_op_iterator arg_end() const {
1315 return const_cast<CallBase *>(this)->arg_end();
1316 }
1317
1318 /// Iteration adapter for range-for loops.
1319 iterator_range<User::op_iterator> args() {
1320 return make_range(arg_begin(), arg_end());
1321 }
1322 iterator_range<User::const_op_iterator> args() const {
1323 return make_range(arg_begin(), arg_end());
1324 }
1325 bool arg_empty() const { return arg_end() == arg_begin(); }
1326 unsigned arg_size() const { return arg_end() - arg_begin(); }
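// Illustrative sketch, not part of the header above: args() visits only what
// the callee actually receives, making it the usual way to scan call
// arguments. `passesNullPointerArg` is a hypothetical helper; assumes
// "llvm/IR/Constants.h" is included for ConstantPointerNull.
static bool passesNullPointerArg(const llvm::CallBase &CB) {
  for (const llvm::Value *Arg : CB.args())    // arg_begin() .. arg_end()
    if (llvm::isa<llvm::ConstantPointerNull>(Arg))
      return true;
  return false;
}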
1327
1328 // Legacy API names that duplicate the above and will be removed once users
1329 // are migrated.
1330 iterator_range<User::op_iterator> arg_operands() {
1331 return make_range(arg_begin(), arg_end());
1332 }
1333 iterator_range<User::const_op_iterator> arg_operands() const {
1334 return make_range(arg_begin(), arg_end());
1335 }
1336 unsigned getNumArgOperands() const { return arg_size(); }
1337
1338 Value *getArgOperand(unsigned i) const {
1339 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast<void> (0));
1340 return getOperand(i);
1341 }
1342
1343 void setArgOperand(unsigned i, Value *v) {
1344 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast<void> (0));
1345 setOperand(i, v);
1346 }
1347
1348 /// Wrappers for getting the \c Use of a call argument.
1349 const Use &getArgOperandUse(unsigned i) const {
1350 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast<void> (0));
1351 return User::getOperandUse(i);
1352 }
1353 Use &getArgOperandUse(unsigned i) {
1354 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast<void> (0));
1355 return User::getOperandUse(i);
1356 }
1357
1358 bool isArgOperand(const Use *U) const {
1359 assert(this == U->getUser() &&(static_cast<void> (0))
1360 "Only valid to query with a use of this instruction!")(static_cast<void> (0));
1361 return arg_begin() <= U && U < arg_end();
1362 }
1363 bool isArgOperand(Value::const_user_iterator UI) const {
1364 return isArgOperand(&UI.getUse());
1365 }
1366
1367 /// Given a use for an arg operand, get the arg operand number that
1368 /// corresponds to it.
1369 unsigned getArgOperandNo(const Use *U) const {
1370 assert(isArgOperand(U) && "Arg operand # out of range!")(static_cast<void> (0));
1371 return U - arg_begin();
1372 }
1373
1374 /// Given a value use iterator, return the arg operand number corresponding to
1375 /// it. The iterator must actually correspond to an arg operand.
1376 unsigned getArgOperandNo(Value::const_user_iterator UI) const {
1377 return getArgOperandNo(&UI.getUse());
1378 }
1379
1380 /// Returns true if this CallSite passes the given Value* as an argument to
1381 /// the called function.
1382 bool hasArgument(const Value *V) const {
1383 return llvm::is_contained(args(), V);
1384 }
1385
1386 Value *getCalledOperand() const { return Op<CalledOperandOpEndIdx>(); }
1387
1388 const Use &getCalledOperandUse() const { return Op<CalledOperandOpEndIdx>(); }
1389 Use &getCalledOperandUse() { return Op<CalledOperandOpEndIdx>(); }
1390
1391 /// Returns the function called, or null if this is an
1392 /// indirect function invocation.
1393 Function *getCalledFunction() const {
1394 return dyn_cast_or_null<Function>(getCalledOperand());
1395 }
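// Illustrative sketch, not part of the header above: a common pattern for
// telling direct and indirect call sites apart with the accessors above.
// `calleeNameOrEmpty` is a hypothetical helper; assumes "llvm/IR/Function.h"
// is included.
static llvm::StringRef calleeNameOrEmpty(const llvm::CallBase &CB) {
  if (const llvm::Function *F = CB.getCalledFunction())
    return F->getName();       // direct call: the callee is a known Function
  return llvm::StringRef();    // indirect call (or a non-Function callee)
}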
1396
1397 /// Return true if the callsite is an indirect call.
1398 bool isIndirectCall() const;
1399
1400 /// Determine whether the passed iterator points to the callee operand's Use.
1401 bool isCallee(Value::const_user_iterator UI) const {
1402 return isCallee(&UI.getUse());
1403 }
1404
1405 /// Determine whether this Use is the callee operand's Use.
1406 bool isCallee(const Use *U) const { return &getCalledOperandUse() == U; }
1407
1408 /// Helper to get the caller (the parent function).
1409 Function *getCaller();
1410 const Function *getCaller() const {
1411 return const_cast<CallBase *>(this)->getCaller();
1412 }
1413
1414 /// Tests if this call site must be tail call optimized. Only a CallInst can
1415 /// be tail call optimized.
1416 bool isMustTailCall() const;
1417
1418 /// Tests if this call site is marked as a tail call.
1419 bool isTailCall() const;
1420
1421 /// Returns the intrinsic ID of the intrinsic called or
1422 /// Intrinsic::not_intrinsic if the called function is not an intrinsic, or if
1423 /// this is an indirect call.
1424 Intrinsic::ID getIntrinsicID() const;
1425
1426 void setCalledOperand(Value *V) { Op<CalledOperandOpEndIdx>() = V; }
1427
1428 /// Sets the function called, including updating the function type.
1429 void setCalledFunction(Function *Fn) {
1430 setCalledFunction(Fn->getFunctionType(), Fn);
1431 }
1432
1433 /// Sets the function called, including updating the function type.
1434 void setCalledFunction(FunctionCallee Fn) {
1435 setCalledFunction(Fn.getFunctionType(), Fn.getCallee());
1436 }
1437
1438 /// Sets the function called, including updating to the specified function
1439 /// type.
1440 void setCalledFunction(FunctionType *FTy, Value *Fn) {
1441 this->FTy = FTy;
1442 assert(cast<PointerType>(Fn->getType())->isOpaqueOrPointeeTypeMatches(FTy))(static_cast<void> (0));
1443 // This function doesn't mutate the return type, only the function
1444 // type. Seems broken, but I'm just gonna stick an assert in for now.
1445 assert(getType() == FTy->getReturnType())(static_cast<void> (0));
1446 setCalledOperand(Fn);
1447 }
1448
1449 CallingConv::ID getCallingConv() const {
1450 return getSubclassData<CallingConvField>();
1451 }
1452
1453 void setCallingConv(CallingConv::ID CC) {
1454 setSubclassData<CallingConvField>(CC);
1455 }
1456
1457 /// Check if this call is an inline asm statement.
1458 bool isInlineAsm() const { return isa<InlineAsm>(getCalledOperand()); }
1459
1460 /// \name Attribute API
1461 ///
1462 /// These methods access and modify attributes on this call (including
1463 /// looking through to the attributes on the called function when necessary).
1464 ///@{
1465
1466 /// Return the parameter attributes for this call.
1467 ///
1468 AttributeList getAttributes() const { return Attrs; }
1469
1470 /// Set the parameter attributes for this call.
1471 ///
1472 void setAttributes(AttributeList A) { Attrs = A; }
1473
1474 /// Determine whether this call has the given attribute. If it does not
1475 /// then determine if the called function has the attribute, but only if
1476 /// the attribute is allowed for the call.
1477 bool hasFnAttr(Attribute::AttrKind Kind) const {
1478 assert(Kind != Attribute::NoBuiltin &&(static_cast<void> (0))
1479 "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin")(static_cast<void> (0));
1480 return hasFnAttrImpl(Kind);
1481 }
1482
1483 /// Determine whether this call has the given attribute. If it does not
1484 /// then determine if the called function has the attribute, but only if
1485 /// the attribute is allowed for the call.
1486 bool hasFnAttr(StringRef Kind) const { return hasFnAttrImpl(Kind); }
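// Illustrative sketch, not part of the header above: hasFnAttr() falls back to
// the callee's attributes when the call site itself lacks the attribute, so a
// single query answers "does this call only read memory?" (mirroring
// onlyReadsMemory() further below). `callOnlyReadsMemory` is a hypothetical
// helper.
static bool callOnlyReadsMemory(const llvm::CallBase &CB) {
  return CB.hasFnAttr(llvm::Attribute::ReadOnly) ||
         CB.hasFnAttr(llvm::Attribute::ReadNone);
}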
1487
1488 // TODO: remove non-AtIndex versions of these methods.
1489 /// adds the attribute to the list of attributes.
1490 void addAttributeAtIndex(unsigned i, Attribute::AttrKind Kind) {
1491 Attrs = Attrs.addAttributeAtIndex(getContext(), i, Kind);
1492 }
1493
1494 /// adds the attribute to the list of attributes.
1495 void addAttributeAtIndex(unsigned i, Attribute Attr) {
1496 Attrs = Attrs.addAttributeAtIndex(getContext(), i, Attr);
1497 }
1498
1499 /// Adds the attribute to the function.
1500 void addFnAttr(Attribute::AttrKind Kind) {
1501 Attrs = Attrs.addFnAttribute(getContext(), Kind);
1502 }
1503
1504 /// Adds the attribute to the function.
1505 void addFnAttr(Attribute Attr) {
1506 Attrs = Attrs.addFnAttribute(getContext(), Attr);
1507 }
1508
1509 /// Adds the attribute to the return value.
1510 void addRetAttr(Attribute::AttrKind Kind) {
1511 Attrs = Attrs.addRetAttribute(getContext(), Kind);
1512 }
1513
1514 /// Adds the attribute to the return value.
1515 void addRetAttr(Attribute Attr) {
1516 Attrs = Attrs.addRetAttribute(getContext(), Attr);
1517 }
1518
1519 /// Adds the attribute to the indicated argument
1520 void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1521 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast<void> (0));
1522 Attrs = Attrs.addParamAttribute(getContext(), ArgNo, Kind);
1523 }
1524
1525 /// Adds the attribute to the indicated argument
1526 void addParamAttr(unsigned ArgNo, Attribute Attr) {
1527 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast<void> (0));
1528 Attrs = Attrs.addParamAttribute(getContext(), ArgNo, Attr);
1529 }
1530
1531 /// removes the attribute from the list of attributes.
1532 void removeAttributeAtIndex(unsigned i, Attribute::AttrKind Kind) {
1533 Attrs = Attrs.removeAttributeAtIndex(getContext(), i, Kind);
1534 }
1535
1536 /// removes the attribute from the list of attributes.
1537 void removeAttributeAtIndex(unsigned i, StringRef Kind) {
1538 Attrs = Attrs.removeAttributeAtIndex(getContext(), i, Kind);
1539 }
1540
1541 /// Removes the attributes from the function
1542 void removeFnAttrs(const AttrBuilder &AttrsToRemove) {
1543 Attrs = Attrs.removeFnAttributes(getContext(), AttrsToRemove);
1544 }
1545
1546 /// Removes the attribute from the function
1547 void removeFnAttr(Attribute::AttrKind Kind) {
1548 Attrs = Attrs.removeFnAttribute(getContext(), Kind);
1549 }
1550
1551 /// Removes the attribute from the return value
1552 void removeRetAttr(Attribute::AttrKind Kind) {
1553 Attrs = Attrs.removeRetAttribute(getContext(), Kind);
1554 }
1555
1556 /// Removes the attributes from the return value
1557 void removeRetAttrs(const AttrBuilder &AttrsToRemove) {
1558 Attrs = Attrs.removeRetAttributes(getContext(), AttrsToRemove);
1559 }
1560
1561 /// Removes the attribute from the given argument
1562 void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1563 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast<void> (0));
1564 Attrs = Attrs.removeParamAttribute(getContext(), ArgNo, Kind);
1565 }
1566
1567 /// Removes the attribute from the given argument
1568 void removeParamAttr(unsigned ArgNo, StringRef Kind) {
1569 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast<void> (0));
1570 Attrs = Attrs.removeParamAttribute(getContext(), ArgNo, Kind);
1571 }
1572
1573 /// Removes the attributes from the given argument
1574 void removeParamAttrs(unsigned ArgNo, const AttrBuilder &AttrsToRemove) {
1575 Attrs = Attrs.removeParamAttributes(getContext(), ArgNo, AttrsToRemove);
1576 }
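// Illustrative sketch, not part of the header above: each mutator rebuilds the
// stored AttributeList, so dropping a parameter attribute is a single call
// once the argument index has been validated. `dropParamAlignment` is a
// hypothetical helper.
static void dropParamAlignment(llvm::CallBase &CB, unsigned ArgNo) {
  if (ArgNo < CB.arg_size())
    CB.removeParamAttr(ArgNo, llvm::Attribute::Alignment);
}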
1577
1578 /// adds the dereferenceable attribute to the list of attributes.
1579 void addDereferenceableParamAttr(unsigned i, uint64_t Bytes) {
1580 Attrs = Attrs.addDereferenceableParamAttr(getContext(), i, Bytes);
1581 }
1582
1583 /// adds the dereferenceable attribute to the list of attributes.
1584 void addDereferenceableRetAttr(uint64_t Bytes) {
1585 Attrs = Attrs.addDereferenceableRetAttr(getContext(), Bytes);
1586 }
1587
1588 /// Determine whether the return value has the given attribute.
1589 bool hasRetAttr(Attribute::AttrKind Kind) const {
1590 return hasRetAttrImpl(Kind);
1591 }
1592 /// Determine whether the return value has the given attribute.
1593 bool hasRetAttr(StringRef Kind) const { return hasRetAttrImpl(Kind); }
1594
1595 /// Determine whether the argument or parameter has the given attribute.
1596 bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const;
1597
1598 /// Get the attribute of a given kind at a position.
1599 Attribute getAttributeAtIndex(unsigned i, Attribute::AttrKind Kind) const {
1600 return getAttributes().getAttributeAtIndex(i, Kind);
1601 }
1602
1603 /// Get the attribute of a given kind at a position.
1604 Attribute getAttributeAtIndex(unsigned i, StringRef Kind) const {
1605 return getAttributes().getAttributeAtIndex(i, Kind);
1606 }
1607
1608 /// Get the attribute of a given kind for the function.
1609 Attribute getFnAttr(StringRef Kind) const {
1610 return getAttributes().getFnAttr(Kind);
1611 }
1612
1613 /// Get the attribute of a given kind for the function.
1614 Attribute getFnAttr(Attribute::AttrKind Kind) const {
1615 return getAttributes().getFnAttr(Kind);
1616 }
1617
1618 /// Get the attribute of a given kind from a given arg
1619 Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1620 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast<void> (0));
1621 return getAttributes().getParamAttr(ArgNo, Kind);
1622 }
1623
1624 /// Get the attribute of a given kind from a given arg
1625 Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
1626 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast<void> (0));
1627 return getAttributes().getParamAttr(ArgNo, Kind);
1628 }
1629
1630 /// Return true if the data operand at index \p i has the attribute \p
1631 /// Kind.
1632 ///
1633 /// Data operands include call arguments and values used in operand bundles,
1634 /// but do not include the callee operand. This routine dispatches to the
1635 /// underlying AttributeList or the OperandBundleUser as appropriate.
1636 ///
1637 /// The index \p i is interpreted as
1638 ///
1639 /// \p i == Attribute::ReturnIndex -> the return value
1640 /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
1641 /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
1642 /// (\p i - 1) in the operand list.
1643 bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
1644 // Note that we have to add one because `i` isn't zero-indexed.
1645 assert(i < (getNumArgOperands() + getNumTotalBundleOperands() + 1) &&(static_cast<void> (0))
1646 "Data operand index out of bounds!")(static_cast<void> (0));
1647
1648 // The attribute A can either be directly specified, if the operand in
1649 // question is a call argument; or be indirectly implied by the kind of its
1650 // containing operand bundle, if the operand is a bundle operand.
1651
1652 if (i == AttributeList::ReturnIndex)
1653 return hasRetAttr(Kind);
1654
1655 // FIXME: Avoid these i - 1 calculations and update the API to use
1656 // zero-based indices.
1657 if (i < (getNumArgOperands() + 1))
1658 return paramHasAttr(i - 1, Kind);
1659
1660 assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&(static_cast<void> (0))
1661 "Must be either a call argument or an operand bundle!")(static_cast<void> (0));
1662 return bundleOperandHasAttr(i - 1, Kind);
1663 }
1664
1665 /// Determine whether this data operand is not captured.
1666 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1667 // better indicate that this may return a conservative answer.
1668 bool doesNotCapture(unsigned OpNo) const {
1669 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::NoCapture);
1670 }
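// Illustrative sketch, not part of the header above: doesNotCapture() takes a
// zero-based data-operand number and adds 1 internally to reach the 1-based
// index scheme documented for dataOperandHasImpliedAttr().
// `firstArgIsNotCaptured` is a hypothetical helper.
static bool firstArgIsNotCaptured(const llvm::CallBase &CB) {
  return CB.arg_size() > 0 && CB.doesNotCapture(0); // queries attribute index 1
}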
1671
1672 /// Determine whether this argument is passed by value.
1673 bool isByValArgument(unsigned ArgNo) const {
1674 return paramHasAttr(ArgNo, Attribute::ByVal);
1675 }
1676
1677 /// Determine whether this argument is passed in an alloca.
1678 bool isInAllocaArgument(unsigned ArgNo) const {
1679 return paramHasAttr(ArgNo, Attribute::InAlloca);
1680 }
1681
1682 /// Determine whether this argument is passed by value, in an alloca, or is
1683 /// preallocated.
1684 bool isPassPointeeByValueArgument(unsigned ArgNo) const {
1685 return paramHasAttr(ArgNo, Attribute::ByVal) ||
1686 paramHasAttr(ArgNo, Attribute::InAlloca) ||
1687 paramHasAttr(ArgNo, Attribute::Preallocated);
1688 }
1689
1690 /// Determine whether passing undef to this argument is undefined behavior.
1691 /// If passing undef to this argument is UB, passing poison is UB as well
1692 /// because poison is more undefined than undef.
1693 bool isPassingUndefUB(unsigned ArgNo) const {
1694 return paramHasAttr(ArgNo, Attribute::NoUndef) ||
1695 // dereferenceable implies noundef.
1696 paramHasAttr(ArgNo, Attribute::Dereferenceable) ||
1697 // dereferenceable implies noundef, and null is a well-defined value.
1698 paramHasAttr(ArgNo, Attribute::DereferenceableOrNull);
1699 }
1700
1701 /// Determine if there is an inalloca argument. Only the last argument can
1702 /// have the inalloca attribute.
1703 bool hasInAllocaArgument() const {
1704 return !arg_empty() && paramHasAttr(arg_size() - 1, Attribute::InAlloca);
1705 }
1706
1707 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1708 // better indicate that this may return a conservative answer.
1709 bool doesNotAccessMemory(unsigned OpNo) const {
1710 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1711 }
1712
1713 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1714 // better indicate that this may return a conservative answer.
1715 bool onlyReadsMemory(unsigned OpNo) const {
1716 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadOnly) ||
1717 dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1718 }
1719
1720 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1721 // better indicate that this may return a conservative answer.
1722 bool doesNotReadMemory(unsigned OpNo) const {
1723 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::WriteOnly) ||
1724 dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1725 }
1726
1727 /// Extract the alignment of the return value.
1728 MaybeAlign getRetAlign() const { return Attrs.getRetAlignment(); }
1729
1730 /// Extract the alignment for a call or parameter (0=unknown).
1731 MaybeAlign getParamAlign(unsigned ArgNo) const {
1732 return Attrs.getParamAlignment(ArgNo);
1733 }
1734
1735 MaybeAlign getParamStackAlign(unsigned ArgNo) const {
1736 return Attrs.getParamStackAlignment(ArgNo);
1737 }
1738
1739 /// Extract the byval type for a call or parameter.
1740 Type *getParamByValType(unsigned ArgNo) const {
1741 if (auto *Ty = Attrs.getParamByValType(ArgNo))
1742 return Ty;
1743 if (const Function *F = getCalledFunction())
1744 return F->getAttributes().getParamByValType(ArgNo);
1745 return nullptr;
1746 }
1747
1748 /// Extract the preallocated type for a call or parameter.
1749 Type *getParamPreallocatedType(unsigned ArgNo) const {
1750 if (auto *Ty = Attrs.getParamPreallocatedType(ArgNo))
1751 return Ty;
1752 if (const Function *F = getCalledFunction())
1753 return F->getAttributes().getParamPreallocatedType(ArgNo);
1754 return nullptr;
1755 }
1756
1757 /// Extract the inalloca type for a call or parameter.
1758 Type *getParamInAllocaType(unsigned ArgNo) const {
1759 if (auto *Ty = Attrs.getParamInAllocaType(ArgNo))
1760 return Ty;
1761 if (const Function *F = getCalledFunction())
1762 return F->getAttributes().getParamInAllocaType(ArgNo);
1763 return nullptr;
1764 }
1765
1766 /// Extract the number of dereferenceable bytes for a call or
1767 /// parameter (0=unknown).
1768 uint64_t getRetDereferenceableBytes() const {
1769 return Attrs.getRetDereferenceableBytes();
1770 }
1771
1772 /// Extract the number of dereferenceable bytes for a call or
1773 /// parameter (0=unknown).
1774 uint64_t getParamDereferenceableBytes(unsigned i) const {
1775 return Attrs.getParamDereferenceableBytes(i);
1776 }
1777
1778 /// Extract the number of dereferenceable_or_null bytes for a call
1779 /// (0=unknown).
1780 uint64_t getRetDereferenceableOrNullBytes() const {
1781 return Attrs.getRetDereferenceableOrNullBytes();
1782 }
1783
1784 /// Extract the number of dereferenceable_or_null bytes for a
1785 /// parameter (0=unknown).
1786 uint64_t getParamDereferenceableOrNullBytes(unsigned i) const {
1787 return Attrs.getParamDereferenceableOrNullBytes(i);
1788 }
1789
1790 /// Return true if the return value is known to be not null.
1791 /// This may be because it has the nonnull attribute, or because at least
1792 /// one byte is dereferenceable and the pointer is in addrspace(0).
1793 bool isReturnNonNull() const;
1794
1795 /// Determine if the return value is marked with NoAlias attribute.
1796 bool returnDoesNotAlias() const {
1797 return Attrs.hasRetAttr(Attribute::NoAlias);
1798 }
1799
1800 /// If one of the arguments has the 'returned' attribute, returns its
1801 /// operand value. Otherwise, returns nullptr.
1802 Value *getReturnedArgOperand() const;
1803
1804 /// Return true if the call should not be treated as a call to a
1805 /// builtin.
1806 bool isNoBuiltin() const {
1807 return hasFnAttrImpl(Attribute::NoBuiltin) &&
1808 !hasFnAttrImpl(Attribute::Builtin);
1809 }
1810
1811 /// Determine if the call requires strict floating point semantics.
1812 bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
1813
1814 /// Return true if the call should not be inlined.
1815 bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
1816 void setIsNoInline() { addFnAttr(Attribute::NoInline); }
1817 /// Determine if the call does not access memory.
1818 bool doesNotAccessMemory() const { return hasFnAttr(Attribute::ReadNone); }
1819 void setDoesNotAccessMemory() { addFnAttr(Attribute::ReadNone); }
1820
1821 /// Determine if the call does not access or only reads memory.
1822 bool onlyReadsMemory() const {
1823 return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
1824 }
1825
1826 void setOnlyReadsMemory() { addFnAttr(Attribute::ReadOnly); }
1827
1828 /// Determine if the call does not access or only writes memory.
1829 bool doesNotReadMemory() const {
1830 return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
1831 }
1832 void setDoesNotReadMemory() { addFnAttr(Attribute::WriteOnly); }
1833
1834 /// Determine if the call can access memory only using pointers based
1835 /// on its arguments.
1836 bool onlyAccessesArgMemory() const {
1837 return hasFnAttr(Attribute::ArgMemOnly);
1838 }
1839 void setOnlyAccessesArgMemory() { addFnAttr(Attribute::ArgMemOnly); }
1840
1841 /// Determine if the function may only access memory that is
1842 /// inaccessible from the IR.
1843 bool onlyAccessesInaccessibleMemory() const {
1844 return hasFnAttr(Attribute::InaccessibleMemOnly);
1845 }
1846 void setOnlyAccessesInaccessibleMemory() {
1847 addFnAttr(Attribute::InaccessibleMemOnly);
1848 }
1849
1850 /// Determine if the function may only access memory that is
1851 /// either inaccessible from the IR or pointed to by its arguments.
1852 bool onlyAccessesInaccessibleMemOrArgMem() const {
1853 return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
1854 }
1855 void setOnlyAccessesInaccessibleMemOrArgMem() {
1856 addFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
1857 }
1858 /// Determine if the call cannot return.
1859 bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
1860 void setDoesNotReturn() { addFnAttr(Attribute::NoReturn); }
1861
1862 /// Determine if the call should not perform indirect branch tracking.
1863 bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
1864
1865 /// Determine if the call cannot unwind.
1866 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
1867 void setDoesNotThrow() { addFnAttr(Attribute::NoUnwind); }
1868
1869 /// Determine if the invoke cannot be duplicated.
1870 bool cannotDuplicate() const { return hasFnAttr(Attribute::NoDuplicate); }
1871 void setCannotDuplicate() { addFnAttr(Attribute::NoDuplicate); }
1872
1873 /// Determine if the call cannot be tail merged.
1874 bool cannotMerge() const { return hasFnAttr(Attribute::NoMerge); }
1875 void setCannotMerge() { addFnAttr(Attribute::NoMerge); }
1876
1877 /// Determine if the invoke is convergent
1878 bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
1879 void setConvergent() { addFnAttr(Attribute::Convergent); }
1880 void setNotConvergent() { removeFnAttr(Attribute::Convergent); }
1881
1882 /// Determine if the call returns a structure through its first
1883 /// pointer argument.
1884 bool hasStructRetAttr() const {
1885 if (getNumArgOperands() == 0)
1886 return false;
1887
1888 // Be friendly and also check the callee.
1889 return paramHasAttr(0, Attribute::StructRet);
1890 }
1891
1892 /// Determine if any call argument is an aggregate passed by value.
1893 bool hasByValArgument() const {
1894 return Attrs.hasAttrSomewhere(Attribute::ByVal);
1895 }
1896
1897 ///@}
1898 // End of attribute API.
1899
1900 /// \name Operand Bundle API
1901 ///
1902 /// This group of methods provides the API to access and manipulate operand
1903 /// bundles on this call.
1904 /// @{
1905
1906 /// Return the number of operand bundles associated with this User.
1907 unsigned getNumOperandBundles() const {
1908 return std::distance(bundle_op_info_begin(), bundle_op_info_end());
1909 }
1910
1911 /// Return true if this User has any operand bundles.
1912 bool hasOperandBundles() const { return getNumOperandBundles() != 0; }
1913
1914 /// Return the index of the first bundle operand in the Use array.
1915 unsigned getBundleOperandsStartIndex() const {
1916 assert(hasOperandBundles() && "Don't call otherwise!")(static_cast<void> (0));
1917 return bundle_op_info_begin()->Begin;
1918 }
1919
1920 /// Return the index of the last bundle operand in the Use array.
1921 unsigned getBundleOperandsEndIndex() const {
1922 assert(hasOperandBundles() && "Don't call otherwise!")(static_cast<void> (0));
1923 return bundle_op_info_end()[-1].End;
1924 }
1925
1926 /// Return true if the operand at index \p Idx is a bundle operand.
1927 bool isBundleOperand(unsigned Idx) const {
1928 return hasOperandBundles() && Idx >= getBundleOperandsStartIndex() &&
1929 Idx < getBundleOperandsEndIndex();
1930 }
1931
1932 /// Returns true if the use is a bundle operand.
1933 bool isBundleOperand(const Use *U) const {
1934 assert(this == U->getUser() &&(static_cast<void> (0))
1935 "Only valid to query with a use of this instruction!")(static_cast<void> (0));
1936 return hasOperandBundles() && isBundleOperand(U - op_begin());
1937 }
1938 bool isBundleOperand(Value::const_user_iterator UI) const {
1939 return isBundleOperand(&UI.getUse());
1940 }
1941
1942 /// Return the total number of operands (not operand bundles) used by
1943 /// every operand bundle in this OperandBundleUser.
1944 unsigned getNumTotalBundleOperands() const {
1945 if (!hasOperandBundles())
1946 return 0;
1947
1948 unsigned Begin = getBundleOperandsStartIndex();
1949 unsigned End = getBundleOperandsEndIndex();
1950
1951 assert(Begin <= End && "Should be!")(static_cast<void> (0));
1952 return End - Begin;
1953 }
1954
1955 /// Return the operand bundle at a specific index.
1956 OperandBundleUse getOperandBundleAt(unsigned Index) const {
1957 assert(Index < getNumOperandBundles() && "Index out of bounds!")(static_cast<void> (0));
1958 return operandBundleFromBundleOpInfo(*(bundle_op_info_begin() + Index));
1959 }
1960
1961 /// Return the number of operand bundles with the tag Name attached to
1962 /// this instruction.
1963 unsigned countOperandBundlesOfType(StringRef Name) const {
1964 unsigned Count = 0;
1965 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1966 if (getOperandBundleAt(i).getTagName() == Name)
1967 Count++;
1968
1969 return Count;
1970 }
1971
1972 /// Return the number of operand bundles with the tag ID attached to
1973 /// this instruction.
1974 unsigned countOperandBundlesOfType(uint32_t ID) const {
1975 unsigned Count = 0;
1976 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1977 if (getOperandBundleAt(i).getTagID() == ID)
1978 Count++;
1979
1980 return Count;
1981 }
1982
1983 /// Return an operand bundle by name, if present.
1984 ///
1985 /// It is an error to call this for operand bundle types that may have
1986 /// multiple instances of them on the same instruction.
1987 Optional<OperandBundleUse> getOperandBundle(StringRef Name) const {
1988 assert(countOperandBundlesOfType(Name) < 2 && "Precondition violated!")(static_cast<void> (0));
1989
1990 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1991 OperandBundleUse U = getOperandBundleAt(i);
1992 if (U.getTagName() == Name)
1993 return U;
1994 }
1995
1996 return None;
1997 }
1998
1999 /// Return an operand bundle by tag ID, if present.
2000 ///
2001 /// It is an error to call this for operand bundle types that may have
2002 /// multiple instances of them on the same instruction.
2003 Optional<OperandBundleUse> getOperandBundle(uint32_t ID) const {
2004 assert(countOperandBundlesOfType(ID) < 2 && "Precondition violated!")(static_cast<void> (0));
2005
2006 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
2007 OperandBundleUse U = getOperandBundleAt(i);
2008 if (U.getTagID() == ID)
2009 return U;
2010 }
2011
2012 return None;
2013 }
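// Illustrative sketch, not part of the header above: querying a
// single-instance bundle by its interned tag ID. `callCarriesDeoptState` is a
// hypothetical helper; assumes "llvm/ADT/Optional.h" and
// "llvm/IR/LLVMContext.h" are included.
static bool callCarriesDeoptState(const llvm::CallBase &CB) {
  if (llvm::Optional<llvm::OperandBundleUse> OB =
          CB.getOperandBundle(llvm::LLVMContext::OB_deopt))
    return !OB->Inputs.empty();   // the bundle's attached operand values
  return false;
}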
2014
2015 /// Return the list of operand bundles attached to this instruction as
2016 /// a vector of OperandBundleDefs.
2017 ///
2018 /// This function copies the OperandBundleUse instances associated with this
2019 /// OperandBundleUser to a vector of OperandBundleDefs. Note:
2020 /// OperandBundleUses and OperandBundleDefs are non-trivially *different*
2021 /// representations of operand bundles (see documentation above).
2022 void getOperandBundlesAsDefs(SmallVectorImpl<OperandBundleDef> &Defs) const;
2023
2024 /// Return the operand bundle for the operand at index OpIdx.
2025 ///
2026 /// It is an error to call this with an OpIdx that does not correspond to a
2027 /// bundle operand.
2028 OperandBundleUse getOperandBundleForOperand(unsigned OpIdx) const {
2029 return operandBundleFromBundleOpInfo(getBundleOpInfoForOperand(OpIdx));
2030 }
2031
2032 /// Return true if this operand bundle user has operand bundles that
2033 /// may read from the heap.
2034 bool hasReadingOperandBundles() const;
2035
2036 /// Return true if this operand bundle user has operand bundles that
2037 /// may write to the heap.
2038 bool hasClobberingOperandBundles() const {
2039 for (auto &BOI : bundle_op_infos()) {
2040 if (BOI.Tag->second == LLVMContext::OB_deopt ||
2041 BOI.Tag->second == LLVMContext::OB_funclet)
2042 continue;
2043
2044 // This instruction has an operand bundle that is not known to us.
2045 // Assume the worst.
2046 return true;
2047 }
2048
2049 return false;
2050 }
2051
2052 /// Return true if the bundle operand at index \p OpIdx has the
2053 /// attribute \p A.
2054 bool bundleOperandHasAttr(unsigned OpIdx, Attribute::AttrKind A) const {
2055 auto &BOI = getBundleOpInfoForOperand(OpIdx);
2056 auto OBU = operandBundleFromBundleOpInfo(BOI);
2057 return OBU.operandHasAttr(OpIdx - BOI.Begin, A);
2058 }
2059
2060 /// Return true if \p Other has the same sequence of operand bundle
2061 /// tags with the same number of operands on each one of them as this
2062 /// OperandBundleUser.
2063 bool hasIdenticalOperandBundleSchema(const CallBase &Other) const {
2064 if (getNumOperandBundles() != Other.getNumOperandBundles())
2065 return false;
2066
2067 return std::equal(bundle_op_info_begin(), bundle_op_info_end(),
2068 Other.bundle_op_info_begin());
2069 }
2070
2071 /// Return true if this operand bundle user contains operand bundles
2072 /// with tags other than those specified in \p IDs.
2073 bool hasOperandBundlesOtherThan(ArrayRef<uint32_t> IDs) const {
2074 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
2075 uint32_t ID = getOperandBundleAt(i).getTagID();
2076 if (!is_contained(IDs, ID))
2077 return true;
2078 }
2079 return false;
2080 }
2081
2082 /// Is the function attribute S disallowed by some operand bundle on
2083 /// this operand bundle user?
2084 bool isFnAttrDisallowedByOpBundle(StringRef S) const {
2085 // Operand bundles only possibly disallow readnone, readonly and argmemonly
2086 // attributes. All String attributes are fine.
2087 return false;
2088 }
2089
2090 /// Is the function attribute A disallowed by some operand bundle on
2091 /// this operand bundle user?
2092 bool isFnAttrDisallowedByOpBundle(Attribute::AttrKind A) const {
2093 switch (A) {
2094 default:
2095 return false;
2096
2097 case Attribute::InaccessibleMemOrArgMemOnly:
2098 return hasReadingOperandBundles();
2099
2100 case Attribute::InaccessibleMemOnly:
2101 return hasReadingOperandBundles();
2102
2103 case Attribute::ArgMemOnly:
2104 return hasReadingOperandBundles();
2105
2106 case Attribute::ReadNone:
2107 return hasReadingOperandBundles();
2108
2109 case Attribute::ReadOnly:
2110 return hasClobberingOperandBundles();
2111 }
2112
2113 llvm_unreachable("switch has a default case!")__builtin_unreachable();
2114 }
2115
2116 /// Used to keep track of an operand bundle. See the main comment on
2117 /// OperandBundleUser above.
2118 struct BundleOpInfo {
2119 /// The operand bundle tag, interned by
2120 /// LLVMContextImpl::getOrInsertBundleTag.
2121 StringMapEntry<uint32_t> *Tag;
2122
2123 /// The index in the Use& vector where operands for this operand
2124 /// bundle start.
2125 uint32_t Begin;
2126
2127 /// The index in the Use& vector where operands for this operand
2128 /// bundle end.
2129 uint32_t End;
2130
2131 bool operator==(const BundleOpInfo &Other) const {
2132 return Tag == Other.Tag && Begin == Other.Begin && End == Other.End;
2133 }
2134 };
2135
2136 /// Simple helper function to map a BundleOpInfo to an
2137 /// OperandBundleUse.
2138 OperandBundleUse
2139 operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const {
2140 auto begin = op_begin();
2141 ArrayRef<Use> Inputs(begin + BOI.Begin, begin + BOI.End);
2142 return OperandBundleUse(BOI.Tag, Inputs);
2143 }
2144
2145 using bundle_op_iterator = BundleOpInfo *;
2146 using const_bundle_op_iterator = const BundleOpInfo *;
2147
2148 /// Return the start of the list of BundleOpInfo instances associated
2149 /// with this OperandBundleUser.
2150 ///
2151 /// OperandBundleUser uses the descriptor area co-allocated with the host User
2152 /// to store some meta information about which operands are "normal" operands,
2153 /// and which ones belong to some operand bundle.
2154 ///
2155 /// The layout of an operand bundle user is
2156 ///
2157 /// +-----------uint32_t End-------------------------------------+
2158 /// | |
2159 /// | +--------uint32_t Begin--------------------+ |
2160 /// | | | |
2161 /// ^ ^ v v
2162 /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
2163 /// | BOI0 | BOI1 | .. | DU | U0 | U1 | .. | BOI0_U0 | .. | BOI1_U0 | .. | Un
2164 /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
2165 /// v v ^ ^
2166 /// | | | |
2167 /// | +--------uint32_t Begin------------+ |
2168 /// | |
2169 /// +-----------uint32_t End-----------------------------+
2170 ///
2171 ///
2172 /// BOI0, BOI1 ... are descriptions of operand bundles in this User's use
2173 /// list. These descriptions are installed and managed by this class, and
2174 /// they're all instances of OperandBundleUser<T>::BundleOpInfo.
2175 ///
2176 /// DU is an additional descriptor installed by User's 'operator new' to keep
2177 /// track of the 'BOI0 ... BOIN' co-allocation. OperandBundleUser does not
2178 /// access or modify DU in any way, it's an implementation detail private to
2179 /// User.
2180 ///
2181 /// The regular Use& vector for the User starts at U0. The operand bundle
2182 /// uses are part of the Use& vector, just like normal uses. In the diagram
2183 /// above, the operand bundle uses start at BOI0_U0. Each instance of
2184 /// BundleOpInfo has information about a contiguous set of uses constituting
2185 /// an operand bundle, and the total set of operand bundle uses themselves
2186 /// form a contiguous set of uses (i.e. there are no gaps between uses
2187 /// corresponding to individual operand bundles).
2188 ///
2189 /// This class does not know the location of the set of operand bundle uses
2190 /// within the use list -- that is decided by the User using this class via
2191 /// the BeginIdx argument in populateBundleOperandInfos.
2192 ///
2193 /// Currently operand bundle users with hung-off operands are not supported.
2194 bundle_op_iterator bundle_op_info_begin() {
2195 if (!hasDescriptor())
2196 return nullptr;
2197
2198 uint8_t *BytesBegin = getDescriptor().begin();
2199 return reinterpret_cast<bundle_op_iterator>(BytesBegin);
2200 }
2201
2202 /// Return the start of the list of BundleOpInfo instances associated
2203 /// with this OperandBundleUser.
2204 const_bundle_op_iterator bundle_op_info_begin() const {
2205 auto *NonConstThis = const_cast<CallBase *>(this);
2206 return NonConstThis->bundle_op_info_begin();
2207 }
2208
2209 /// Return the end of the list of BundleOpInfo instances associated
2210 /// with this OperandBundleUser.
2211 bundle_op_iterator bundle_op_info_end() {
2212 if (!hasDescriptor())
2213 return nullptr;
2214
2215 uint8_t *BytesEnd = getDescriptor().end();
2216 return reinterpret_cast<bundle_op_iterator>(BytesEnd);
2217 }
2218
2219 /// Return the end of the list of BundleOpInfo instances associated
2220 /// with this OperandBundleUser.
2221 const_bundle_op_iterator bundle_op_info_end() const {
2222 auto *NonConstThis = const_cast<CallBase *>(this);
2223 return NonConstThis->bundle_op_info_end();
2224 }
2225
2226 /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
2227 iterator_range<bundle_op_iterator> bundle_op_infos() {
2228 return make_range(bundle_op_info_begin(), bundle_op_info_end());
2229 }
2230
2231 /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
2232 iterator_range<const_bundle_op_iterator> bundle_op_infos() const {
2233 return make_range(bundle_op_info_begin(), bundle_op_info_end());
2234 }
2235
2236 /// Populate the BundleOpInfo instances and the Use& vector from \p
2237 /// Bundles. Return the op_iterator pointing to the Use& one past the last
2238 /// bundle operand use.
2239 ///
2240 /// Each \p OperandBundleDef instance is tracked by a \p BundleOpInfo
2241 /// instance allocated in this User's descriptor.
2242 op_iterator populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
2243 const unsigned BeginIndex);
2244
2245public:
2246 /// Return the BundleOpInfo for the operand at index OpIdx.
2247 ///
2248 /// It is an error to call this with an OpIdx that does not correspond to a
2249 /// bundle operand.
2250 BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx);
2251 const BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx) const {
2252 return const_cast<CallBase *>(this)->getBundleOpInfoForOperand(OpIdx);
2253 }
2254
2255protected:
2256 /// Return the total number of values used in \p Bundles.
2257 static unsigned CountBundleInputs(ArrayRef<OperandBundleDef> Bundles) {
2258 unsigned Total = 0;
2259 for (auto &B : Bundles)
2260 Total += B.input_size();
2261 return Total;
2262 }
2263
2264 /// @}
2265 // End of operand bundle API.
2266
2267private:
2268 bool hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
2269 bool hasFnAttrOnCalledFunction(StringRef Kind) const;
2270
2271 template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
2272 if (Attrs.hasFnAttr(Kind))
2273 return true;
2274
2275 // Operand bundles override attributes on the called function, but don't
2276 // override attributes directly present on the call instruction.
2277 if (isFnAttrDisallowedByOpBundle(Kind))
2278 return false;
2279
2280 return hasFnAttrOnCalledFunction(Kind);
2281 }
2282
2283 /// Determine whether the return value has the given attribute. Supports
2284 /// Attribute::AttrKind and StringRef as \p AttrKind types.
2285 template <typename AttrKind> bool hasRetAttrImpl(AttrKind Kind) const {
2286 if (Attrs.hasRetAttr(Kind))
2287 return true;
2288
2289 // Look at the callee, if available.
2290 if (const Function *F = getCalledFunction())
2291 return F->getAttributes().hasRetAttr(Kind);
2292 return false;
2293 }
2294};
2295
2296template <>
2297struct OperandTraits<CallBase> : public VariadicOperandTraits<CallBase, 1> {};
2298
2299 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallBase, Value)CallBase::op_iterator CallBase::op_begin() { return OperandTraits<CallBase>::op_begin(this); }
 CallBase::const_op_iterator CallBase::op_begin() const { return OperandTraits<CallBase>::op_begin(const_cast<CallBase*>(this)); }
 CallBase::op_iterator CallBase::op_end() { return OperandTraits<CallBase>::op_end(this); }
 CallBase::const_op_iterator CallBase::op_end() const { return OperandTraits<CallBase>::op_end(const_cast<CallBase*>(this)); }
 Value *CallBase::getOperand(unsigned i_nocapture) const { (static_cast<void> (0)); return cast_or_null<Value>( OperandTraits<CallBase>::op_begin(const_cast<CallBase*>(this))[i_nocapture].get()); }
 void CallBase::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast<void> (0)); OperandTraits<CallBase>::op_begin(this)[i_nocapture] = Val_nocapture; }
 unsigned CallBase::getNumOperands() const { return OperandTraits<CallBase>::operands(this); }
 template <int Idx_nocapture> Use &CallBase::Op() { return this->OpFrom<Idx_nocapture>(this); }
 template <int Idx_nocapture> const Use &CallBase::Op() const { return this->OpFrom<Idx_nocapture>(this); }
2300
2301//===----------------------------------------------------------------------===//
2302// FuncletPadInst Class
2303//===----------------------------------------------------------------------===//
2304class FuncletPadInst : public Instruction {
2305private:
2306 FuncletPadInst(const FuncletPadInst &CPI);
2307
2308 explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
2309 ArrayRef<Value *> Args, unsigned Values,
2310 const Twine &NameStr, Instruction *InsertBefore);
2311 explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
2312 ArrayRef<Value *> Args, unsigned Values,
2313 const Twine &NameStr, BasicBlock *InsertAtEnd);
2314
2315 void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
2316
2317protected:
2318 // Note: Instruction needs to be a friend here to call cloneImpl.
2319 friend class Instruction;
2320 friend class CatchPadInst;
2321 friend class CleanupPadInst;
2322
2323 FuncletPadInst *cloneImpl() const;
2324
2325public:
2326 /// Provide fast operand accessors
2327 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const;
 inline void setOperand(unsigned, Value*);
 inline op_iterator op_begin(); inline const_op_iterator op_begin() const;
 inline op_iterator op_end(); inline const_op_iterator op_end() const;
 protected: template <int> inline Use &Op();
 template <int> inline const Use &Op() const;
 public: inline unsigned getNumOperands() const;
2328
2329 /// getNumArgOperands - Return the number of funcletpad arguments.
2330 ///
2331 unsigned getNumArgOperands() const { return getNumOperands() - 1; }
2332
2333 /// Convenience accessors
2334
2335 /// Return the outer EH-pad this funclet is nested within.
2336 ///
2337 /// Note: This returns the associated CatchSwitchInst if this FuncletPadInst
2338 /// is a CatchPadInst.
2339 Value *getParentPad() const { return Op<-1>(); }
2340 void setParentPad(Value *ParentPad) {
2341 assert(ParentPad)(static_cast<void> (0));
2342 Op<-1>() = ParentPad;
2343 }
2344
2345 /// getArgOperand/setArgOperand - Return/set the i-th funcletpad argument.
2346 ///
2347 Value *getArgOperand(unsigned i) const { return getOperand(i); }
2348 void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
2349
2350 /// arg_operands - iteration adapter for range-for loops.
2351 op_range arg_operands() { return op_range(op_begin(), op_end() - 1); }
2352
2353 /// arg_operands - iteration adapter for range-for loops.
2354 const_op_range arg_operands() const {
2355 return const_op_range(op_begin(), op_end() - 1);
2356 }
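// Illustrative sketch, not part of the header above: arg_operands() stops one
// operand short of op_end() because the final operand is the parent pad.
// `collectFuncletArgs` is a hypothetical helper; assumes
// "llvm/ADT/SmallVector.h" is included.
static void collectFuncletArgs(const llvm::FuncletPadInst &FPI,
                               llvm::SmallVectorImpl<llvm::Value *> &Out) {
  for (const llvm::Use &U : FPI.arg_operands())
    Out.push_back(U.get());
}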
2357
2358 // Methods for support type inquiry through isa, cast, and dyn_cast:
2359 static bool classof(const Instruction *I) { return I->isFuncletPad(); }
2360 static bool classof(const Value *V) {
2361 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2362 }
2363};
2364
2365template <>
2366struct OperandTraits<FuncletPadInst>
2367 : public VariadicOperandTraits<FuncletPadInst, /*MINARITY=*/1> {};
2368
2369 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(FuncletPadInst, Value)FuncletPadInst::op_iterator FuncletPadInst::op_begin() { return OperandTraits<FuncletPadInst>::op_begin(this); }
 FuncletPadInst::const_op_iterator FuncletPadInst::op_begin() const { return OperandTraits<FuncletPadInst>::op_begin(const_cast<FuncletPadInst*>(this)); }
 FuncletPadInst::op_iterator FuncletPadInst::op_end() { return OperandTraits<FuncletPadInst>::op_end(this); }
 FuncletPadInst::const_op_iterator FuncletPadInst::op_end() const { return OperandTraits<FuncletPadInst>::op_end(const_cast<FuncletPadInst*>(this)); }
 Value *FuncletPadInst::getOperand(unsigned i_nocapture) const { (static_cast<void> (0)); return cast_or_null<Value>( OperandTraits<FuncletPadInst>::op_begin(const_cast<FuncletPadInst*>(this))[i_nocapture].get()); }
 void FuncletPadInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast<void> (0)); OperandTraits<FuncletPadInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
 unsigned FuncletPadInst::getNumOperands() const { return OperandTraits<FuncletPadInst>::operands(this); }
 template <int Idx_nocapture> Use &FuncletPadInst::Op() { return this->OpFrom<Idx_nocapture>(this); }
 template <int Idx_nocapture> const Use &FuncletPadInst::Op() const { return this->OpFrom<Idx_nocapture>(this); }
2370
2371} // end namespace llvm
2372
2373#endif // LLVM_IR_INSTRTYPES_H