Bug Summary

File: lib/CodeGen/GlobalISel/IRTranslator.cpp
Warning: line 352, column 57
Called C++ object pointer is null
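
Path summary: translateCompare() obtains CI via dyn_cast<CmpInst>(&U), which yields a null pointer when the compare reaches the translator as a ConstantExpr rather than a CmpInst instruction (steps 19-22 in the annotated path below). The FCMP_TRUE branch then evaluates CI->getType() with no null check, which is the dereference reported at line 352, column 57. A hedged fix sketch follows the annotated translateCompare() listing below.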

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name IRTranslator.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~svn374710/build-llvm/lib/CodeGen/GlobalISel -I /build/llvm-toolchain-snapshot-10~svn374710/lib/CodeGen/GlobalISel -I /build/llvm-toolchain-snapshot-10~svn374710/build-llvm/include -I /build/llvm-toolchain-snapshot-10~svn374710/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~svn374710/build-llvm/lib/CodeGen/GlobalISel -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~svn374710=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-10-13-141012-12518-1 -x c++ /build/llvm-toolchain-snapshot-10~svn374710/lib/CodeGen/GlobalISel/IRTranslator.cpp

/build/llvm-toolchain-snapshot-10~svn374710/lib/CodeGen/GlobalISel/IRTranslator.cpp

1//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the IRTranslator class.
10//===----------------------------------------------------------------------===//
11
12#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
13#include "llvm/ADT/PostOrderIterator.h"
14#include "llvm/ADT/STLExtras.h"
15#include "llvm/ADT/ScopeExit.h"
16#include "llvm/ADT/SmallSet.h"
17#include "llvm/ADT/SmallVector.h"
18#include "llvm/Analysis/BranchProbabilityInfo.h"
19#include "llvm/Analysis/OptimizationRemarkEmitter.h"
20#include "llvm/Analysis/ValueTracking.h"
21#include "llvm/CodeGen/Analysis.h"
22#include "llvm/CodeGen/FunctionLoweringInfo.h"
23#include "llvm/CodeGen/GlobalISel/CallLowering.h"
24#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
25#include "llvm/CodeGen/LowLevelType.h"
26#include "llvm/CodeGen/MachineBasicBlock.h"
27#include "llvm/CodeGen/MachineFrameInfo.h"
28#include "llvm/CodeGen/MachineFunction.h"
29#include "llvm/CodeGen/MachineInstrBuilder.h"
30#include "llvm/CodeGen/MachineMemOperand.h"
31#include "llvm/CodeGen/MachineOperand.h"
32#include "llvm/CodeGen/MachineRegisterInfo.h"
33#include "llvm/CodeGen/StackProtector.h"
34#include "llvm/CodeGen/TargetFrameLowering.h"
35#include "llvm/CodeGen/TargetInstrInfo.h"
36#include "llvm/CodeGen/TargetLowering.h"
37#include "llvm/CodeGen/TargetPassConfig.h"
38#include "llvm/CodeGen/TargetRegisterInfo.h"
39#include "llvm/CodeGen/TargetSubtargetInfo.h"
40#include "llvm/IR/BasicBlock.h"
41#include "llvm/IR/CFG.h"
42#include "llvm/IR/Constant.h"
43#include "llvm/IR/Constants.h"
44#include "llvm/IR/DataLayout.h"
45#include "llvm/IR/DebugInfo.h"
46#include "llvm/IR/DerivedTypes.h"
47#include "llvm/IR/Function.h"
48#include "llvm/IR/GetElementPtrTypeIterator.h"
49#include "llvm/IR/InlineAsm.h"
50#include "llvm/IR/InstrTypes.h"
51#include "llvm/IR/Instructions.h"
52#include "llvm/IR/IntrinsicInst.h"
53#include "llvm/IR/Intrinsics.h"
54#include "llvm/IR/LLVMContext.h"
55#include "llvm/IR/Metadata.h"
56#include "llvm/IR/Type.h"
57#include "llvm/IR/User.h"
58#include "llvm/IR/Value.h"
59#include "llvm/MC/MCContext.h"
60#include "llvm/Pass.h"
61#include "llvm/Support/Casting.h"
62#include "llvm/Support/CodeGen.h"
63#include "llvm/Support/Debug.h"
64#include "llvm/Support/ErrorHandling.h"
65#include "llvm/Support/LowLevelTypeImpl.h"
66#include "llvm/Support/MathExtras.h"
67#include "llvm/Support/raw_ostream.h"
68#include "llvm/Target/TargetIntrinsicInfo.h"
69#include "llvm/Target/TargetMachine.h"
70#include <algorithm>
71#include <cassert>
72#include <cstdint>
73#include <iterator>
74#include <string>
75#include <utility>
76#include <vector>
77
78#define DEBUG_TYPE "irtranslator"
79
80using namespace llvm;
81
82static cl::opt<bool>
83 EnableCSEInIRTranslator("enable-cse-in-irtranslator",
84 cl::desc("Should enable CSE in irtranslator"),
85 cl::Optional, cl::init(false));
86char IRTranslator::ID = 0;
87
88INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
89                false, false)
90INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
91INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
92INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
93                false, false)
94
95static void reportTranslationError(MachineFunction &MF,
96 const TargetPassConfig &TPC,
97 OptimizationRemarkEmitter &ORE,
98 OptimizationRemarkMissed &R) {
99 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
100
101 // Print the function name explicitly if we don't have a debug location (which
102 // makes the diagnostic less useful) or if we're going to emit a raw error.
103 if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
104 R << (" (in function: " + MF.getName() + ")").str();
105
106 if (TPC.isGlobalISelAbortEnabled())
107 report_fatal_error(R.getMsg());
108 else
109 ORE.emit(R);
110}
111
112IRTranslator::IRTranslator() : MachineFunctionPass(ID) { }
113
114#ifndef NDEBUG
115namespace {
116/// Verify that every instruction created has the same DILocation as the
117/// instruction being translated.
118class DILocationVerifier : public GISelChangeObserver {
119 const Instruction *CurrInst = nullptr;
120
121public:
122 DILocationVerifier() = default;
123 ~DILocationVerifier() = default;
124
125 const Instruction *getCurrentInst() const { return CurrInst; }
126 void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }
127
128 void erasingInstr(MachineInstr &MI) override {}
129 void changingInstr(MachineInstr &MI) override {}
130 void changedInstr(MachineInstr &MI) override {}
131
132 void createdInstr(MachineInstr &MI) override {
133    assert(getCurrentInst() && "Inserted instruction without a current MI");
134
135 // Only print the check message if we're actually checking it.
136#ifndef NDEBUG
137    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
138                      << " was copied to " << MI);
139#endif
140 // We allow insts in the entry block to have a debug loc line of 0 because
141 // they could have originated from constants, and we don't want a jumpy
142 // debug experience.
143    assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
144            MI.getDebugLoc().getLine() == 0) &&
145           "Line info was not transferred to all instructions");
146 }
147};
148} // namespace
149#endif // ifndef NDEBUG
150
151
152void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
153 AU.addRequired<StackProtector>();
154 AU.addRequired<TargetPassConfig>();
155 AU.addRequired<GISelCSEAnalysisWrapperPass>();
156 getSelectionDAGFallbackAnalysisUsage(AU);
157 MachineFunctionPass::getAnalysisUsage(AU);
158}
159
160IRTranslator::ValueToVRegInfo::VRegListT &
161IRTranslator::allocateVRegs(const Value &Val) {
162  assert(!VMap.contains(Val) && "Value already allocated in VMap");
163 auto *Regs = VMap.getVRegs(Val);
164 auto *Offsets = VMap.getOffsets(Val);
165 SmallVector<LLT, 4> SplitTys;
166 computeValueLLTs(*DL, *Val.getType(), SplitTys,
167 Offsets->empty() ? Offsets : nullptr);
168 for (unsigned i = 0; i < SplitTys.size(); ++i)
169 Regs->push_back(0);
170 return *Regs;
171}
172
173ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
174 auto VRegsIt = VMap.findVRegs(Val);
175 if (VRegsIt != VMap.vregs_end())
176 return *VRegsIt->second;
177
178 if (Val.getType()->isVoidTy())
179 return *VMap.getVRegs(Val);
180
181 // Create entry for this type.
182 auto *VRegs = VMap.getVRegs(Val);
183 auto *Offsets = VMap.getOffsets(Val);
184
185  assert(Val.getType()->isSized() &&
186         "Don't know how to create an empty vreg");
187
188 SmallVector<LLT, 4> SplitTys;
189 computeValueLLTs(*DL, *Val.getType(), SplitTys,
190 Offsets->empty() ? Offsets : nullptr);
191
192 if (!isa<Constant>(Val)) {
193 for (auto Ty : SplitTys)
194 VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
195 return *VRegs;
196 }
197
198 if (Val.getType()->isAggregateType()) {
199 // UndefValue, ConstantAggregateZero
200 auto &C = cast<Constant>(Val);
201 unsigned Idx = 0;
202 while (auto Elt = C.getAggregateElement(Idx++)) {
203 auto EltRegs = getOrCreateVRegs(*Elt);
204 llvm::copy(EltRegs, std::back_inserter(*VRegs));
205 }
206 } else {
207    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
208 VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
209 bool Success = translate(cast<Constant>(Val), VRegs->front());
210 if (!Success) {
211 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
212 MF->getFunction().getSubprogram(),
213 &MF->getFunction().getEntryBlock());
214 R << "unable to translate constant: " << ore::NV("Type", Val.getType());
215 reportTranslationError(*MF, *TPC, *ORE, R);
216 return *VRegs;
217 }
218 }
219
220 return *VRegs;
221}
222
223int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
224 if (FrameIndices.find(&AI) != FrameIndices.end())
225 return FrameIndices[&AI];
226
227 unsigned ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
228 unsigned Size =
229 ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
230
231 // Always allocate at least one byte.
232 Size = std::max(Size, 1u);
233
234 unsigned Alignment = AI.getAlignment();
235 if (!Alignment)
236 Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
237
238 int &FI = FrameIndices[&AI];
239 FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
240 return FI;
241}
242
243unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
244 unsigned Alignment = 0;
245 Type *ValTy = nullptr;
246 if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
247 Alignment = SI->getAlignment();
248 ValTy = SI->getValueOperand()->getType();
249 } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
250 Alignment = LI->getAlignment();
251 ValTy = LI->getType();
252 } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
253 // TODO(PR27168): This instruction has no alignment attribute, but unlike
254 // the default alignment for load/store, the default here is to assume
255 // it has NATURAL alignment, not DataLayout-specified alignment.
256 const DataLayout &DL = AI->getModule()->getDataLayout();
257 Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
258 ValTy = AI->getCompareOperand()->getType();
259 } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
260 // TODO(PR27168): This instruction has no alignment attribute, but unlike
261 // the default alignment for load/store, the default here is to assume
262 // it has NATURAL alignment, not DataLayout-specified alignment.
263 const DataLayout &DL = AI->getModule()->getDataLayout();
264 Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
265 ValTy = AI->getType();
266 } else {
267 OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
268 R << "unable to translate memop: " << ore::NV("Opcode", &I);
269 reportTranslationError(*MF, *TPC, *ORE, R);
270 return 1;
271 }
272
273 return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
274}
275
276MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
277 MachineBasicBlock *&MBB = BBToMBB[&BB];
278  assert(MBB && "BasicBlock was not encountered before");
279 return *MBB;
280}
281
282void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
283  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
284 MachinePreds[Edge].push_back(NewPred);
285}
286
287bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
288 MachineIRBuilder &MIRBuilder) {
289 // Get or create a virtual register for each value.
290 // Unless the value is a Constant => loadimm cst?
291 // or inline constant each time?
292 // Creation of a virtual register needs to have a size.
293 Register Op0 = getOrCreateVReg(*U.getOperand(0));
294 Register Op1 = getOrCreateVReg(*U.getOperand(1));
295 Register Res = getOrCreateVReg(U);
296 uint16_t Flags = 0;
297 if (isa<Instruction>(U)) {
298 const Instruction &I = cast<Instruction>(U);
299 Flags = MachineInstr::copyFlagsFromInstruction(I);
300 }
301
302 MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
303 return true;
304}
305
306bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
307 // -0.0 - X --> G_FNEG
308 if (isa<Constant>(U.getOperand(0)) &&
309 U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
310 Register Op1 = getOrCreateVReg(*U.getOperand(1));
311 Register Res = getOrCreateVReg(U);
312 uint16_t Flags = 0;
313 if (isa<Instruction>(U)) {
314 const Instruction &I = cast<Instruction>(U);
315 Flags = MachineInstr::copyFlagsFromInstruction(I);
316 }
317 // Negate the last operand of the FSUB
318 MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op1}, Flags);
319 return true;
320 }
321 return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
322}
323
324bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
325 Register Op0 = getOrCreateVReg(*U.getOperand(0));
326 Register Res = getOrCreateVReg(U);
327 uint16_t Flags = 0;
328 if (isa<Instruction>(U)) {
329 const Instruction &I = cast<Instruction>(U);
330 Flags = MachineInstr::copyFlagsFromInstruction(I);
331 }
332 MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op0}, Flags);
333 return true;
334}
335
336bool IRTranslator::translateCompare(const User &U,
337 MachineIRBuilder &MIRBuilder) {
338 const CmpInst *CI = dyn_cast<CmpInst>(&U);
    Step 19: Assuming the object is not a 'CmpInst'
    Step 20: 'CI' initialized to a null pointer value
339 Register Op0 = getOrCreateVReg(*U.getOperand(0));
340 Register Op1 = getOrCreateVReg(*U.getOperand(1));
341 Register Res = getOrCreateVReg(U);
342 CmpInst::Predicate Pred =
343      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
    Step 20.1: 'CI' is null
    Step 21: '?' condition is false
344                                    cast<ConstantExpr>(U).getPredicate());
    Step 22: 'U' is a 'ConstantExpr'
345 if (CmpInst::isIntPredicate(Pred))
    Step 23: Calling 'CmpInst::isIntPredicate'
    Step 26: Returning from 'CmpInst::isIntPredicate'
    Step 27: Taking false branch
346 MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
347 else if (Pred == CmpInst::FCMP_FALSE)
    Step 28: Assuming 'Pred' is not equal to FCMP_FALSE
    Step 29: Taking false branch
348 MIRBuilder.buildCopy(
349 Res, getOrCreateVReg(*Constant::getNullValue(CI->getType())));
350 else if (Pred == CmpInst::FCMP_TRUE)
    Step 30: Assuming 'Pred' is equal to FCMP_TRUE
    Step 31: Taking true branch
351 MIRBuilder.buildCopy(
352 Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
    Step 32: Called C++ object pointer is null  [reported warning: line 352, column 57]
353 else {
354 MIRBuilder.buildInstr(TargetOpcode::G_FCMP, {Res}, {Pred, Op0, Op1},
355 MachineInstr::copyFlagsFromInstruction(*CI));
356 }
357
358 return true;
359}
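
One possible hardening, shown as a minimal sketch (assumptions: U.getType() names the same compare-result type as CI->getType() whenever CI is non-null, and omitting instruction flags when CI is null is acceptable; the patch that actually landed upstream may differ):

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U); // null for ConstantExpr compares
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    // Take the result type from U, which is always valid, instead of
    // dereferencing the possibly-null CI.
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else {
    // Guard the other unconditional CI dereference on this path as well:
    // copy fast-math flags only when the compare is a real instruction.
    uint16_t Flags = CI ? MachineInstr::copyFlagsFromInstruction(*CI) : 0;
    MIRBuilder.buildInstr(TargetOpcode::G_FCMP, {Res}, {Pred, Op0, Op1}, Flags);
  }
  return true;
}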
360
361bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
362 const ReturnInst &RI = cast<ReturnInst>(U);
363 const Value *Ret = RI.getReturnValue();
364 if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
365 Ret = nullptr;
366
367 ArrayRef<Register> VRegs;
368 if (Ret)
369 VRegs = getOrCreateVRegs(*Ret);
370
371 Register SwiftErrorVReg = 0;
372 if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
373 SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
374 &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
375 }
376
377  // The target may mess with the insertion point, but
378 // this is not important as a return is the last instruction
379 // of the block anyway.
380 return CLI->lowerReturn(MIRBuilder, Ret, VRegs, SwiftErrorVReg);
381}
382
383bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
384 const BranchInst &BrInst = cast<BranchInst>(U);
385 unsigned Succ = 0;
386 if (!BrInst.isUnconditional()) {
387 // We want a G_BRCOND to the true BB followed by an unconditional branch.
388 Register Tst = getOrCreateVReg(*BrInst.getCondition());
389 const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
390 MachineBasicBlock &TrueBB = getMBB(TrueTgt);
391 MIRBuilder.buildBrCond(Tst, TrueBB);
392 }
393
394 const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
395 MachineBasicBlock &TgtBB = getMBB(BrTgt);
396 MachineBasicBlock &CurBB = MIRBuilder.getMBB();
397
398 // If the unconditional target is the layout successor, fallthrough.
399 if (!CurBB.isLayoutSuccessor(&TgtBB))
400 MIRBuilder.buildBr(TgtBB);
401
402 // Link successors.
403 for (const BasicBlock *Succ : successors(&BrInst))
404 CurBB.addSuccessor(&getMBB(*Succ));
405 return true;
406}
407
408void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
409 MachineBasicBlock *Dst,
410 BranchProbability Prob) {
411 if (!FuncInfo.BPI) {
412 Src->addSuccessorWithoutProb(Dst);
413 return;
414 }
415 if (Prob.isUnknown())
416 Prob = getEdgeProbability(Src, Dst);
417 Src->addSuccessor(Dst, Prob);
418}
419
420BranchProbability
421IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
422 const MachineBasicBlock *Dst) const {
423 const BasicBlock *SrcBB = Src->getBasicBlock();
424 const BasicBlock *DstBB = Dst->getBasicBlock();
425 if (!FuncInfo.BPI) {
426 // If BPI is not available, set the default probability as 1 / N, where N is
427 // the number of successors.
428 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
429 return BranchProbability(1, SuccSize);
430 }
431 return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
432}
433
434bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
435 using namespace SwitchCG;
436 // Extract cases from the switch.
437 const SwitchInst &SI = cast<SwitchInst>(U);
438 BranchProbabilityInfo *BPI = FuncInfo.BPI;
439 CaseClusterVector Clusters;
440 Clusters.reserve(SI.getNumCases());
441 for (auto &I : SI.cases()) {
442 MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
443    assert(Succ && "Could not find successor mbb in mapping");
444 const ConstantInt *CaseVal = I.getCaseValue();
445 BranchProbability Prob =
446 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
447 : BranchProbability(1, SI.getNumCases() + 1);
448 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
449 }
450
451 MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());
452
453 // Cluster adjacent cases with the same destination. We do this at all
454 // optimization levels because it's cheap to do and will make codegen faster
455 // if there are many clusters.
456 sortAndRangeify(Clusters);
457
458 MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());
459
460 // If there is only the default destination, jump there directly.
461 if (Clusters.empty()) {
462 SwitchMBB->addSuccessor(DefaultMBB);
463 if (DefaultMBB != SwitchMBB->getNextNode())
464 MIB.buildBr(*DefaultMBB);
465 return true;
466 }
467
468 SL->findJumpTables(Clusters, &SI, DefaultMBB);
469
470  LLVM_DEBUG({
471    dbgs() << "Case clusters: ";
472    for (const CaseCluster &C : Clusters) {
473      if (C.Kind == CC_JumpTable)
474        dbgs() << "JT:";
475      if (C.Kind == CC_BitTests)
476        dbgs() << "BT:";
477
478      C.Low->getValue().print(dbgs(), true);
479      if (C.Low != C.High) {
480        dbgs() << '-';
481        C.High->getValue().print(dbgs(), true);
482      }
483      dbgs() << ' ';
484    }
485    dbgs() << '\n';
486  });
487
488  assert(!Clusters.empty());
489 SwitchWorkList WorkList;
490 CaseClusterIt First = Clusters.begin();
491 CaseClusterIt Last = Clusters.end() - 1;
492 auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
493 WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
494
495 // FIXME: At the moment we don't do any splitting optimizations here like
496 // SelectionDAG does, so this worklist only has one entry.
497 while (!WorkList.empty()) {
498 SwitchWorkListItem W = WorkList.back();
499 WorkList.pop_back();
500 if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
501 return false;
502 }
503 return true;
504}
505
506void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
507 MachineBasicBlock *MBB) {
508 // Emit the code for the jump table
509  assert(JT.Reg != -1U && "Should lower JT Header first!");
510 MachineIRBuilder MIB(*MBB->getParent());
511 MIB.setMBB(*MBB);
512 MIB.setDebugLoc(CurBuilder->getDebugLoc());
513
514 Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
515 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
516
517 auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
518 MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
519}
520
521bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
522 SwitchCG::JumpTableHeader &JTH,
523 MachineBasicBlock *HeaderBB) {
524 MachineIRBuilder MIB(*HeaderBB->getParent());
525 MIB.setMBB(*HeaderBB);
526 MIB.setDebugLoc(CurBuilder->getDebugLoc());
527
528 const Value &SValue = *JTH.SValue;
529 // Subtract the lowest switch case value from the value being switched on.
530 const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
531 Register SwitchOpReg = getOrCreateVReg(SValue);
532 auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
533 auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);
534
535 // This value may be smaller or larger than the target's pointer type, and
536  // therefore require extension or truncation.
537 Type *PtrIRTy = SValue.getType()->getPointerTo();
538 const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
539 Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);
540
541 JT.Reg = Sub.getReg(0);
542
543 if (JTH.OmitRangeCheck) {
544 if (JT.MBB != HeaderBB->getNextNode())
545 MIB.buildBr(*JT.MBB);
546 return true;
547 }
548
549 // Emit the range check for the jump table, and branch to the default block
550 // for the switch statement if the value being switched on exceeds the
551 // largest case in the switch.
552 auto Cst = getOrCreateVReg(
553 *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
554 Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
555 auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);
556
557 auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);
558
559 // Avoid emitting unnecessary branches to the next block.
560 if (JT.MBB != HeaderBB->getNextNode())
561 BrCond = MIB.buildBr(*JT.MBB);
562 return true;
563}
564
565void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
566 MachineBasicBlock *SwitchBB,
567 MachineIRBuilder &MIB) {
568 Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
569 Register Cond;
570 DebugLoc OldDbgLoc = MIB.getDebugLoc();
571 MIB.setDebugLoc(CB.DbgLoc);
572 MIB.setMBB(*CB.ThisBB);
573
574 if (CB.PredInfo.NoCmp) {
575 // Branch or fall through to TrueBB.
576 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
577 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
578 CB.ThisBB);
579 CB.ThisBB->normalizeSuccProbs();
580 if (CB.TrueBB != CB.ThisBB->getNextNode())
581 MIB.buildBr(*CB.TrueBB);
582 MIB.setDebugLoc(OldDbgLoc);
583 return;
584 }
585
586 const LLT i1Ty = LLT::scalar(1);
587 // Build the compare.
588 if (!CB.CmpMHS) {
589 Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
590 Cond = MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
591 } else {
592    assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
593           "Can only handle SLE ranges");
594
595 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
596 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
597
598 Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
599 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
600 Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
601 Cond =
602 MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
603 } else {
604 const LLT &CmpTy = MRI->getType(CmpOpReg);
605 auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
606 auto Diff = MIB.buildConstant(CmpTy, High - Low);
607 Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
608 }
609 }
610
611 // Update successor info
612 addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
613
614 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
615 CB.ThisBB);
616
617 // TrueBB and FalseBB are always different unless the incoming IR is
618 // degenerate. This only happens when running llc on weird IR.
619 if (CB.TrueBB != CB.FalseBB)
620 addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
621 CB.ThisBB->normalizeSuccProbs();
622
623 // if (SwitchBB->getBasicBlock() != CB.FalseBB->getBasicBlock())
624 addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
625 CB.ThisBB);
626
627 // If the lhs block is the next block, invert the condition so that we can
628 // fall through to the lhs instead of the rhs block.
629 if (CB.TrueBB == CB.ThisBB->getNextNode()) {
630 std::swap(CB.TrueBB, CB.FalseBB);
631 auto True = MIB.buildConstant(i1Ty, 1);
632 Cond = MIB.buildInstr(TargetOpcode::G_XOR, {i1Ty}, {Cond, True}, None)
633 .getReg(0);
634 }
635
636 MIB.buildBrCond(Cond, *CB.TrueBB);
637 MIB.buildBr(*CB.FalseBB);
638 MIB.setDebugLoc(OldDbgLoc);
639}
640
641bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
642 MachineBasicBlock *SwitchMBB,
643 MachineBasicBlock *CurMBB,
644 MachineBasicBlock *DefaultMBB,
645 MachineIRBuilder &MIB,
646 MachineFunction::iterator BBI,
647 BranchProbability UnhandledProbs,
648 SwitchCG::CaseClusterIt I,
649 MachineBasicBlock *Fallthrough,
650 bool FallthroughUnreachable) {
651 using namespace SwitchCG;
652 MachineFunction *CurMF = SwitchMBB->getParent();
653 // FIXME: Optimize away range check based on pivot comparisons.
654 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
655 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
656 BranchProbability DefaultProb = W.DefaultProb;
657
658 // The jump block hasn't been inserted yet; insert it here.
659 MachineBasicBlock *JumpMBB = JT->MBB;
660 CurMF->insert(BBI, JumpMBB);
661
662 // Since the jump table block is separate from the switch block, we need
663 // to keep track of it as a machine predecessor to the default block,
664 // otherwise we lose the phi edges.
665 addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
666 CurMBB);
667 addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
668 JumpMBB);
669
670 auto JumpProb = I->Prob;
671 auto FallthroughProb = UnhandledProbs;
672
673 // If the default statement is a target of the jump table, we evenly
674 // distribute the default probability to successors of CurMBB. Also
675 // update the probability on the edge from JumpMBB to Fallthrough.
676 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
677 SE = JumpMBB->succ_end();
678 SI != SE; ++SI) {
679 if (*SI == DefaultMBB) {
680 JumpProb += DefaultProb / 2;
681 FallthroughProb -= DefaultProb / 2;
682 JumpMBB->setSuccProbability(SI, DefaultProb / 2);
683 JumpMBB->normalizeSuccProbs();
684 } else {
685        // Also record edges from the jump table block to its successors.
686 addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
687 JumpMBB);
688 }
689 }
690
691 // Skip the range check if the fallthrough block is unreachable.
692 if (FallthroughUnreachable)
693 JTH->OmitRangeCheck = true;
694
695 if (!JTH->OmitRangeCheck)
696 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
697 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
698 CurMBB->normalizeSuccProbs();
699
700 // The jump table header will be inserted in our current block, do the
701 // range check, and fall through to our fallthrough block.
702 JTH->HeaderBB = CurMBB;
703 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
704
705 // If we're in the right place, emit the jump table header right now.
706 if (CurMBB == SwitchMBB) {
707 if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
708 return false;
709 JTH->Emitted = true;
710 }
711 return true;
712}
713bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
714 Value *Cond,
715 MachineBasicBlock *Fallthrough,
716 bool FallthroughUnreachable,
717 BranchProbability UnhandledProbs,
718 MachineBasicBlock *CurMBB,
719 MachineIRBuilder &MIB,
720 MachineBasicBlock *SwitchMBB) {
721 using namespace SwitchCG;
722 const Value *RHS, *LHS, *MHS;
723 CmpInst::Predicate Pred;
724 if (I->Low == I->High) {
725 // Check Cond == I->Low.
726 Pred = CmpInst::ICMP_EQ;
727 LHS = Cond;
728 RHS = I->Low;
729 MHS = nullptr;
730 } else {
731 // Check I->Low <= Cond <= I->High.
732 Pred = CmpInst::ICMP_SLE;
733 LHS = I->Low;
734 MHS = Cond;
735 RHS = I->High;
736 }
737
738 // If Fallthrough is unreachable, fold away the comparison.
739 // The false probability is the sum of all unhandled cases.
740 CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
741 CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);
742
743 emitSwitchCase(CB, SwitchMBB, MIB);
744 return true;
745}
746
747bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
748 Value *Cond,
749 MachineBasicBlock *SwitchMBB,
750 MachineBasicBlock *DefaultMBB,
751 MachineIRBuilder &MIB) {
752 using namespace SwitchCG;
753 MachineFunction *CurMF = FuncInfo.MF;
754 MachineBasicBlock *NextMBB = nullptr;
755 MachineFunction::iterator BBI(W.MBB);
756 if (++BBI != FuncInfo.MF->end())
757 NextMBB = &*BBI;
758
759 if (EnableOpts) {
760 // Here, we order cases by probability so the most likely case will be
761 // checked first. However, two clusters can have the same probability in
762 // which case their relative ordering is non-deterministic. So we use Low
763 // as a tie-breaker as clusters are guaranteed to never overlap.
764 llvm::sort(W.FirstCluster, W.LastCluster + 1,
765 [](const CaseCluster &a, const CaseCluster &b) {
766 return a.Prob != b.Prob
767 ? a.Prob > b.Prob
768 : a.Low->getValue().slt(b.Low->getValue());
769 });
770
771 // Rearrange the case blocks so that the last one falls through if possible
772 // without changing the order of probabilities.
773 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
774 --I;
775 if (I->Prob > W.LastCluster->Prob)
776 break;
777 if (I->Kind == CC_Range && I->MBB == NextMBB) {
778 std::swap(*I, *W.LastCluster);
779 break;
780 }
781 }
782 }
783
784 // Compute total probability.
785 BranchProbability DefaultProb = W.DefaultProb;
786 BranchProbability UnhandledProbs = DefaultProb;
787 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
788 UnhandledProbs += I->Prob;
789
790 MachineBasicBlock *CurMBB = W.MBB;
791 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
792 bool FallthroughUnreachable = false;
793 MachineBasicBlock *Fallthrough;
794 if (I == W.LastCluster) {
795 // For the last cluster, fall through to the default destination.
796 Fallthrough = DefaultMBB;
797 FallthroughUnreachable = isa<UnreachableInst>(
798 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
799 } else {
800 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
801 CurMF->insert(BBI, Fallthrough);
802 }
803 UnhandledProbs -= I->Prob;
804
805 switch (I->Kind) {
806 case CC_BitTests: {
807      LLVM_DEBUG(dbgs() << "Switch to bit test optimization unimplemented");
808 return false; // Bit tests currently unimplemented.
809 }
810 case CC_JumpTable: {
811 if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
812 UnhandledProbs, I, Fallthrough,
813 FallthroughUnreachable)) {
814        LLVM_DEBUG(dbgs() << "Failed to lower jump table");
815 return false;
816 }
817 break;
818 }
819 case CC_Range: {
820 if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
821 FallthroughUnreachable, UnhandledProbs,
822 CurMBB, MIB, SwitchMBB)) {
823        LLVM_DEBUG(dbgs() << "Failed to lower switch range");
824 return false;
825 }
826 break;
827 }
828 }
829 CurMBB = Fallthrough;
830 }
831
832 return true;
833}
834
835bool IRTranslator::translateIndirectBr(const User &U,
836 MachineIRBuilder &MIRBuilder) {
837 const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
838
839 const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
840 MIRBuilder.buildBrIndirect(Tgt);
841
842 // Link successors.
843 MachineBasicBlock &CurBB = MIRBuilder.getMBB();
844 for (const BasicBlock *Succ : successors(&BrInst))
845 CurBB.addSuccessor(&getMBB(*Succ));
846
847 return true;
848}
849
850static bool isSwiftError(const Value *V) {
851 if (auto Arg = dyn_cast<Argument>(V))
852 return Arg->hasSwiftErrorAttr();
853 if (auto AI = dyn_cast<AllocaInst>(V))
854 return AI->isSwiftError();
855 return false;
856}
857
858bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
859 const LoadInst &LI = cast<LoadInst>(U);
860
861 auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
862 : MachineMemOperand::MONone;
863 Flags |= MachineMemOperand::MOLoad;
864
865 if (DL->getTypeStoreSize(LI.getType()) == 0)
866 return true;
867
868 ArrayRef<Register> Regs = getOrCreateVRegs(LI);
869 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
870 Register Base = getOrCreateVReg(*LI.getPointerOperand());
871
872 Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
873 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
874
875 if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
876    assert(Regs.size() == 1 && "swifterror should be single pointer");
877 Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
878 LI.getPointerOperand());
879 MIRBuilder.buildCopy(Regs[0], VReg);
880 return true;
881 }
882
883 const MDNode *Ranges =
884 Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
885 for (unsigned i = 0; i < Regs.size(); ++i) {
886 Register Addr;
887 MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
888
889 MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
890 unsigned BaseAlign = getMemOpAlignment(LI);
891 auto MMO = MF->getMachineMemOperand(
892 Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
893 MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), Ranges,
894 LI.getSyncScopeID(), LI.getOrdering());
895 MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
896 }
897
898 return true;
899}
900
901bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
902 const StoreInst &SI = cast<StoreInst>(U);
903 auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
904 : MachineMemOperand::MONone;
905 Flags |= MachineMemOperand::MOStore;
906
907 if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
908 return true;
909
910 ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
911 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
912 Register Base = getOrCreateVReg(*SI.getPointerOperand());
913
914 Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
915 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
916
917 if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
918    assert(Vals.size() == 1 && "swifterror should be single pointer");
919
920 Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
921 SI.getPointerOperand());
922 MIRBuilder.buildCopy(VReg, Vals[0]);
923 return true;
924 }
925
926 for (unsigned i = 0; i < Vals.size(); ++i) {
927 Register Addr;
928 MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
929
930 MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
931 unsigned BaseAlign = getMemOpAlignment(SI);
932 auto MMO = MF->getMachineMemOperand(
933 Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
934 MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
935 SI.getSyncScopeID(), SI.getOrdering());
936 MIRBuilder.buildStore(Vals[i], Addr, *MMO);
937 }
938 return true;
939}
940
941static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
942 const Value *Src = U.getOperand(0);
943 Type *Int32Ty = Type::getInt32Ty(U.getContext());
944
945 // getIndexedOffsetInType is designed for GEPs, so the first index is the
946 // usual array element rather than looking into the actual aggregate.
947 SmallVector<Value *, 1> Indices;
948 Indices.push_back(ConstantInt::get(Int32Ty, 0));
949
950 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
951 for (auto Idx : EVI->indices())
952 Indices.push_back(ConstantInt::get(Int32Ty, Idx));
953 } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
954 for (auto Idx : IVI->indices())
955 Indices.push_back(ConstantInt::get(Int32Ty, Idx));
956 } else {
957 for (unsigned i = 1; i < U.getNumOperands(); ++i)
958 Indices.push_back(U.getOperand(i));
959 }
960
961 return 8 * static_cast<uint64_t>(
962 DL.getIndexedOffsetInType(Src->getType(), Indices));
963}
964
965bool IRTranslator::translateExtractValue(const User &U,
966 MachineIRBuilder &MIRBuilder) {
967 const Value *Src = U.getOperand(0);
968 uint64_t Offset = getOffsetFromIndices(U, *DL);
969 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
970 ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
971 unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
972 auto &DstRegs = allocateVRegs(U);
973
974 for (unsigned i = 0; i < DstRegs.size(); ++i)
975 DstRegs[i] = SrcRegs[Idx++];
976
977 return true;
978}
979
980bool IRTranslator::translateInsertValue(const User &U,
981 MachineIRBuilder &MIRBuilder) {
982 const Value *Src = U.getOperand(0);
983 uint64_t Offset = getOffsetFromIndices(U, *DL);
984 auto &DstRegs = allocateVRegs(U);
985 ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
986 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
987 ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
988 auto InsertedIt = InsertedRegs.begin();
989
990 for (unsigned i = 0; i < DstRegs.size(); ++i) {
991 if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
992 DstRegs[i] = *InsertedIt++;
993 else
994 DstRegs[i] = SrcRegs[i];
995 }
996
997 return true;
998}
999
1000bool IRTranslator::translateSelect(const User &U,
1001 MachineIRBuilder &MIRBuilder) {
1002 Register Tst = getOrCreateVReg(*U.getOperand(0));
1003 ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
1004 ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
1005 ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
1006
1007 const SelectInst &SI = cast<SelectInst>(U);
1008 uint16_t Flags = 0;
1009 if (const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition()))
1010 Flags = MachineInstr::copyFlagsFromInstruction(*Cmp);
1011
1012 for (unsigned i = 0; i < ResRegs.size(); ++i) {
1013 MIRBuilder.buildInstr(TargetOpcode::G_SELECT, {ResRegs[i]},
1014 {Tst, Op0Regs[i], Op1Regs[i]}, Flags);
1015 }
1016
1017 return true;
1018}
1019
1020bool IRTranslator::translateBitCast(const User &U,
1021 MachineIRBuilder &MIRBuilder) {
1022 // If we're bitcasting to the source type, we can reuse the source vreg.
1023 if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
1024 getLLTForType(*U.getType(), *DL)) {
1025 Register SrcReg = getOrCreateVReg(*U.getOperand(0));
1026 auto &Regs = *VMap.getVRegs(U);
1027 // If we already assigned a vreg for this bitcast, we can't change that.
1028 // Emit a copy to satisfy the users we already emitted.
1029 if (!Regs.empty())
1030 MIRBuilder.buildCopy(Regs[0], SrcReg);
1031 else {
1032 Regs.push_back(SrcReg);
1033 VMap.getOffsets(U)->push_back(0);
1034 }
1035 return true;
1036 }
1037 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
1038}
1039
1040bool IRTranslator::translateCast(unsigned Opcode, const User &U,
1041 MachineIRBuilder &MIRBuilder) {
1042 Register Op = getOrCreateVReg(*U.getOperand(0));
1043 Register Res = getOrCreateVReg(U);
1044 MIRBuilder.buildInstr(Opcode, {Res}, {Op});
1045 return true;
1046}
1047
1048bool IRTranslator::translateGetElementPtr(const User &U,
1049 MachineIRBuilder &MIRBuilder) {
1050 // FIXME: support vector GEPs.
1051 if (U.getType()->isVectorTy())
1052 return false;
1053
1054 Value &Op0 = *U.getOperand(0);
1055 Register BaseReg = getOrCreateVReg(Op0);
1056 Type *PtrIRTy = Op0.getType();
1057 LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1058 Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
1059 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1060
1061 int64_t Offset = 0;
1062 for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
1063 GTI != E; ++GTI) {
1064 const Value *Idx = GTI.getOperand();
1065 if (StructType *StTy = GTI.getStructTypeOrNull()) {
1066 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
1067 Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
1068 continue;
1069 } else {
1070 uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
1071
1072 // If this is a scalar constant or a splat vector of constants,
1073 // handle it quickly.
1074 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
1075 Offset += ElementSize * CI->getSExtValue();
1076 continue;
1077 }
1078
1079 if (Offset != 0) {
1080 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1081 auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
1082 BaseReg =
1083 MIRBuilder.buildGEP(PtrTy, BaseReg, OffsetMIB.getReg(0)).getReg(0);
1084 Offset = 0;
1085 }
1086
1087 Register IdxReg = getOrCreateVReg(*Idx);
1088 if (MRI->getType(IdxReg) != OffsetTy)
1089 IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
1090
1091 // N = N + Idx * ElementSize;
1092 // Avoid doing it for ElementSize of 1.
1093 Register GepOffsetReg;
1094 if (ElementSize != 1) {
1095 auto ElementSizeMIB = MIRBuilder.buildConstant(
1096 getLLTForType(*OffsetIRTy, *DL), ElementSize);
1097 GepOffsetReg =
1098 MIRBuilder.buildMul(OffsetTy, ElementSizeMIB, IdxReg).getReg(0);
1099 } else
1100 GepOffsetReg = IdxReg;
1101
1102 BaseReg = MIRBuilder.buildGEP(PtrTy, BaseReg, GepOffsetReg).getReg(0);
1103 }
1104 }
1105
1106 if (Offset != 0) {
1107 auto OffsetMIB =
1108 MIRBuilder.buildConstant(getLLTForType(*OffsetIRTy, *DL), Offset);
1109 MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
1110 return true;
1111 }
1112
1113 MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
1114 return true;
1115}
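// Worked example (assumed layout, 64-bit pointers): in
//
//   %p = getelementptr { i32, [8 x i32] }, { i32, [8 x i32] }* %base,
//        i64 0, i32 1, i64 %i
//
// the constant indices fold into a single byte offset (field 1 starts at
// byte 4), so only the variable index costs a multiply, roughly:
//
//   %c4:_(s64)  = G_CONSTANT i64 4
//   %p1:_(p0)   = G_GEP %base, %c4(s64)
//   %sz:_(s64)  = G_CONSTANT i64 4
//   %off:_(s64) = G_MUL %sz, %i
//   %p2:_(p0)   = G_GEP %p1, %off(s64)
//
// and %p2 is then copied into the vreg assigned to %p.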
1116
1117bool IRTranslator::translateMemFunc(const CallInst &CI,
1118 MachineIRBuilder &MIRBuilder,
1119 Intrinsic::ID ID) {
1120
1121 // If the source is undef, then just emit a nop.
1122 if (isa<UndefValue>(CI.getArgOperand(1)))
1123 return true;
1124
1125 ArrayRef<Register> Res;
1126 auto ICall = MIRBuilder.buildIntrinsic(ID, Res, true);
1127 for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI)
1128 ICall.addUse(getOrCreateVReg(**AI));
1129
1130 unsigned DstAlign = 0, SrcAlign = 0;
1131 unsigned IsVol =
1132 cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1))
1133 ->getZExtValue();
1134
1135 if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
1136 DstAlign = std::max<unsigned>(MCI->getDestAlignment(), 1);
1137 SrcAlign = std::max<unsigned>(MCI->getSourceAlignment(), 1);
1138 } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
1139 DstAlign = std::max<unsigned>(MMI->getDestAlignment(), 1);
1140 SrcAlign = std::max<unsigned>(MMI->getSourceAlignment(), 1);
1141 } else {
1142 auto *MSI = cast<MemSetInst>(&CI);
1143 DstAlign = std::max<unsigned>(MSI->getDestAlignment(), 1);
1144 }
1145
1146 // We need to propagate the tail call flag from the IR inst as an argument.
1147 // Otherwise, we have to pessimize and assume later that we cannot tail call
1148 // any memory intrinsics.
1149 ICall.addImm(CI.isTailCall() ? 1 : 0);
1150
1151 // Create mem operands to store the alignment and volatile info.
1152 auto VolFlag = IsVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
1153 ICall.addMemOperand(MF->getMachineMemOperand(
1154 MachinePointerInfo(CI.getArgOperand(0)),
1155 MachineMemOperand::MOStore | VolFlag, 1, DstAlign));
1156 if (ID != Intrinsic::memset)
1157 ICall.addMemOperand(MF->getMachineMemOperand(
1158 MachinePointerInfo(CI.getArgOperand(1)),
1159 MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign));
1160
1161 return true;
1162}
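// Sketch of the result (operand rendering assumed, not verified output): a
// call such as
//
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i1 false)
//
// becomes a side-effecting generic intrinsic whose trailing volatile flag is
// dropped in favour of memory operands, with the tail-call bit appended as
// an immediate, roughly:
//
//   G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %dst, %src, %n, 0
//       :: (store 1 into %dst), (load 1 from %src)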
1163
1164void IRTranslator::getStackGuard(Register DstReg,
1165 MachineIRBuilder &MIRBuilder) {
1166 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1167 MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
1168 auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
1169 MIB.addDef(DstReg);
1170
1171 auto &TLI = *MF->getSubtarget().getTargetLowering();
1172 Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
1173 if (!Global)
1174 return;
1175
1176 MachinePointerInfo MPInfo(Global);
1177 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
1178 MachineMemOperand::MODereferenceable;
1179 MachineMemOperand *MemRef =
1180 MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
1181 DL->getPointerABIAlignment(0).value());
1182 MIB.setMemRefs({MemRef});
1183}
1184
1185bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
1186 MachineIRBuilder &MIRBuilder) {
1187 ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
1188 MIRBuilder.buildInstr(Op)
1189 .addDef(ResRegs[0])
1190 .addDef(ResRegs[1])
1191 .addUse(getOrCreateVReg(*CI.getOperand(0)))
1192 .addUse(getOrCreateVReg(*CI.getOperand(1)));
1193
1194 return true;
1195}
1196
1197unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
1198 switch (ID) {
1199 default:
1200 break;
1201 case Intrinsic::bswap:
1202 return TargetOpcode::G_BSWAP;
1203 case Intrinsic::bitreverse:
1204 return TargetOpcode::G_BITREVERSE;
1205 case Intrinsic::ceil:
1206 return TargetOpcode::G_FCEIL;
1207 case Intrinsic::cos:
1208 return TargetOpcode::G_FCOS;
1209 case Intrinsic::ctpop:
1210 return TargetOpcode::G_CTPOP;
1211 case Intrinsic::exp:
1212 return TargetOpcode::G_FEXP;
1213 case Intrinsic::exp2:
1214 return TargetOpcode::G_FEXP2;
1215 case Intrinsic::fabs:
1216 return TargetOpcode::G_FABS;
1217 case Intrinsic::copysign:
1218 return TargetOpcode::G_FCOPYSIGN;
1219 case Intrinsic::minnum:
1220 return TargetOpcode::G_FMINNUM;
1221 case Intrinsic::maxnum:
1222 return TargetOpcode::G_FMAXNUM;
1223 case Intrinsic::minimum:
1224 return TargetOpcode::G_FMINIMUM;
1225 case Intrinsic::maximum:
1226 return TargetOpcode::G_FMAXIMUM;
1227 case Intrinsic::canonicalize:
1228 return TargetOpcode::G_FCANONICALIZE;
1229 case Intrinsic::floor:
1230 return TargetOpcode::G_FFLOOR;
1231 case Intrinsic::fma:
1232 return TargetOpcode::G_FMA;
1233 case Intrinsic::log:
1234 return TargetOpcode::G_FLOG;
1235 case Intrinsic::log2:
1236 return TargetOpcode::G_FLOG2;
1237 case Intrinsic::log10:
1238 return TargetOpcode::G_FLOG10;
1239 case Intrinsic::nearbyint:
1240 return TargetOpcode::G_FNEARBYINT;
1241 case Intrinsic::pow:
1242 return TargetOpcode::G_FPOW;
1243 case Intrinsic::rint:
1244 return TargetOpcode::G_FRINT;
1245 case Intrinsic::round:
1246 return TargetOpcode::G_INTRINSIC_ROUND;
1247 case Intrinsic::sin:
1248 return TargetOpcode::G_FSIN;
1249 case Intrinsic::sqrt:
1250 return TargetOpcode::G_FSQRT;
1251 case Intrinsic::trunc:
1252 return TargetOpcode::G_INTRINSIC_TRUNC;
1253 }
1254 return Intrinsic::not_intrinsic;
1255}
1256
1257bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
1258 Intrinsic::ID ID,
1259 MachineIRBuilder &MIRBuilder) {
1260
1261 unsigned Op = getSimpleIntrinsicOpcode(ID);
1262
1263 // Is this a simple intrinsic?
1264 if (Op == Intrinsic::not_intrinsic)
1265 return false;
1266
1267 // Yes. Let's translate it.
1268 SmallVector<llvm::SrcOp, 4> VRegs;
1269 for (auto &Arg : CI.arg_operands())
1270 VRegs.push_back(getOrCreateVReg(*Arg));
1271
1272 MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
1273 MachineInstr::copyFlagsFromInstruction(CI));
1274 return true;
1275}
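// Example (assumed operands): a call like
//
//   %r = call nnan float @llvm.sqrt.f32(float %x)
//
// maps through getSimpleIntrinsicOpcode() to G_FSQRT and is emitted as
//
//   %r:_(s32) = nnan G_FSQRT %x
//
// with fast-math flags copied from the call instruction.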
1276
1277bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
1278 MachineIRBuilder &MIRBuilder) {
1279
1280 // If this is a simple intrinsic (that is, we just need to add a def of
1281 // a vreg, and uses for each arg operand), then translate it.
1282 if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
1283 return true;
1284
1285 switch (ID) {
1286 default:
1287 break;
1288 case Intrinsic::lifetime_start:
1289 case Intrinsic::lifetime_end: {
1290 // No stack colouring in O0; discard region information.
1291 if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
1292 return true;
1293
1294 unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
1295 : TargetOpcode::LIFETIME_END;
1296
1297 // Get the underlying objects for the location passed on the lifetime
1298 // marker.
1299 SmallVector<const Value *, 4> Allocas;
1300 GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);
1301
1302 // Iterate over each underlying object, creating lifetime markers for each
1303 // static alloca. Quit if we find a non-static alloca.
1304 for (const Value *V : Allocas) {
1305 const AllocaInst *AI = dyn_cast<AllocaInst>(V);
1306 if (!AI)
1307 continue;
1308
1309 if (!AI->isStaticAlloca())
1310 return true;
1311
1312 MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
1313 }
1314 return true;
1315 }
1316 case Intrinsic::dbg_declare: {
1317 const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
1318 assert(DI.getVariable() && "Missing variable");
1319
1320 const Value *Address = DI.getAddress();
1321 if (!Address || isa<UndefValue>(Address)) {
1322 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
1323 return true;
1324 }
1325
1326 assert(DI.getVariable()->isValidLocationForIntrinsic(
1327 MIRBuilder.getDebugLoc()) &&
1328 "Expected inlined-at fields to agree");
1329 auto AI = dyn_cast<AllocaInst>(Address);
1330 if (AI && AI->isStaticAlloca()) {
1331 // Static allocas are tracked at the MF level, no need for DBG_VALUE
1332 // instructions (in fact, they get ignored if they *do* exist).
1333 MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
1334 getOrCreateFrameIndex(*AI), DI.getDebugLoc());
1335 } else {
1336 // A dbg.declare describes the address of a source variable, so lower it
1337 // into an indirect DBG_VALUE.
1338 MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
1339 DI.getVariable(), DI.getExpression());
1340 }
1341 return true;
1342 }
1343 case Intrinsic::dbg_label: {
1344 const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
1345 assert(DI.getLabel() && "Missing label");
1346
1347 assert(DI.getLabel()->isValidLocationForIntrinsic(
1348 MIRBuilder.getDebugLoc()) &&
1349 "Expected inlined-at fields to agree");
1350
1351 MIRBuilder.buildDbgLabel(DI.getLabel());
1352 return true;
1353 }
1354 case Intrinsic::vaend:
1355 // No target I know of cares about va_end. Certainly no in-tree target
1356 // does. Simplest intrinsic ever!
1357 return true;
1358 case Intrinsic::vastart: {
1359 auto &TLI = *MF->getSubtarget().getTargetLowering();
1360 Value *Ptr = CI.getArgOperand(0);
1361 unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
1362
1363 // FIXME: Get alignment
1364 MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
1365 .addUse(getOrCreateVReg(*Ptr))
1366 .addMemOperand(MF->getMachineMemOperand(
1367 MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 1));
1368 return true;
1369 }
1370 case Intrinsic::dbg_value: {
1371 // This form of DBG_VALUE is target-independent.
1372 const DbgValueInst &DI = cast<DbgValueInst>(CI);
1373 const Value *V = DI.getValue();
1374 assert(DI.getVariable()->isValidLocationForIntrinsic(
1375 MIRBuilder.getDebugLoc()) &&
1376 "Expected inlined-at fields to agree");
1377 if (!V) {
1378 // Currently the optimizer can produce this; insert an undef to
1379 // help debugging. Probably the optimizer should not do this.
1380 MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
1381 } else if (const auto *CI = dyn_cast<Constant>(V)) {
1382 MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
1383 } else {
1384 for (Register Reg : getOrCreateVRegs(*V)) {
1385 // FIXME: This does not handle register-indirect values at offset 0. The
1386 // direct/indirect thing shouldn't really be handled by something as
1387 // implicit as reg+noreg vs reg+imm in the first place, but it seems
1388 // pretty baked in right now.
1389 MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
1390 }
1391 }
1392 return true;
1393 }
1394 case Intrinsic::uadd_with_overflow:
1395 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
1396 case Intrinsic::sadd_with_overflow:
1397 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
1398 case Intrinsic::usub_with_overflow:
1399 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
1400 case Intrinsic::ssub_with_overflow:
1401 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
1402 case Intrinsic::umul_with_overflow:
1403 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
1404 case Intrinsic::smul_with_overflow:
1405 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
1406 case Intrinsic::fmuladd: {
1407 const TargetMachine &TM = MF->getTarget();
1408 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
1409 Register Dst = getOrCreateVReg(CI);
1410 Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
1411 Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
1412 Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
1413 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
1414 TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
1415 // TODO: Revisit this to see if we should move this part of the
1416 // lowering to the combiner.
1417 MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2},
1418 MachineInstr::copyFlagsFromInstruction(CI));
1419 } else {
1420 LLT Ty = getLLTForType(*CI.getType(), *DL);
1421 auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1},
1422 MachineInstr::copyFlagsFromInstruction(CI));
1423 MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2},
1424 MachineInstr::copyFlagsFromInstruction(CI));
1425 }
1426 return true;
1427 }
1428 case Intrinsic::memcpy:
1429 case Intrinsic::memmove:
1430 case Intrinsic::memset:
1431 return translateMemFunc(CI, MIRBuilder, ID);
1432 case Intrinsic::eh_typeid_for: {
1433 GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
1434 Register Reg = getOrCreateVReg(CI);
1435 unsigned TypeID = MF->getTypeIDFor(GV);
1436 MIRBuilder.buildConstant(Reg, TypeID);
1437 return true;
1438 }
1439 case Intrinsic::objectsize: {
1440 // If we don't know by now, we're never going to know.
1441 const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));
1442
1443 MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
1444 return true;
1445 }
1446 case Intrinsic::is_constant:
1447 // If this wasn't constant-folded away by now, then it's not a
1448 // constant.
1449 MIRBuilder.buildConstant(getOrCreateVReg(CI), 0);
1450 return true;
1451 case Intrinsic::stackguard:
1452 getStackGuard(getOrCreateVReg(CI), MIRBuilder);
1453 return true;
1454 case Intrinsic::stackprotector: {
1455 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
1456 Register GuardVal = MRI->createGenericVirtualRegister(PtrTy);
1457 getStackGuard(GuardVal, MIRBuilder);
1458
1459 AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
1460 int FI = getOrCreateFrameIndex(*Slot);
1461 MF->getFrameInfo().setStackProtectorIndex(FI);
1462
1463 MIRBuilder.buildStore(
1464 GuardVal, getOrCreateVReg(*Slot),
1465 *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
1466 MachineMemOperand::MOStore |
1467 MachineMemOperand::MOVolatile,
1468 PtrTy.getSizeInBits() / 8, 8));
1469 return true;
1470 }
1471 case Intrinsic::stacksave: {
1472 // Save the stack pointer to the location provided by the intrinsic.
1473 Register Reg = getOrCreateVReg(CI);
1474 Register StackPtr = MF->getSubtarget()
1475 .getTargetLowering()
1476 ->getStackPointerRegisterToSaveRestore();
1477
1478 // If the target doesn't specify a stack pointer, then fall back.
1479 if (!StackPtr)
1480 return false;
1481
1482 MIRBuilder.buildCopy(Reg, StackPtr);
1483 return true;
1484 }
1485 case Intrinsic::stackrestore: {
1486 // Restore the stack pointer from the location provided by the intrinsic.
1487 Register Reg = getOrCreateVReg(*CI.getArgOperand(0));
1488 Register StackPtr = MF->getSubtarget()
1489 .getTargetLowering()
1490 ->getStackPointerRegisterToSaveRestore();
1491
1492 // If the target doesn't specify a stack pointer, then fall back.
1493 if (!StackPtr)
1494 return false;
1495
1496 MIRBuilder.buildCopy(StackPtr, Reg);
1497 return true;
1498 }
1499 case Intrinsic::cttz:
1500 case Intrinsic::ctlz: {
1501 ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
1502 bool isTrailing = ID == Intrinsic::cttz;
1503 unsigned Opcode = isTrailing
1504 ? Cst->isZero() ? TargetOpcode::G_CTTZ
1505 : TargetOpcode::G_CTTZ_ZERO_UNDEF
1506 : Cst->isZero() ? TargetOpcode::G_CTLZ
1507 : TargetOpcode::G_CTLZ_ZERO_UNDEF;
1508 MIRBuilder.buildInstr(Opcode)
1509 .addDef(getOrCreateVReg(CI))
1510 .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
1511 return true;
1512 }
1513 case Intrinsic::invariant_start: {
1514 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
1515 Register Undef = MRI->createGenericVirtualRegister(PtrTy);
1516 MIRBuilder.buildUndef(Undef);
1517 return true;
1518 }
1519 case Intrinsic::invariant_end:
1520 return true;
1521 case Intrinsic::assume:
1522 case Intrinsic::var_annotation:
1523 case Intrinsic::sideeffect:
1524 // Discard annotate attributes, assumptions, and artificial side-effects.
1525 return true;
1526 }
1527 return false;
1528}
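// Example for the cttz/ctlz case above (assumed values): in
//
//   %r = call i32 @llvm.cttz.i32(i32 %x, i1 true)
//
// the "is zero undef" operand is nonzero, so the translation picks
// G_CTTZ_ZERO_UNDEF; with the flag at zero it would pick the fully defined
// G_CTTZ instead. The same scheme applies to ctlz.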
1529
1530bool IRTranslator::translateInlineAsm(const CallInst &CI,
1531 MachineIRBuilder &MIRBuilder) {
1532 const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
1533 if (!IA.getConstraintString().empty())
1534 return false;
1535
1536 unsigned ExtraInfo = 0;
1537 if (IA.hasSideEffects())
1538 ExtraInfo |= InlineAsm::Extra_HasSideEffects;
1539 if (IA.getDialect() == InlineAsm::AD_Intel)
1540 ExtraInfo |= InlineAsm::Extra_AsmDialect;
1541
1542 MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
1543 .addExternalSymbol(IA.getAsmString().c_str())
1544 .addImm(ExtraInfo);
1545
1546 return true;
1547}
1548
1549bool IRTranslator::translateCallSite(const ImmutableCallSite &CS,
1550 MachineIRBuilder &MIRBuilder) {
1551 const Instruction &I = *CS.getInstruction();
1552 ArrayRef<Register> Res = getOrCreateVRegs(I);
1553
1554 SmallVector<ArrayRef<Register>, 8> Args;
1555 Register SwiftInVReg = 0;
1556 Register SwiftErrorVReg = 0;
1557 for (auto &Arg : CS.args()) {
1558 if (CLI->supportSwiftError() && isSwiftError(Arg)) {
1559 assert(SwiftInVReg == 0 && "Expected only one swift error argument");
1560 LLT Ty = getLLTForType(*Arg->getType(), *DL);
1561 SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
1562 MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
1563 &I, &MIRBuilder.getMBB(), Arg));
1564 Args.emplace_back(makeArrayRef(SwiftInVReg));
1565 SwiftErrorVReg =
1566 SwiftError.getOrCreateVRegDefAt(&I, &MIRBuilder.getMBB(), Arg);
1567 continue;
1568 }
1569 Args.push_back(getOrCreateVRegs(*Arg));
1570 }
1571
1572 // We don't set HasCalls on MFI here yet because call lowering may decide to
1573 // optimize into tail calls. Instead, we defer that to selection where a final
1574 // scan is done to check if any instructions are calls.
1575 bool Success =
1576 CLI->lowerCall(MIRBuilder, CS, Res, Args, SwiftErrorVReg,
1577 [&]() { return getOrCreateVReg(*CS.getCalledValue()); });
1578
1579 // Check if we just inserted a tail call.
1580 if (Success) {
1581 assert(!HasTailCall && "Can't tail call return twice from block?");
1582 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1583 HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
1584 }
1585
1586 return Success;
1587}
1588
1589bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
1590 const CallInst &CI = cast<CallInst>(U);
1591 auto TII = MF->getTarget().getIntrinsicInfo();
1592 const Function *F = CI.getCalledFunction();
1593
1594 // FIXME: support Windows dllimport function calls.
1595 if (F && F->hasDLLImportStorageClass())
1596 return false;
1597
1598 if (CI.isInlineAsm())
1599 return translateInlineAsm(CI, MIRBuilder);
1600
1601 Intrinsic::ID ID = Intrinsic::not_intrinsic;
1602 if (F && F->isIntrinsic()) {
1603 ID = F->getIntrinsicID();
1604 if (TII && ID == Intrinsic::not_intrinsic)
1605 ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
1606 }
1607
1608 if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
1609 return translateCallSite(&CI, MIRBuilder);
1610
1611 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
1612
1613 if (translateKnownIntrinsic(CI, ID, MIRBuilder))
1614 return true;
1615
1616 ArrayRef<Register> ResultRegs;
1617 if (!CI.getType()->isVoidTy())
1618 ResultRegs = getOrCreateVRegs(CI);
1619
1620 // Ignore the callsite attributes. Backend code is most likely not expecting
1621 // an intrinsic to sometimes have side effects and sometimes not.
1622 MachineInstrBuilder MIB =
1623 MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory());
1624 if (isa<FPMathOperator>(CI))
1625 MIB->copyIRFlags(CI);
1626
1627 for (auto &Arg : enumerate(CI.arg_operands())) {
1628 // Some intrinsics take metadata parameters. Reject them.
1629 if (isa<MetadataAsValue>(Arg.value()))
1630 return false;
1631
1632 // If this is required to be an immediate, don't materialize it in a
1633 // register.
1634 if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
1635 if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
1636 // imm arguments are more convenient than cimm (and realistically
1637 // probably sufficient), so use them.
1638 assert(CI->getBitWidth() <= 64 &&
1639 "large intrinsic immediates not handled");
1640 MIB.addImm(CI->getSExtValue());
1641 } else {
1642 MIB.addFPImm(cast<ConstantFP>(Arg.value()));
1643 }
1644 } else {
1645 ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
1646 if (VRegs.size() > 1)
1647 return false;
1648 MIB.addUse(VRegs[0]);
1649 }
1650 }
1651
1652 // Add a MachineMemOperand if it is a target mem intrinsic.
1653 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
1654 TargetLowering::IntrinsicInfo Info;
1655 // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
1656 if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
1657 MaybeAlign Align = Info.align;
1658 if (!Align)
1659 Align = MaybeAlign(
1660 DL->getABITypeAlignment(Info.memVT.getTypeForEVT(F->getContext())));
1661
1662 uint64_t Size = Info.memVT.getStoreSize();
1663 MIB.addMemOperand(MF->getMachineMemOperand(
1664 MachinePointerInfo(Info.ptrVal), Info.flags, Size, Align->value()));
1665 }
1666
1667 return true;
1668}
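// Example of the ImmArg path above (assuming the usual immarg attributes on
// this intrinsic): in
//
//   call void @llvm.prefetch(i8* %p, i32 0, i32 3, i32 1)
//
// the integer operands are required to be immediates, so they are added via
// addImm() rather than materialized into vregs; only %p becomes a register
// use.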
1669
1670bool IRTranslator::translateInvoke(const User &U,
1671 MachineIRBuilder &MIRBuilder) {
1672 const InvokeInst &I = cast<InvokeInst>(U);
1673 MCContext &Context = MF->getContext();
1674
1675 const BasicBlock *ReturnBB = I.getSuccessor(0);
1676 const BasicBlock *EHPadBB = I.getSuccessor(1);
1677
1678 const Value *Callee = I.getCalledValue();
1679 const Function *Fn = dyn_cast<Function>(Callee);
1680 if (isa<InlineAsm>(Callee))
1681 return false;
1682
1683 // FIXME: support invoking patchpoint and statepoint intrinsics.
1684 if (Fn && Fn->isIntrinsic())
1685 return false;
1686
1687 // FIXME: support whatever these are.
1688 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
1689 return false;
1690
1691 // FIXME: support Windows exception handling.
1692 if (!isa<LandingPadInst>(EHPadBB->front()))
1693 return false;
1694
1695 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
1696 // the region covered by the try.
1697 MCSymbol *BeginSymbol = Context.createTempSymbol();
1698 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
1699
1700 if (!translateCallSite(&I, MIRBuilder))
1701 return false;
1702
1703 MCSymbol *EndSymbol = Context.createTempSymbol();
1704 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
1705
1706 // FIXME: track probabilities.
1707 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
1708 &ReturnMBB = getMBB(*ReturnBB);
1709 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
1710 MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
1711 MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
1712 MIRBuilder.buildBr(ReturnMBB);
1713
1714 return true;
1715}
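// Sketch of the emitted shape (symbol names assumed):
//
//   EH_LABEL <mcsymbol .Ltmp0>
//   ... lowered call ...
//   EH_LABEL <mcsymbol .Ltmp1>
//   G_BR %bb.ret
//
// with both the normal return block and the landing pad recorded as
// successors, and the try range [.Ltmp0, .Ltmp1) registered via addInvoke().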
1716
1717bool IRTranslator::translateCallBr(const User &U,
1718 MachineIRBuilder &MIRBuilder) {
1719 // FIXME: Implement this.
1720 return false;
1721}
1722
1723bool IRTranslator::translateLandingPad(const User &U,
1724 MachineIRBuilder &MIRBuilder) {
1725 const LandingPadInst &LP = cast<LandingPadInst>(U);
1726
1727 MachineBasicBlock &MBB = MIRBuilder.getMBB();
1728
1729 MBB.setIsEHPad();
1730
1731 // If there aren't registers to copy the values into (e.g., during SjLj
1732 // exceptions), then don't bother.
1733 auto &TLI = *MF->getSubtarget().getTargetLowering();
1734 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
1735 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
1736 TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
1737 return true;
1738
1739 // If landingpad's return type is token type, we don't create DAG nodes
1740 // for its exception pointer and selector value. The extraction of exception
1741 // pointer or selector value from token type landingpads is not currently
1742 // supported.
1743 if (LP.getType()->isTokenTy())
1744 return true;
1745
1746 // Add a label to mark the beginning of the landing pad. Deletion of the
1747 // landing pad can thus be detected via the MachineModuleInfo.
1748 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
1749 .addSym(MF->addLandingPad(&MBB));
1750
1751 LLT Ty = getLLTForType(*LP.getType(), *DL);
1752 Register Undef = MRI->createGenericVirtualRegister(Ty);
1753 MIRBuilder.buildUndef(Undef);
1754
1755 SmallVector<LLT, 2> Tys;
1756 for (Type *Ty : cast<StructType>(LP.getType())->elements())
1757 Tys.push_back(getLLTForType(*Ty, *DL));
1758 assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
1759
1760 // Mark exception register as live in.
1761 Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
1762 if (!ExceptionReg)
1763 return false;
1764
1765 MBB.addLiveIn(ExceptionReg);
1766 ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
1767 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
1768
1769 Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
1770 if (!SelectorReg)
1771 return false;
1772
1773 MBB.addLiveIn(SelectorReg);
1774 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
1775 MIRBuilder.buildCopy(PtrVReg, SelectorReg);
1776 MIRBuilder.buildCast(ResRegs[1], PtrVReg);
1777
1778 return true;
1779}
1780
1781bool IRTranslator::translateAlloca(const User &U,
1782 MachineIRBuilder &MIRBuilder) {
1783 auto &AI = cast<AllocaInst>(U);
1784
1785 if (AI.isSwiftError())
1786 return true;
1787
1788 if (AI.isStaticAlloca()) {
1789 Register Res = getOrCreateVReg(AI);
1790 int FI = getOrCreateFrameIndex(AI);
1791 MIRBuilder.buildFrameIndex(Res, FI);
1792 return true;
1793 }
1794
1795 // FIXME: support stack probing for Windows.
1796 if (MF->getTarget().getTargetTriple().isOSWindows())
1797 return false;
1798
1799 // Now we're in the harder dynamic case.
1800 Type *Ty = AI.getAllocatedType();
1801 unsigned Align =
1802 std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());
1803
1804 Register NumElts = getOrCreateVReg(*AI.getArraySize());
1805
1806 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
1807 LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
1808 if (MRI->getType(NumElts) != IntPtrTy) {
1809 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
1810 MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
1811 NumElts = ExtElts;
1812 }
1813
1814 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
1815 Register TySize =
1816 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
1817 MIRBuilder.buildMul(AllocSize, NumElts, TySize);
1818
1819 unsigned StackAlign =
1820 MF->getSubtarget().getFrameLowering()->getStackAlignment();
1821 if (Align <= StackAlign)
1822 Align = 0;
1823
1824 // Round the size of the allocation up to the stack alignment size
1825 // by adding SA-1 to the size. This doesn't overflow because we're computing
1826 // an address inside an alloca.
1827 auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign - 1);
1828 auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
1829 MachineInstr::NoUWrap);
1830 auto AlignCst =
1831 MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign - 1));
1832 auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
1833
1834 MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Align);
1835
1836 MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
1837 assert(MF->getFrameInfo().hasVarSizedObjects());
1838 return true;
1839}
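// The size rounding above is the standard align-up idiom. A minimal
// standalone sketch (illustration only, not used by the translator),
// assuming StackAlign is a power of two:
static inline uint64_t alignUpExample(uint64_t Size, uint64_t StackAlign) {
  // Adding StackAlign - 1 and masking off the low bits rounds Size up to
  // the next multiple of StackAlign, e.g. alignUpExample(13, 16) == 16.
  return (Size + StackAlign - 1) & ~(StackAlign - 1);
}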
1840
1841bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
1842 // FIXME: We may need more info about the type. Because of how LLT works,
1843 // we're completely discarding the i64/double distinction here (amongst
1844 // others). Fortunately the ABIs I know of where that matters don't use va_arg
1845 // anyway but that's not guaranteed.
1846 MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
1847 .addDef(getOrCreateVReg(U))
1848 .addUse(getOrCreateVReg(*U.getOperand(0)))
1849 .addImm(DL->getABITypeAlignment(U.getType()));
1850 return true;
1851}
1852
1853bool IRTranslator::translateInsertElement(const User &U,
1854 MachineIRBuilder &MIRBuilder) {
1855 // If it is a <1 x Ty> vector, use the scalar as it is
1856 // not a legal vector type in LLT.
1857 if (U.getType()->getVectorNumElements() == 1) {
1858 Register Elt = getOrCreateVReg(*U.getOperand(1));
1859 auto &Regs = *VMap.getVRegs(U);
1860 if (Regs.empty()) {
1861 Regs.push_back(Elt);
1862 VMap.getOffsets(U)->push_back(0);
1863 } else {
1864 MIRBuilder.buildCopy(Regs[0], Elt);
1865 }
1866 return true;
1867 }
1868
1869 Register Res = getOrCreateVReg(U);
1870 Register Val = getOrCreateVReg(*U.getOperand(0));
1871 Register Elt = getOrCreateVReg(*U.getOperand(1));
1872 Register Idx = getOrCreateVReg(*U.getOperand(2));
1873 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
1874 return true;
1875}
1876
1877bool IRTranslator::translateExtractElement(const User &U,
1878 MachineIRBuilder &MIRBuilder) {
1879 // If it is a <1 x Ty> vector, use the scalar as it is
1880 // not a legal vector type in LLT.
1881 if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
1882 Register Elt = getOrCreateVReg(*U.getOperand(0));
1883 auto &Regs = *VMap.getVRegs(U);
1884 if (Regs.empty()) {
1885 Regs.push_back(Elt);
1886 VMap.getOffsets(U)->push_back(0);
1887 } else {
1888 MIRBuilder.buildCopy(Regs[0], Elt);
1889 }
1890 return true;
1891 }
1892 Register Res = getOrCreateVReg(U);
1893 Register Val = getOrCreateVReg(*U.getOperand(0));
1894 const auto &TLI = *MF->getSubtarget().getTargetLowering();
1895 unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
1896 Register Idx;
1897 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
1898 if (CI->getBitWidth() != PreferredVecIdxWidth) {
1899 APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
1900 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
1901 Idx = getOrCreateVReg(*NewIdxCI);
1902 }
1903 }
1904 if (!Idx)
1905 Idx = getOrCreateVReg(*U.getOperand(1));
1906 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
1907 const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
1908 Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
1909 }
1910 MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
1911 return true;
1912}
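// Example (assumed widths): with a preferred vector-index width of 64 bits,
//
//   %e = extractelement <4 x i32> %v, i16 %i
//
// canonicalizes the index before extracting, roughly:
//
//   %idx:_(s64) = G_SEXT %i(s16)
//   %e:_(s32) = G_EXTRACT_VECTOR_ELT %v(<4 x s32>), %idx(s64)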
1913
1914bool IRTranslator::translateShuffleVector(const User &U,
1915 MachineIRBuilder &MIRBuilder) {
1916 MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
1917 .addDef(getOrCreateVReg(U))
1918 .addUse(getOrCreateVReg(*U.getOperand(0)))
1919 .addUse(getOrCreateVReg(*U.getOperand(1)))
1920 .addShuffleMask(cast<Constant>(U.getOperand(2)));
1921 return true;
1922}
1923
1924bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
1925 const PHINode &PI = cast<PHINode>(U);
1926
1927 SmallVector<MachineInstr *, 4> Insts;
1928 for (auto Reg : getOrCreateVRegs(PI)) {
1929 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
1930 Insts.push_back(MIB.getInstr());
1931 }
1932
1933 PendingPHIs.emplace_back(&PI, std::move(Insts));
1934 return true;
1935}
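// Sketch: each leaf vreg of the PHI first gets an empty placeholder,
//
//   %r:_(s32) = G_PHI
//
// and finishPendingPhis() appends the (value vreg, predecessor MBB) operand
// pairs later, once every predecessor block has been translated and its
// values exist.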
1936
1937bool IRTranslator::translateAtomicCmpXchg(const User &U,
1938 MachineIRBuilder &MIRBuilder) {
1939 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
1940
1941 if (I.isWeak())
1942 return false;
1943
1944 auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
1945 : MachineMemOperand::MONone;
1946 Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
1947
1948 Type *ResType = I.getType();
1949 Type *ValType = ResType->Type::getStructElementType(0);
1950
1951 auto Res = getOrCreateVRegs(I);
1952 Register OldValRes = Res[0];
1953 Register SuccessRes = Res[1];
1954 Register Addr = getOrCreateVReg(*I.getPointerOperand());
1955 Register Cmp = getOrCreateVReg(*I.getCompareOperand());
1956 Register NewVal = getOrCreateVReg(*I.getNewValOperand());
1957
1958 MIRBuilder.buildAtomicCmpXchgWithSuccess(
1959 OldValRes, SuccessRes, Addr, Cmp, NewVal,
1960 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
1961 Flags, DL->getTypeStoreSize(ValType),
1962 getMemOpAlignment(I), AAMDNodes(), nullptr,
1963 I.getSyncScopeID(), I.getSuccessOrdering(),
1964 I.getFailureOrdering()));
1965 return true;
1966}
1967
1968bool IRTranslator::translateAtomicRMW(const User &U,
1969 MachineIRBuilder &MIRBuilder) {
1970 const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
1971
1972 auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
1973 : MachineMemOperand::MONone;
1974 Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
1975
1976 Type *ResType = I.getType();
1977
1978 Register Res = getOrCreateVReg(I);
1979 Register Addr = getOrCreateVReg(*I.getPointerOperand());
1980 Register Val = getOrCreateVReg(*I.getValOperand());
1981
1982 unsigned Opcode = 0;
1983 switch (I.getOperation()) {
1984 default:
1985 return false;
1986 case AtomicRMWInst::Xchg:
1987 Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
1988 break;
1989 case AtomicRMWInst::Add:
1990 Opcode = TargetOpcode::G_ATOMICRMW_ADD;
1991 break;
1992 case AtomicRMWInst::Sub:
1993 Opcode = TargetOpcode::G_ATOMICRMW_SUB;
1994 break;
1995 case AtomicRMWInst::And:
1996 Opcode = TargetOpcode::G_ATOMICRMW_AND;
1997 break;
1998 case AtomicRMWInst::Nand:
1999 Opcode = TargetOpcode::G_ATOMICRMW_NAND;
2000 break;
2001 case AtomicRMWInst::Or:
2002 Opcode = TargetOpcode::G_ATOMICRMW_OR;
2003 break;
2004 case AtomicRMWInst::Xor:
2005 Opcode = TargetOpcode::G_ATOMICRMW_XOR;
2006 break;
2007 case AtomicRMWInst::Max:
2008 Opcode = TargetOpcode::G_ATOMICRMW_MAX;
2009 break;
2010 case AtomicRMWInst::Min:
2011 Opcode = TargetOpcode::G_ATOMICRMW_MIN;
2012 break;
2013 case AtomicRMWInst::UMax:
2014 Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
2015 break;
2016 case AtomicRMWInst::UMin:
2017 Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
2018 break;
2019 case AtomicRMWInst::FAdd:
2020 Opcode = TargetOpcode::G_ATOMICRMW_FADD;
2021 break;
2022 case AtomicRMWInst::FSub:
2023 Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
2024 break;
2025 }
2026
2027 MIRBuilder.buildAtomicRMW(
2028 Opcode, Res, Addr, Val,
2029 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
2030 Flags, DL->getTypeStoreSize(ResType),
2031 getMemOpAlignment(I), AAMDNodes(), nullptr,
2032 I.getSyncScopeID(), I.getOrdering()));
2033 return true;
2034}
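// Example (assumed types):
//
//   %old = atomicrmw add i32* %p, i32 %v seq_cst
//
// maps to the G_ATOMICRMW_ADD case above and is emitted, roughly, as
//
//   %old:_(s32) = G_ATOMICRMW_ADD %p(p0), %v(s32)
//       :: (load store seq_cst 4 on %p)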
2035
2036bool IRTranslator::translateFence(const User &U,
2037 MachineIRBuilder &MIRBuilder) {
2038 const FenceInst &Fence = cast<FenceInst>(U);
2039 MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
2040 Fence.getSyncScopeID());
2041 return true;
2042}
2043
2044void IRTranslator::finishPendingPhis() {
2045#ifndef NDEBUG
2046 DILocationVerifier Verifier;
2047 GISelObserverWrapper WrapperObserver(&Verifier);
2048 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
2049#endif // ifndef NDEBUG
2050 for (auto &Phi : PendingPHIs) {
2051 const PHINode *PI = Phi.first;
2052 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
2053 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
2054 EntryBuilder->setDebugLoc(PI->getDebugLoc());
2055#ifndef NDEBUG
2056 Verifier.setCurrentInst(PI);
2057#endif // ifndef NDEBUG
2058
2059 SmallSet<const MachineBasicBlock *, 16> SeenPreds;
2060 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
2061 auto IRPred = PI->getIncomingBlock(i);
2062 ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
2063 for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
2064 if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
2065 continue;
2066 SeenPreds.insert(Pred);
2067 for (unsigned j = 0; j < ValRegs.size(); ++j) {
2068 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
2069 MIB.addUse(ValRegs[j]);
2070 MIB.addMBB(Pred);
2071 }
2072 }
2073 }
2074 }
2075}
2076
2077bool IRTranslator::valueIsSplit(const Value &V,
2078 SmallVectorImpl<uint64_t> *Offsets) {
2079 SmallVector<LLT, 4> SplitTys;
2080 if (Offsets && !Offsets->empty())
2081 Offsets->clear();
2082 computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
2083 return SplitTys.size() > 1;
2084}
2085
2086bool IRTranslator::translate(const Instruction &Inst) {
2087 CurBuilder->setDebugLoc(Inst.getDebugLoc());
2088 // We only emit constants into the entry block from here. To prevent jumpy
2089 // debug behaviour, set the line to 0.
2090 if (const DebugLoc &DL = Inst.getDebugLoc())
15. Taking false branch
2091 EntryBuilder->setDebugLoc(
2092 DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt()));
2093 else
2094 EntryBuilder->setDebugLoc(DebugLoc());
2095
2096 switch (Inst.getOpcode()) {
16. Control jumps to 'case ICmp:' at line 206
2097#define HANDLE_INST(NUM, OPCODE, CLASS) \
2098 case Instruction::OPCODE: \
2099 return translate##OPCODE(Inst, *CurBuilder.get());
2100#include "llvm/IR/Instruction.def"
2101 default:
2102 return false;
2103 }
2104}
2105
2106bool IRTranslator::translate(const Constant &C, Register Reg) {
2107 if (auto CI = dyn_cast<ConstantInt>(&C))
2108 EntryBuilder->buildConstant(Reg, *CI);
2109 else if (auto CF = dyn_cast<ConstantFP>(&C))
2110 EntryBuilder->buildFConstant(Reg, *CF);
2111 else if (isa<UndefValue>(C))
2112 EntryBuilder->buildUndef(Reg);
2113 else if (isa<ConstantPointerNull>(C)) {
2114 // As we are trying to build a constant val of 0 into a pointer,
2115 // insert a cast to make them correct with respect to types.
2116 unsigned NullSize = DL->getTypeSizeInBits(C.getType());
2117 auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
2118 auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
2119 Register ZeroReg = getOrCreateVReg(*ZeroVal);
2120 EntryBuilder->buildCast(Reg, ZeroReg);
2121 } else if (auto GV = dyn_cast<GlobalValue>(&C))
2122 EntryBuilder->buildGlobalValue(Reg, GV);
2123 else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
2124 if (!CAZ->getType()->isVectorTy())
2125 return false;
2126 // Return the scalar if it is a <1 x Ty> vector.
2127 if (CAZ->getNumElements() == 1)
2128 return translate(*CAZ->getElementValue(0u), Reg);
2129 SmallVector<Register, 4> Ops;
2130 for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
2131 Constant &Elt = *CAZ->getElementValue(i);
2132 Ops.push_back(getOrCreateVReg(Elt));
2133 }
2134 EntryBuilder->buildBuildVector(Reg, Ops);
2135 } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
2136 // Return the scalar if it is a <1 x Ty> vector.
2137 if (CV->getNumElements() == 1)
2138 return translate(*CV->getElementAsConstant(0), Reg);
2139 SmallVector<Register, 4> Ops;
2140 for (unsigned i = 0; i < CV->getNumElements(); ++i) {
2141 Constant &Elt = *CV->getElementAsConstant(i);
2142 Ops.push_back(getOrCreateVReg(Elt));
2143 }
2144 EntryBuilder->buildBuildVector(Reg, Ops);
2145 } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
2146 switch(CE->getOpcode()) {
2147#define HANDLE_INST(NUM, OPCODE, CLASS) \
2148 case Instruction::OPCODE: \
2149 return translate##OPCODE(*CE, *EntryBuilder.get());
2150#include "llvm/IR/Instruction.def"
2151 default:
2152 return false;
2153 }
2154 } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
2155 if (CV->getNumOperands() == 1)
2156 return translate(*CV->getOperand(0), Reg);
2157 SmallVector<Register, 4> Ops;
2158 for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
2159 Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
2160 }
2161 EntryBuilder->buildBuildVector(Reg, Ops);
2162 } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
2163 EntryBuilder->buildBlockAddress(Reg, BA);
2164 } else
2165 return false;
2166
2167 return true;
2168}
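// Example for the ConstantPointerNull case above (assumed 64-bit target):
// translating i8* null builds an integer zero of pointer width and casts it,
// roughly:
//
//   %z:_(s64) = G_CONSTANT i64 0
//   %null:_(p0) = G_INTTOPTR %z(s64)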
2169
2170void IRTranslator::finalizeBasicBlock() {
2171 for (auto &JTCase : SL->JTCases) {
2172 // Emit header first, if it wasn't already emitted.
2173 if (!JTCase.first.Emitted)
2174 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
2175
2176 emitJumpTable(JTCase.second, JTCase.second.MBB);
2177 }
2178 SL->JTCases.clear();
2179}
2180
2181void IRTranslator::finalizeFunction() {
2182 // Release the memory used by the different maps we
2183 // needed during the translation.
2184 PendingPHIs.clear();
2185 VMap.reset();
2186 FrameIndices.clear();
2187 MachinePreds.clear();
2188 // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
2189 // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid
2190 // destroying it twice (in ~IRTranslator() and ~LLVMContext())
2191 EntryBuilder.reset();
2192 CurBuilder.reset();
2193 FuncInfo.clear();
2194}
2195
2196/// Returns true if a BasicBlock \p BB within a variadic function contains a
2197/// variadic musttail call.
2198static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
2199 if (!IsVarArg)
2200 return false;
2201
2202 // Walk the block backwards, because tail calls usually only appear at the end
2203 // of a block.
2204 return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) {
2205 const auto *CI = dyn_cast<CallInst>(&I);
2206 return CI && CI->isMustTailCall();
2207 });
2208}
2209
2210bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
2211 MF = &CurMF;
2212 const Function &F = MF->getFunction();
2213 if (F.empty())
1. Assuming the condition is false
2. Taking false branch
2214 return false;
2215 GISelCSEAnalysisWrapper &Wrapper =
2216 getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
2217 // Set the CSEConfig and run the analysis.
2218 GISelCSEInfo *CSEInfo = nullptr;
2219 TPC = &getAnalysis<TargetPassConfig>();
2220 bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
3. Assuming the condition is false
4. '?' condition is false
2221 ? EnableCSEInIRTranslator
2222 : TPC->isGISelCSEEnabled();
2223
2224 if (EnableCSE) {
5. Assuming 'EnableCSE' is false
6. Taking false branch
2225 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
2226 CSEInfo = &Wrapper.get(TPC->getCSEConfig());
2227 EntryBuilder->setCSEInfo(CSEInfo);
2228 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
2229 CurBuilder->setCSEInfo(CSEInfo);
2230 } else {
2231 EntryBuilder = std::make_unique<MachineIRBuilder>();
2232 CurBuilder = std::make_unique<MachineIRBuilder>();
2233 }
2234 CLI = MF->getSubtarget().getCallLowering();
2235 CurBuilder->setMF(*MF);
2236 EntryBuilder->setMF(*MF);
2237 MRI = &MF->getRegInfo();
2238 DL = &F.getParent()->getDataLayout();
2239 ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
2240 FuncInfo.MF = MF;
2241 FuncInfo.BPI = nullptr;
2242 const auto &TLI = *MF->getSubtarget().getTargetLowering();
2243 const TargetMachine &TM = MF->getTarget();
2244 SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
2245 SL->init(TLI, TM, *DL);
2246
2247 EnableOpts = TM.getOptLevel() != CodeGenOpt::None && !skipFunction(F);
7. Assuming the condition is false
2248
2249 assert(PendingPHIs.empty() && "stale PHIs");
8. '?' condition is true
2250
2251 if (!DL->isLittleEndian()) {
9. Taking true branch
2252 // Currently we don't properly handle big endian code.
2253 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2254 F.getSubprogram(), &F.getEntryBlock());
2255 R << "unable to translate in big endian mode";
2256 reportTranslationError(*MF, *TPC, *ORE, R);
2257 }
2258
2259 // Release the per-function state when we return, whether we succeeded or not.
2260 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
2261
2262 // Setup a separate basic-block for the arguments and constants
2263 MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
2264 MF->push_back(EntryBB);
2265 EntryBuilder->setMBB(*EntryBB);
2266
2267 DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
2268 SwiftError.setFunction(CurMF);
2269 SwiftError.createEntriesInEntryBlock(DbgLoc);
2270
2271 bool IsVarArg = F.isVarArg();
2272 bool HasMustTailInVarArgFn = false;
2273
2274 // Create all blocks, in IR order, to preserve the layout.
2275 for (const BasicBlock &BB: F) {
2276 auto *&MBB = BBToMBB[&BB];
2277
2278 MBB = MF->CreateMachineBasicBlock(&BB);
2279 MF->push_back(MBB);
2280
2281 if (BB.hasAddressTaken())
2282 MBB->setHasAddressTaken();
2283
2284 if (!HasMustTailInVarArgFn)
2285 HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
2286 }
2287
2288 MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
2289
2290 // Make our arguments/constants entry block fall through to the IR entry block.
2291 EntryBB->addSuccessor(&getMBB(F.front()));
2292
2293 // Lower the actual args into this basic block.
2294 SmallVector<ArrayRef<Register>, 8> VRegArgs;
2295 for (const Argument &Arg: F.args()) {
10. Assuming '__begin1' is equal to '__end1'
2296 if (DL->getTypeStoreSize(Arg.getType()) == 0)
2297 continue; // Don't handle zero sized types.
2298 ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
2299 VRegArgs.push_back(VRegs);
2300
2301 if (Arg.hasSwiftErrorAttr()) {
2302 assert(VRegs.size() == 1 && "Too many vregs for Swift error");
2303 SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
2304 }
2305 }
2306
2307 if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
11. Assuming the condition is false
12. Taking false branch
2308 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2309 F.getSubprogram(), &F.getEntryBlock());
2310 R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
2311 reportTranslationError(*MF, *TPC, *ORE, R);
2312 return false;
2313 }
2314
2315 // Need to visit defs before uses when translating instructions.
2316 GISelObserverWrapper WrapperObserver;
2317 if (EnableCSE && CSEInfo)
12.1. 'EnableCSE' is false
2318 WrapperObserver.addObserver(CSEInfo);
2319 {
2320 ReversePostOrderTraversal<const Function *> RPOT(&F);
2321#ifndef NDEBUG
2322 DILocationVerifier Verifier;
2323 WrapperObserver.addObserver(&Verifier);
2324#endif // ifndef NDEBUG
2325 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
2326 for (const BasicBlock *BB : RPOT) {
2327 MachineBasicBlock &MBB = getMBB(*BB);
2328 // Set the insertion point of all the following translations to
2329 // the end of this basic block.
2330 CurBuilder->setMBB(MBB);
2331 HasTailCall = false;
2332 for (const Instruction &Inst : *BB) {
2333 // If we translated a tail call in the last step, then we know
2334 // everything after the call is either a return, or something that is
2335 // handled by the call itself. (E.g. a lifetime marker or assume
2336 // intrinsic.) In this case, we should stop translating the block and
2337 // move on.
2338 if (HasTailCall)
12.2. Field 'HasTailCall' is false
13. Taking false branch
2339 break;
2340#ifndef NDEBUG
2341 Verifier.setCurrentInst(&Inst);
2342#endif // ifndef NDEBUG
2343 if (translate(Inst))
14. Calling 'IRTranslator::translate'
2344 continue;
2345
2346 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2347 Inst.getDebugLoc(), BB);
2348 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
2349
2350 if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
2351 std::string InstStrStorage;
2352 raw_string_ostream InstStr(InstStrStorage);
2353 InstStr << Inst;
2354
2355 R << ": '" << InstStr.str() << "'";
2356 }
2357
2358 reportTranslationError(*MF, *TPC, *ORE, R);
2359 return false;
2360 }
2361
2362 finalizeBasicBlock();
2363 }
2364#ifndef NDEBUG
2365 WrapperObserver.removeObserver(&Verifier);
2366#endif
2367 }
2368
2369 finishPendingPhis();
2370
2371 SwiftError.propagateVRegs();
2372
2373 // Merge the argument lowering and constants block with its single
2374 // successor, the LLVM-IR entry block. We want the basic block to
2375 // be maximal.
2376 assert(EntryBB->succ_size() == 1 &&
2377 "Custom BB used for lowering should have only one successor");
2378 // Get the successor of the current entry block.
2379 MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
2380 assert(NewEntryBB.pred_size() == 1 &&
2381 "LLVM-IR entry block has a predecessor!?");
2382 // Move all the instruction from the current entry block to the
2383 // new entry block.
2384 NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
2385 EntryBB->end());
2386
2387 // Update the live-in information for the new entry block.
2388 for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
2389 NewEntryBB.addLiveIn(LiveIn);
2390 NewEntryBB.sortUniqueLiveIns();
2391
2392 // Get rid of the now empty basic block.
2393 EntryBB->removeSuccessor(&NewEntryBB);
2394 MF->remove(EntryBB);
2395 MF->DeleteMachineBasicBlock(EntryBB);
2396
2397 assert(&MF->front() == &NewEntryBB &&
2398          "New entry wasn't next in the list of basic block!");
2399
2400 // Initialize stack protector information.
2401 StackProtector &SP = getAnalysis<StackProtector>();
2402 SP.copyToMachineFrameInfo(MF->getFrameInfo());
2403
2404 return false;
2405}
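The path notes above (12.2, 13, 14) step from this translation loop into IRTranslator::translate; notes 17 and 18 further down continue through translateICmp into translateCompare. A minimal, self-contained C++ reduction of the pattern a path-sensitive checker follows across such calls is sketched here; all names are hypothetical and this is not the IRTranslator code itself.

#include <cstdio>

// Hypothetical reduction: a dynamic cast is null-checked on one path but
// dereferenced unconditionally on another -- the shape a path-sensitive
// checker tracks across function calls.
struct User { virtual ~User() = default; };
struct CmpInst : User {
  int getPredicate() const { return 32; }
  int getFlags() const { return 0; } // requires a non-null 'this'
};

int lowerCompare(const User *U, int FallbackPred) {
  const CmpInst *CI = dynamic_cast<const CmpInst *>(U); // may be null
  int Pred = CI ? CI->getPredicate() : FallbackPred;    // null handled here
  return Pred + CI->getFlags(); // but not here: null dereference when !CI
}

int main() {
  CmpInst C;
  std::printf("%d\n", lowerCompare(&C, 7)); // non-null path, fine
  // User Plain; lowerCompare(&Plain, 7);   // would dereference a null CI
  return 0;
}

The ternary in lowerCompare is what proves to the checker that CI can be null; the later unconditional dereference is the sink it reports.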

/build/llvm-toolchain-snapshot-10~svn374710/include/llvm/IR/Instruction.def

1//===-- llvm/Instruction.def - File that describes Instructions -*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains descriptions of the various LLVM instructions. This is
10// used as a central place for enumerating the different instructions and
11// should eventually be the place to put comments about the instructions.
12//
13//===----------------------------------------------------------------------===//
14
15// NOTE: NO INCLUDE GUARD DESIRED!
16
17// Provide definitions of macros so that users of this file do not have to
18// define everything to use it...
19//
20#ifndef FIRST_TERM_INST
21#define FIRST_TERM_INST(num)
22#endif
23#ifndef HANDLE_TERM_INST
24#ifndef HANDLE_INST
25#define HANDLE_TERM_INST(num, opcode, Class)
26#else
27#define HANDLE_TERM_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
28#endif
29#endif
30#ifndef LAST_TERM_INST
31#define LAST_TERM_INST(num)
32#endif
33
34#ifndef FIRST_UNARY_INST
35#define FIRST_UNARY_INST(num)
36#endif
37#ifndef HANDLE_UNARY_INST
38#ifndef HANDLE_INST
39#define HANDLE_UNARY_INST(num, opcode, instclass)
40#else
41#define HANDLE_UNARY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
42#endif
43#endif
44#ifndef LAST_UNARY_INST
45#define LAST_UNARY_INST(num)
46#endif
47
48#ifndef FIRST_BINARY_INST
49#define FIRST_BINARY_INST(num)
50#endif
51#ifndef HANDLE_BINARY_INST
52#ifndef HANDLE_INST
53#define HANDLE_BINARY_INST(num, opcode, instclass)
54#else
55#define HANDLE_BINARY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
56#endif
57#endif
58#ifndef LAST_BINARY_INST
59#define LAST_BINARY_INST(num)
60#endif
61
62#ifndef FIRST_MEMORY_INST
63#define FIRST_MEMORY_INST(num)
64#endif
65#ifndef HANDLE_MEMORY_INST
66#ifndef HANDLE_INST
67#define HANDLE_MEMORY_INST(num, opcode, Class)
68#else
69#define HANDLE_MEMORY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
70#endif
71#endif
72#ifndef LAST_MEMORY_INST
73#define LAST_MEMORY_INST(num)
74#endif
75
76#ifndef FIRST_CAST_INST
77#define FIRST_CAST_INST(num)
78#endif
79#ifndef HANDLE_CAST_INST
80#ifndef HANDLE_INST
81#define HANDLE_CAST_INST(num, opcode, Class)
82#else
83#define HANDLE_CAST_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
84#endif
85#endif
86#ifndef LAST_CAST_INST
87#define LAST_CAST_INST(num)
88#endif
89
90#ifndef FIRST_FUNCLETPAD_INST
91#define FIRST_FUNCLETPAD_INST(num)
92#endif
93#ifndef HANDLE_FUNCLETPAD_INST
94#ifndef HANDLE_INST
95#define HANDLE_FUNCLETPAD_INST(num, opcode, Class)
96#else
97#define HANDLE_FUNCLETPAD_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
98#endif
99#endif
100#ifndef LAST_FUNCLETPAD_INST
101#define LAST_FUNCLETPAD_INST(num)
102#endif
103
104#ifndef FIRST_OTHER_INST
105#define FIRST_OTHER_INST(num)
106#endif
107#ifndef HANDLE_OTHER_INST
108#ifndef HANDLE_INST
109#define HANDLE_OTHER_INST(num, opcode, Class)
110#else
111#define HANDLE_OTHER_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
112#endif
113#endif
114#ifndef LAST_OTHER_INST
115#define LAST_OTHER_INST(num)
116#endif
117
118#ifndef HANDLE_USER_INST
119#define HANDLE_USER_INST(num, opc, Class) HANDLE_OTHER_INST(num, opc, Class)
120#endif
121
122// Terminator Instructions - These instructions are used to terminate a basic
123// block of the program. Every basic block must end with one of these
124// instructions for it to be a well formed basic block.
125//
126 FIRST_TERM_INST ( 1)
127HANDLE_TERM_INST ( 1, Ret , ReturnInst)
128HANDLE_TERM_INST ( 2, Br , BranchInst)
129HANDLE_TERM_INST ( 3, Switch , SwitchInst)
130HANDLE_TERM_INST ( 4, IndirectBr , IndirectBrInst)
131HANDLE_TERM_INST ( 5, Invoke , InvokeInst)
132HANDLE_TERM_INST ( 6, Resume , ResumeInst)
133HANDLE_TERM_INST ( 7, Unreachable , UnreachableInst)
134HANDLE_TERM_INST ( 8, CleanupRet , CleanupReturnInst)
135HANDLE_TERM_INST ( 9, CatchRet , CatchReturnInst)
136HANDLE_TERM_INST (10, CatchSwitch , CatchSwitchInst)
137HANDLE_TERM_INST (11, CallBr , CallBrInst) // A call-site terminator
138 LAST_TERM_INST (11)
139
140// Standard unary operators...
141 FIRST_UNARY_INST(12)
142HANDLE_UNARY_INST(12, FNeg , UnaryOperator)
143 LAST_UNARY_INST(12)
144
145// Standard binary operators...
146 FIRST_BINARY_INST(13)
147HANDLE_BINARY_INST(13, Add , BinaryOperator)
148HANDLE_BINARY_INST(14, FAdd , BinaryOperator)
149HANDLE_BINARY_INST(15, Sub , BinaryOperator)
150HANDLE_BINARY_INST(16, FSub , BinaryOperator)
151HANDLE_BINARY_INST(17, Mul , BinaryOperator)
152HANDLE_BINARY_INST(18, FMul , BinaryOperator)
153HANDLE_BINARY_INST(19, UDiv , BinaryOperator)
154HANDLE_BINARY_INST(20, SDiv , BinaryOperator)
155HANDLE_BINARY_INST(21, FDiv , BinaryOperator)
156HANDLE_BINARY_INST(22, URem , BinaryOperator)
157HANDLE_BINARY_INST(23, SRem , BinaryOperator)
158HANDLE_BINARY_INST(24, FRem , BinaryOperator)
159
160// Logical operators (integer operands)
161HANDLE_BINARY_INST(25, Shl , BinaryOperator) // Shift left (logical)
162HANDLE_BINARY_INST(26, LShr , BinaryOperator) // Shift right (logical)
163HANDLE_BINARY_INST(27, AShr , BinaryOperator) // Shift right (arithmetic)
164HANDLE_BINARY_INST(28, And , BinaryOperator)
165HANDLE_BINARY_INST(29, Or , BinaryOperator)
166HANDLE_BINARY_INST(30, Xor , BinaryOperator)
167 LAST_BINARY_INST(30)
168
169// Memory operators...
170 FIRST_MEMORY_INST(31)
171HANDLE_MEMORY_INST(31, Alloca, AllocaInst) // Stack management
172HANDLE_MEMORY_INST(32, Load , LoadInst ) // Memory manipulation instrs
173HANDLE_MEMORY_INST(33, Store , StoreInst )
174HANDLE_MEMORY_INST(34, GetElementPtr, GetElementPtrInst)
175HANDLE_MEMORY_INST(35, Fence , FenceInst )
176HANDLE_MEMORY_INST(36, AtomicCmpXchg , AtomicCmpXchgInst )
177HANDLE_MEMORY_INST(37, AtomicRMW , AtomicRMWInst )
178 LAST_MEMORY_INST(37)
179
180// Cast operators ...
181// NOTE: The order matters here because CastInst::isEliminableCastPair
182// NOTE: (see Instructions.cpp) encodes a table based on this ordering.
183 FIRST_CAST_INST(38)
184HANDLE_CAST_INST(38, Trunc , TruncInst ) // Truncate integers
185HANDLE_CAST_INST(39, ZExt , ZExtInst ) // Zero extend integers
186HANDLE_CAST_INST(40, SExt , SExtInst ) // Sign extend integers
187HANDLE_CAST_INST(41, FPToUI , FPToUIInst ) // floating point -> UInt
188HANDLE_CAST_INST(42, FPToSI , FPToSIInst ) // floating point -> SInt
189HANDLE_CAST_INST(43, UIToFP , UIToFPInst ) // UInt -> floating point
190HANDLE_CAST_INST(44, SIToFP , SIToFPInst ) // SInt -> floating point
191HANDLE_CAST_INST(45, FPTrunc , FPTruncInst ) // Truncate floating point
192HANDLE_CAST_INST(46, FPExt , FPExtInst ) // Extend floating point
193HANDLE_CAST_INST(47, PtrToInt, PtrToIntInst) // Pointer -> Integer
194HANDLE_CAST_INST(48, IntToPtr, IntToPtrInst) // Integer -> Pointer
195HANDLE_CAST_INST(49, BitCast , BitCastInst ) // Type cast
196HANDLE_CAST_INST(50, AddrSpaceCast, AddrSpaceCastInst) // addrspace cast
197 LAST_CAST_INST(50)
198
199 FIRST_FUNCLETPAD_INST(51)
200HANDLE_FUNCLETPAD_INST(51, CleanupPad, CleanupPadInst)
201HANDLE_FUNCLETPAD_INST(52, CatchPad , CatchPadInst)
202 LAST_FUNCLETPAD_INST(52)
203
204// Other operators...
205 FIRST_OTHER_INST(53)
206HANDLE_OTHER_INST(53, ICmp , ICmpInst ) // Integer comparison instruction
17
Calling 'IRTranslator::translateICmp'
207HANDLE_OTHER_INST(54, FCmp , FCmpInst ) // Floating point comparison instr.
208HANDLE_OTHER_INST(55, PHI , PHINode ) // PHI node instruction
209HANDLE_OTHER_INST(56, Call , CallInst ) // Call a function
210HANDLE_OTHER_INST(57, Select , SelectInst ) // select instruction
211HANDLE_USER_INST (58, UserOp1, Instruction) // May be used internally in a pass
212HANDLE_USER_INST (59, UserOp2, Instruction) // Internal to passes only
213HANDLE_OTHER_INST(60, VAArg , VAArgInst ) // vaarg instruction
214HANDLE_OTHER_INST(61, ExtractElement, ExtractElementInst)// extract from vector
215HANDLE_OTHER_INST(62, InsertElement, InsertElementInst) // insert into vector
216HANDLE_OTHER_INST(63, ShuffleVector, ShuffleVectorInst) // shuffle two vectors.
217HANDLE_OTHER_INST(64, ExtractValue, ExtractValueInst)// extract from aggregate
218HANDLE_OTHER_INST(65, InsertValue, InsertValueInst) // insert into aggregate
219HANDLE_OTHER_INST(66, LandingPad, LandingPadInst) // Landing pad instruction.
220 LAST_OTHER_INST(66)
221
222#undef FIRST_TERM_INST
223#undef HANDLE_TERM_INST
224#undef LAST_TERM_INST
225
226#undef FIRST_UNARY_INST
227#undef HANDLE_UNARY_INST
228#undef LAST_UNARY_INST
229
230#undef FIRST_BINARY_INST
231#undef HANDLE_BINARY_INST
232#undef LAST_BINARY_INST
233
234#undef FIRST_MEMORY_INST
235#undef HANDLE_MEMORY_INST
236#undef LAST_MEMORY_INST
237
238#undef FIRST_CAST_INST
239#undef HANDLE_CAST_INST
240#undef LAST_CAST_INST
241
242#undef FIRST_FUNCLETPAD_INST
243#undef HANDLE_FUNCLETPAD_INST
244#undef LAST_FUNCLETPAD_INST
245
246#undef FIRST_OTHER_INST
247#undef HANDLE_OTHER_INST
248#undef LAST_OTHER_INST
249
250#undef HANDLE_USER_INST
251
252#ifdef HANDLE_INST
253#undef HANDLE_INST
254#endif
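Instruction.def is an X-macro table: a consumer defines whichever HANDLE_* macros it needs and then includes the file, and every table entry expands in place. The toy consumer below shows the pattern with two entries inlined for self-containment; a real consumer, such as the opcode switch in IRTranslator::translate, would #include the .def file instead.

#include <cstdio>

// Toy X-macro consumer (illustrative only): map an opcode number to its
// name by expanding table entries into switch cases.
const char *opcodeName(unsigned Opcode) {
  switch (Opcode) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case NUM:                                                                    \
    return #OPCODE;
    // Two entries inlined here; a real consumer would instead write
    // #include "llvm/IR/Instruction.def" and get the whole table.
    HANDLE_INST(1, Ret, ReturnInst)
    HANDLE_INST(2, Br, BranchInst)
#undef HANDLE_INST
  default:
    return "<unknown>";
  }
}

int main() { std::printf("%s %s\n", opcodeName(1), opcodeName(2)); }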

/build/llvm-toolchain-snapshot-10~svn374710/include/llvm/CodeGen/GlobalISel/IRTranslator.h

1//===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file declares the IRTranslator pass.
10/// This pass is responsible for translating LLVM IR into MachineInstr.
11/// It uses target hooks to lower the ABI, but aside from that, the code
12/// the pass generates is generic. This is the default translator used for
13/// GlobalISel.
14///
15/// \todo Replace the comments with actual doxygen comments.
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
19#define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
20
21#include "llvm/ADT/DenseMap.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
24#include "llvm/CodeGen/GlobalISel/Types.h"
25#include "llvm/CodeGen/SwiftErrorValueTracking.h"
26#include "llvm/CodeGen/MachineFunctionPass.h"
27#include "llvm/CodeGen/SwitchLoweringUtils.h"
28#include "llvm/IR/Intrinsics.h"
29#include "llvm/Support/Allocator.h"
30#include <memory>
31#include <utility>
32
33namespace llvm {
34
35class AllocaInst;
36class BasicBlock;
37class CallInst;
38class CallLowering;
39class Constant;
40class DataLayout;
41class FunctionLoweringInfo;
42class Instruction;
43class MachineBasicBlock;
44class MachineFunction;
45class MachineInstr;
46class MachineRegisterInfo;
47class OptimizationRemarkEmitter;
48class PHINode;
49class TargetPassConfig;
50class User;
51class Value;
52
53// Technically the pass should run on a hypothetical MachineModule,
54// since it should translate Globals into some sort of MachineGlobal.
55// The MachineGlobal should ultimately just be a transfer of ownership of
56// the interesting bits that are relevant to represent a global value.
57// That being said, we could investigate what it would cost to just duplicate
58// the information from the LLVM IR.
59// The idea is that ultimately we would be able to free up the memory used
60// by the LLVM IR as soon as the translation is over.
61class IRTranslator : public MachineFunctionPass {
62public:
63 static char ID;
64
65private:
66 /// Interface used to lower everything related to calls.
67 const CallLowering *CLI;
68
69 /// This class contains the mapping from Values to vreg-related data.
70 class ValueToVRegInfo {
71 public:
72 ValueToVRegInfo() = default;
73
74 using VRegListT = SmallVector<Register, 1>;
75 using OffsetListT = SmallVector<uint64_t, 1>;
76
77 using const_vreg_iterator =
78 DenseMap<const Value *, VRegListT *>::const_iterator;
79 using const_offset_iterator =
80 DenseMap<const Value *, OffsetListT *>::const_iterator;
81
82 inline const_vreg_iterator vregs_end() const { return ValToVRegs.end(); }
83
84 VRegListT *getVRegs(const Value &V) {
85 auto It = ValToVRegs.find(&V);
86 if (It != ValToVRegs.end())
87 return It->second;
88
89 return insertVRegs(V);
90 }
91
92 OffsetListT *getOffsets(const Value &V) {
93 auto It = TypeToOffsets.find(V.getType());
94 if (It != TypeToOffsets.end())
95 return It->second;
96
97 return insertOffsets(V);
98 }
99
100 const_vreg_iterator findVRegs(const Value &V) const {
101 return ValToVRegs.find(&V);
102 }
103
104 bool contains(const Value &V) const {
105 return ValToVRegs.find(&V) != ValToVRegs.end();
106 }
107
108 void reset() {
109 ValToVRegs.clear();
110 TypeToOffsets.clear();
111 VRegAlloc.DestroyAll();
112 OffsetAlloc.DestroyAll();
113 }
114
115 private:
116 VRegListT *insertVRegs(const Value &V) {
117 assert(ValToVRegs.find(&V) == ValToVRegs.end() && "Value already exists");
118
119 // We placement new using our fast allocator since we never try to free
120 // the vectors until translation is finished.
121 auto *VRegList = new (VRegAlloc.Allocate()) VRegListT();
122 ValToVRegs[&V] = VRegList;
123 return VRegList;
124 }
125
126 OffsetListT *insertOffsets(const Value &V) {
127 assert(TypeToOffsets.find(V.getType()) == TypeToOffsets.end() &&
128             "Type already exists");
129
130 auto *OffsetList = new (OffsetAlloc.Allocate()) OffsetListT();
131 TypeToOffsets[V.getType()] = OffsetList;
132 return OffsetList;
133 }
134 SpecificBumpPtrAllocator<VRegListT> VRegAlloc;
135 SpecificBumpPtrAllocator<OffsetListT> OffsetAlloc;
136
137 // We store pointers to vectors here since references may be invalidated
138 // while we hold them if we stored the vectors directly.
139 DenseMap<const Value *, VRegListT*> ValToVRegs;
140 DenseMap<const Type *, OffsetListT*> TypeToOffsets;
141 };
142
143 /// Mapping of the values of the current LLVM IR function to the related
144 /// virtual registers and offsets.
145 ValueToVRegInfo VMap;
146
147 // N.b. it's not completely obvious that this will be sufficient for every
148 // LLVM IR construct (with "invoke" being the obvious candidate to mess up our
149 // lives).
150 DenseMap<const BasicBlock *, MachineBasicBlock *> BBToMBB;
151
152 // One BasicBlock can be translated to multiple MachineBasicBlocks. For such
153 // BasicBlocks translated to multiple MachineBasicBlocks, MachinePreds retains
154 // a mapping between the edges arriving at the BasicBlock to the corresponding
155 // created MachineBasicBlocks. Some BasicBlocks that get translated to a
156 // single MachineBasicBlock may also end up in this Map.
157 using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>;
158 DenseMap<CFGEdge, SmallVector<MachineBasicBlock *, 1>> MachinePreds;
159
160 // List of stubbed PHI instructions, for values and basic blocks to be filled
161 // in once all MachineBasicBlocks have been created.
162 SmallVector<std::pair<const PHINode *, SmallVector<MachineInstr *, 1>>, 4>
163 PendingPHIs;
164
165 /// Record of what frame index has been allocated to specified allocas for
166 /// this function.
167 DenseMap<const AllocaInst *, int> FrameIndices;
168
169 SwiftErrorValueTracking SwiftError;
170
171 /// \name Methods for translating from LLVM IR to MachineInstr.
172 /// \see ::translate for general information on the translate methods.
173 /// @{
174
175 /// Translate \p Inst into its corresponding MachineInstr instruction(s).
176 /// Insert the newly translated instruction(s) right where the CurBuilder
177 /// is set.
178 ///
179 /// The general algorithm is:
180 /// 1. Look for a virtual register for each operand or
181 /// create one.
182 /// 2. Update the VMap accordingly.
183 /// 2.alt. For constant arguments, if they are compile-time constants,
184 /// produce an immediate in the right operand and do not touch
185 /// ValToReg. Actually we will go with a virtual register for each
186 /// constant because it may be expensive to actually materialize the
187 /// constant. Moreover, if the constant spans several instructions,
188 /// CSE may not catch them.
189 /// => Update ValToVReg and remember that we saw a constant in Constants.
190 /// We will materialize all the constants in finalize.
191 /// Note: we would need to do something so that we can recognize such operand
192 /// as constants.
193 /// 3. Create the generic instruction.
194 ///
195 /// \return true if the translation succeeded.
196 bool translate(const Instruction &Inst);
197
198 /// Materialize \p C into virtual-register \p Reg. The generic instructions
199 /// performing this materialization will be inserted into the entry block of
200 /// the function.
201 ///
202 /// \return true if the materialization succeeded.
203 bool translate(const Constant &C, Register Reg);
204
205 /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
206 /// emitted.
207 bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);
208
209 /// Translate an LLVM load instruction into generic IR.
210 bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);
211
212 /// Translate an LLVM store instruction into generic IR.
213 bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);
214
215 /// Translate an LLVM string intrinsic (memcpy, memset, ...).
216 bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
217 Intrinsic::ID ID);
218
219 void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);
220
221 bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
222 MachineIRBuilder &MIRBuilder);
223
224 /// Helper function for translateSimpleIntrinsic.
225 /// \return The generic opcode for \p IntrinsicID if \p IntrinsicID is a
226 /// simple intrinsic (ceil, fabs, etc.). Otherwise, returns
227 /// Intrinsic::not_intrinsic.
228 unsigned getSimpleIntrinsicOpcode(Intrinsic::ID ID);
229
230 /// Translates the intrinsics defined in getSimpleIntrinsicOpcode.
231 /// \return true if the translation succeeded.
232 bool translateSimpleIntrinsic(const CallInst &CI, Intrinsic::ID ID,
233 MachineIRBuilder &MIRBuilder);
234
235 bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
236 MachineIRBuilder &MIRBuilder);
237
238 bool translateInlineAsm(const CallInst &CI, MachineIRBuilder &MIRBuilder);
239
240 /// Returns true if the value should be split into multiple LLTs.
241 /// If \p Offsets is given then the split type's offsets will be stored in it.
242 /// If \p Offsets is not empty it will be cleared first.
243 bool valueIsSplit(const Value &V,
244 SmallVectorImpl<uint64_t> *Offsets = nullptr);
245
246 /// Common code for translating normal calls or invokes.
247 bool translateCallSite(const ImmutableCallSite &CS,
248 MachineIRBuilder &MIRBuilder);
249
250 /// Translate call instruction.
251 /// \pre \p U is a call instruction.
252 bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);
253
254 bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);
255
256 bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);
257
258 bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);
259
260 /// Translate one of LLVM's cast instructions into MachineInstrs, with the
261 /// given generic Opcode.
262 bool translateCast(unsigned Opcode, const User &U,
263 MachineIRBuilder &MIRBuilder);
264
265 /// Translate a phi instruction.
266 bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);
267
268 /// Translate a comparison (icmp or fcmp) instruction or constant.
269 bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);
270
271 /// Translate an integer compare instruction (or constant).
272 bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
273 return translateCompare(U, MIRBuilder);
18
Calling 'IRTranslator::translateCompare'
274 }
275
276 /// Translate a floating-point compare instruction (or constant).
277 bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
278 return translateCompare(U, MIRBuilder);
279 }
280
281 /// Add remaining operands onto phis we've translated. Executed after all
282 /// MachineBasicBlocks for the function have been created.
283 void finishPendingPhis();
284
285 /// Translate \p Inst into a binary operation \p Opcode.
286 /// \pre \p U is a binary operation.
287 bool translateBinaryOp(unsigned Opcode, const User &U,
288 MachineIRBuilder &MIRBuilder);
289
290 /// Translate branch (br) instruction.
291 /// \pre \p U is a branch instruction.
292 bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);
293
294 // Begin switch lowering functions.
295 bool emitJumpTableHeader(SwitchCG::JumpTable &JT,
296 SwitchCG::JumpTableHeader &JTH,
297 MachineBasicBlock *HeaderBB);
298 void emitJumpTable(SwitchCG::JumpTable &JT, MachineBasicBlock *MBB);
299
300 void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
301 MachineIRBuilder &MIB);
302
303 bool lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
304 MachineBasicBlock *SwitchMBB,
305 MachineBasicBlock *CurMBB,
306 MachineBasicBlock *DefaultMBB,
307 MachineIRBuilder &MIB,
308 MachineFunction::iterator BBI,
309 BranchProbability UnhandledProbs,
310 SwitchCG::CaseClusterIt I,
311 MachineBasicBlock *Fallthrough,
312 bool FallthroughUnreachable);
313
314 bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
315 Value *Cond,
316 MachineBasicBlock *Fallthrough,
317 bool FallthroughUnreachable,
318 BranchProbability UnhandledProbs,
319 MachineBasicBlock *CurMBB,
320 MachineIRBuilder &MIB,
321 MachineBasicBlock *SwitchMBB);
322
323 bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
324 MachineBasicBlock *SwitchMBB,
325 MachineBasicBlock *DefaultMBB,
326 MachineIRBuilder &MIB);
327
328 bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
329 // End switch lowering section.
330
331 bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);
332
333 bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);
334
335 bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);
336
337 bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);
338
339 bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);
340
341 bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder);
342
343 /// Translate return (ret) instruction.
344 /// The target needs to implement CallLowering::lowerReturn for
345 /// this to succeed.
346 /// \pre \p U is a return instruction.
347 bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);
348
349 bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder);
350
351 bool translateFNeg(const User &U, MachineIRBuilder &MIRBuilder);
352
353 bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
354 return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
355 }
356 bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
357 return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
358 }
359 bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
360 return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
361 }
362 bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
363 return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
364 }
365 bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
366 return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
367 }
368 bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
369 return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
370 }
371
372 bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
373 return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
374 }
375 bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
376 return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
377 }
378 bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
379 return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
380 }
381 bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
382 return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
383 }
384 bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
385 return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
386 }
387 bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
388 return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
389 }
390 bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
391 return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
392 }
393 bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
394 return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
395 }
396 bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
397 return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
398 }
399 bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
400 return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
401 }
402 bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
403 return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
404 }
405 bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
406 return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
407 }
408 bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
409 return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
410 }
411 bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder) {
412 return true;
413 }
414 bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
415 return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
416 }
417
418 bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
419 return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
420 }
421
422 bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
423 return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
424 }
425 bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
426 return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
427 }
428 bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
429 return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
430 }
431
432 bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
433 return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
434 }
435 bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
436 return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
437 }
438 bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
439 return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
440 }
441 bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
442 return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
443 }
444
445 bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder);
446
447 bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder);
448
449 bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder);
450
451 bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);
452
453 bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
454 bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
455 bool translateFence(const User &U, MachineIRBuilder &MIRBuilder);
456
457 // Stubs to keep the compiler happy while we implement the rest of the
458 // translation.
459 bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
460 return false;
461 }
462 bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
463 return false;
464 }
465 bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
466 return false;
467 }
468 bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
469 return false;
470 }
471 bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
472 return translateCast(TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder);
473 }
474 bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
475 return false;
476 }
477 bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
478 return false;
479 }
480 bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
481 return false;
482 }
483 bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
484 return false;
485 }
486
487 /// @}
488
489 // Builder for machine instructions, a la IRBuilder.
490 // I.e., compared to regular MIBuilder, this one also inserts the instruction
491 // in the current block, it can create blocks, etc.; basically a kind of
492 // IRBuilder, but for Machine IR.
493 // CSEMIRBuilder CurBuilder;
494 std::unique_ptr<MachineIRBuilder> CurBuilder;
495
496 // Builder set to the entry block (just after ABI lowering instructions). Used
497 // as a convenient location for Constants.
498 // CSEMIRBuilder EntryBuilder;
499 std::unique_ptr<MachineIRBuilder> EntryBuilder;
500
501 // The MachineFunction currently being translated.
502 MachineFunction *MF;
503
504 /// MachineRegisterInfo used to create virtual registers.
505 MachineRegisterInfo *MRI = nullptr;
506
507 const DataLayout *DL;
508
509 /// Current target configuration. Controls how the pass handles errors.
510 const TargetPassConfig *TPC;
511
512 /// Current optimization remark emitter. Used to report failures.
513 std::unique_ptr<OptimizationRemarkEmitter> ORE;
514
515 FunctionLoweringInfo FuncInfo;
516
517 // True when either the Target Machine specifies no optimizations or the
518 // function has the optnone attribute.
519 bool EnableOpts = false;
520
521 /// True when the block contains a tail call. This allows the IRTranslator to
522 /// stop translating such blocks early.
523 bool HasTailCall = false;
524
525 /// Switch analysis and optimization.
526 class GISelSwitchLowering : public SwitchCG::SwitchLowering {
527 public:
528 GISelSwitchLowering(IRTranslator *irt, FunctionLoweringInfo &funcinfo)
529 : SwitchLowering(funcinfo), IRT(irt) {
530 assert(irt && "irt is null!");
531 }
532
533 virtual void addSuccessorWithProb(
534 MachineBasicBlock *Src, MachineBasicBlock *Dst,
535 BranchProbability Prob = BranchProbability::getUnknown()) override {
536 IRT->addSuccessorWithProb(Src, Dst, Prob);
537 }
538
539 virtual ~GISelSwitchLowering() = default;
540
541 private:
542 IRTranslator *IRT;
543 };
544
545 std::unique_ptr<GISelSwitchLowering> SL;
546
547 // * Insert all the code needed to materialize the constants
548 // at the proper place. E.g., Entry block or dominator block
549 // of each constant depending on how fancy we want to be.
550 // * Clear the different maps.
551 void finalizeFunction();
552
553 // Handle emitting jump tables for each basic block.
554 void finalizeBasicBlock();
555
556 /// Get the VRegs that represent \p Val.
557 /// Non-aggregate types have just one corresponding VReg and the list can be
558 /// used as a single "unsigned". Aggregates get flattened. If such VRegs do
559 /// not exist, they are created.
560 ArrayRef<Register> getOrCreateVRegs(const Value &Val);
561
562 Register getOrCreateVReg(const Value &Val) {
563 auto Regs = getOrCreateVRegs(Val);
564 if (Regs.empty())
565 return 0;
566 assert(Regs.size() == 1 &&
567            "attempt to get single VReg for aggregate or void");
568 return Regs[0];
569 }
570
571 /// Allocate some vregs and offsets in the VMap. Then populate just the
572 /// offsets while leaving the vregs empty.
573 ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);
574
575 /// Get the frame index that represents \p AI.
576 /// If such a frame index does not exist, it is created.
577 int getOrCreateFrameIndex(const AllocaInst &AI);
578
579 /// Get the alignment of the given memory operation instruction. This will
580 /// either be the explicitly specified value or the ABI-required alignment for
581 /// the type being accessed (according to the Module's DataLayout).
582 unsigned getMemOpAlignment(const Instruction &I);
583
584 /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
585 /// returned will be the head of the translated block (suitable for branch
586 /// destinations).
587 MachineBasicBlock &getMBB(const BasicBlock &BB);
588
589 /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding
590 /// to `Edge.first` at the IR level. This is used when IRTranslation creates
591 /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer
592 /// represented simply by the IR-level CFG.
593 void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred);
594
595 /// Returns the Machine IR predecessors for the given IR CFG edge. Usually
596 /// this is just the single MachineBasicBlock corresponding to the predecessor
597 /// in the IR. More complex lowering can result in multiple MachineBasicBlocks
598 /// preceding the original though (e.g. switch instructions).
599 SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) {
600 auto RemappedEdge = MachinePreds.find(Edge);
601 if (RemappedEdge != MachinePreds.end())
602 return RemappedEdge->second;
603 return SmallVector<MachineBasicBlock *, 4>(1, &getMBB(*Edge.first));
604 }
605
606 /// Return branch probability calculated by BranchProbabilityInfo for IR
607 /// blocks.
608 BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
609 const MachineBasicBlock *Dst) const;
610
611 void addSuccessorWithProb(MachineBasicBlock *Src, MachineBasicBlock *Dst,
612 BranchProbability Prob);
613
614public:
615 // Ctor, nothing fancy.
616 IRTranslator();
617
618 StringRef getPassName() const override { return "IRTranslator"; }
619
620 void getAnalysisUsage(AnalysisUsage &AU) const override;
621
622 // Algo:
623 // CallLowering = MF.subtarget.getCallLowering()
624 // F = MF.getParent()
625 // MIRBuilder.reset(MF)
626 // getMBB(F.getEntryBB())
627 // CallLowering->translateArguments(MIRBuilder, F, ValToVReg)
628 // for each bb in F
629 // getMBB(bb)
630 // for each inst in bb
631 // if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence))
632 // report_fatal_error("Don't know how to translate input");
633 // finalize()
634 bool runOnMachineFunction(MachineFunction &MF) override;
635};
636
637} // end namespace llvm
638
639#endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
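Note that translateICmp and translateFCmp above both funnel into translateCompare, which is where the analyzer path (note 18) leads. A hedged sketch of the usual repair for this class of bug follows: on the path where the dyn_cast yields null (a constant-expression compare rather than an instruction), fall back to default-constructed flags instead of dereferencing. The types below are stand-ins, not the actual LLVM patch.

// Stand-in types for illustration; not LLVM's FCmpInst/FastMathFlags.
struct FastMathFlags { bool AllowReassoc = false; };
struct FCmpLike {
  FastMathFlags FMF;
  FastMathFlags getFastMathFlags() const { return FMF; }
};

FastMathFlags flagsOrDefault(const FCmpLike *CI) {
  // Guard every use of the possibly-null pointer, not just the first one.
  return CI ? CI->getFastMathFlags() : FastMathFlags();
}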

/build/llvm-toolchain-snapshot-10~svn374710/include/llvm/IR/InstrTypes.h

1//===- llvm/InstrTypes.h - Important Instruction subclasses -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines various meta classes of instructions that exist in the VM
10// representation. Specific concrete subclasses of these may be found in the
11// i*.h files...
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRTYPES_H
16#define LLVM_IR_INSTRTYPES_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/Optional.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/StringMap.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/IR/Attributes.h"
27#include "llvm/IR/CallingConv.h"
28#include "llvm/IR/Constants.h"
29#include "llvm/IR/DerivedTypes.h"
30#include "llvm/IR/Function.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/LLVMContext.h"
33#include "llvm/IR/OperandTraits.h"
34#include "llvm/IR/Type.h"
35#include "llvm/IR/User.h"
36#include "llvm/IR/Value.h"
37#include "llvm/Support/Casting.h"
38#include "llvm/Support/ErrorHandling.h"
39#include <algorithm>
40#include <cassert>
41#include <cstddef>
42#include <cstdint>
43#include <iterator>
44#include <string>
45#include <vector>
46
47namespace llvm {
48
49namespace Intrinsic {
50enum ID : unsigned;
51}
52
53//===----------------------------------------------------------------------===//
54// UnaryInstruction Class
55//===----------------------------------------------------------------------===//
56
57class UnaryInstruction : public Instruction {
58protected:
59 UnaryInstruction(Type *Ty, unsigned iType, Value *V,
60 Instruction *IB = nullptr)
61 : Instruction(Ty, iType, &Op<0>(), 1, IB) {
62 Op<0>() = V;
63 }
64 UnaryInstruction(Type *Ty, unsigned iType, Value *V, BasicBlock *IAE)
65 : Instruction(Ty, iType, &Op<0>(), 1, IAE) {
66 Op<0>() = V;
67 }
68
69public:
70 // allocate space for exactly one operand
71 void *operator new(size_t s) {
72 return User::operator new(s, 1);
73 }
74
75 /// Transparently provide more efficient getOperand methods.
76  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
77
78 // Methods for support type inquiry through isa, cast, and dyn_cast:
79 static bool classof(const Instruction *I) {
80 return I->isUnaryOp() ||
81 I->getOpcode() == Instruction::Alloca ||
82 I->getOpcode() == Instruction::Load ||
83 I->getOpcode() == Instruction::VAArg ||
84 I->getOpcode() == Instruction::ExtractValue ||
85 (I->getOpcode() >= CastOpsBegin && I->getOpcode() < CastOpsEnd);
86 }
87 static bool classof(const Value *V) {
88 return isa<Instruction>(V) && classof(cast<Instruction>(V));
89 }
90};
91
92template <>
93struct OperandTraits<UnaryInstruction> :
94 public FixedNumOperandTraits<UnaryInstruction, 1> {
95};
96
97DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value)
98
99//===----------------------------------------------------------------------===//
100// UnaryOperator Class
101//===----------------------------------------------------------------------===//
102
103class UnaryOperator : public UnaryInstruction {
104 void AssertOK();
105
106protected:
107 UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
108 const Twine &Name, Instruction *InsertBefore);
109 UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
110 const Twine &Name, BasicBlock *InsertAtEnd);
111
112 // Note: Instruction needs to be a friend here to call cloneImpl.
113 friend class Instruction;
114
115 UnaryOperator *cloneImpl() const;
116
117public:
118
119 /// Construct a unary instruction, given the opcode and an operand.
120 /// Optionally (if InsertBefore is specified) insert the instruction
121 /// into a BasicBlock right before the specified instruction. The specified
122 /// Instruction is allowed to be a dereferenced end iterator.
123 ///
124 static UnaryOperator *Create(UnaryOps Op, Value *S,
125 const Twine &Name = Twine(),
126 Instruction *InsertBefore = nullptr);
127
128 /// Construct a unary instruction, given the opcode and an operand.
129 /// Also automatically insert this instruction to the end of the
130 /// BasicBlock specified.
131 ///
132 static UnaryOperator *Create(UnaryOps Op, Value *S,
133 const Twine &Name,
134 BasicBlock *InsertAtEnd);
135
136 /// These methods just forward to Create, and are useful when you
137 /// statically know what type of instruction you're going to create. These
138 /// helpers just save some typing.
139#define HANDLE_UNARY_INST(N, OPC, CLASS) \
140 static UnaryOperator *Create##OPC(Value *V, const Twine &Name = "") {\
141 return Create(Instruction::OPC, V, Name);\
142 }
143#include "llvm/IR/Instruction.def"
144#define HANDLE_UNARY_INST(N, OPC, CLASS) \
145 static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
146 BasicBlock *BB) {\
147 return Create(Instruction::OPC, V, Name, BB);\
148 }
149#include "llvm/IR/Instruction.def"
150#define HANDLE_UNARY_INST(N, OPC, CLASS) \
151 static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
152 Instruction *I) {\
153 return Create(Instruction::OPC, V, Name, I);\
154 }
155#include "llvm/IR/Instruction.def"
156
157 static UnaryOperator *CreateWithCopiedFlags(UnaryOps Opc,
158 Value *V,
159 Instruction *CopyO,
160 const Twine &Name = "") {
161 UnaryOperator *UO = Create(Opc, V, Name);
162 UO->copyIRFlags(CopyO);
163 return UO;
164 }
165
166 static UnaryOperator *CreateFNegFMF(Value *Op, Instruction *FMFSource,
167 const Twine &Name = "") {
168 return CreateWithCopiedFlags(Instruction::FNeg, Op, FMFSource, Name);
169 }
170
171 UnaryOps getOpcode() const {
172 return static_cast<UnaryOps>(Instruction::getOpcode());
173 }
174
175 // Methods for support type inquiry through isa, cast, and dyn_cast:
176 static bool classof(const Instruction *I) {
177 return I->isUnaryOp();
178 }
179 static bool classof(const Value *V) {
180 return isa<Instruction>(V) && classof(cast<Instruction>(V));
181 }
182};
183
184//===----------------------------------------------------------------------===//
185// BinaryOperator Class
186//===----------------------------------------------------------------------===//
187
188class BinaryOperator : public Instruction {
189 void AssertOK();
190
191protected:
192 BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
193 const Twine &Name, Instruction *InsertBefore);
194 BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
195 const Twine &Name, BasicBlock *InsertAtEnd);
196
197 // Note: Instruction needs to be a friend here to call cloneImpl.
198 friend class Instruction;
199
200 BinaryOperator *cloneImpl() const;
201
202public:
203 // allocate space for exactly two operands
204 void *operator new(size_t s) {
205 return User::operator new(s, 2);
206 }
207
208 /// Transparently provide more efficient getOperand methods.
209  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
210
211 /// Construct a binary instruction, given the opcode and the two
212 /// operands. Optionally (if InsertBefore is specified) insert the instruction
213 /// into a BasicBlock right before the specified instruction. The specified
214 /// Instruction is allowed to be a dereferenced end iterator.
215 ///
216 static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
217 const Twine &Name = Twine(),
218 Instruction *InsertBefore = nullptr);
219
220 /// Construct a binary instruction, given the opcode and the two
221 /// operands. Also automatically insert this instruction to the end of the
222 /// BasicBlock specified.
223 ///
224 static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
225 const Twine &Name, BasicBlock *InsertAtEnd);
226
227 /// These methods just forward to Create, and are useful when you
228 /// statically know what type of instruction you're going to create. These
229 /// helpers just save some typing.
230#define HANDLE_BINARY_INST(N, OPC, CLASS) \
231 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
232 const Twine &Name = "") {\
233 return Create(Instruction::OPC, V1, V2, Name);\
234 }
235#include "llvm/IR/Instruction.def"
236#define HANDLE_BINARY_INST(N, OPC, CLASS) \
237 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
238 const Twine &Name, BasicBlock *BB) {\
239 return Create(Instruction::OPC, V1, V2, Name, BB);\
240 }
241#include "llvm/IR/Instruction.def"
242#define HANDLE_BINARY_INST(N, OPC, CLASS) \
243 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
244 const Twine &Name, Instruction *I) {\
245 return Create(Instruction::OPC, V1, V2, Name, I);\
246 }
247#include "llvm/IR/Instruction.def"
248
249 static BinaryOperator *CreateWithCopiedFlags(BinaryOps Opc,
250 Value *V1, Value *V2,
251 Instruction *CopyO,
252 const Twine &Name = "") {
253 BinaryOperator *BO = Create(Opc, V1, V2, Name);
254 BO->copyIRFlags(CopyO);
255 return BO;
256 }
257
258 static BinaryOperator *CreateFAddFMF(Value *V1, Value *V2,
259 Instruction *FMFSource,
260 const Twine &Name = "") {
261 return CreateWithCopiedFlags(Instruction::FAdd, V1, V2, FMFSource, Name);
262 }
263 static BinaryOperator *CreateFSubFMF(Value *V1, Value *V2,
264 Instruction *FMFSource,
265 const Twine &Name = "") {
266 return CreateWithCopiedFlags(Instruction::FSub, V1, V2, FMFSource, Name);
267 }
268 static BinaryOperator *CreateFMulFMF(Value *V1, Value *V2,
269 Instruction *FMFSource,
270 const Twine &Name = "") {
271 return CreateWithCopiedFlags(Instruction::FMul, V1, V2, FMFSource, Name);
272 }
273 static BinaryOperator *CreateFDivFMF(Value *V1, Value *V2,
274 Instruction *FMFSource,
275 const Twine &Name = "") {
276 return CreateWithCopiedFlags(Instruction::FDiv, V1, V2, FMFSource, Name);
277 }
278 static BinaryOperator *CreateFRemFMF(Value *V1, Value *V2,
279 Instruction *FMFSource,
280 const Twine &Name = "") {
281 return CreateWithCopiedFlags(Instruction::FRem, V1, V2, FMFSource, Name);
282 }
283 static BinaryOperator *CreateFNegFMF(Value *Op, Instruction *FMFSource,
284 const Twine &Name = "") {
285 Value *Zero = ConstantFP::getNegativeZero(Op->getType());
286 return CreateWithCopiedFlags(Instruction::FSub, Zero, Op, FMFSource, Name);
287 }
288
289 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
290 const Twine &Name = "") {
291 BinaryOperator *BO = Create(Opc, V1, V2, Name);
292 BO->setHasNoSignedWrap(true);
293 return BO;
294 }
295 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
296 const Twine &Name, BasicBlock *BB) {
297 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
298 BO->setHasNoSignedWrap(true);
299 return BO;
300 }
301 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
302 const Twine &Name, Instruction *I) {
303 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
304 BO->setHasNoSignedWrap(true);
305 return BO;
306 }
307
308 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
309 const Twine &Name = "") {
310 BinaryOperator *BO = Create(Opc, V1, V2, Name);
311 BO->setHasNoUnsignedWrap(true);
312 return BO;
313 }
314 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
315 const Twine &Name, BasicBlock *BB) {
316 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
317 BO->setHasNoUnsignedWrap(true);
318 return BO;
319 }
320 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
321 const Twine &Name, Instruction *I) {
322 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
323 BO->setHasNoUnsignedWrap(true);
324 return BO;
325 }
326
327 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
328 const Twine &Name = "") {
329 BinaryOperator *BO = Create(Opc, V1, V2, Name);
330 BO->setIsExact(true);
331 return BO;
332 }
333 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
334 const Twine &Name, BasicBlock *BB) {
335 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
336 BO->setIsExact(true);
337 return BO;
338 }
339 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
340 const Twine &Name, Instruction *I) {
341 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
342 BO->setIsExact(true);
343 return BO;
344 }
345
346#define DEFINE_HELPERS(OPC, NUWNSWEXACT) \
347 static BinaryOperator *Create##NUWNSWEXACT##OPC(Value *V1, Value *V2, \
348 const Twine &Name = "") { \
349 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name); \
350 } \
351 static BinaryOperator *Create##NUWNSWEXACT##OPC( \
352 Value *V1, Value *V2, const Twine &Name, BasicBlock *BB) { \
353 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, BB); \
354 } \
355 static BinaryOperator *Create##NUWNSWEXACT##OPC( \
356 Value *V1, Value *V2, const Twine &Name, Instruction *I) { \
357 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, I); \
358 }
359
360 DEFINE_HELPERS(Add, NSW) // CreateNSWAdd
361 DEFINE_HELPERS(Add, NUW) // CreateNUWAdd
362 DEFINE_HELPERS(Sub, NSW) // CreateNSWSub
363 DEFINE_HELPERS(Sub, NUW) // CreateNUWSub
364 DEFINE_HELPERS(Mul, NSW) // CreateNSWMul
365 DEFINE_HELPERS(Mul, NUW) // CreateNUWMul
366 DEFINE_HELPERS(Shl, NSW) // CreateNSWShl
367 DEFINE_HELPERS(Shl, NUW) // CreateNUWShl
368
369 DEFINE_HELPERS(SDiv, Exact) // CreateExactSDiv
370 DEFINE_HELPERS(UDiv, Exact) // CreateExactUDiv
371 DEFINE_HELPERS(AShr, Exact) // CreateExactAShr
372 DEFINE_HELPERS(LShr, Exact) // CreateExactLShr
373
374#undef DEFINE_HELPERS
375
376 /// Helper functions to construct and inspect unary operations (NEG and NOT)
377 /// via binary operators SUB and XOR:
378 ///
379 /// Create the NEG and NOT instructions out of SUB and XOR instructions.
380 ///
381 static BinaryOperator *CreateNeg(Value *Op, const Twine &Name = "",
382 Instruction *InsertBefore = nullptr);
383 static BinaryOperator *CreateNeg(Value *Op, const Twine &Name,
384 BasicBlock *InsertAtEnd);
385 static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name = "",
386 Instruction *InsertBefore = nullptr);
387 static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name,
388 BasicBlock *InsertAtEnd);
389 static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name = "",
390 Instruction *InsertBefore = nullptr);
391 static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name,
392 BasicBlock *InsertAtEnd);
393 static BinaryOperator *CreateFNeg(Value *Op, const Twine &Name = "",
394 Instruction *InsertBefore = nullptr);
395 static BinaryOperator *CreateFNeg(Value *Op, const Twine &Name,
396 BasicBlock *InsertAtEnd);
397 static BinaryOperator *CreateNot(Value *Op, const Twine &Name = "",
398 Instruction *InsertBefore = nullptr);
399 static BinaryOperator *CreateNot(Value *Op, const Twine &Name,
400 BasicBlock *InsertAtEnd);
401
402 BinaryOps getOpcode() const {
403 return static_cast<BinaryOps>(Instruction::getOpcode());
404 }
405
406 /// Exchange the two operands to this instruction.
407 /// This is safe to use on any binary instruction and
408 /// does not modify the semantics of the instruction. If the instruction
409 /// cannot be reversed (i.e., it's a Div), then return true.
410 ///
411 bool swapOperands();
412
413 // Methods for support type inquiry through isa, cast, and dyn_cast:
414 static bool classof(const Instruction *I) {
415 return I->isBinaryOp();
416 }
417 static bool classof(const Value *V) {
418 return isa<Instruction>(V) && classof(cast<Instruction>(V));
419 }
420};
421
422template <>
423struct OperandTraits<BinaryOperator> :
424 public FixedNumOperandTraits<BinaryOperator, 2> {
425};
426
427DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)
428
429//===----------------------------------------------------------------------===//
430// CastInst Class
431//===----------------------------------------------------------------------===//
432
433/// This is the base class for all instructions that perform data
434/// casts. It is simply provided so that instruction category testing
435/// can be performed with code like:
436///
437/// if (isa<CastInst>(Instr)) { ... }
438/// Base class of casting instructions.
439class CastInst : public UnaryInstruction {
440protected:
441 /// Constructor with insert-before-instruction semantics for subclasses
442 CastInst(Type *Ty, unsigned iType, Value *S,
443 const Twine &NameStr = "", Instruction *InsertBefore = nullptr)
444 : UnaryInstruction(Ty, iType, S, InsertBefore) {
445 setName(NameStr);
446 }
447 /// Constructor with insert-at-end-of-block semantics for subclasses
448 CastInst(Type *Ty, unsigned iType, Value *S,
449 const Twine &NameStr, BasicBlock *InsertAtEnd)
450 : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
451 setName(NameStr);
452 }
453
454public:
455 /// Provides a way to construct any of the CastInst subclasses using an
456 /// opcode instead of the subclass's constructor. The opcode must be in the
457 /// CastOps category (Instruction::isCast(opcode) returns true). This
458 /// constructor has insert-before-instruction semantics to automatically
459 /// insert the new CastInst before InsertBefore (if it is non-null).
460 /// Construct any of the CastInst subclasses
461 static CastInst *Create(
462 Instruction::CastOps, ///< The opcode of the cast instruction
463 Value *S, ///< The value to be casted (operand 0)
464 Type *Ty, ///< The type to which cast should be made
465 const Twine &Name = "", ///< Name for the instruction
466 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
467 );
468 /// Provides a way to construct any of the CastInst subclasses using an
469 /// opcode instead of the subclass's constructor. The opcode must be in the
470 /// CastOps category. This constructor has insert-at-end-of-block semantics
471 /// to automatically insert the new CastInst at the end of InsertAtEnd (if
472 /// it's non-null).
473 /// Construct any of the CastInst subclasses
474 static CastInst *Create(
475 Instruction::CastOps, ///< The opcode for the cast instruction
476 Value *S, ///< The value to be casted (operand 0)
477 Type *Ty, ///< The type to which operand is casted
478 const Twine &Name, ///< The name for the instruction
479 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
480 );
481
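A short sketch of the opcode-driven factory; because the opcode is plain data, the same call site can build any cast subclass (names illustrative):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Zero-extend V to WideTy; Create() dispatches to the ZExtInst subclass.
static Value *widen(Value *V, Type *WideTy, Instruction *InsertPt) {
  return CastInst::Create(Instruction::ZExt, V, WideTy, "widened", InsertPt);
}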
482 /// Create a ZExt or BitCast cast instruction
483 static CastInst *CreateZExtOrBitCast(
484 Value *S, ///< The value to be casted (operand 0)
485 Type *Ty, ///< The type to which cast should be made
486 const Twine &Name = "", ///< Name for the instruction
487 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
488 );
489
490 /// Create a ZExt or BitCast cast instruction
491 static CastInst *CreateZExtOrBitCast(
492 Value *S, ///< The value to be casted (operand 0)
493 Type *Ty, ///< The type to which operand is casted
494 const Twine &Name, ///< The name for the instruction
495 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
496 );
497
498 /// Create a SExt or BitCast cast instruction
499 static CastInst *CreateSExtOrBitCast(
500 Value *S, ///< The value to be casted (operand 0)
501 Type *Ty, ///< The type to which cast should be made
502 const Twine &Name = "", ///< Name for the instruction
503 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
504 );
505
506 /// Create a SExt or BitCast cast instruction
507 static CastInst *CreateSExtOrBitCast(
508 Value *S, ///< The value to be casted (operand 0)
509 Type *Ty, ///< The type to which operand is casted
510 const Twine &Name, ///< The name for the instruction
511 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
512 );
513
514 /// Create a BitCast, AddrSpaceCast, or PtrToInt cast instruction.
515 static CastInst *CreatePointerCast(
516 Value *S, ///< The pointer value to be casted (operand 0)
517 Type *Ty, ///< The type to which operand is casted
518 const Twine &Name, ///< The name for the instruction
519 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
520 );
521
522 /// Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
523 static CastInst *CreatePointerCast(
524 Value *S, ///< The pointer value to be casted (operand 0)
525 Type *Ty, ///< The type to which cast should be made
526 const Twine &Name = "", ///< Name for the instruction
527 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
528 );
529
530 /// Create a BitCast or an AddrSpaceCast cast instruction.
531 static CastInst *CreatePointerBitCastOrAddrSpaceCast(
532 Value *S, ///< The pointer value to be casted (operand 0)
533 Type *Ty, ///< The type to which operand is casted
534 const Twine &Name, ///< The name for the instruction
535 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
536 );
537
538 /// Create a BitCast or an AddrSpaceCast cast instruction.
539 static CastInst *CreatePointerBitCastOrAddrSpaceCast(
540 Value *S, ///< The pointer value to be casted (operand 0)
541 Type *Ty, ///< The type to which cast should be made
542 const Twine &Name = "", ///< Name for the instruction
543 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
544 );
545
546 /// Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
547 ///
548 /// If the value is a pointer type and the destination an integer type,
549 /// creates a PtrToInt cast. If the value is an integer type and the
550 /// destination a pointer type, creates an IntToPtr cast. Otherwise, creates
551 /// a bitcast.
552 static CastInst *CreateBitOrPointerCast(
553 Value *S, ///< The pointer value to be casted (operand 0)
554 Type *Ty, ///< The type to which cast should be made
555 const Twine &Name = "", ///< Name for the instruction
556 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
557 );
558
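The three-way dispatch described above, in use (a sketch; V and InsertPt assumed):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// ptr -> int gives ptrtoint, int -> ptr gives inttoptr, otherwise bitcast.
static Value *reinterpretAs(Value *V, Type *DstTy, Instruction *InsertPt) {
  return CastInst::CreateBitOrPointerCast(V, DstTy, "reint", InsertPt);
}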
559 /// Create a ZExt, BitCast, or Trunc for int -> int casts.
560 static CastInst *CreateIntegerCast(
561 Value *S, ///< The integer value to be casted (operand 0)
562 Type *Ty, ///< The type to which cast should be made
563 bool isSigned, ///< Whether to regard S as signed or not
564 const Twine &Name = "", ///< Name for the instruction
565 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
566 );
567
568 /// Create a ZExt, BitCast, or Trunc for int -> int casts.
569 static CastInst *CreateIntegerCast(
570 Value *S, ///< The integer value to be casted (operand 0)
571 Type *Ty, ///< The integer type to which operand is casted
572 bool isSigned, ///< Whether to regard S as signed or not
573 const Twine &Name, ///< The name for the instruction
574 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
575 );
576
577 /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
578 static CastInst *CreateFPCast(
579 Value *S, ///< The floating point value to be casted
580 Type *Ty, ///< The floating point type to cast to
581 const Twine &Name = "", ///< Name for the instruction
582 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
583 );
584
585 /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
586 static CastInst *CreateFPCast(
587 Value *S, ///< The floating point value to be casted
588 Type *Ty, ///< The floating point type to cast to
589 const Twine &Name, ///< The name for the instruction
590 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
591 );
592
593 /// Create a Trunc or BitCast cast instruction
594 static CastInst *CreateTruncOrBitCast(
595 Value *S, ///< The value to be casted (operand 0)
596 Type *Ty, ///< The type to which cast should be made
597 const Twine &Name = "", ///< Name for the instruction
598 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
599 );
600
601 /// Create a Trunc or BitCast cast instruction
602 static CastInst *CreateTruncOrBitCast(
603 Value *S, ///< The value to be casted (operand 0)
604 Type *Ty, ///< The type to which operand is casted
605 const Twine &Name, ///< The name for the instruction
606 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
607 );
608
609 /// Check whether it is valid to call getCastOpcode for these types.
610 static bool isCastable(
611 Type *SrcTy, ///< The Type from which the value should be cast.
612 Type *DestTy ///< The Type to which the value should be cast.
613 );
614
615 /// Check whether a bitcast between these types is valid
616 static bool isBitCastable(
617 Type *SrcTy, ///< The Type from which the value should be cast.
618 Type *DestTy ///< The Type to which the value should be cast.
619 );
620
621 /// Check whether a bitcast, inttoptr, or ptrtoint cast between these
622 /// types is valid and a no-op.
623 ///
624 /// This ensures that any pointer<->integer cast has enough bits in the
625 /// integer and any other cast is a bitcast.
626 static bool isBitOrNoopPointerCastable(
627 Type *SrcTy, ///< The Type from which the value should be cast.
628 Type *DestTy, ///< The Type to which the value should be cast.
629 const DataLayout &DL);
630
631 /// Returns the opcode necessary to cast Val into Ty using usual casting
632 /// rules.
633 /// Infer the opcode for cast operand and type
634 static Instruction::CastOps getCastOpcode(
635 const Value *Val, ///< The value to cast
636 bool SrcIsSigned, ///< Whether to treat the source as signed
637 Type *Ty, ///< The Type to which the value should be casted
638 bool DstIsSigned ///< Whether to treat the destination as signed
639 );
640
641 /// There are several places where we need to know if a cast instruction
642 /// only deals with integer source and destination types. To simplify that
643 /// logic, this method is provided.
644 /// @returns true iff the cast's operand and destination types are both integral.
645 /// Determine if this is an integer-only cast.
646 bool isIntegerCast() const;
647
648 /// A lossless cast is one that does not alter the basic value. It implies
649 /// a no-op cast but is more stringent, preventing things like int->float,
650 /// long->double, or int->ptr.
651 /// @returns true iff the cast is lossless.
652 /// Determine if this is a lossless cast.
653 bool isLosslessCast() const;
654
655 /// A no-op cast is one that can be effected without changing any bits.
656 /// It implies that the source and destination types are the same size. The
657 /// DataLayout argument is to determine the pointer size when examining casts
658 /// involving Integer and Pointer types. They are no-op casts if the integer
659 /// is the same size as the pointer. However, pointer size varies with
660 /// platform.
661 /// Determine if the described cast is a no-op cast.
662 static bool isNoopCast(
663 Instruction::CastOps Opcode, ///< Opcode of cast
664 Type *SrcTy, ///< SrcTy of cast
665 Type *DstTy, ///< DstTy of cast
666 const DataLayout &DL ///< DataLayout to get the Int Ptr type from.
667 );
668
669 /// Determine if this cast is a no-op cast.
670 ///
671 /// \param DL is the DataLayout to determine pointer size.
672 bool isNoopCast(const DataLayout &DL) const;
673
674 /// Determine how a pair of casts can be eliminated, if they can be at all.
675 /// This is a helper function for both CastInst and ConstantExpr.
676 /// @returns 0 if the CastInst pair can't be eliminated, otherwise
677 /// returns Instruction::CastOps value for a cast that can replace
678 /// the pair, casting SrcTy to DstTy.
679 /// Determine if a cast pair is eliminable
680 static unsigned isEliminableCastPair(
681 Instruction::CastOps firstOpcode, ///< Opcode of first cast
682 Instruction::CastOps secondOpcode, ///< Opcode of second cast
683 Type *SrcTy, ///< SrcTy of 1st cast
684 Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
685 Type *DstTy, ///< DstTy of 2nd cast
686 Type *SrcIntPtrTy, ///< Integer type corresponding to Ptr SrcTy, or null
687 Type *MidIntPtrTy, ///< Integer type corresponding to Ptr MidTy, or null
688 Type *DstIntPtrTy ///< Integer type corresponding to Ptr DstTy, or null
689 );
690
691 /// Return the opcode of this CastInst
692 Instruction::CastOps getOpcode() const {
693 return Instruction::CastOps(Instruction::getOpcode());
694 }
695
696 /// Return the source type, as a convenience
697 Type* getSrcTy() const { return getOperand(0)->getType(); }
698 /// Return the destination type, as a convenience
699 Type* getDestTy() const { return getType(); }
700
701 /// This method can be used to determine if a cast from S to DstTy using
702 /// Opcode op is valid or not.
703 /// @returns true iff the proposed cast is valid.
704 /// Determine if a cast is valid without creating one.
705 static bool castIsValid(Instruction::CastOps op, Value *S, Type *DstTy);
706
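These predicates pair naturally with the factories above. A defensive sketch (treating both sides as unsigned is an arbitrary assumption here):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Infer an opcode under the usual casting rules, validate it, then build.
static CastInst *tryCast(Value *V, Type *DstTy, Instruction *InsertPt) {
  if (!CastInst::isCastable(V->getType(), DstTy))
    return nullptr;
  Instruction::CastOps Op = CastInst::getCastOpcode(
      V, /*SrcIsSigned=*/false, DstTy, /*DstIsSigned=*/false);
  if (!CastInst::castIsValid(Op, V, DstTy))
    return nullptr;
  return CastInst::Create(Op, V, DstTy, "c", InsertPt);
}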
707 /// Methods for support type inquiry through isa, cast, and dyn_cast:
708 static bool classof(const Instruction *I) {
709 return I->isCast();
710 }
711 static bool classof(const Value *V) {
712 return isa<Instruction>(V) && classof(cast<Instruction>(V));
713 }
714};
715
716//===----------------------------------------------------------------------===//
717// CmpInst Class
718//===----------------------------------------------------------------------===//
719
720/// This class is the base class for the comparison instructions.
721/// Abstract base class of comparison instructions.
722class CmpInst : public Instruction {
723public:
724 /// This enumeration lists the possible predicates for CmpInst subclasses.
725 /// Values in the range 0-31 are reserved for FCmpInst, while values in the
726 /// range 32-64 are reserved for ICmpInst. This is necessary to ensure the
727 /// predicate values are not overlapping between the classes.
728 ///
729 /// Some passes (e.g. InstCombine) depend on the bit-wise characteristics of
730 /// FCMP_* values. Changing the bit patterns requires a potential change to
731 /// those passes.
732 enum Predicate {
733 // Opcode U L G E Intuitive operation
734 FCMP_FALSE = 0, ///< 0 0 0 0 Always false (always folded)
735 FCMP_OEQ = 1, ///< 0 0 0 1 True if ordered and equal
736 FCMP_OGT = 2, ///< 0 0 1 0 True if ordered and greater than
737 FCMP_OGE = 3, ///< 0 0 1 1 True if ordered and greater than or equal
738 FCMP_OLT = 4, ///< 0 1 0 0 True if ordered and less than
739 FCMP_OLE = 5, ///< 0 1 0 1 True if ordered and less than or equal
740 FCMP_ONE = 6, ///< 0 1 1 0 True if ordered and operands are unequal
741 FCMP_ORD = 7, ///< 0 1 1 1 True if ordered (no nans)
742 FCMP_UNO = 8, ///< 1 0 0 0 True if unordered: isnan(X) | isnan(Y)
743 FCMP_UEQ = 9, ///< 1 0 0 1 True if unordered or equal
744 FCMP_UGT = 10, ///< 1 0 1 0 True if unordered or greater than
745 FCMP_UGE = 11, ///< 1 0 1 1 True if unordered, greater than, or equal
746 FCMP_ULT = 12, ///< 1 1 0 0 True if unordered or less than
747 FCMP_ULE = 13, ///< 1 1 0 1 True if unordered, less than, or equal
748 FCMP_UNE = 14, ///< 1 1 1 0 True if unordered or not equal
749 FCMP_TRUE = 15, ///< 1 1 1 1 Always true (always folded)
750 FIRST_FCMP_PREDICATE = FCMP_FALSE,
751 LAST_FCMP_PREDICATE = FCMP_TRUE,
752 BAD_FCMP_PREDICATE = FCMP_TRUE + 1,
753 ICMP_EQ = 32, ///< equal
754 ICMP_NE = 33, ///< not equal
755 ICMP_UGT = 34, ///< unsigned greater than
756 ICMP_UGE = 35, ///< unsigned greater or equal
757 ICMP_ULT = 36, ///< unsigned less than
758 ICMP_ULE = 37, ///< unsigned less or equal
759 ICMP_SGT = 38, ///< signed greater than
760 ICMP_SGE = 39, ///< signed greater or equal
761 ICMP_SLT = 40, ///< signed less than
762 ICMP_SLE = 41, ///< signed less or equal
763 FIRST_ICMP_PREDICATE = ICMP_EQ,
764 LAST_ICMP_PREDICATE = ICMP_SLE,
765 BAD_ICMP_PREDICATE = ICMP_SLE + 1
766 };
767
768protected:
769 CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
770 Value *LHS, Value *RHS, const Twine &Name = "",
771 Instruction *InsertBefore = nullptr,
772 Instruction *FlagsSource = nullptr);
773
774 CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
775 Value *LHS, Value *RHS, const Twine &Name,
776 BasicBlock *InsertAtEnd);
777
778public:
779 // allocate space for exactly two operands
780 void *operator new(size_t s) {
781 return User::operator new(s, 2);
782 }
783
784 /// Construct a compare instruction, given the opcode, the predicate and
785 /// the two operands. Optionally (if InstBefore is specified) insert the
786 /// instruction into a BasicBlock right before the specified instruction.
787 /// The specified Instruction is allowed to be a dereferenced end iterator.
788 /// Create a CmpInst
789 static CmpInst *Create(OtherOps Op,
790 Predicate predicate, Value *S1,
791 Value *S2, const Twine &Name = "",
792 Instruction *InsertBefore = nullptr);
793
794 /// Construct a compare instruction, given the opcode, the predicate and the
795 /// two operands. Also automatically insert this instruction to the end of
796 /// the BasicBlock specified.
797 /// Create a CmpInst
798 static CmpInst *Create(OtherOps Op, Predicate predicate, Value *S1,
799 Value *S2, const Twine &Name, BasicBlock *InsertAtEnd);
800
801 /// Get the opcode casted to the right type
802 OtherOps getOpcode() const {
803 return static_cast<OtherOps>(Instruction::getOpcode());
804 }
805
806 /// Return the predicate for this instruction.
807 Predicate getPredicate() const {
808 return Predicate(getSubclassDataFromInstruction());
809 }
810
811 /// Set the predicate for this instruction to the specified value.
812 void setPredicate(Predicate P) { setInstructionSubclassData(P); }
813
814 static bool isFPPredicate(Predicate P) {
815 return P >= FIRST_FCMP_PREDICATE && P <= LAST_FCMP_PREDICATE;
816 }
817
818 static bool isIntPredicate(Predicate P) {
819 return P >= FIRST_ICMP_PREDICATE && P <= LAST_ICMP_PREDICATE;
24. Assuming 'P' is < FIRST_ICMP_PREDICATE
25. Returning zero, which participates in a condition later
820 }
821
822 static StringRef getPredicateName(Predicate P);
823
824 bool isFPPredicate() const { return isFPPredicate(getPredicate()); }
825 bool isIntPredicate() const { return isIntPredicate(getPredicate()); }
826
827 /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
828 /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
829 /// @returns the inverse predicate for the instruction's current predicate.
830 /// Return the inverse of the instruction's predicate.
831 Predicate getInversePredicate() const {
832 return getInversePredicate(getPredicate());
833 }
834
835 /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
836 /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
837 /// @returns the inverse predicate for predicate provided in \p pred.
838 /// Return the inverse of a given predicate
839 static Predicate getInversePredicate(Predicate pred);
840
841 /// For example, EQ->EQ, SLE->SGE, ULT->UGT,
842 /// OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
843 /// @returns the predicate that would be the result of exchanging the two
844 /// operands of the CmpInst instruction without changing the result
845 /// produced.
846 /// Return the predicate as if the operands were swapped
847 Predicate getSwappedPredicate() const {
848 return getSwappedPredicate(getPredicate());
849 }
850
851 /// This is a static version that you can use without an instruction
852 /// available.
853 /// Return the predicate as if the operands were swapped.
854 static Predicate getSwappedPredicate(Predicate pred);
855
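Concretely, for signed less-than (a sketch; the expected results follow from the examples above):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// The inverse negates the result ("a < b" becomes "a >= b"); the swapped
// form preserves it under operand exchange ("a < b" becomes "b > a").
static void predicateExamples() {
  CmpInst::Predicate Inv = CmpInst::getInversePredicate(CmpInst::ICMP_SLT);
  CmpInst::Predicate Swp = CmpInst::getSwappedPredicate(CmpInst::ICMP_SLT);
  (void)Inv; // ICMP_SGE
  (void)Swp; // ICMP_SGT
}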
856 /// For predicate of kind "is X or equal to 0" returns the predicate "is X".
857 /// For predicate of kind "is X" returns the predicate "is X or equal to 0".
858 /// Other kinds of predicates are not supported.
859 /// @returns the predicate with the "or equal to 0" component removed if
860 /// it was present, and added if it was not.
861 /// Return the flipped strictness of predicate
862 Predicate getFlippedStrictnessPredicate() const {
863 return getFlippedStrictnessPredicate(getPredicate());
864 }
865
866 /// This is a static version that you can use without an instruction
867 /// available.
868 /// Return the flipped strictness of predicate
869 static Predicate getFlippedStrictnessPredicate(Predicate pred);
870
871 /// For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
872 /// Returns the non-strict version of strict comparisons.
873 Predicate getNonStrictPredicate() const {
874 return getNonStrictPredicate(getPredicate());
875 }
876
877 /// This is a static version that you can use without an instruction
878 /// available.
879 /// @returns the non-strict version of comparison provided in \p pred.
880 /// If \p pred is not a strict comparison predicate, returns \p pred.
881 /// Returns the non-strict version of strict comparisons.
882 static Predicate getNonStrictPredicate(Predicate pred);
883
884 /// Provide more efficient getOperand methods.
885   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
886
887 /// This is just a convenience that dispatches to the subclasses.
888 /// Swap the operands and adjust predicate accordingly to retain
889 /// the same comparison.
890 void swapOperands();
891
892 /// This is just a convenience that dispatches to the subclasses.
893 /// Determine if this CmpInst is commutative.
894 bool isCommutative() const;
895
896 /// This is just a convenience that dispatches to the subclasses.
897 /// Determine if this is an equals/not equals predicate.
898 bool isEquality() const;
899
900 /// @returns true if the comparison is signed, false otherwise.
901 /// Determine if this instruction is using a signed comparison.
902 bool isSigned() const {
903 return isSigned(getPredicate());
904 }
905
906 /// @returns true if the comparison is unsigned, false otherwise.
907 /// Determine if this instruction is using an unsigned comparison.
908 bool isUnsigned() const {
909 return isUnsigned(getPredicate());
910 }
911
912 /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE; SLT fails an assert.
913 /// @returns the signed version of the unsigned predicate pred.
914 /// return the signed version of a predicate
915 static Predicate getSignedPredicate(Predicate pred);
916
917 /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE; SLT fails an assert.
918 /// @returns the signed version of the predicate for this instruction (which
919 /// has to be an unsigned predicate).
920 /// return the signed version of a predicate
921 Predicate getSignedPredicate() {
922 return getSignedPredicate(getPredicate());
923 }
924
925 /// This is just a convenience.
926 /// Determine if this is true when both operands are the same.
927 bool isTrueWhenEqual() const {
928 return isTrueWhenEqual(getPredicate());
929 }
930
931 /// This is just a convenience.
932 /// Determine if this is false when both operands are the same.
933 bool isFalseWhenEqual() const {
934 return isFalseWhenEqual(getPredicate());
935 }
936
937 /// @returns true if the predicate is unsigned, false otherwise.
938 /// Determine if the predicate is an unsigned operation.
939 static bool isUnsigned(Predicate predicate);
940
941 /// @returns true if the predicate is signed, false otherwise.
942 /// Determine if the predicate is a signed operation.
943 static bool isSigned(Predicate predicate);
944
945 /// Determine if the predicate is an ordered operation.
946 static bool isOrdered(Predicate predicate);
947
948 /// Determine if the predicate is an unordered operation.
949 static bool isUnordered(Predicate predicate);
950
951 /// Determine if the predicate is true when comparing a value with itself.
952 static bool isTrueWhenEqual(Predicate predicate);
953
954 /// Determine if the predicate is false when comparing a value with itself.
955 static bool isFalseWhenEqual(Predicate predicate);
956
957 /// Determine if Pred1 implies Pred2 is true when two compares have matching
958 /// operands.
959 static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2);
960
961 /// Determine if Pred1 implies Pred2 is false when two compares have matching
962 /// operands.
963 static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2);
964
965 /// Methods for support type inquiry through isa, cast, and dyn_cast:
966 static bool classof(const Instruction *I) {
967 return I->getOpcode() == Instruction::ICmp ||
968 I->getOpcode() == Instruction::FCmp;
969 }
970 static bool classof(const Value *V) {
971 return isa<Instruction>(V) && classof(cast<Instruction>(V));
972 }
973
974 /// Create a result type for fcmp/icmp
975 static Type* makeCmpResultType(Type* opnd_type) {
976 if (VectorType* vt = dyn_cast<VectorType>(opnd_type)) {
977 return VectorType::get(Type::getInt1Ty(opnd_type->getContext()),
978 vt->getElementCount());
979 }
980 return Type::getInt1Ty(opnd_type->getContext());
981 }
982
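Putting the factory and the result-type rule together (a sketch; A and B are assumed to share an integer or integer-vector type):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// The result type is makeCmpResultType(A->getType()): i1 for scalars and
// <N x i1> for vector operands with element count N.
static Value *emitSignedLess(Value *A, Value *B, Instruction *InsertPt) {
  return CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_SLT, A, B, "lt",
                         InsertPt);
}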
983private:
984 // Shadow Value::setValueSubclassData with a private forwarding method so that
985 // subclasses cannot accidentally use it.
986 void setValueSubclassData(unsigned short D) {
987 Value::setValueSubclassData(D);
988 }
989};
990
991// FIXME: these are redundant if CmpInst < BinaryOperator
992template <>
993struct OperandTraits<CmpInst> : public FixedNumOperandTraits<CmpInst, 2> {
994};
995
996DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CmpInst, Value)
997
998/// A lightweight accessor for an operand bundle meant to be passed
999/// around by value.
1000struct OperandBundleUse {
1001 ArrayRef<Use> Inputs;
1002
1003 OperandBundleUse() = default;
1004 explicit OperandBundleUse(StringMapEntry<uint32_t> *Tag, ArrayRef<Use> Inputs)
1005 : Inputs(Inputs), Tag(Tag) {}
1006
1007 /// Return true if the operand at index \p Idx in this operand bundle
1008 /// has the attribute A.
1009 bool operandHasAttr(unsigned Idx, Attribute::AttrKind A) const {
1010 if (isDeoptOperandBundle())
1011 if (A == Attribute::ReadOnly || A == Attribute::NoCapture)
1012 return Inputs[Idx]->getType()->isPointerTy();
1013
1014 // Conservative answer: no operands have any attributes.
1015 return false;
1016 }
1017
1018 /// Return the tag of this operand bundle as a string.
1019 StringRef getTagName() const {
1020 return Tag->getKey();
1021 }
1022
1023 /// Return the tag of this operand bundle as an integer.
1024 ///
1025 /// Operand bundle tags are interned by LLVMContextImpl::getOrInsertBundleTag,
1026 /// and this function returns the unique integer that getOrInsertBundleTag
1027 /// associated with the tag of this operand bundle.
1028 uint32_t getTagID() const {
1029 return Tag->getValue();
1030 }
1031
1032 /// Return true if this is a "deopt" operand bundle.
1033 bool isDeoptOperandBundle() const {
1034 return getTagID() == LLVMContext::OB_deopt;
1035 }
1036
1037 /// Return true if this is a "funclet" operand bundle.
1038 bool isFuncletOperandBundle() const {
1039 return getTagID() == LLVMContext::OB_funclet;
1040 }
1041
1042private:
1043 /// Pointer to an entry in LLVMContextImpl::getOrInsertBundleTag.
1044 StringMapEntry<uint32_t> *Tag;
1045};
1046
1047/// A container for an operand bundle being viewed as a set of values
1048/// rather than a set of uses.
1049///
1050/// Unlike OperandBundleUse, OperandBundleDefT owns the memory it carries, and
1051/// so it is possible to create and pass around "self-contained" instances of
1052/// OperandBundleDef and ConstOperandBundleDef.
1053template <typename InputTy> class OperandBundleDefT {
1054 std::string Tag;
1055 std::vector<InputTy> Inputs;
1056
1057public:
1058 explicit OperandBundleDefT(std::string Tag, std::vector<InputTy> Inputs)
1059 : Tag(std::move(Tag)), Inputs(std::move(Inputs)) {}
1060 explicit OperandBundleDefT(std::string Tag, ArrayRef<InputTy> Inputs)
1061 : Tag(std::move(Tag)), Inputs(Inputs) {}
1062
1063 explicit OperandBundleDefT(const OperandBundleUse &OBU) {
1064 Tag = OBU.getTagName();
1065 Inputs.insert(Inputs.end(), OBU.Inputs.begin(), OBU.Inputs.end());
1066 }
1067
1068 ArrayRef<InputTy> inputs() const { return Inputs; }
1069
1070 using input_iterator = typename std::vector<InputTy>::const_iterator;
1071
1072 size_t input_size() const { return Inputs.size(); }
1073 input_iterator input_begin() const { return Inputs.begin(); }
1074 input_iterator input_end() const { return Inputs.end(); }
1075
1076 StringRef getTag() const { return Tag; }
1077};
1078
1079using OperandBundleDef = OperandBundleDefT<Value *>;
1080using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
1081
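For example, detaching a "deopt" bundle's state so it can outlive the originating call site (a sketch; the tag string is one of the interned tags described above):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/InstrTypes.h"
#include <vector>
using namespace llvm;

// OperandBundleDef owns its tag and inputs, unlike OperandBundleUse, so
// the returned object stays valid after the instruction is erased.
static OperandBundleDef makeDeoptBundle(ArrayRef<Value *> State) {
  return OperandBundleDef("deopt",
                          std::vector<Value *>(State.begin(), State.end()));
}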
1082//===----------------------------------------------------------------------===//
1083// CallBase Class
1084//===----------------------------------------------------------------------===//
1085
1086/// Base class for all callable instructions (InvokeInst and CallInst)
1087/// Holds everything related to calling a function.
1088///
1089/// All call-like instructions are required to use a common operand layout:
1090/// - Zero or more arguments to the call,
1091/// - Zero or more operand bundles with zero or more operand inputs each
1092/// bundle,
1093/// - Zero or more subclass controlled operands
1094/// - The called function.
1095///
1096/// This allows this base class to easily access the called function and the
1097/// start of the arguments without knowing how many other operands a particular
1098/// subclass requires. Note that accessing the end of the argument list isn't
1099/// as cheap as most other operations on the base class.
1100class CallBase : public Instruction {
1101protected:
1102 /// The last operand is the called operand.
1103 static constexpr int CalledOperandOpEndIdx = -1;
1104
1105 AttributeList Attrs; ///< parameter attributes for callable
1106 FunctionType *FTy;
1107
1108 template <class... ArgsTy>
1109 CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
1110 : Instruction(std::forward<ArgsTy>(Args)...), Attrs(A), FTy(FT) {}
1111
1112 using Instruction::Instruction;
1113
1114 bool hasDescriptor() const { return Value::HasDescriptor; }
1115
1116 unsigned getNumSubclassExtraOperands() const {
1117 switch (getOpcode()) {
1118 case Instruction::Call:
1119 return 0;
1120 case Instruction::Invoke:
1121 return 2;
1122 case Instruction::CallBr:
1123 return getNumSubclassExtraOperandsDynamic();
1124 }
1125    llvm_unreachable("Invalid opcode!");
1126 }
1127
1128 /// Get the number of extra operands for instructions that don't have a fixed
1129 /// number of extra operands.
1130 unsigned getNumSubclassExtraOperandsDynamic() const;
1131
1132public:
1133 using Instruction::getContext;
1134
1135 static bool classof(const Instruction *I) {
1136 return I->getOpcode() == Instruction::Call ||
1137 I->getOpcode() == Instruction::Invoke ||
1138 I->getOpcode() == Instruction::CallBr;
1139 }
1140 static bool classof(const Value *V) {
1141 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1142 }
1143
1144 FunctionType *getFunctionType() const { return FTy; }
1145
1146 void mutateFunctionType(FunctionType *FTy) {
1147 Value::mutateType(FTy->getReturnType());
1148 this->FTy = FTy;
1149 }
1150
1151   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1152
1153 /// data_operands_begin/data_operands_end - Return iterators iterating over
1154 /// the call / invoke argument list and bundle operands. For invokes, this is
1155 /// the set of instruction operands except the invoke target and the two
1156 /// successor blocks; and for calls this is the set of instruction operands
1157 /// except the call target.
1158 User::op_iterator data_operands_begin() { return op_begin(); }
1159 User::const_op_iterator data_operands_begin() const {
1160 return const_cast<CallBase *>(this)->data_operands_begin();
1161 }
1162 User::op_iterator data_operands_end() {
1163 // Walk from the end of the operands over the called operand and any
1164 // subclass operands.
1165 return op_end() - getNumSubclassExtraOperands() - 1;
1166 }
1167 User::const_op_iterator data_operands_end() const {
1168 return const_cast<CallBase *>(this)->data_operands_end();
1169 }
1170 iterator_range<User::op_iterator> data_ops() {
1171 return make_range(data_operands_begin(), data_operands_end());
1172 }
1173 iterator_range<User::const_op_iterator> data_ops() const {
1174 return make_range(data_operands_begin(), data_operands_end());
1175 }
1176 bool data_operands_empty() const {
1177 return data_operands_end() == data_operands_begin();
1178 }
1179 unsigned data_operands_size() const {
1180 return std::distance(data_operands_begin(), data_operands_end());
1181 }
1182
1183 bool isDataOperand(const Use *U) const {
1184    assert(this == U->getUser() &&
1185           "Only valid to query with a use of this instruction!");
1186 return data_operands_begin() <= U && U < data_operands_end();
1187 }
1188 bool isDataOperand(Value::const_user_iterator UI) const {
1189 return isDataOperand(&UI.getUse());
1190 }
1191
1192 /// Given a value use iterator, return the data operand corresponding to it.
1193 /// Iterator must actually correspond to a data operand.
1194 unsigned getDataOperandNo(Value::const_user_iterator UI) const {
1195 return getDataOperandNo(&UI.getUse());
1196 }
1197
1198 /// Given a use for a data operand, get the data operand number that
1199 /// corresponds to it.
1200 unsigned getDataOperandNo(const Use *U) const {
1201    assert(isDataOperand(U) && "Data operand # out of range!");
1202 return U - data_operands_begin();
1203 }
1204
1205 /// Return the iterator pointing to the beginning of the argument list.
1206 User::op_iterator arg_begin() { return op_begin(); }
1207 User::const_op_iterator arg_begin() const {
1208 return const_cast<CallBase *>(this)->arg_begin();
1209 }
1210
1211 /// Return the iterator pointing to the end of the argument list.
1212 User::op_iterator arg_end() {
1213 // From the end of the data operands, walk backwards past the bundle
1214 // operands.
1215 return data_operands_end() - getNumTotalBundleOperands();
1216 }
1217 User::const_op_iterator arg_end() const {
1218 return const_cast<CallBase *>(this)->arg_end();
1219 }
1220
1221 /// Iteration adapter for range-for loops.
1222 iterator_range<User::op_iterator> args() {
1223 return make_range(arg_begin(), arg_end());
1224 }
1225 iterator_range<User::const_op_iterator> args() const {
1226 return make_range(arg_begin(), arg_end());
1227 }
1228 bool arg_empty() const { return arg_end() == arg_begin(); }
1229 unsigned arg_size() const { return arg_end() - arg_begin(); }
1230
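For example, a scan restricted to the source-level argument list (sketch):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// args() stops before bundle operands, subclass operands, and the callee,
// so this visits exactly the call's arguments.
static unsigned countPointerArgs(const CallBase &CB) {
  unsigned N = 0;
  for (const Value *Arg : CB.args())
    if (Arg->getType()->isPointerTy())
      ++N;
  return N;
}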
1231 // Legacy API names that duplicate the above and will be removed once users
1232 // are migrated.
1233 iterator_range<User::op_iterator> arg_operands() {
1234 return make_range(arg_begin(), arg_end());
1235 }
1236 iterator_range<User::const_op_iterator> arg_operands() const {
1237 return make_range(arg_begin(), arg_end());
1238 }
1239 unsigned getNumArgOperands() const { return arg_size(); }
1240
1241 Value *getArgOperand(unsigned i) const {
1242    assert(i < getNumArgOperands() && "Out of bounds!");
1243 return getOperand(i);
1244 }
1245
1246 void setArgOperand(unsigned i, Value *v) {
1247    assert(i < getNumArgOperands() && "Out of bounds!");
1248 setOperand(i, v);
1249 }
1250
1251 /// Wrappers for getting the \c Use of a call argument.
1252 const Use &getArgOperandUse(unsigned i) const {
1253    assert(i < getNumArgOperands() && "Out of bounds!");
1254 return User::getOperandUse(i);
1255 }
1256 Use &getArgOperandUse(unsigned i) {
1257    assert(i < getNumArgOperands() && "Out of bounds!");
1258 return User::getOperandUse(i);
1259 }
1260
1261 bool isArgOperand(const Use *U) const {
1262    assert(this == U->getUser() &&
1263           "Only valid to query with a use of this instruction!");
1264 return arg_begin() <= U && U < arg_end();
1265 }
1266 bool isArgOperand(Value::const_user_iterator UI) const {
1267 return isArgOperand(&UI.getUse());
1268 }
1269
1270 /// Returns true if this CallSite passes the given Value* as an argument to
1271 /// the called function.
1272 bool hasArgument(const Value *V) const {
1273 return llvm::any_of(args(), [V](const Value *Arg) { return Arg == V; });
1274 }
1275
1276 Value *getCalledOperand() const { return Op<CalledOperandOpEndIdx>(); }
1277
1278 // DEPRECATED: This routine will be removed in favor of `getCalledOperand` in
1279 // the near future.
1280 Value *getCalledValue() const { return getCalledOperand(); }
1281
1282 const Use &getCalledOperandUse() const { return Op<CalledOperandOpEndIdx>(); }
1283 Use &getCalledOperandUse() { return Op<CalledOperandOpEndIdx>(); }
1284
1285 /// Returns the function called, or null if this is an
1286 /// indirect function invocation.
1287 Function *getCalledFunction() const {
1288 return dyn_cast_or_null<Function>(getCalledOperand());
1289 }
1290
1291 /// Return true if the callsite is an indirect call.
1292 bool isIndirectCall() const;
1293
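A typical direct/indirect split (sketch; the name comparison is illustrative):

#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// getCalledFunction() is null for indirect calls and for called operands
// that are not plain Functions (e.g. bitcast constant expressions).
static bool callsByName(const CallBase &CB, StringRef Name) {
  if (const Function *F = CB.getCalledFunction())
    return F->getName() == Name;
  return false;
}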
1294 /// Determine whether the passed iterator points to the callee operand's Use.
1295 bool isCallee(Value::const_user_iterator UI) const {
1296 return isCallee(&UI.getUse());
1297 }
1298
1299 /// Determine whether this Use is the callee operand's Use.
1300 bool isCallee(const Use *U) const { return &getCalledOperandUse() == U; }
1301
1302 /// Helper to get the caller (the parent function).
1303 Function *getCaller();
1304 const Function *getCaller() const {
1305 return const_cast<CallBase *>(this)->getCaller();
1306 }
1307
1308 /// Tests if this call site must be tail call optimized. Only a CallInst can
1309 /// be tail call optimized.
1310 bool isMustTailCall() const;
1311
1312 /// Tests if this call site is marked as a tail call.
1313 bool isTailCall() const;
1314
1315 /// Returns the intrinsic ID of the intrinsic called or
1316 /// Intrinsic::not_intrinsic if the called function is not an intrinsic, or if
1317 /// this is an indirect call.
1318 Intrinsic::ID getIntrinsicID() const;
1319
1320 void setCalledOperand(Value *V) { Op<CalledOperandOpEndIdx>() = V; }
1321
1322 /// Sets the function called, including updating the function type.
1323 void setCalledFunction(Function *Fn) {
1324 setCalledFunction(Fn->getFunctionType(), Fn);
1325 }
1326
1327 /// Sets the function called, including updating the function type.
1328 void setCalledFunction(FunctionCallee Fn) {
1329 setCalledFunction(Fn.getFunctionType(), Fn.getCallee());
1330 }
1331
1332 /// Sets the function called, including updating to the specified function
1333 /// type.
1334 void setCalledFunction(FunctionType *FTy, Value *Fn) {
1335 this->FTy = FTy;
1336    assert(FTy == cast<FunctionType>(
1337               cast<PointerType>(Fn->getType())->getElementType()));
1338 // This function doesn't mutate the return type, only the function
1339 // type. Seems broken, but I'm just gonna stick an assert in for now.
1340    assert(getType() == FTy->getReturnType());
1341 setCalledOperand(Fn);
1342 }
1343
1344 CallingConv::ID getCallingConv() const {
1345 return static_cast<CallingConv::ID>(getSubclassDataFromInstruction() >> 2);
1346 }
1347
1348 void setCallingConv(CallingConv::ID CC) {
1349 auto ID = static_cast<unsigned>(CC);
1350    assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention");
1351 setInstructionSubclassData((getSubclassDataFromInstruction() & 3) |
1352 (ID << 2));
1353 }
1354
1355 /// Check if this call is an inline asm statement.
1356 bool isInlineAsm() const { return isa<InlineAsm>(getCalledOperand()); }
1357
1358 /// \name Attribute API
1359 ///
1360 /// These methods access and modify attributes on this call (including
1361 /// looking through to the attributes on the called function when necessary).
1362 ///@{
1363
1364 /// Return the parameter attributes for this call.
1365 ///
1366 AttributeList getAttributes() const { return Attrs; }
1367
1368 /// Set the parameter attributes for this call.
1369 ///
1370 void setAttributes(AttributeList A) { Attrs = A; }
1371
1372 /// Determine whether this call has the given attribute.
1373 bool hasFnAttr(Attribute::AttrKind Kind) const {
1374    assert(Kind != Attribute::NoBuiltin &&
1375           "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin");
1376 return hasFnAttrImpl(Kind);
1377 }
1378
1379 /// Determine whether this call has the given attribute.
1380 bool hasFnAttr(StringRef Kind) const { return hasFnAttrImpl(Kind); }
1381
1382  /// Adds the attribute to the list of attributes.
1383 void addAttribute(unsigned i, Attribute::AttrKind Kind) {
1384 AttributeList PAL = getAttributes();
1385 PAL = PAL.addAttribute(getContext(), i, Kind);
1386 setAttributes(PAL);
1387 }
1388
1389  /// Adds the attribute to the list of attributes.
1390 void addAttribute(unsigned i, Attribute Attr) {
1391 AttributeList PAL = getAttributes();
1392 PAL = PAL.addAttribute(getContext(), i, Attr);
1393 setAttributes(PAL);
1394 }
1395
1396 /// Adds the attribute to the indicated argument
1397 void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1398    assert(ArgNo < getNumArgOperands() && "Out of bounds");
1399 AttributeList PAL = getAttributes();
1400 PAL = PAL.addParamAttribute(getContext(), ArgNo, Kind);
1401 setAttributes(PAL);
1402 }
1403
1404 /// Adds the attribute to the indicated argument
1405 void addParamAttr(unsigned ArgNo, Attribute Attr) {
1406    assert(ArgNo < getNumArgOperands() && "Out of bounds");
1407 AttributeList PAL = getAttributes();
1408 PAL = PAL.addParamAttribute(getContext(), ArgNo, Attr);
1409 setAttributes(PAL);
1410 }
1411
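Each wrapper above is a read-modify-write on the call site's AttributeList; a sketch of one in use (marking argument 0 nonnull is an example policy):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Call-site attributes are independent of the callee's declaration; this
// marks the first argument nonnull only at this particular call.
static void markFirstArgNonNull(CallBase &CB) {
  if (CB.getNumArgOperands() > 0)
    CB.addParamAttr(0, Attribute::NonNull);
}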
1412  /// Removes the attribute from the list of attributes.
1413 void removeAttribute(unsigned i, Attribute::AttrKind Kind) {
1414 AttributeList PAL = getAttributes();
1415 PAL = PAL.removeAttribute(getContext(), i, Kind);
1416 setAttributes(PAL);
1417 }
1418
1419  /// Removes the attribute from the list of attributes.
1420 void removeAttribute(unsigned i, StringRef Kind) {
1421 AttributeList PAL = getAttributes();
1422 PAL = PAL.removeAttribute(getContext(), i, Kind);
1423 setAttributes(PAL);
1424 }
1425
1426 /// Removes the attribute from the given argument
1427 void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1428    assert(ArgNo < getNumArgOperands() && "Out of bounds");
1429 AttributeList PAL = getAttributes();
1430 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1431 setAttributes(PAL);
1432 }
1433
1434 /// Removes the attribute from the given argument
1435 void removeParamAttr(unsigned ArgNo, StringRef Kind) {
1436    assert(ArgNo < getNumArgOperands() && "Out of bounds");
1437 AttributeList PAL = getAttributes();
1438 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1439 setAttributes(PAL);
1440 }
1441
1442  /// Adds the dereferenceable attribute to the list of attributes.
1443 void addDereferenceableAttr(unsigned i, uint64_t Bytes) {
1444 AttributeList PAL = getAttributes();
1445 PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
1446 setAttributes(PAL);
1447 }
1448
1449  /// Adds the dereferenceable_or_null attribute to the list of
1450 /// attributes.
1451 void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
1452 AttributeList PAL = getAttributes();
1453 PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
1454 setAttributes(PAL);
1455 }
1456
1457 /// Determine whether the return value has the given attribute.
1458 bool hasRetAttr(Attribute::AttrKind Kind) const;
1459
1460 /// Determine whether the argument or parameter has the given attribute.
1461 bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const;
1462
1463 /// Get the attribute of a given kind at a position.
1464 Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
1465 return getAttributes().getAttribute(i, Kind);
1466 }
1467
1468 /// Get the attribute of a given kind at a position.
1469 Attribute getAttribute(unsigned i, StringRef Kind) const {
1470 return getAttributes().getAttribute(i, Kind);
1471 }
1472
1473 /// Get the attribute of a given kind from a given arg
1474 Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1475    assert(ArgNo < getNumArgOperands() && "Out of bounds");
1476 return getAttributes().getParamAttr(ArgNo, Kind);
1477 }
1478
1479 /// Get the attribute of a given kind from a given arg
1480 Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
1481    assert(ArgNo < getNumArgOperands() && "Out of bounds");
1482 return getAttributes().getParamAttr(ArgNo, Kind);
1483 }
1484
1485 /// Return true if the data operand at index \p i has the attribute \p
1486 /// A.
1487 ///
1488 /// Data operands include call arguments and values used in operand bundles,
1489 /// but does not include the callee operand. This routine dispatches to the
1490 /// underlying AttributeList or the OperandBundleUser as appropriate.
1491 ///
1492 /// The index \p i is interpreted as
1493 ///
1494 /// \p i == Attribute::ReturnIndex -> the return value
1495 /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
1496 /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
1497 /// (\p i - 1) in the operand list.
1498 bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
1499 // Note that we have to add one because `i` isn't zero-indexed.
1500    assert(i < (getNumArgOperands() + getNumTotalBundleOperands() + 1) &&
1501           "Data operand index out of bounds!");
1502
1503 // The attribute A can either be directly specified, if the operand in
1504 // question is a call argument; or be indirectly implied by the kind of its
1505 // containing operand bundle, if the operand is a bundle operand.
1506
1507 if (i == AttributeList::ReturnIndex)
1508 return hasRetAttr(Kind);
1509
1510 // FIXME: Avoid these i - 1 calculations and update the API to use
1511 // zero-based indices.
1512 if (i < (getNumArgOperands() + 1))
1513 return paramHasAttr(i - 1, Kind);
1514
1515    assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&
1516           "Must be either a call argument or an operand bundle!");
1517 return bundleOperandHasAttr(i - 1, Kind);
1518 }
1519
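The OpNo + 1 in the wrappers below comes from this 1-based mapping; spelled out (sketch):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Index 0 is the return value and indices 1..arg_size() are arguments, so
// a zero-based data-operand number shifts up by one before the query.
static bool dataOpOnlyReadsMemory(const CallBase &CB, unsigned OpNo) {
  return CB.dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadOnly) ||
         CB.dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
}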
1520 /// Determine whether this data operand is not captured.
1521 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1522 // better indicate that this may return a conservative answer.
1523 bool doesNotCapture(unsigned OpNo) const {
1524 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::NoCapture);
1525 }
1526
1527 /// Determine whether this argument is passed by value.
1528 bool isByValArgument(unsigned ArgNo) const {
1529 return paramHasAttr(ArgNo, Attribute::ByVal);
1530 }
1531
1532 /// Determine whether this argument is passed in an alloca.
1533 bool isInAllocaArgument(unsigned ArgNo) const {
1534 return paramHasAttr(ArgNo, Attribute::InAlloca);
1535 }
1536
1537 /// Determine whether this argument is passed by value or in an alloca.
1538 bool isByValOrInAllocaArgument(unsigned ArgNo) const {
1539 return paramHasAttr(ArgNo, Attribute::ByVal) ||
1540 paramHasAttr(ArgNo, Attribute::InAlloca);
1541 }
1542
1543 /// Determine if there is an inalloca argument. Only the last argument can
1544 /// have the inalloca attribute.
1545 bool hasInAllocaArgument() const {
1546 return !arg_empty() && paramHasAttr(arg_size() - 1, Attribute::InAlloca);
1547 }
1548
1549 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1550 // better indicate that this may return a conservative answer.
1551 bool doesNotAccessMemory(unsigned OpNo) const {
1552 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1553 }
1554
1555 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1556 // better indicate that this may return a conservative answer.
1557 bool onlyReadsMemory(unsigned OpNo) const {
1558 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadOnly) ||
1559 dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1560 }
1561
1562 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1563 // better indicate that this may return a conservative answer.
1564 bool doesNotReadMemory(unsigned OpNo) const {
1565 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::WriteOnly) ||
1566 dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1567 }
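
The per-operand wrappers above (doesNotCapture, doesNotAccessMemory, onlyReadsMemory, doesNotReadMemory) all take a zero-based operand number and perform the +1 shift internally. A small sketch, again assuming a hypothetical `CallBase *CB`:

    // Sketch only: OpNo is zero-based; the shift to the one-based
    // data-operand index happens inside the helper.
    bool IsReadNone = CB->doesNotAccessMemory(/*OpNo=*/0);
    bool IsReadOnly = CB->onlyReadsMemory(/*OpNo=*/0); // readnone implies this too
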
1568
1569 /// Extract the alignment of the return value.
1570 unsigned getRetAlignment() const { return Attrs.getRetAlignment(); }
1571
1572 /// Extract the alignment for a call or parameter (0=unknown).
1573 unsigned getParamAlignment(unsigned ArgNo) const {
1574 return Attrs.getParamAlignment(ArgNo);
1575 }
1576
1577 /// Extract the byval type for a call or parameter.
1578 Type *getParamByValType(unsigned ArgNo) const {
1579 Type *Ty = Attrs.getParamByValType(ArgNo);
1580 return Ty ? Ty : getArgOperand(ArgNo)->getType()->getPointerElementType();
1581 }
1582
1583 /// Extract the number of dereferenceable bytes for a call or
1584 /// parameter (0=unknown).
1585 uint64_t getDereferenceableBytes(unsigned i) const {
1586 return Attrs.getDereferenceableBytes(i);
1587 }
1588
1589 /// Extract the number of dereferenceable_or_null bytes for a call or
1590 /// parameter (0=unknown).
1591 uint64_t getDereferenceableOrNullBytes(unsigned i) const {
1592 return Attrs.getDereferenceableOrNullBytes(i);
1593 }
1594
1595 /// Return true if the return value is known to be not null.
1596 /// This may be because it has the nonnull attribute, or because at least
1597 /// one byte is dereferenceable and the pointer is in addrspace(0).
1598 bool isReturnNonNull() const;
1599
1600 /// Determine if the return value is marked with NoAlias attribute.
1601 bool returnDoesNotAlias() const {
1602 return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1603 }
1604
1605 /// If one of the arguments has the 'returned' attribute, return its
1606 /// operand value; otherwise, return nullptr.
1607 Value *getReturnedArgOperand() const;
1608
1609 /// Return true if the call should not be treated as a call to a
1610 /// builtin.
1611 bool isNoBuiltin() const {
1612 return hasFnAttrImpl(Attribute::NoBuiltin) &&
1613 !hasFnAttrImpl(Attribute::Builtin);
1614 }
1615
1616 /// Determine if the call requires strict floating point semantics.
1617 bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
1618
1619 /// Return true if the call should not be inlined.
1620 bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
1621 void setIsNoInline() {
1622 addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
1623 }
1624 /// Determine if the call does not access memory.
1625 bool doesNotAccessMemory() const { return hasFnAttr(Attribute::ReadNone); }
1626 void setDoesNotAccessMemory() {
1627 addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
1628 }
1629
1630 /// Determine if the call does not access or only reads memory.
1631 bool onlyReadsMemory() const {
1632 return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
1633 }
1634 void setOnlyReadsMemory() {
1635 addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
1636 }
1637
1638 /// Determine if the call does not access or only writes memory.
1639 bool doesNotReadMemory() const {
1640 return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
1641 }
1642 void setDoesNotReadMemory() {
1643 addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
1644 }
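
The function-level memory getters above form a small lattice: readnone subsumes both readonly and writeonly, which is why onlyReadsMemory() and doesNotReadMemory() each OR in doesNotAccessMemory(). A hedged sketch, assuming `CB` is a `CallBase *` and neither the call nor its callee carried any memory attribute beforehand:

    // Sketch only, under the assumptions stated above.
    CB->setOnlyReadsMemory();           // attach readonly at the call site
    assert(CB->onlyReadsMemory());      // satisfied via readonly
    assert(!CB->doesNotAccessMemory()); // readnone itself was never attached
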
1645
1646 /// Determine if the call can access memory only using pointers based
1647 /// on its arguments.
1648 bool onlyAccessesArgMemory() const {
1649 return hasFnAttr(Attribute::ArgMemOnly);
1650 }
1651 void setOnlyAccessesArgMemory() {
1652 addAttribute(AttributeList::FunctionIndex, Attribute::ArgMemOnly);
1653 }
1654
1655 /// Determine if the function may only access memory that is
1656 /// inaccessible from the IR.
1657 bool onlyAccessesInaccessibleMemory() const {
1658 return hasFnAttr(Attribute::InaccessibleMemOnly);
1659 }
1660 void setOnlyAccessesInaccessibleMemory() {
1661 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOnly);
1662 }
1663
1664 /// Determine if the function may only access memory that is
1665 /// either inaccessible from the IR or pointed to by its arguments.
1666 bool onlyAccessesInaccessibleMemOrArgMem() const {
1667 return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
1668 }
1669 void setOnlyAccessesInaccessibleMemOrArgMem() {
1670 addAttribute(AttributeList::FunctionIndex,
1671 Attribute::InaccessibleMemOrArgMemOnly);
1672 }
1673 /// Determine if the call cannot return.
1674 bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
1675 void setDoesNotReturn() {
1676 addAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
1677 }
1678
1679 /// Determine if the call should not perform indirect branch tracking.
1680 bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
1681
1682 /// Determine if the call cannot unwind.
1683 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
1684 void setDoesNotThrow() {
1685 addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
1686 }
1687
1688 /// Determine if the call cannot be duplicated.
1689 bool cannotDuplicate() const { return hasFnAttr(Attribute::NoDuplicate); }
1690 void setCannotDuplicate() {
1691 addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
1692 }
1693
1694 /// Determine if the call is convergent.
1695 bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
1696 void setConvergent() {
1697 addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1698 }
1699 void setNotConvergent() {
1700 removeAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1701 }
1702
1703 /// Determine if the call returns a structure through its first
1704 /// pointer argument.
1705 bool hasStructRetAttr() const {
1706 if (getNumArgOperands() == 0)
1707 return false;
1708
1709 // Be friendly and also check the callee.
1710 return paramHasAttr(0, Attribute::StructRet);
1711 }
1712
1713 /// Determine if any call argument is an aggregate passed by value.
1714 bool hasByValArgument() const {
1715 return Attrs.hasAttrSomewhere(Attribute::ByVal);
1716 }
1717
1718 ///@}
1719 // End of attribute API.
1720
1721 /// \name Operand Bundle API
1722 ///
1723 /// This group of methods provides the API to access and manipulate operand
1724 /// bundles on this call.
1725 /// @{
1726
1727 /// Return the number of operand bundles associated with this User.
1728 unsigned getNumOperandBundles() const {
1729 return std::distance(bundle_op_info_begin(), bundle_op_info_end());
1730 }
1731
1732 /// Return true if this User has any operand bundles.
1733 bool hasOperandBundles() const { return getNumOperandBundles() != 0; }
1734
1735 /// Return the index of the first bundle operand in the Use array.
1736 unsigned getBundleOperandsStartIndex() const {
1737 assert(hasOperandBundles() && "Don't call otherwise!");
1738 return bundle_op_info_begin()->Begin;
1739 }
1740
1741 /// Return the index one past the last bundle operand in the Use array.
1742 unsigned getBundleOperandsEndIndex() const {
1743 assert(hasOperandBundles() && "Don't call otherwise!");
1744 return bundle_op_info_end()[-1].End;
1745 }
1746
1747 /// Return true if the operand at index \p Idx is a bundle operand.
1748 bool isBundleOperand(unsigned Idx) const {
1749 return hasOperandBundles() && Idx >= getBundleOperandsStartIndex() &&
1750 Idx < getBundleOperandsEndIndex();
1751 }
1752
1753 /// Returns true if the use is a bundle operand.
1754 bool isBundleOperand(const Use *U) const {
1755 assert(this == U->getUser() &&
1756        "Only valid to query with a use of this instruction!");
1757 return hasOperandBundles() && isBundleOperand(U - op_begin());
1758 }
1759 bool isBundleOperand(Value::const_user_iterator UI) const {
1760 return isBundleOperand(&UI.getUse());
1761 }
1762
1763 /// Return the total number of operands (not operand bundles) used by
1764 /// every operand bundle in this OperandBundleUser.
1765 unsigned getNumTotalBundleOperands() const {
1766 if (!hasOperandBundles())
1767 return 0;
1768
1769 unsigned Begin = getBundleOperandsStartIndex();
1770 unsigned End = getBundleOperandsEndIndex();
1771
1772 assert(Begin <= End && "Should be!");
1773 return End - Begin;
1774 }
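
The distinction between counting bundles and counting bundle operands is worth pinning down with a concrete, purely illustrative call site:

    // Hypothetical IR, not taken from this file:
    //   call void @f() [ "a"(i32 1, i32 2), "b"(i64 3) ]
    // getNumOperandBundles()      == 2  (two bundles)
    // getNumTotalBundleOperands() == 3  (three bundle inputs in total)
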
1775
1776 /// Return the operand bundle at a specific index.
1777 OperandBundleUse getOperandBundleAt(unsigned Index) const {
1778 assert(Index < getNumOperandBundles() && "Index out of bounds!");
1779 return operandBundleFromBundleOpInfo(*(bundle_op_info_begin() + Index));
1780 }
1781
1782 /// Return the number of operand bundles with the tag Name attached to
1783 /// this instruction.
1784 unsigned countOperandBundlesOfType(StringRef Name) const {
1785 unsigned Count = 0;
1786 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1787 if (getOperandBundleAt(i).getTagName() == Name)
1788 Count++;
1789
1790 return Count;
1791 }
1792
1793 /// Return the number of operand bundles with the tag ID attached to
1794 /// this instruction.
1795 unsigned countOperandBundlesOfType(uint32_t ID) const {
1796 unsigned Count = 0;
1797 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1798 if (getOperandBundleAt(i).getTagID() == ID)
1799 Count++;
1800
1801 return Count;
1802 }
1803
1804 /// Return an operand bundle by name, if present.
1805 ///
1806 /// It is an error to call this for operand bundle types that may have
1807 /// multiple instances of them on the same instruction.
1808 Optional<OperandBundleUse> getOperandBundle(StringRef Name) const {
1809 assert(countOperandBundlesOfType(Name) < 2 && "Precondition violated!");
1810
1811 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1812 OperandBundleUse U = getOperandBundleAt(i);
1813 if (U.getTagName() == Name)
1814 return U;
1815 }
1816
1817 return None;
1818 }
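
A typical lookup goes through the tag-ID overload below with one of the LLVMContext::OB_* constants. A hedged sketch, assuming a `CallBase *CB`:

    // Sketch only: fetch the at-most-one "deopt" bundle and walk its inputs.
    if (Optional<OperandBundleUse> Deopt =
            CB->getOperandBundle(LLVMContext::OB_deopt))
      for (const Use &Input : Deopt->Inputs)
        (void)Input; // each deoptimization state value
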
1819
1820 /// Return an operand bundle by tag ID, if present.
1821 ///
1822 /// It is an error to call this for operand bundle types that may have
1823 /// multiple instances of them on the same instruction.
1824 Optional<OperandBundleUse> getOperandBundle(uint32_t ID) const {
1825 assert(countOperandBundlesOfType(ID) < 2 && "Precondition violated!");
1826
1827 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1828 OperandBundleUse U = getOperandBundleAt(i);
1829 if (U.getTagID() == ID)
1830 return U;
1831 }
1832
1833 return None;
1834 }
1835
1836 /// Return the list of operand bundles attached to this instruction as
1837 /// a vector of OperandBundleDefs.
1838 ///
1839 /// This function copies the OperandBundleUse instances associated with this
1840 /// OperandBundleUser to a vector of OperandBundleDefs. Note:
1841 /// OperandBundleUses and OperandBundleDefs are non-trivially *different*
1842 /// representations of operand bundles (see documentation above).
1843 void getOperandBundlesAsDefs(SmallVectorImpl<OperandBundleDef> &Defs) const {
1844 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1845 Defs.emplace_back(getOperandBundleAt(i));
1846 }
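
The usual consumer of this copy is code that rebuilds a call while preserving its bundles; OperandBundleDefs own their inputs, so they stay valid while the new instruction is created. A sketch, assuming `OrigCall` is a `CallInst *`:

    // Sketch only: clone a call together with its operand bundles.
    SmallVector<OperandBundleDef, 1> Bundles;
    OrigCall->getOperandBundlesAsDefs(Bundles);
    CallInst *NewCall =
        CallInst::Create(OrigCall, Bundles, /*InsertPt=*/OrigCall);
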
1847
1848 /// Return the operand bundle for the operand at index OpIdx.
1849 ///
1850 /// It is an error to call this with an OpIdx that does not correspond to a
1851 /// bundle operand.
1852 OperandBundleUse getOperandBundleForOperand(unsigned OpIdx) const {
1853 return operandBundleFromBundleOpInfo(getBundleOpInfoForOperand(OpIdx));
1854 }
1855
1856 /// Return true if this operand bundle user has operand bundles that
1857 /// may read from the heap.
1858 bool hasReadingOperandBundles() const {
1859 // Implementation note: this is a conservative implementation of operand
1860 // bundle semantics, where *any* operand bundle forces a callsite to be at
1861 // least readonly.
1862 return hasOperandBundles();
1863 }
1864
1865 /// Return true if this operand bundle user has operand bundles that
1866 /// may write to the heap.
1867 bool hasClobberingOperandBundles() const {
1868 for (auto &BOI : bundle_op_infos()) {
1869 if (BOI.Tag->second == LLVMContext::OB_deopt ||
1870 BOI.Tag->second == LLVMContext::OB_funclet)
1871 continue;
1872
1873 // This instruction has an operand bundle that is not known to us.
1874 // Assume the worst.
1875 return true;
1876 }
1877
1878 return false;
1879 }
1880
1881 /// Return true if the bundle operand at index \p OpIdx has the
1882 /// attribute \p A.
1883 bool bundleOperandHasAttr(unsigned OpIdx, Attribute::AttrKind A) const {
1884 auto &BOI = getBundleOpInfoForOperand(OpIdx);
1885 auto OBU = operandBundleFromBundleOpInfo(BOI);
1886 return OBU.operandHasAttr(OpIdx - BOI.Begin, A);
1887 }
1888
1889 /// Return true if \p Other has the same sequence of operand bundle
1890 /// tags with the same number of operands on each one of them as this
1891 /// OperandBundleUser.
1892 bool hasIdenticalOperandBundleSchema(const CallBase &Other) const {
1893 if (getNumOperandBundles() != Other.getNumOperandBundles())
1894 return false;
1895
1896 return std::equal(bundle_op_info_begin(), bundle_op_info_end(),
1897 Other.bundle_op_info_begin());
1898 }
1899
1900 /// Return true if this operand bundle user contains operand bundles
1901 /// with tags other than those specified in \p IDs.
1902 bool hasOperandBundlesOtherThan(ArrayRef<uint32_t> IDs) const {
1903 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1904 uint32_t ID = getOperandBundleAt(i).getTagID();
1905 if (!is_contained(IDs, ID))
1906 return true;
1907 }
1908 return false;
1909 }
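
This predicate is the standard guard for transforms that only understand a fixed set of bundle tags. A sketch, assuming `CB` is a `CallBase *` inside some hypothetical pass:

    // Sketch only: bail out on any bundle kind the pass does not know about.
    if (CB->hasOperandBundlesOtherThan(
            {LLVMContext::OB_deopt, LLVMContext::OB_funclet}))
      return; // unknown bundle: treat the call conservatively
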
1910
1911 /// Is the function attribute S disallowed by some operand bundle on
1912 /// this operand bundle user?
1913 bool isFnAttrDisallowedByOpBundle(StringRef S) const {
1914 // Operand bundles can only disallow the readnone, readonly, and argmemonly
1915 // attributes. All string attributes are fine.
1916 return false;
1917 }
1918
1919 /// Is the function attribute A disallowed by some operand bundle on
1920 /// this operand bundle user?
1921 bool isFnAttrDisallowedByOpBundle(Attribute::AttrKind A) const {
1922 switch (A) {
1923 default:
1924 return false;
1925
1926 case Attribute::InaccessibleMemOrArgMemOnly:
1927 return hasReadingOperandBundles();
1928
1929 case Attribute::InaccessibleMemOnly:
1930 return hasReadingOperandBundles();
1931
1932 case Attribute::ArgMemOnly:
1933 return hasReadingOperandBundles();
1934
1935 case Attribute::ReadNone:
1936 return hasReadingOperandBundles();
1937
1938 case Attribute::ReadOnly:
1939 return hasClobberingOperandBundles();
1940 }
1941
1942 llvm_unreachable("switch has a default case!")::llvm::llvm_unreachable_internal("switch has a default case!"
, "/build/llvm-toolchain-snapshot-10~svn374710/include/llvm/IR/InstrTypes.h"
, 1942)
;
1943 }
1944
1945 /// Used to keep track of an operand bundle. See the main comment on
1946 /// OperandBundleUser above.
1947 struct BundleOpInfo {
1948 /// The operand bundle tag, interned by
1949 /// LLVMContextImpl::getOrInsertBundleTag.
1950 StringMapEntry<uint32_t> *Tag;
1951
1952 /// The index in the Use& vector where operands for this operand
1953 /// bundle start.
1954 uint32_t Begin;
1955
1956 /// The index in the Use& vector one past the last operand for this
1957 /// operand bundle.
1958 uint32_t End;
1959
1960 bool operator==(const BundleOpInfo &Other) const {
1961 return Tag == Other.Tag && Begin == Other.Begin && End == Other.End;
1962 }
1963 };
1964
1965 /// Simple helper function to map a BundleOpInfo to an
1966 /// OperandBundleUse.
1967 OperandBundleUse
1968 operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const {
1969 auto begin = op_begin();
1970 ArrayRef<Use> Inputs(begin + BOI.Begin, begin + BOI.End);
1971 return OperandBundleUse(BOI.Tag, Inputs);
1972 }
1973
1974 using bundle_op_iterator = BundleOpInfo *;
1975 using const_bundle_op_iterator = const BundleOpInfo *;
1976
1977 /// Return the start of the list of BundleOpInfo instances associated
1978 /// with this OperandBundleUser.
1979 ///
1980 /// OperandBundleUser uses the descriptor area co-allocated with the host User
1981 /// to store some meta information about which operands are "normal" operands,
1982 /// and which ones belong to some operand bundle.
1983 ///
1984 /// The layout of an operand bundle user is
1985 ///
1986 /// +-----------uint32_t End-------------------------------------+
1987 /// | |
1988 /// | +--------uint32_t Begin--------------------+ |
1989 /// | | | |
1990 /// ^ ^ v v
1991 /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
1992 /// | BOI0 | BOI1 | .. | DU | U0 | U1 | .. | BOI0_U0 | .. | BOI1_U0 | .. | Un
1993 /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
1994 /// v v ^ ^
1995 /// | | | |
1996 /// | +--------uint32_t Begin------------+ |
1997 /// | |
1998 /// +-----------uint32_t End-----------------------------+
1999 ///
2000 ///
2001 /// BOI0, BOI1 ... are descriptions of operand bundles in this User's use
2002 /// list. These descriptions are installed and managed by this class, and
2003 /// they're all instances of OperandBundleUser<T>::BundleOpInfo.
2004 ///
2005 /// DU is an additional descriptor installed by User's 'operator new' to keep
2006 /// track of the 'BOI0 ... BOIN' co-allocation. OperandBundleUser does not
2007 /// access or modify DU in any way, it's an implementation detail private to
2008 /// User.
2009 ///
2010 /// The regular Use& vector for the User starts at U0. The operand bundle
2011 /// uses are part of the Use& vector, just like normal uses. In the diagram
2012 /// above, the operand bundle uses start at BOI0_U0. Each instance of
2013 /// BundleOpInfo has information about a contiguous set of uses constituting
2014 /// an operand bundle, and the total set of operand bundle uses themselves
2015 /// form a contiguous set of uses (i.e. there are no gaps between uses
2016 /// corresponding to individual operand bundles).
2017 ///
2018 /// This class does not know the location of the set of operand bundle uses
2019 /// within the use list -- that is decided by the User using this class via
2020 /// the BeginIdx argument in populateBundleOperandInfos.
2021 ///
2022 /// Currently operand bundle users with hung-off operands are not supported.
2023 bundle_op_iterator bundle_op_info_begin() {
2024 if (!hasDescriptor())
2025 return nullptr;
2026
2027 uint8_t *BytesBegin = getDescriptor().begin();
2028 return reinterpret_cast<bundle_op_iterator>(BytesBegin);
2029 }
2030
2031 /// Return the start of the list of BundleOpInfo instances associated
2032 /// with this OperandBundleUser.
2033 const_bundle_op_iterator bundle_op_info_begin() const {
2034 auto *NonConstThis = const_cast<CallBase *>(this);
2035 return NonConstThis->bundle_op_info_begin();
2036 }
2037
2038 /// Return the end of the list of BundleOpInfo instances associated
2039 /// with this OperandBundleUser.
2040 bundle_op_iterator bundle_op_info_end() {
2041 if (!hasDescriptor())
2042 return nullptr;
2043
2044 uint8_t *BytesEnd = getDescriptor().end();
2045 return reinterpret_cast<bundle_op_iterator>(BytesEnd);
2046 }
2047
2048 /// Return the end of the list of BundleOpInfo instances associated
2049 /// with this OperandBundleUser.
2050 const_bundle_op_iterator bundle_op_info_end() const {
2051 auto *NonConstThis = const_cast<CallBase *>(this);
2052 return NonConstThis->bundle_op_info_end();
2053 }
2054
2055 /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
2056 iterator_range<bundle_op_iterator> bundle_op_infos() {
2057 return make_range(bundle_op_info_begin(), bundle_op_info_end());
2058 }
2059
2060 /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
2061 iterator_range<const_bundle_op_iterator> bundle_op_infos() const {
2062 return make_range(bundle_op_info_begin(), bundle_op_info_end());
2063 }
2064
2065 /// Populate the BundleOpInfo instances and the Use& vector from \p
2066 /// Bundles. Return the op_iterator pointing to the Use& one past the
2067 /// last bundle operand use.
2068 ///
2069 /// Each \p OperandBundleDef instance is tracked by a \p BundleOpInfo
2070 /// instance allocated in this User's descriptor.
2071 op_iterator populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
2072 const unsigned BeginIndex);
2073
2074 /// Return the BundleOpInfo for the operand at index OpIdx.
2075 ///
2076 /// It is an error to call this with an OpIdx that does not correspond to a
2077 /// bundle operand.
2078 const BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx) const {
2079 for (auto &BOI : bundle_op_infos())
2080 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
2081 return BOI;
2082
2083 llvm_unreachable("Did not find operand bundle for operand!")::llvm::llvm_unreachable_internal("Did not find operand bundle for operand!"
, "/build/llvm-toolchain-snapshot-10~svn374710/include/llvm/IR/InstrTypes.h"
, 2083)
;
2084 }
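
Putting the pieces together: given a raw operand index that is known to be a bundle operand, both the bundle and the slot within it can be recovered. A hedged sketch, assuming `CB` is a `CallBase *` and `OpIdx` satisfies `CB->isBundleOperand(OpIdx)`:

    // Sketch only, under the assumptions stated above.
    OperandBundleUse OBU = CB->getOperandBundleForOperand(OpIdx);
    unsigned Slot = OpIdx - CB->getBundleOpInfoForOperand(OpIdx).Begin;
    Value *Input = OBU.Inputs[Slot]; // the operand, seen through its bundle
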
2085
2086protected:
2087 /// Return the total number of values used in \p Bundles.
2088 static unsigned CountBundleInputs(ArrayRef<OperandBundleDef> Bundles) {
2089 unsigned Total = 0;
2090 for (auto &B : Bundles)
2091 Total += B.input_size();
2092 return Total;
2093 }
2094
2095 /// @}
2096 // End of operand bundle API.
2097
2098private:
2099 bool hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
2100 bool hasFnAttrOnCalledFunction(StringRef Kind) const;
2101
2102 template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
2103 if (Attrs.hasAttribute(AttributeList::FunctionIndex, Kind))
2104 return true;
2105
2106 // Operand bundles override attributes on the called function, but don't
2107 // override attributes directly present on the call instruction.
2108 if (isFnAttrDisallowedByOpBundle(Kind))
2109 return false;
2110
2111 return hasFnAttrOnCalledFunction(Kind);
2112 }
2113};
2114
2115template <>
2116struct OperandTraits<CallBase> : public VariadicOperandTraits<CallBase, 1> {};
2117
2118 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallBase, Value)
2119
2120//===----------------------------------------------------------------------===//
2121// FuncletPadInst Class
2122//===----------------------------------------------------------------------===//
2123class FuncletPadInst : public Instruction {
2124private:
2125 FuncletPadInst(const FuncletPadInst &CPI);
2126
2127 explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
2128 ArrayRef<Value *> Args, unsigned Values,
2129 const Twine &NameStr, Instruction *InsertBefore);
2130 explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
2131 ArrayRef<Value *> Args, unsigned Values,
2132 const Twine &NameStr, BasicBlock *InsertAtEnd);
2133
2134 void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
2135
2136protected:
2137 // Note: Instruction needs to be a friend here to call cloneImpl.
2138 friend class Instruction;
2139 friend class CatchPadInst;
2140 friend class CleanupPadInst;
2141
2142 FuncletPadInst *cloneImpl() const;
2143
2144public:
2145 /// Provide fast operand accessors
2146 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2147
2148 /// getNumArgOperands - Return the number of funcletpad arguments.
2149 ///
2150 unsigned getNumArgOperands() const { return getNumOperands() - 1; }
2151
2152 /// Convenience accessors
2153
2154 /// Return the outer EH-pad this funclet is nested within.
2155 ///
2156 /// Note: This returns the associated CatchSwitchInst if this FuncletPadInst
2157 /// is a CatchPadInst.
2158 Value *getParentPad() const { return Op<-1>(); }
2159 void setParentPad(Value *ParentPad) {
2160 assert(ParentPad);
2161 Op<-1>() = ParentPad;
2162 }
2163
2164 /// getArgOperand/setArgOperand - Return/set the i-th funcletpad argument.
2165 ///
2166 Value *getArgOperand(unsigned i) const { return getOperand(i); }
2167 void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
2168
2169 /// arg_operands - iteration adapter for range-for loops.
2170 op_range arg_operands() { return op_range(op_begin(), op_end() - 1); }
2171
2172 /// arg_operands - iteration adapter for range-for loops.
2173 const_op_range arg_operands() const {
2174 return const_op_range(op_begin(), op_end() - 1);
2175 }
2176
2177 // Methods for support type inquiry through isa, cast, and dyn_cast:
2178 static bool classof(const Instruction *I) { return I->isFuncletPad(); }
2179 static bool classof(const Value *V) {
2180 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2181 }
2182};
2183
2184template <>
2185struct OperandTraits<FuncletPadInst>
2186 : public VariadicOperandTraits<FuncletPadInst, /*MINARITY=*/1> {};
2187
2188 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(FuncletPadInst, Value)
2189
2190} // end namespace llvm
2191
2192#endif // LLVM_IR_INSTRTYPES_H