//===- PreISelIntrinsicLowering.cpp - Pre-ISel intrinsic lowering pass ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements IR lowering for the llvm.memcpy, llvm.memmove,
// llvm.memset, llvm.load.relative and llvm.objc.* intrinsics.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/PreISelIntrinsicLowering.h"
#include "llvm/Analysis/ObjCARCInstKind.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ExpandVectorPredication.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Scalar/LowerConstantIntrinsics.h"
#include "llvm/Transforms/Utils/LowerMemIntrinsics.h"

using namespace llvm;

/// Threshold to leave statically sized memory intrinsic calls. Calls of known
/// size larger than this will be expanded by the pass. Calls of unknown or
/// lower size will be left for expansion in codegen.
static cl::opt<int64_t> MemIntrinsicExpandSizeThresholdOpt(
    "mem-intrinsic-expand-size",
    cl::desc("Set minimum mem intrinsic size to expand in IR"), cl::init(-1),
    cl::Hidden);

namespace {

struct PreISelIntrinsicLowering {
  const TargetMachine *TM;
  const function_ref<TargetTransformInfo &(Function &)> LookupTTI;
  const function_ref<TargetLibraryInfo &(Function &)> LookupTLI;

  /// If this is true, assume it's preferable to leave memory intrinsic calls
  /// for replacement with a library call later. Otherwise this depends on
  /// TargetLoweringInfo availability of the corresponding function.
  const bool UseMemIntrinsicLibFunc;

  explicit PreISelIntrinsicLowering(
      const TargetMachine *TM_,
      function_ref<TargetTransformInfo &(Function &)> LookupTTI_,
      function_ref<TargetLibraryInfo &(Function &)> LookupTLI_,
      bool UseMemIntrinsicLibFunc_ = true)
      : TM(TM_), LookupTTI(LookupTTI_), LookupTLI(LookupTLI_),
        UseMemIntrinsicLibFunc(UseMemIntrinsicLibFunc_) {}

  static bool shouldExpandMemIntrinsicWithSize(Value *Size,
                                               const TargetTransformInfo &TTI);
  bool expandMemIntrinsicUses(Function &F) const;
  bool lowerIntrinsics(Module &M) const;
};

} // namespace

template <class T> static bool forEachCall(Function &Intrin, T Callback) {
  // Lowering all intrinsics in a function will delete multiple uses, so we
  // can't use an early-inc-range. In case some remain, we don't want to look
  // at them again. Unfortunately, Value::UseList is private, so we can't use a
  // simple Use**. If LastUse is null, the next use to consider is
  // Intrin.use_begin(), otherwise it's LastUse->getNext().
  Use *LastUse = nullptr;
  bool Changed = false;
  while (!Intrin.use_empty() && (!LastUse || LastUse->getNext())) {
    Use *U = LastUse ? LastUse->getNext() : &*Intrin.use_begin();
    bool Removed = false;
    // An intrinsic cannot have its address taken, so it cannot be an argument
    // operand. It might be used as operand in debug metadata, though.
    if (auto CI = dyn_cast<CallInst>(U->getUser()))
      Changed |= Removed = Callback(CI);
    if (!Removed)
      LastUse = U;
  }
  return Changed;
}

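// Lower a call to llvm.load.relative into an explicit i32 offset load plus
// pointer arithmetic. Roughly (an illustrative sketch with made-up value
// names, not taken from this file):
//
//   %p = call ptr @llvm.load.relative.i32(ptr %base, i32 %off)
//
// becomes:
//
//   %addr = getelementptr i8, ptr %base, i32 %off
//   %rel  = load i32, ptr %addr, align 4
//   %p    = getelementptr i8, ptr %base, i32 %rel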
static bool lowerLoadRelative(Function &F) {
  if (F.use_empty())
    return false;

  bool Changed = false;
  Type *Int32Ty = Type::getInt32Ty(F.getContext());

  for (Use &U : llvm::make_early_inc_range(F.uses())) {
    auto CI = dyn_cast<CallInst>(U.getUser());
    if (!CI || CI->getCalledOperand() != &F)
      continue;

    IRBuilder<> B(CI);
    Value *OffsetPtr =
        B.CreatePtrAdd(CI->getArgOperand(0), CI->getArgOperand(1));
    Value *OffsetI32 = B.CreateAlignedLoad(Int32Ty, OffsetPtr, Align(4));

    Value *ResultPtr = B.CreatePtrAdd(CI->getArgOperand(0), OffsetI32);

    CI->replaceAllUsesWith(ResultPtr);
    CI->eraseFromParent();
    Changed = true;
  }

  return Changed;
}

// ObjCARC has knowledge about whether an obj-c runtime function needs to be
// always tail-called or never tail-called.
static CallInst::TailCallKind getOverridingTailCallKind(const Function &F) {
  objcarc::ARCInstKind Kind = objcarc::GetFunctionClass(&F);
  if (objcarc::IsAlwaysTail(Kind))
    return CallInst::TCK_Tail;
  else if (objcarc::IsNeverTail(Kind))
    return CallInst::TCK_NoTail;
  return CallInst::TCK_None;
}

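// Rewrite calls to an llvm.objc.* intrinsic as direct calls to the
// corresponding runtime entry point, e.g. (illustrative):
//
//   %r = call ptr @llvm.objc.retain(ptr %x)  -->  %r = call ptr @objc_retain(ptr %x)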
static bool lowerObjCCall(Function &F, const char *NewFn,
                          bool setNonLazyBind = false) {
  assert(IntrinsicInst::mayLowerToFunctionCall(F.getIntrinsicID()) &&
         "Pre-ISel intrinsics do lower into regular function calls");
  if (F.use_empty())
    return false;

  // If we haven't already looked up this function, check to see if the
  // program already contains a function with this name.
  Module *M = F.getParent();
  FunctionCallee FCache = M->getOrInsertFunction(NewFn, F.getFunctionType());

  if (Function *Fn = dyn_cast<Function>(FCache.getCallee())) {
    Fn->setLinkage(F.getLinkage());
    if (setNonLazyBind && !Fn->isWeakForLinker()) {
      // If we have Native ARC, set nonlazybind attribute for these APIs for
      // performance.
      Fn->addFnAttr(Attribute::NonLazyBind);
    }
  }

  CallInst::TailCallKind OverridingTCK = getOverridingTailCallKind(F);

  for (Use &U : llvm::make_early_inc_range(F.uses())) {
    auto *CB = cast<CallBase>(U.getUser());

    if (CB->getCalledFunction() != &F) {
      objcarc::ARCInstKind Kind = objcarc::getAttachedARCFunctionKind(CB);
      (void)Kind;
      assert((Kind == objcarc::ARCInstKind::RetainRV ||
              Kind == objcarc::ARCInstKind::UnsafeClaimRV) &&
             "use expected to be the argument of operand bundle "
             "\"clang.arc.attachedcall\"");
      U.set(FCache.getCallee());
      continue;
    }

    auto *CI = cast<CallInst>(CB);
    assert(CI->getCalledFunction() && "Cannot lower an indirect call!");

    IRBuilder<> Builder(CI->getParent(), CI->getIterator());
    SmallVector<Value *, 8> Args(CI->args());
    SmallVector<llvm::OperandBundleDef, 1> BundleList;
    CI->getOperandBundlesAsDefs(BundleList);
    CallInst *NewCI = Builder.CreateCall(FCache, Args, BundleList);
    NewCI->setName(CI->getName());

    // Try to set the most appropriate TailCallKind based on both the current
    // attributes and the ones that we could get from ObjCARC's special
    // knowledge of the runtime functions.
    //
    // std::max respects both requirements of notail and tail here:
    // * notail on either the call or from ObjCARC becomes notail
    // * tail on either side is stronger than none, but not notail
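    // For example, assuming the current enum ordering
    // (TCK_None < TCK_Tail < TCK_MustTail < TCK_NoTail):
    //   std::max(TCK_None, TCK_Tail)   == TCK_Tail
    //   std::max(TCK_Tail, TCK_NoTail) == TCK_NoTail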
    CallInst::TailCallKind TCK = CI->getTailCallKind();
    NewCI->setTailCallKind(std::max(TCK, OverridingTCK));

    // Transfer the 'returned' attribute from the intrinsic to the call site.
    // By applying this only to intrinsic call sites, we avoid applying it to
    // non-ARC explicit calls to things like objc_retain which have not been
    // auto-upgraded to use the intrinsics.
    unsigned Index;
    if (F.getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
        Index)
      NewCI->addParamAttr(Index - AttributeList::FirstArgIndex,
                          Attribute::Returned);

    if (!CI->use_empty())
      CI->replaceAllUsesWith(NewCI);
    CI->eraseFromParent();
  }

  return true;
}

// TODO: Should refine based on estimated number of accesses (e.g. does it
// require splitting based on alignment)
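// For example, with a threshold of 16 this predicate returns true for a
// constant length of 32, false for a constant length of 8 or 16, and true for
// any non-constant length; a threshold of 0 forces expansion of every call,
// including zero-length ones (an illustrative summary of the logic below).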
bool PreISelIntrinsicLowering::shouldExpandMemIntrinsicWithSize(
    Value *Size, const TargetTransformInfo &TTI) {
  ConstantInt *CI = dyn_cast<ConstantInt>(Size);
  if (!CI)
    return true;
  uint64_t Threshold = MemIntrinsicExpandSizeThresholdOpt.getNumOccurrences()
                           ? MemIntrinsicExpandSizeThresholdOpt
                           : TTI.getMaxMemIntrinsicInlineSizeThreshold();
  uint64_t SizeVal = CI->getZExtValue();

  // Treat a threshold of 0 as a special case to force expansion of all
  // intrinsics, including size 0.
  return SizeVal > Threshold || Threshold == 0;
}

static bool canEmitLibcall(const TargetMachine *TM, Function *F,
                           RTLIB::Libcall LC) {
  // TODO: Should this consider the address space of the memcpy?
  if (!TM)
    return true;
  const TargetLowering *TLI = TM->getSubtargetImpl(*F)->getTargetLowering();
  return TLI->getLibcallName(LC) != nullptr;
}

// TODO: Handle atomic memcpy and memcpy.inline
// TODO: Pass ScalarEvolution
bool PreISelIntrinsicLowering::expandMemIntrinsicUses(Function &F) const {
  Intrinsic::ID ID = F.getIntrinsicID();
  bool Changed = false;

  for (User *U : llvm::make_early_inc_range(F.users())) {
    Instruction *Inst = cast<Instruction>(U);

    switch (ID) {
    case Intrinsic::memcpy: {
      auto *Memcpy = cast<MemCpyInst>(Inst);
      Function *ParentFunc = Memcpy->getFunction();
      const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
      if (shouldExpandMemIntrinsicWithSize(Memcpy->getLength(), TTI)) {
        if (UseMemIntrinsicLibFunc &&
            canEmitLibcall(TM, ParentFunc, RTLIB::MEMCPY))
          break;

        // TODO: For optsize, emit the loop into a separate function
        expandMemCpyAsLoop(Memcpy, TTI);
        Changed = true;
        Memcpy->eraseFromParent();
      }

      break;
    }
    case Intrinsic::memcpy_inline: {
      // Only expand llvm.memcpy.inline with non-constant length in this
      // codepath, leaving the current SelectionDAG expansion for constant
      // length memcpy intrinsics undisturbed.
      auto *Memcpy = cast<MemCpyInlineInst>(Inst);
      if (isa<ConstantInt>(Memcpy->getLength()))
        break;

      Function *ParentFunc = Memcpy->getFunction();
      const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
      expandMemCpyAsLoop(Memcpy, TTI);
      Changed = true;
      Memcpy->eraseFromParent();
      break;
    }
    case Intrinsic::memmove: {
      auto *Memmove = cast<MemMoveInst>(Inst);
      Function *ParentFunc = Memmove->getFunction();
      const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
      if (shouldExpandMemIntrinsicWithSize(Memmove->getLength(), TTI)) {
        if (UseMemIntrinsicLibFunc &&
            canEmitLibcall(TM, ParentFunc, RTLIB::MEMMOVE))
          break;

        if (expandMemMoveAsLoop(Memmove, TTI)) {
          Changed = true;
          Memmove->eraseFromParent();
        }
      }

      break;
    }
    case Intrinsic::memset: {
      auto *Memset = cast<MemSetInst>(Inst);
      Function *ParentFunc = Memset->getFunction();
      const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
      if (shouldExpandMemIntrinsicWithSize(Memset->getLength(), TTI)) {
        if (UseMemIntrinsicLibFunc &&
            canEmitLibcall(TM, ParentFunc, RTLIB::MEMSET))
          break;

        expandMemSetAsLoop(Memset);
        Changed = true;
        Memset->eraseFromParent();
      }

      break;
    }
    case Intrinsic::memset_inline: {
      // Only expand llvm.memset.inline with non-constant length in this
      // codepath, leaving the current SelectionDAG expansion for constant
      // length memset intrinsics undisturbed.
      auto *Memset = cast<MemSetInlineInst>(Inst);
      if (isa<ConstantInt>(Memset->getLength()))
        break;

      expandMemSetAsLoop(Memset);
      Changed = true;
      Memset->eraseFromParent();
      break;
    }
    case Intrinsic::experimental_memset_pattern: {
      auto *Memset = cast<MemSetPatternInst>(Inst);
      expandMemSetPatternAsLoop(Memset);
      Changed = true;
      Memset->eraseFromParent();
      break;
    }
    default:
      llvm_unreachable("unhandled intrinsic");
    }
  }

  return Changed;
}

bool PreISelIntrinsicLowering::lowerIntrinsics(Module &M) const {
  bool Changed = false;
  for (Function &F : M) {
    switch (F.getIntrinsicID()) {
    default:
      break;
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_inline:
    case Intrinsic::memmove:
    case Intrinsic::memset:
    case Intrinsic::memset_inline:
    case Intrinsic::experimental_memset_pattern:
      Changed |= expandMemIntrinsicUses(F);
      break;
    case Intrinsic::load_relative:
      Changed |= lowerLoadRelative(F);
      break;
    case Intrinsic::is_constant:
    case Intrinsic::objectsize:
      Changed |= forEachCall(F, [&](CallInst *CI) {
        Function *Parent = CI->getParent()->getParent();
        TargetLibraryInfo &TLI = LookupTLI(*Parent);
        // Intrinsics in unreachable code are not lowered.
        bool Changed = lowerConstantIntrinsics(*Parent, TLI, /*DT=*/nullptr);
        return Changed;
      });
      break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:
#include "llvm/IR/VPIntrinsics.def"
      forEachCall(F, [&](CallInst *CI) {
        Function *Parent = CI->getParent()->getParent();
        const TargetTransformInfo &TTI = LookupTTI(*Parent);
        auto *VPI = cast<VPIntrinsic>(CI);
        VPExpansionDetails ED = expandVectorPredicationIntrinsic(*VPI, TTI);
        // Expansion of VP intrinsics may change the IR but not actually
        // replace the intrinsic, so update Changed for the pass
        // and compute Removed for forEachCall.
        Changed |= ED != VPExpansionDetails::IntrinsicUnchanged;
        bool Removed = ED == VPExpansionDetails::IntrinsicReplaced;
        return Removed;
      });
      break;
    case Intrinsic::objc_autorelease:
      Changed |= lowerObjCCall(F, "objc_autorelease");
      break;
    case Intrinsic::objc_autoreleasePoolPop:
      Changed |= lowerObjCCall(F, "objc_autoreleasePoolPop");
      break;
    case Intrinsic::objc_autoreleasePoolPush:
      Changed |= lowerObjCCall(F, "objc_autoreleasePoolPush");
      break;
    case Intrinsic::objc_autoreleaseReturnValue:
      Changed |= lowerObjCCall(F, "objc_autoreleaseReturnValue");
      break;
    case Intrinsic::objc_copyWeak:
      Changed |= lowerObjCCall(F, "objc_copyWeak");
      break;
    case Intrinsic::objc_destroyWeak:
      Changed |= lowerObjCCall(F, "objc_destroyWeak");
      break;
    case Intrinsic::objc_initWeak:
      Changed |= lowerObjCCall(F, "objc_initWeak");
      break;
    case Intrinsic::objc_loadWeak:
      Changed |= lowerObjCCall(F, "objc_loadWeak");
      break;
    case Intrinsic::objc_loadWeakRetained:
      Changed |= lowerObjCCall(F, "objc_loadWeakRetained");
      break;
    case Intrinsic::objc_moveWeak:
      Changed |= lowerObjCCall(F, "objc_moveWeak");
      break;
    case Intrinsic::objc_release:
      Changed |= lowerObjCCall(F, "objc_release", true);
      break;
    case Intrinsic::objc_retain:
      Changed |= lowerObjCCall(F, "objc_retain", true);
      break;
    case Intrinsic::objc_retainAutorelease:
      Changed |= lowerObjCCall(F, "objc_retainAutorelease");
      break;
    case Intrinsic::objc_retainAutoreleaseReturnValue:
      Changed |= lowerObjCCall(F, "objc_retainAutoreleaseReturnValue");
      break;
    case Intrinsic::objc_retainAutoreleasedReturnValue:
      Changed |= lowerObjCCall(F, "objc_retainAutoreleasedReturnValue");
      break;
    case Intrinsic::objc_retainBlock:
      Changed |= lowerObjCCall(F, "objc_retainBlock");
      break;
    case Intrinsic::objc_storeStrong:
      Changed |= lowerObjCCall(F, "objc_storeStrong");
      break;
    case Intrinsic::objc_storeWeak:
      Changed |= lowerObjCCall(F, "objc_storeWeak");
      break;
    case Intrinsic::objc_unsafeClaimAutoreleasedReturnValue:
      Changed |= lowerObjCCall(F, "objc_unsafeClaimAutoreleasedReturnValue");
      break;
    case Intrinsic::objc_retainedObject:
      Changed |= lowerObjCCall(F, "objc_retainedObject");
      break;
    case Intrinsic::objc_unretainedObject:
      Changed |= lowerObjCCall(F, "objc_unretainedObject");
      break;
    case Intrinsic::objc_unretainedPointer:
      Changed |= lowerObjCCall(F, "objc_unretainedPointer");
      break;
    case Intrinsic::objc_retain_autorelease:
      Changed |= lowerObjCCall(F, "objc_retain_autorelease");
      break;
    case Intrinsic::objc_sync_enter:
      Changed |= lowerObjCCall(F, "objc_sync_enter");
      break;
    case Intrinsic::objc_sync_exit:
      Changed |= lowerObjCCall(F, "objc_sync_exit");
      break;
    }
  }
  return Changed;
}

namespace {

class PreISelIntrinsicLoweringLegacyPass : public ModulePass {
public:
  static char ID;

  PreISelIntrinsicLoweringLegacyPass() : ModulePass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
  }

  bool runOnModule(Module &M) override {
    auto LookupTTI = [this](Function &F) -> TargetTransformInfo & {
      return this->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    };
    auto LookupTLI = [this](Function &F) -> TargetLibraryInfo & {
      return this->getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    };

    const auto *TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
    PreISelIntrinsicLowering Lowering(TM, LookupTTI, LookupTLI);
    return Lowering.lowerIntrinsics(M);
  }
};

} // end anonymous namespace

char PreISelIntrinsicLoweringLegacyPass::ID;

INITIALIZE_PASS_BEGIN(PreISelIntrinsicLoweringLegacyPass,
                      "pre-isel-intrinsic-lowering",
                      "Pre-ISel Intrinsic Lowering", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(PreISelIntrinsicLoweringLegacyPass,
                    "pre-isel-intrinsic-lowering",
                    "Pre-ISel Intrinsic Lowering", false, false)

ModulePass *llvm::createPreISelIntrinsicLoweringPass() {
  return new PreISelIntrinsicLoweringLegacyPass();
}

PreservedAnalyses PreISelIntrinsicLoweringPass::run(Module &M,
                                                    ModuleAnalysisManager &AM) {
  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();

  auto LookupTTI = [&FAM](Function &F) -> TargetTransformInfo & {
    return FAM.getResult<TargetIRAnalysis>(F);
  };
  auto LookupTLI = [&FAM](Function &F) -> TargetLibraryInfo & {
    return FAM.getResult<TargetLibraryAnalysis>(F);
  };

  PreISelIntrinsicLowering Lowering(TM, LookupTTI, LookupTLI);
  if (!Lowering.lowerIntrinsics(M))
    return PreservedAnalyses::all();
  else
    return PreservedAnalyses::none();
}
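// Usage sketch (assuming the standard new-PM registration of this pass): it is
// normally scheduled by the target's codegen pipeline, but it can typically be
// exercised in isolation with something like:
//   opt -passes=pre-isel-intrinsic-lowering -S input.ll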