LLVM 20.0.0git
PreISelIntrinsicLowering.cpp
Go to the documentation of this file.
1//===- PreISelIntrinsicLowering.cpp - Pre-ISel intrinsic lowering pass ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass implements IR lowering for the llvm.memcpy, llvm.memmove,
10// llvm.memset, llvm.load.relative and llvm.objc.* intrinsics.
11//
12//===----------------------------------------------------------------------===//
13
#include "llvm/CodeGen/PreISelIntrinsicLowering.h"
#include "llvm/Analysis/ObjCARCInstKind.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Scalar/LowerConstantIntrinsics.h"
#include "llvm/Transforms/Utils/LowerMemIntrinsics.h"
37
38using namespace llvm;
39
40/// Threshold to leave statically sized memory intrinsic calls. Calls of known
41/// size larger than this will be expanded by the pass. Calls of unknown or
42/// lower size will be left for expansion in codegen.
44 "mem-intrinsic-expand-size",
45 cl::desc("Set minimum mem intrinsic size to expand in IR"), cl::init(-1),
47
48namespace {
49
50struct PreISelIntrinsicLowering {
51 const TargetMachine *TM;
52 const function_ref<TargetTransformInfo &(Function &)> LookupTTI;
53 const function_ref<TargetLibraryInfo &(Function &)> LookupTLI;
54
55 /// If this is true, assume it's preferably to leave memory intrinsic calls
56 /// for replacement with a library call later. Otherwise this depends on
57 /// TargetLoweringInfo availability of the corresponding function.
58 const bool UseMemIntrinsicLibFunc;
59
60 explicit PreISelIntrinsicLowering(
61 const TargetMachine *TM_,
64 bool UseMemIntrinsicLibFunc_ = true)
65 : TM(TM_), LookupTTI(LookupTTI_), LookupTLI(LookupTLI_),
66 UseMemIntrinsicLibFunc(UseMemIntrinsicLibFunc_) {}
67
68 static bool shouldExpandMemIntrinsicWithSize(Value *Size,
70 bool expandMemIntrinsicUses(Function &F) const;
71 bool lowerIntrinsics(Module &M) const;
72};
73
74} // namespace
75
/// Invoke \p Callback on every CallInst user of \p Intrin. \p Callback
/// returns true when it erased the call; the loop then does not advance past
/// the (now dead) use. Returns true if any callback reported a change.
template <class T> static bool forEachCall(Function &Intrin, T Callback) {
  // Lowering all intrinsics in a function will delete multiple uses, so we
  // can't use an early-inc-range. In case some remain, we don't want to look
  // at them again. Unfortunately, Value::UseList is private, so we can't use a
  // simple Use**. If LastUse is null, the next use to consider is
  // Intrin.use_begin(), otherwise it's LastUse->getNext().
  Use *LastUse = nullptr;
  bool Changed = false;
  while (!Intrin.use_empty() && (!LastUse || LastUse->getNext())) {
    Use *U = LastUse ? LastUse->getNext() : &*Intrin.use_begin();
    bool Removed = false;
    // An intrinsic cannot have its address taken, so it cannot be an argument
    // operand. It might be used as operand in debug metadata, though.
    if (auto CI = dyn_cast<CallInst>(U->getUser()))
      Changed |= Removed = Callback(CI);
    // Only advance when the callback left this use in place; if the call was
    // erased, LastUse->getNext() already points at the next live use.
    if (!Removed)
      LastUse = U;
  }
  return Changed;
}
96
98 if (F.use_empty())
99 return false;
100
101 bool Changed = false;
102 Type *Int32Ty = Type::getInt32Ty(F.getContext());
103
104 for (Use &U : llvm::make_early_inc_range(F.uses())) {
105 auto CI = dyn_cast<CallInst>(U.getUser());
106 if (!CI || CI->getCalledOperand() != &F)
107 continue;
108
109 IRBuilder<> B(CI);
110 Value *OffsetPtr =
111 B.CreatePtrAdd(CI->getArgOperand(0), CI->getArgOperand(1));
112 Value *OffsetI32 = B.CreateAlignedLoad(Int32Ty, OffsetPtr, Align(4));
113
114 Value *ResultPtr = B.CreatePtrAdd(CI->getArgOperand(0), OffsetI32);
115
116 CI->replaceAllUsesWith(ResultPtr);
117 CI->eraseFromParent();
118 Changed = true;
119 }
120
121 return Changed;
122}
123
124// ObjCARC has knowledge about whether an obj-c runtime function needs to be
125// always tail-called or never tail-called.
128 if (objcarc::IsAlwaysTail(Kind))
129 return CallInst::TCK_Tail;
130 else if (objcarc::IsNeverTail(Kind))
132 return CallInst::TCK_None;
133}
134
135static bool lowerObjCCall(Function &F, const char *NewFn,
136 bool setNonLazyBind = false) {
138 "Pre-ISel intrinsics do lower into regular function calls");
139 if (F.use_empty())
140 return false;
141
142 // If we haven't already looked up this function, check to see if the
143 // program already contains a function with this name.
144 Module *M = F.getParent();
145 FunctionCallee FCache = M->getOrInsertFunction(NewFn, F.getFunctionType());
146
147 if (Function *Fn = dyn_cast<Function>(FCache.getCallee())) {
148 Fn->setLinkage(F.getLinkage());
149 if (setNonLazyBind && !Fn->isWeakForLinker()) {
150 // If we have Native ARC, set nonlazybind attribute for these APIs for
151 // performance.
152 Fn->addFnAttr(Attribute::NonLazyBind);
153 }
154 }
155
157
158 for (Use &U : llvm::make_early_inc_range(F.uses())) {
159 auto *CB = cast<CallBase>(U.getUser());
160
161 if (CB->getCalledFunction() != &F) {
163 (void)Kind;
164 assert((Kind == objcarc::ARCInstKind::RetainRV ||
165 Kind == objcarc::ARCInstKind::UnsafeClaimRV) &&
166 "use expected to be the argument of operand bundle "
167 "\"clang.arc.attachedcall\"");
168 U.set(FCache.getCallee());
169 continue;
170 }
171
172 auto *CI = cast<CallInst>(CB);
173 assert(CI->getCalledFunction() && "Cannot lower an indirect call!");
174
175 IRBuilder<> Builder(CI->getParent(), CI->getIterator());
176 SmallVector<Value *, 8> Args(CI->args());
178 CI->getOperandBundlesAsDefs(BundleList);
179 CallInst *NewCI = Builder.CreateCall(FCache, Args, BundleList);
180 NewCI->setName(CI->getName());
181
182 // Try to set the most appropriate TailCallKind based on both the current
183 // attributes and the ones that we could get from ObjCARC's special
184 // knowledge of the runtime functions.
185 //
186 // std::max respects both requirements of notail and tail here:
187 // * notail on either the call or from ObjCARC becomes notail
188 // * tail on either side is stronger than none, but not notail
189 CallInst::TailCallKind TCK = CI->getTailCallKind();
190 NewCI->setTailCallKind(std::max(TCK, OverridingTCK));
191
192 // Transfer the 'returned' attribute from the intrinsic to the call site.
193 // By applying this only to intrinsic call sites, we avoid applying it to
194 // non-ARC explicit calls to things like objc_retain which have not been
195 // auto-upgraded to use the intrinsics.
196 unsigned Index;
197 if (F.getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
198 Index)
200 Attribute::Returned);
201
202 if (!CI->use_empty())
203 CI->replaceAllUsesWith(NewCI);
204 CI->eraseFromParent();
205 }
206
207 return true;
208}
209
210// TODO: Should refine based on estimated number of accesses (e.g. does it
211// require splitting based on alignment)
212bool PreISelIntrinsicLowering::shouldExpandMemIntrinsicWithSize(
214 ConstantInt *CI = dyn_cast<ConstantInt>(Size);
215 if (!CI)
216 return true;
217 uint64_t Threshold = MemIntrinsicExpandSizeThresholdOpt.getNumOccurrences()
220 uint64_t SizeVal = CI->getZExtValue();
221
222 // Treat a threshold of 0 as a special case to force expansion of all
223 // intrinsics, including size 0.
224 return SizeVal > Threshold || Threshold == 0;
225}
226
227static bool canEmitLibcall(const TargetMachine *TM, Function *F,
228 RTLIB::Libcall LC) {
229 // TODO: Should this consider the address space of the memcpy?
230 if (!TM)
231 return true;
232 const TargetLowering *TLI = TM->getSubtargetImpl(*F)->getTargetLowering();
233 return TLI->getLibcallName(LC) != nullptr;
234}
235
236// TODO: Handle atomic memcpy and memcpy.inline
237// TODO: Pass ScalarEvolution
238bool PreISelIntrinsicLowering::expandMemIntrinsicUses(Function &F) const {
239 Intrinsic::ID ID = F.getIntrinsicID();
240 bool Changed = false;
241
242 for (User *U : llvm::make_early_inc_range(F.users())) {
243 Instruction *Inst = cast<Instruction>(U);
244
245 switch (ID) {
246 case Intrinsic::memcpy: {
247 auto *Memcpy = cast<MemCpyInst>(Inst);
248 Function *ParentFunc = Memcpy->getFunction();
249 const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
250 if (shouldExpandMemIntrinsicWithSize(Memcpy->getLength(), TTI)) {
251 if (UseMemIntrinsicLibFunc &&
252 canEmitLibcall(TM, ParentFunc, RTLIB::MEMCPY))
253 break;
254
255 // TODO: For optsize, emit the loop into a separate function
256 expandMemCpyAsLoop(Memcpy, TTI);
257 Changed = true;
258 Memcpy->eraseFromParent();
259 }
260
261 break;
262 }
263 case Intrinsic::memcpy_inline: {
264 // Only expand llvm.memcpy.inline with non-constant length in this
265 // codepath, leaving the current SelectionDAG expansion for constant
266 // length memcpy intrinsics undisturbed.
267 auto *Memcpy = cast<MemCpyInlineInst>(Inst);
268 if (isa<ConstantInt>(Memcpy->getLength()))
269 break;
270
271 Function *ParentFunc = Memcpy->getFunction();
272 const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
273 expandMemCpyAsLoop(Memcpy, TTI);
274 Changed = true;
275 Memcpy->eraseFromParent();
276 break;
277 }
278 case Intrinsic::memmove: {
279 auto *Memmove = cast<MemMoveInst>(Inst);
280 Function *ParentFunc = Memmove->getFunction();
281 const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
282 if (shouldExpandMemIntrinsicWithSize(Memmove->getLength(), TTI)) {
283 if (UseMemIntrinsicLibFunc &&
284 canEmitLibcall(TM, ParentFunc, RTLIB::MEMMOVE))
285 break;
286
287 if (expandMemMoveAsLoop(Memmove, TTI)) {
288 Changed = true;
289 Memmove->eraseFromParent();
290 }
291 }
292
293 break;
294 }
295 case Intrinsic::memset: {
296 auto *Memset = cast<MemSetInst>(Inst);
297 Function *ParentFunc = Memset->getFunction();
298 const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
299 if (shouldExpandMemIntrinsicWithSize(Memset->getLength(), TTI)) {
300 if (UseMemIntrinsicLibFunc &&
301 canEmitLibcall(TM, ParentFunc, RTLIB::MEMSET))
302 break;
303
304 expandMemSetAsLoop(Memset);
305 Changed = true;
306 Memset->eraseFromParent();
307 }
308
309 break;
310 }
311 case Intrinsic::memset_inline: {
312 // Only expand llvm.memset.inline with non-constant length in this
313 // codepath, leaving the current SelectionDAG expansion for constant
314 // length memset intrinsics undisturbed.
315 auto *Memset = cast<MemSetInlineInst>(Inst);
316 if (isa<ConstantInt>(Memset->getLength()))
317 break;
318
319 expandMemSetAsLoop(Memset);
320 Changed = true;
321 Memset->eraseFromParent();
322 break;
323 }
324 case Intrinsic::experimental_memset_pattern: {
325 auto *Memset = cast<MemSetPatternInst>(Inst);
327 Changed = true;
328 Memset->eraseFromParent();
329 break;
330 }
331 default:
332 llvm_unreachable("unhandled intrinsic");
333 }
334 }
335
336 return Changed;
337}
338
339bool PreISelIntrinsicLowering::lowerIntrinsics(Module &M) const {
340 bool Changed = false;
341 for (Function &F : M) {
342 switch (F.getIntrinsicID()) {
343 default:
344 break;
345 case Intrinsic::memcpy:
346 case Intrinsic::memcpy_inline:
347 case Intrinsic::memmove:
348 case Intrinsic::memset:
349 case Intrinsic::memset_inline:
350 case Intrinsic::experimental_memset_pattern:
351 Changed |= expandMemIntrinsicUses(F);
352 break;
353 case Intrinsic::load_relative:
354 Changed |= lowerLoadRelative(F);
355 break;
356 case Intrinsic::is_constant:
357 case Intrinsic::objectsize:
358 Changed |= forEachCall(F, [&](CallInst *CI) {
359 Function *Parent = CI->getParent()->getParent();
360 TargetLibraryInfo &TLI = LookupTLI(*Parent);
361 // Intrinsics in unreachable code are not lowered.
362 bool Changed = lowerConstantIntrinsics(*Parent, TLI, /*DT=*/nullptr);
363 return Changed;
364 });
365 break;
366#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS) \
367 case Intrinsic::VPID:
368#include "llvm/IR/VPIntrinsics.def"
369 forEachCall(F, [&](CallInst *CI) {
370 Function *Parent = CI->getParent()->getParent();
371 const TargetTransformInfo &TTI = LookupTTI(*Parent);
372 auto *VPI = cast<VPIntrinsic>(CI);
374 // Expansion of VP intrinsics may change the IR but not actually
375 // replace the intrinsic, so update Changed for the pass
376 // and compute Removed for forEachCall.
377 Changed |= ED != VPExpansionDetails::IntrinsicUnchanged;
378 bool Removed = ED == VPExpansionDetails::IntrinsicReplaced;
379 return Removed;
380 });
381 break;
382 case Intrinsic::objc_autorelease:
383 Changed |= lowerObjCCall(F, "objc_autorelease");
384 break;
385 case Intrinsic::objc_autoreleasePoolPop:
386 Changed |= lowerObjCCall(F, "objc_autoreleasePoolPop");
387 break;
388 case Intrinsic::objc_autoreleasePoolPush:
389 Changed |= lowerObjCCall(F, "objc_autoreleasePoolPush");
390 break;
391 case Intrinsic::objc_autoreleaseReturnValue:
392 Changed |= lowerObjCCall(F, "objc_autoreleaseReturnValue");
393 break;
394 case Intrinsic::objc_copyWeak:
395 Changed |= lowerObjCCall(F, "objc_copyWeak");
396 break;
397 case Intrinsic::objc_destroyWeak:
398 Changed |= lowerObjCCall(F, "objc_destroyWeak");
399 break;
400 case Intrinsic::objc_initWeak:
401 Changed |= lowerObjCCall(F, "objc_initWeak");
402 break;
403 case Intrinsic::objc_loadWeak:
404 Changed |= lowerObjCCall(F, "objc_loadWeak");
405 break;
406 case Intrinsic::objc_loadWeakRetained:
407 Changed |= lowerObjCCall(F, "objc_loadWeakRetained");
408 break;
409 case Intrinsic::objc_moveWeak:
410 Changed |= lowerObjCCall(F, "objc_moveWeak");
411 break;
412 case Intrinsic::objc_release:
413 Changed |= lowerObjCCall(F, "objc_release", true);
414 break;
415 case Intrinsic::objc_retain:
416 Changed |= lowerObjCCall(F, "objc_retain", true);
417 break;
418 case Intrinsic::objc_retainAutorelease:
419 Changed |= lowerObjCCall(F, "objc_retainAutorelease");
420 break;
421 case Intrinsic::objc_retainAutoreleaseReturnValue:
422 Changed |= lowerObjCCall(F, "objc_retainAutoreleaseReturnValue");
423 break;
424 case Intrinsic::objc_retainAutoreleasedReturnValue:
425 Changed |= lowerObjCCall(F, "objc_retainAutoreleasedReturnValue");
426 break;
427 case Intrinsic::objc_retainBlock:
428 Changed |= lowerObjCCall(F, "objc_retainBlock");
429 break;
430 case Intrinsic::objc_storeStrong:
431 Changed |= lowerObjCCall(F, "objc_storeStrong");
432 break;
433 case Intrinsic::objc_storeWeak:
434 Changed |= lowerObjCCall(F, "objc_storeWeak");
435 break;
436 case Intrinsic::objc_unsafeClaimAutoreleasedReturnValue:
437 Changed |= lowerObjCCall(F, "objc_unsafeClaimAutoreleasedReturnValue");
438 break;
439 case Intrinsic::objc_retainedObject:
440 Changed |= lowerObjCCall(F, "objc_retainedObject");
441 break;
442 case Intrinsic::objc_unretainedObject:
443 Changed |= lowerObjCCall(F, "objc_unretainedObject");
444 break;
445 case Intrinsic::objc_unretainedPointer:
446 Changed |= lowerObjCCall(F, "objc_unretainedPointer");
447 break;
448 case Intrinsic::objc_retain_autorelease:
449 Changed |= lowerObjCCall(F, "objc_retain_autorelease");
450 break;
451 case Intrinsic::objc_sync_enter:
452 Changed |= lowerObjCCall(F, "objc_sync_enter");
453 break;
454 case Intrinsic::objc_sync_exit:
455 Changed |= lowerObjCCall(F, "objc_sync_exit");
456 break;
457 case Intrinsic::exp:
458 case Intrinsic::exp2:
459 Changed |= forEachCall(F, [&](CallInst *CI) {
460 Type *Ty = CI->getArgOperand(0)->getType();
461 if (!isa<ScalableVectorType>(Ty))
462 return false;
463 const TargetLowering *TL = TM->getSubtargetImpl(F)->getTargetLowering();
464 unsigned Op = TL->IntrinsicIDToISD(F.getIntrinsicID());
466 return false;
468 });
469 break;
470 }
471 }
472 return Changed;
473}
474
475namespace {
476
477class PreISelIntrinsicLoweringLegacyPass : public ModulePass {
478public:
479 static char ID;
480
481 PreISelIntrinsicLoweringLegacyPass() : ModulePass(ID) {}
482
483 void getAnalysisUsage(AnalysisUsage &AU) const override {
487 }
488
489 bool runOnModule(Module &M) override {
490 auto LookupTTI = [this](Function &F) -> TargetTransformInfo & {
491 return this->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
492 };
493 auto LookupTLI = [this](Function &F) -> TargetLibraryInfo & {
494 return this->getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
495 };
496
497 const auto *TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
498 PreISelIntrinsicLowering Lowering(TM, LookupTTI, LookupTLI);
499 return Lowering.lowerIntrinsics(M);
500 }
501};
502
503} // end anonymous namespace
504
505char PreISelIntrinsicLoweringLegacyPass::ID;
506
507INITIALIZE_PASS_BEGIN(PreISelIntrinsicLoweringLegacyPass,
508 "pre-isel-intrinsic-lowering",
509 "Pre-ISel Intrinsic Lowering", false, false)
513INITIALIZE_PASS_END(PreISelIntrinsicLoweringLegacyPass,
514 "pre-isel-intrinsic-lowering",
515 "Pre-ISel Intrinsic Lowering", false, false)
516
518 return new PreISelIntrinsicLoweringLegacyPass();
519}
520
523 auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
524
525 auto LookupTTI = [&FAM](Function &F) -> TargetTransformInfo & {
527 };
528 auto LookupTLI = [&FAM](Function &F) -> TargetLibraryInfo & {
530 };
531
532 PreISelIntrinsicLowering Lowering(TM, LookupTTI, LookupTLI);
533 if (!Lowering.lowerIntrinsics(M))
534 return PreservedAnalyses::all();
535 else
537}
amdgpu isel
static const Function * getParent(const Value *V)
static bool setNonLazyBind(Function &F)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
uint64_t Size
Module.h This file contains the declarations for the Module class.
This defines the Use class.
The header file for the LowerConstantIntrinsics pass as used by the new pass manager.
#define F(x, y, z)
Definition: MD5.cpp:55
This file defines ARC utility functions which are used by various parts of the compiler.
FunctionAnalysisManager FAM
if(PassOpts->AAPipeline)
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:55
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:57
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:52
static cl::opt< int64_t > MemIntrinsicExpandSizeThresholdOpt("mem-intrinsic-expand-size", cl::desc("Set minimum mem intrinsic size to expand in IR"), cl::init(-1), cl::Hidden)
Threshold to leave statically sized memory intrinsic calls.
pre isel intrinsic lowering
static bool forEachCall(Function &Intrin, T Callback)
pre isel intrinsic Pre ISel Intrinsic Lowering
static bool lowerObjCCall(Function &F, const char *NewFn, bool setNonLazyBind=false)
static bool canEmitLibcall(const TargetMachine *TM, Function *F, RTLIB::Libcall LC)
static CallInst::TailCallKind getOverridingTailCallKind(const Function &F)
static bool lowerLoadRelative(Function &F)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
This pass exposes codegen information to IR-level passes.
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:410
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1286
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
Definition: InstrTypes.h:1494
This class represents a function call, abstracting a target machine's calling convention.
void setTailCallKind(TailCallKind TCK)
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:157
This class represents an Operation in the Expression.
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:170
const Function & getFunction() const
Definition: Function.h:171
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2449
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2705
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
Definition: PassManager.h:567
static bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
ModulePass class - This class is used to implement unstructured interprocedural optimizations and ana...
Definition: Pass.h:251
virtual bool runOnModule(Module &M)=0
runOnModule - Virtual method overriden by subclasses to process the module being operated on.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
virtual void getAnalysisUsage(AnalysisUsage &) const
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
Definition: Pass.cpp:98
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: Analysis.h:114
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
int IntrinsicIDToISD(Intrinsic::ID ID) const
Get the ISD node that corresponds to the Intrinsic ID.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
Target-Independent Code Generator Pass Configuration Options.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
uint64_t getMaxMemIntrinsicInlineSizeThreshold() const
Returns the maximum memset / memcpy size in bytes that still makes it profitable to inline the call.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static IntegerType * getInt32Ty(LLVMContext &C)
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
LLVM Value Representation.
Definition: Value.h:74
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
use_iterator use_begin()
Definition: Value.h:360
bool use_empty() const
Definition: Value.h:344
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
ARCInstKind getAttachedARCFunctionKind(const CallBase *CB)
This function returns the ARCInstKind of the function attached to operand bundle clang_arc_attachedca...
Definition: ObjCARCUtil.h:60
bool IsNeverTail(ARCInstKind Class)
Test if the given class represents instructions which are never safe to mark with the "tail" keyword.
bool IsAlwaysTail(ARCInstKind Class)
Test if the given class represents instructions which are always safe to mark with the "tail" keyword...
ARCInstKind
Equivalence classes of instructions in the ARC Model.
ARCInstKind GetFunctionClass(const Function *F)
Determine if F is one of the special known Functions.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
bool lowerUnaryVectorIntrinsicAsLoop(Module &M, CallInst *CI)
Lower CI as a loop.
bool lowerConstantIntrinsics(Function &F, const TargetLibraryInfo &TLI, DominatorTree *DT)
void expandMemSetPatternAsLoop(MemSetPatternInst *MemSet)
Expand MemSetPattern as a loop. MemSet is not deleted.
bool expandMemMoveAsLoop(MemMoveInst *MemMove, const TargetTransformInfo &TTI)
Expand MemMove as a loop.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:657
ModulePass * createPreISelIntrinsicLoweringPass()
This pass lowers the @llvm.load.relative and @llvm.objc.
VPExpansionDetails expandVectorPredicationIntrinsic(VPIntrinsic &VPI, const TargetTransformInfo &TTI)
Expand a vector predication intrinsic.
void expandMemCpyAsLoop(MemCpyInst *MemCpy, const TargetTransformInfo &TTI, ScalarEvolution *SE=nullptr)
Expand MemCpy as a loop. MemCpy is not deleted.
VPExpansionDetails
Represents the details the expansion of a VP intrinsic.
void expandMemSetAsLoop(MemSetInst *MemSet)
Expand MemSet as a loop. MemSet is not deleted.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:289
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)