//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interprocedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
// iteration. See the Attributor.h file comment and the class descriptions in
// that file for more information.
//
//===----------------------------------------------------------------------===//

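// Conceptually, the Attributor works roughly as follows (a simplified sketch,
// not the exact driver code; see Attributor.h and Attributor::run() for the
// real implementation):
//
//   seed abstract attributes (AAs) for interesting IR positions;
//   while (some AA changed && iteration < attributor-max-iterations)
//     update every AA whose required/optional dependences changed;
//   manifest the deduced information (attributes, simplifications) in the IR.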
#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include <cstdint>

#ifdef EXPENSIVE_CHECKS
#include "llvm/IR/Verifier.h"
#endif

#include <cassert>
#include <optional>
#include <string>

using namespace llvm;

#define DEBUG_TYPE "attributor"
#define VERBOSE_DEBUG_TYPE DEBUG_TYPE "-verbose"

DEBUG_COUNTER(ManifestDBGCounter, "attributor-manifest",
              "Determine what attributes are manifested in the IR");

STATISTIC(NumFnDeleted, "Number of functions deleted");
STATISTIC(NumFnWithExactDefinition,
          "Number of functions with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of functions without exact definitions");
STATISTIC(NumFnShallowWrappersCreated, "Number of shallow wrappers created");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");

// TODO: Determine a good default value.
//
// In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
// (when run with the first 5 abstract attributes). The results also indicate
// that we never reach 32 iterations but always find a fixpoint sooner.
//
// This will become more involved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
static cl::opt<unsigned>
    SetFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));

static cl::opt<unsigned, true> MaxInitializationChainLengthX(
    "attributor-max-initialization-chain-length", cl::Hidden,
    cl::desc(
        "Maximal number of chained initializations (to avoid stack overflows)"),
    cl::location(MaxInitializationChainLength), cl::init(1024));
unsigned llvm::MaxInitializationChainLength;

static cl::opt<bool> VerifyMaxFixpointIterations(
    "attributor-max-iterations-verify", cl::Hidden,
    cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
    cl::init(false));

static cl::opt<bool> AnnotateDeclarationCallSites(
    "attributor-annotate-decl-cs", cl::Hidden,
    cl::desc("Annotate call sites of function declarations."), cl::init(false));

static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
                                       cl::init(true), cl::Hidden);

static cl::opt<bool>
    AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden,
                         cl::desc("Allow the Attributor to create shallow "
                                  "wrappers for non-exact definitions."),
                         cl::init(false));

static cl::opt<bool>
    AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden,
                     cl::desc("Allow the Attributor to use IP information "
                              "derived from non-exact functions via cloning"),
                     cl::init(false));

// These options can only be used in debug builds.
#ifndef NDEBUG
static cl::list<std::string>
    SeedAllowList("attributor-seed-allow-list", cl::Hidden,
                  cl::desc("Comma separated list of attribute names that are "
                           "allowed to be seeded."),
                  cl::CommaSeparated);

static cl::list<std::string> FunctionSeedAllowList(
    "attributor-function-seed-allow-list", cl::Hidden,
    cl::desc("Comma separated list of function names that are "
             "allowed to be seeded."),
    cl::CommaSeparated);
#endif

static cl::opt<bool>
    DumpDepGraph("attributor-dump-dep-graph", cl::Hidden,
                 cl::desc("Dump the dependency graph to dot files."),
                 cl::init(false));

static cl::opt<std::string> DepGraphDotFileNamePrefix(
    "attributor-depgraph-dot-filename-prefix", cl::Hidden,
    cl::desc("The prefix used for the CallGraph dot file names."));

static cl::opt<bool> ViewDepGraph("attributor-view-dep-graph", cl::Hidden,
                                  cl::desc("View the dependency graph."),
                                  cl::init(false));

static cl::opt<bool> PrintDependencies("attributor-print-dep", cl::Hidden,
                                       cl::desc("Print attribute dependencies"),
                                       cl::init(false));

static cl::opt<bool> EnableCallSiteSpecific(
    "attributor-enable-call-site-specific-deduction", cl::Hidden,
    cl::desc("Allow the Attributor to do call site specific analysis"),
    cl::init(false));

static cl::opt<bool>
    PrintCallGraph("attributor-print-call-graph", cl::Hidden,
                   cl::desc("Print Attributor's internal call graph"),
                   cl::init(false));

static cl::opt<bool> SimplifyAllLoads("attributor-simplify-all-loads",
                                      cl::Hidden,
                                      cl::desc("Try to simplify all loads."),
                                      cl::init(true));
170
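// Illustrative use of the options above from the command line (a hypothetical
// invocation; the exact pipeline and defaults depend on the build):
//
//   opt -passes=attributor -attributor-max-iterations=16 \
//       -attributor-print-dep -S in.ll -o out.ll
//
// The seed allow lists (-attributor-seed-allow-list=...) are only available
// in builds without NDEBUG.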
/// Logic operators for the change status enum class.
///
///{
ChangeStatus llvm::operator|(ChangeStatus L, ChangeStatus R) {
  return L == ChangeStatus::CHANGED ? L : R;
}
ChangeStatus &llvm::operator|=(ChangeStatus &L, ChangeStatus R) {
  L = L | R;
  return L;
}
ChangeStatus llvm::operator&(ChangeStatus L, ChangeStatus R) {
  return L == ChangeStatus::UNCHANGED ? L : R;
}
ChangeStatus &llvm::operator&=(ChangeStatus &L, ChangeStatus R) {
  L = L & R;
  return L;
}
///}

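// Typical use of these operators in update methods (an illustrative sketch;
// `updateSomething` is a placeholder, not a real helper in this file):
//
//   ChangeStatus Changed = ChangeStatus::UNCHANGED;
//   Changed |= updateSomething(A); // CHANGED wins for operator|.
//   Changed &= OtherStatus;        // UNCHANGED wins for operator&.
//   return Changed;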
bool AA::isNoSyncInst(Attributor &A, const Instruction &I,
                      const AbstractAttribute &QueryingAA) {
192 // We are looking for volatile instructions or non-relaxed atomics.
193 if (const auto *CB = dyn_cast<CallBase>(&I)) {
194 if (CB->hasFnAttr(Attribute::NoSync))
195 return true;
196
197 // Non-convergent and readnone imply nosync.
198 if (!CB->isConvergent() && !CB->mayReadOrWriteMemory())
199 return true;
200
    if (AANoSync::isNoSyncIntrinsic(&I))
      return true;
203
204 const auto &NoSyncAA = A.getAAFor<AANoSync>(
205 QueryingAA, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
206 return NoSyncAA.isAssumedNoSync();
207 }
208
209 if (!I.mayReadOrWriteMemory())
210 return true;
211
212 return !I.isVolatile() && !AANoSync::isNonRelaxedAtomic(&I);
213}
214
bool AA::isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
                             const Value &V, bool ForAnalysisOnly) {
217 // TODO: See the AAInstanceInfo class comment.
218 if (!ForAnalysisOnly)
219 return false;
220 auto &InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
221 QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
222 return InstanceInfoAA.isAssumedUniqueForAnalysis();
223}
224
Constant *AA::getInitialValueForObj(Value &Obj, Type &Ty,
                                    const TargetLibraryInfo *TLI,
227 const DataLayout &DL,
228 AA::RangeTy *RangePtr) {
229 if (isa<AllocaInst>(Obj))
230 return UndefValue::get(&Ty);
231 if (Constant *Init = getInitialValueOfAllocation(&Obj, TLI, &Ty))
232 return Init;
233 auto *GV = dyn_cast<GlobalVariable>(&Obj);
234 if (!GV)
235 return nullptr;
236 if (!GV->hasLocalLinkage() && !(GV->isConstant() && GV->hasInitializer()))
237 return nullptr;
238 if (!GV->hasInitializer())
239 return UndefValue::get(&Ty);
240
241 if (RangePtr && !RangePtr->offsetOrSizeAreUnknown()) {
242 APInt Offset = APInt(64, RangePtr->Offset);
243 return ConstantFoldLoadFromConst(GV->getInitializer(), &Ty, Offset, DL);
244 }
245
246 return ConstantFoldLoadFromUniformValue(GV->getInitializer(), &Ty);
247}
248
249bool AA::isValidInScope(const Value &V, const Function *Scope) {
250 if (isa<Constant>(V))
251 return true;
252 if (auto *I = dyn_cast<Instruction>(&V))
253 return I->getFunction() == Scope;
254 if (auto *A = dyn_cast<Argument>(&V))
255 return A->getParent() == Scope;
256 return false;
257}
258
bool AA::isValidAtPosition(const AA::ValueAndContext &VAC,
                           InformationCache &InfoCache) {
261 if (isa<Constant>(VAC.getValue()) || VAC.getValue() == VAC.getCtxI())
262 return true;
263 const Function *Scope = nullptr;
264 const Instruction *CtxI = VAC.getCtxI();
265 if (CtxI)
266 Scope = CtxI->getFunction();
267 if (auto *A = dyn_cast<Argument>(VAC.getValue()))
268 return A->getParent() == Scope;
269 if (auto *I = dyn_cast<Instruction>(VAC.getValue())) {
270 if (I->getFunction() == Scope) {
      if (const DominatorTree *DT =
              InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
                  *Scope))
274 return DT->dominates(I, CtxI);
275 // Local dominance check mostly for the old PM passes.
276 if (CtxI && I->getParent() == CtxI->getParent())
277 return llvm::any_of(
278 make_range(I->getIterator(), I->getParent()->end()),
279 [&](const Instruction &AfterI) { return &AfterI == CtxI; });
280 }
281 }
282 return false;
283}
284
Value *AA::getWithType(Value &V, Type &Ty) {
  if (V.getType() == &Ty)
287 return &V;
288 if (isa<PoisonValue>(V))
289 return PoisonValue::get(&Ty);
290 if (isa<UndefValue>(V))
291 return UndefValue::get(&Ty);
292 if (auto *C = dyn_cast<Constant>(&V)) {
293 if (C->isNullValue())
294 return Constant::getNullValue(&Ty);
295 if (C->getType()->isPointerTy() && Ty.isPointerTy())
296 return ConstantExpr::getPointerCast(C, &Ty);
297 if (C->getType()->getPrimitiveSizeInBits() >= Ty.getPrimitiveSizeInBits()) {
298 if (C->getType()->isIntegerTy() && Ty.isIntegerTy())
299 return ConstantExpr::getTrunc(C, &Ty, /* OnlyIfReduced */ true);
300 if (C->getType()->isFloatingPointTy() && Ty.isFloatingPointTy())
301 return ConstantExpr::getFPTrunc(C, &Ty, /* OnlyIfReduced */ true);
302 }
303 }
304 return nullptr;
305}
306
307std::optional<Value *>
308AA::combineOptionalValuesInAAValueLatice(const std::optional<Value *> &A,
309 const std::optional<Value *> &B,
310 Type *Ty) {
311 if (A == B)
312 return A;
313 if (!B)
314 return A;
315 if (*B == nullptr)
316 return nullptr;
317 if (!A)
318 return Ty ? getWithType(**B, *Ty) : nullptr;
319 if (*A == nullptr)
320 return nullptr;
321 if (!Ty)
322 Ty = (*A)->getType();
323 if (isa_and_nonnull<UndefValue>(*A))
324 return getWithType(**B, *Ty);
325 if (isa<UndefValue>(*B))
326 return A;
327 if (*A && *B && *A == getWithType(**B, *Ty))
328 return A;
329 return nullptr;
330}
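// The std::optional<Value *> lattice combined above encodes three states
// (a descriptive note, grounded in the code just above):
//   std::nullopt - no value known yet,
//   nullptr      - conflicting / not simplifiable,
//   V            - a single concrete value.
// For example (illustrative): combining std::nullopt with V yields V
// (converted to Ty if possible), combining undef with V yields V, and
// combining two different non-undef values yields nullptr.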
331
template <bool IsLoad, typename Ty>
static bool getPotentialCopiesOfMemoryValue(
    Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
335 SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
336 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
337 bool OnlyExact) {
338 LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
339 << " (only exact: " << OnlyExact << ")\n";);
340
341 Value &Ptr = *I.getPointerOperand();
342 // Containers to remember the pointer infos and new copies while we are not
343 // sure that we can find all of them. If we abort we want to avoid spurious
344 // dependences and potential copies in the provided container.
  SmallVector<const AAPointerInfo *> PIs;
  SmallVector<Value *> NewCopies;
347 SmallVector<Instruction *> NewCopyOrigins;
348
349 const auto *TLI =
350 A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());
351
352 auto Pred = [&](Value &Obj) {
353 LLVM_DEBUG(dbgs() << "Visit underlying object " << Obj << "\n");
354 if (isa<UndefValue>(&Obj))
355 return true;
356 if (isa<ConstantPointerNull>(&Obj)) {
357 // A null pointer access can be undefined but any offset from null may
358 // be OK. We do not try to optimize the latter.
359 if (!NullPointerIsDefined(I.getFunction(),
360 Ptr.getType()->getPointerAddressSpace()) &&
361 A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation,
362 AA::Interprocedural) == &Obj)
363 return true;
      LLVM_DEBUG(
          dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
366 return false;
367 }
368 // TODO: Use assumed noalias return.
369 if (!isa<AllocaInst>(&Obj) && !isa<GlobalVariable>(&Obj) &&
370 !(IsLoad ? isAllocationFn(&Obj, TLI) : isNoAliasCall(&Obj))) {
371 LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << Obj
372 << "\n";);
373 return false;
374 }
375 if (auto *GV = dyn_cast<GlobalVariable>(&Obj))
376 if (!GV->hasLocalLinkage() &&
377 !(GV->isConstant() && GV->hasInitializer())) {
378 LLVM_DEBUG(dbgs() << "Underlying object is global with external "
379 "linkage, not supported yet: "
380 << Obj << "\n";);
381 return false;
382 }
383
384 bool NullOnly = true;
385 bool NullRequired = false;
386 auto CheckForNullOnlyAndUndef = [&](std::optional<Value *> V,
387 bool IsExact) {
388 if (!V || *V == nullptr)
389 NullOnly = false;
390 else if (isa<UndefValue>(*V))
391 /* No op */;
392 else if (isa<Constant>(*V) && cast<Constant>(*V)->isNullValue())
393 NullRequired = !IsExact;
394 else
395 NullOnly = false;
396 };
397
398 auto AdjustWrittenValueType = [&](const AAPointerInfo::Access &Acc,
399 Value &V) {
400 Value *AdjV = AA::getWithType(V, *I.getType());
401 if (!AdjV) {
402 LLVM_DEBUG(dbgs() << "Underlying object written but stored value "
403 "cannot be converted to read type: "
404 << *Acc.getRemoteInst() << " : " << *I.getType()
405 << "\n";);
406 }
407 return AdjV;
408 };
409
410 auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
411 if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
412 return true;
413 if (IsLoad && Acc.isWrittenValueYetUndetermined())
414 return true;
415 CheckForNullOnlyAndUndef(Acc.getContent(), IsExact);
416 if (OnlyExact && !IsExact && !NullOnly &&
417 !isa_and_nonnull<UndefValue>(Acc.getWrittenValue())) {
418 LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
419 << ", abort!\n");
420 return false;
421 }
422 if (NullRequired && !NullOnly) {
423 LLVM_DEBUG(dbgs() << "Required all `null` accesses due to non exact "
424 "one, however found non-null one: "
425 << *Acc.getRemoteInst() << ", abort!\n");
426 return false;
427 }
428 if (IsLoad) {
429 assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
430 if (!Acc.isWrittenValueUnknown()) {
431 Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue());
432 if (!V)
433 return false;
434 NewCopies.push_back(V);
435 NewCopyOrigins.push_back(Acc.getRemoteInst());
436 return true;
437 }
438 auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
439 if (!SI) {
440 LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
441 "instruction not supported yet: "
442 << *Acc.getRemoteInst() << "\n";);
443 return false;
444 }
445 Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand());
446 if (!V)
447 return false;
448 NewCopies.push_back(V);
449 NewCopyOrigins.push_back(SI);
450 } else {
451 assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
452 auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
453 if (!LI && OnlyExact) {
454 LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
455 "instruction not supported yet: "
456 << *Acc.getRemoteInst() << "\n";);
457 return false;
458 }
459 NewCopies.push_back(Acc.getRemoteInst());
460 }
461 return true;
462 };
463
464 // If the value has been written to we don't need the initial value of the
465 // object.
466 bool HasBeenWrittenTo = false;
467
468 AA::RangeTy Range;
469 auto &PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(Obj),
470 DepClassTy::NONE);
471 if (!PI.forallInterferingAccesses(A, QueryingAA, I,
472 /* FindInterferingWrites */ IsLoad,
473 /* FindInterferingReads */ !IsLoad,
474 CheckAccess, HasBeenWrittenTo, Range)) {
      LLVM_DEBUG(
          dbgs()
477 << "Failed to verify all interfering accesses for underlying object: "
478 << Obj << "\n");
479 return false;
480 }
481
482 if (IsLoad && !HasBeenWrittenTo && !Range.isUnassigned()) {
483 const DataLayout &DL = A.getDataLayout();
484 Value *InitialValue =
485 AA::getInitialValueForObj(Obj, *I.getType(), TLI, DL, &Range);
486 if (!InitialValue) {
487 LLVM_DEBUG(dbgs() << "Could not determine required initial value of "
488 "underlying object, abort!\n");
489 return false;
490 }
491 CheckForNullOnlyAndUndef(InitialValue, /* IsExact */ true);
492 if (NullRequired && !NullOnly) {
493 LLVM_DEBUG(dbgs() << "Non exact access but initial value that is not "
494 "null or undef, abort!\n");
495 return false;
496 }
497
498 NewCopies.push_back(InitialValue);
499 NewCopyOrigins.push_back(nullptr);
500 }
501
502 PIs.push_back(&PI);
503
504 return true;
505 };
506
507 const auto &AAUO = A.getAAFor<AAUnderlyingObjects>(
508 QueryingAA, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
509 if (!AAUO.forallUnderlyingObjects(Pred)) {
    LLVM_DEBUG(
        dbgs() << "Underlying objects stored into could not be determined\n";);
512 return false;
513 }
514
  // Only if we successfully collected all potential copies do we record
  // dependences (on non-fixed AAPointerInfo AAs) and modify the given
  // PotentialCopies container.
518 for (const auto *PI : PIs) {
519 if (!PI->getState().isAtFixpoint())
520 UsedAssumedInformation = true;
521 A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
522 }
523 PotentialCopies.insert(NewCopies.begin(), NewCopies.end());
524 PotentialValueOrigins.insert(NewCopyOrigins.begin(), NewCopyOrigins.end());
525
526 return true;
527}
528
bool AA::getPotentiallyLoadedValues(
    Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
531 SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
532 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
533 bool OnlyExact) {
534 return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
535 A, LI, PotentialValues, PotentialValueOrigins, QueryingAA,
536 UsedAssumedInformation, OnlyExact);
537}
538
bool AA::getPotentialCopiesOfStoredValue(
    Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
541 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
542 bool OnlyExact) {
543 SmallSetVector<Instruction *, 4> PotentialValueOrigins;
544 return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
545 A, SI, PotentialCopies, PotentialValueOrigins, QueryingAA,
546 UsedAssumedInformation, OnlyExact);
547}
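// Illustrative caller pattern for the helper above (a sketch; checkForAllUses
// further below is a real in-tree use). `visit` is a placeholder for whatever
// the caller wants to do with each copy:
//
//   SmallSetVector<Value *, 4> Copies;
//   bool UsedAssumedInformation = false;
//   if (AA::getPotentialCopiesOfStoredValue(A, SI, Copies, QueryingAA,
//                                           UsedAssumedInformation,
//                                           /* OnlyExact */ true))
//     for (Value *Copy : Copies)
//       visit(Copy);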
548
static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP,
                                        const AbstractAttribute &QueryingAA,
551 bool RequireReadNone, bool &IsKnown) {

  IRPosition::Kind Kind = IRP.getPositionKind();
  if (Kind == IRPosition::IRP_FUNCTION || Kind == IRPosition::IRP_CALL_SITE) {
    const auto &MemLocAA =
556 A.getAAFor<AAMemoryLocation>(QueryingAA, IRP, DepClassTy::NONE);
557 if (MemLocAA.isAssumedReadNone()) {
558 IsKnown = MemLocAA.isKnownReadNone();
559 if (!IsKnown)
560 A.recordDependence(MemLocAA, QueryingAA, DepClassTy::OPTIONAL);
561 return true;
562 }
563 }
564
565 const auto &MemBehaviorAA =
566 A.getAAFor<AAMemoryBehavior>(QueryingAA, IRP, DepClassTy::NONE);
567 if (MemBehaviorAA.isAssumedReadNone() ||
568 (!RequireReadNone && MemBehaviorAA.isAssumedReadOnly())) {
569 IsKnown = RequireReadNone ? MemBehaviorAA.isKnownReadNone()
570 : MemBehaviorAA.isKnownReadOnly();
571 if (!IsKnown)
572 A.recordDependence(MemBehaviorAA, QueryingAA, DepClassTy::OPTIONAL);
573 return true;
574 }
575
576 return false;
577}
578
bool AA::isAssumedReadOnly(Attributor &A, const IRPosition &IRP,
                           const AbstractAttribute &QueryingAA, bool &IsKnown) {
581 return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
582 /* RequireReadNone */ false, IsKnown);
583}
bool AA::isAssumedReadNone(Attributor &A, const IRPosition &IRP,
                           const AbstractAttribute &QueryingAA, bool &IsKnown) {
586 return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
587 /* RequireReadNone */ true, IsKnown);
588}
589
static bool
isPotentiallyReachable(Attributor &A, const Instruction &FromI,
                       const Instruction *ToI, const Function &ToFn,
593 const AbstractAttribute &QueryingAA,
594 const AA::InstExclusionSetTy *ExclusionSet,
595 std::function<bool(const Function &F)> GoBackwardsCB) {
596 LLVM_DEBUG({
597 dbgs() << "[AA] isPotentiallyReachable @" << ToFn.getName() << " from "
598 << FromI << " [GBCB: " << bool(GoBackwardsCB) << "][#ExS: "
599 << (ExclusionSet ? std::to_string(ExclusionSet->size()) : "none")
600 << "]\n";
601 if (ExclusionSet)
602 for (auto *ES : *ExclusionSet)
603 dbgs() << *ES << "\n";
604 });
605
606 // We know kernels (generally) cannot be called from within the module. Thus,
607 // for reachability we would need to step back from a kernel which would allow
608 // us to reach anything anyway. Even if a kernel is invoked from another
609 // kernel, values like allocas and shared memory are not accessible. We
610 // implicitly check for this situation to avoid costly lookups.
611 if (GoBackwardsCB && &ToFn != FromI.getFunction() &&
612 !GoBackwardsCB(*FromI.getFunction()) && ToFn.hasFnAttribute("kernel") &&
613 FromI.getFunction()->hasFnAttribute("kernel")) {
614 LLVM_DEBUG(dbgs() << "[AA] assume kernel cannot be reached from within the "
615 "module; success\n";);
616 return false;
617 }
618
619 // If we can go arbitrarily backwards we will eventually reach an entry point
620 // that can reach ToI. Only if a set of blocks through which we cannot go is
621 // provided, or once we track internal functions not accessible from the
622 // outside, it makes sense to perform backwards analysis in the absence of a
623 // GoBackwardsCB.
624 if (!GoBackwardsCB && !ExclusionSet) {
625 LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
626 << " is not checked backwards and does not have an "
627 "exclusion set, abort\n");
628 return true;
  }

  SmallPtrSet<const Instruction *, 8> Visited;
  SmallVector<const Instruction *> Worklist;
  Worklist.push_back(&FromI);
634
635 while (!Worklist.empty()) {
636 const Instruction *CurFromI = Worklist.pop_back_val();
637 if (!Visited.insert(CurFromI).second)
638 continue;
639
640 const Function *FromFn = CurFromI->getFunction();
641 if (FromFn == &ToFn) {
642 if (!ToI)
643 return true;
644 LLVM_DEBUG(dbgs() << "[AA] check " << *ToI << " from " << *CurFromI
645 << " intraprocedurally\n");
646 const auto &ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
647 QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
648 bool Result =
649 ReachabilityAA.isAssumedReachable(A, *CurFromI, *ToI, ExclusionSet);
650 LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " "
651 << (Result ? "can potentially " : "cannot ") << "reach "
652 << *ToI << " [Intra]\n");
653 if (Result)
654 return true;
655 }
656
657 bool Result = true;
658 if (!ToFn.isDeclaration() && ToI) {
659 const auto &ToReachabilityAA = A.getAAFor<AAIntraFnReachability>(
660 QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
661 const Instruction &EntryI = ToFn.getEntryBlock().front();
662 Result =
663 ToReachabilityAA.isAssumedReachable(A, EntryI, *ToI, ExclusionSet);
664 LLVM_DEBUG(dbgs() << "[AA] Entry " << EntryI << " of @" << ToFn.getName()
665 << " " << (Result ? "can potentially " : "cannot ")
666 << "reach @" << *ToI << " [ToFn]\n");
667 }
668
669 if (Result) {
      // The entry of the ToFn can reach the instruction ToI. Check whether the
      // current instruction is known to reach the ToFn.
672 const auto &FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
673 QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
674 Result = FnReachabilityAA.instructionCanReach(A, *CurFromI, ToFn,
675 ExclusionSet);
676 LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
677 << " " << (Result ? "can potentially " : "cannot ")
678 << "reach @" << ToFn.getName() << " [FromFn]\n");
679 if (Result)
680 return true;
681 }
682
683 // TODO: Check assumed nounwind.
684 const auto &ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
685 QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
686 auto ReturnInstCB = [&](Instruction &Ret) {
687 bool Result =
688 ReachabilityAA.isAssumedReachable(A, *CurFromI, Ret, ExclusionSet);
689 LLVM_DEBUG(dbgs() << "[AA][Ret] " << *CurFromI << " "
690 << (Result ? "can potentially " : "cannot ") << "reach "
691 << Ret << " [Intra]\n");
692 return !Result;
693 };
694
695 // Check if we can reach returns.
696 bool UsedAssumedInformation = false;
697 if (A.checkForAllInstructions(ReturnInstCB, FromFn, QueryingAA,
698 {Instruction::Ret}, UsedAssumedInformation)) {
699 LLVM_DEBUG(dbgs() << "[AA] No return is reachable, done\n");
700 continue;
701 }
702
703 if (!GoBackwardsCB) {
704 LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
705 << " is not checked backwards, abort\n");
706 return true;
707 }
708
709 // If we do not go backwards from the FromFn we are done here and so far we
710 // could not find a way to reach ToFn/ToI.
711 if (!GoBackwardsCB(*FromFn))
712 continue;
713
714 LLVM_DEBUG(dbgs() << "Stepping backwards to the call sites of @"
715 << FromFn->getName() << "\n");
716
717 auto CheckCallSite = [&](AbstractCallSite ACS) {
718 CallBase *CB = ACS.getInstruction();
719 if (!CB)
720 return false;
721
722 if (isa<InvokeInst>(CB))
723 return false;
724
      Instruction *Inst = CB->getNextNonDebugInstruction();
      Worklist.push_back(Inst);
727 return true;
728 };
729
730 Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
731 /* RequireAllCallSites */ true,
732 &QueryingAA, UsedAssumedInformation);
733 if (Result) {
734 LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
735 << " in @" << FromFn->getName()
736 << " failed, give up\n");
737 return true;
738 }
739
740 LLVM_DEBUG(dbgs() << "[AA] stepped back to call sites from " << *CurFromI
741 << " in @" << FromFn->getName()
742 << " worklist size is: " << Worklist.size() << "\n");
743 }
744 return false;
745}
746
bool AA::isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Instruction &ToI,
749 const AbstractAttribute &QueryingAA,
750 const AA::InstExclusionSetTy *ExclusionSet,
751 std::function<bool(const Function &F)> GoBackwardsCB) {
752 const Function *ToFn = ToI.getFunction();
753 return ::isPotentiallyReachable(A, FromI, &ToI, *ToFn, QueryingAA,
754 ExclusionSet, GoBackwardsCB);
755}
756
bool AA::isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Function &ToFn,
759 const AbstractAttribute &QueryingAA,
760 const AA::InstExclusionSetTy *ExclusionSet,
761 std::function<bool(const Function &F)> GoBackwardsCB) {
762 return ::isPotentiallyReachable(A, FromI, /* ToI */ nullptr, ToFn, QueryingAA,
763 ExclusionSet, GoBackwardsCB);
764}
765
bool AA::isAssumedThreadLocalObject(Attributor &A, Value &Obj,
                                    const AbstractAttribute &QueryingAA) {
768 if (isa<UndefValue>(Obj))
769 return true;
770 if (isa<AllocaInst>(Obj)) {
771 InformationCache &InfoCache = A.getInfoCache();
772 if (!InfoCache.stackIsAccessibleByOtherThreads()) {
      LLVM_DEBUG(
          dbgs() << "[AA] Object '" << Obj
775 << "' is thread local; stack objects are thread local.\n");
776 return true;
777 }
778 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
779 QueryingAA, IRPosition::value(Obj), DepClassTy::OPTIONAL);
780 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is "
781 << (NoCaptureAA.isAssumedNoCapture() ? "" : "not")
782 << " thread local; "
783 << (NoCaptureAA.isAssumedNoCapture() ? "non-" : "")
784 << "captured stack object.\n");
785 return NoCaptureAA.isAssumedNoCapture();
786 }
787 if (auto *GV = dyn_cast<GlobalVariable>(&Obj)) {
788 if (GV->isConstant()) {
789 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
790 << "' is thread local; constant global\n");
791 return true;
792 }
793 if (GV->isThreadLocal()) {
794 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
795 << "' is thread local; thread local global\n");
796 return true;
797 }
798 }
799
800 if (A.getInfoCache().targetIsGPU()) {
801 if (Obj.getType()->getPointerAddressSpace() ==
802 (int)AA::GPUAddressSpace::Local) {
803 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
804 << "' is thread local; GPU local memory\n");
805 return true;
806 }
807 if (Obj.getType()->getPointerAddressSpace() ==
808 (int)AA::GPUAddressSpace::Constant) {
809 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
810 << "' is thread local; GPU constant memory\n");
811 return true;
812 }
813 }
814
815 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is not thread local\n");
816 return false;
817}
818
bool AA::isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I,
                                        const AbstractAttribute &QueryingAA) {
821 if (!I.mayHaveSideEffects() && !I.mayReadFromMemory())
    return false;

  SmallSetVector<const Value *, 4> Ptrs;

826 auto AddLocationPtr = [&](std::optional<MemoryLocation> Loc) {
827 if (!Loc || !Loc->Ptr) {
      LLVM_DEBUG(
          dbgs() << "[AA] Access to unknown location; -> requires barriers\n");
830 return false;
831 }
832 Ptrs.insert(Loc->Ptr);
833 return true;
834 };
835
836 if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&I)) {
837 if (!AddLocationPtr(MemoryLocation::getForDest(MI)))
838 return true;
839 if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(&I))
840 if (!AddLocationPtr(MemoryLocation::getForSource(MTI)))
841 return true;
842 } else if (!AddLocationPtr(MemoryLocation::getOrNone(&I)))
843 return true;
844
845 return isPotentiallyAffectedByBarrier(A, Ptrs.getArrayRef(), QueryingAA, &I);
846}
847
bool AA::isPotentiallyAffectedByBarrier(Attributor &A,
                                        ArrayRef<const Value *> Ptrs,
                                        const AbstractAttribute &QueryingAA,
851 const Instruction *CtxI) {
852 for (const Value *Ptr : Ptrs) {
853 if (!Ptr) {
854 LLVM_DEBUG(dbgs() << "[AA] nullptr; -> requires barriers\n");
855 return true;
856 }
857
858 auto Pred = [&](Value &Obj) {
859 if (AA::isAssumedThreadLocalObject(A, Obj, QueryingAA))
860 return true;
861 LLVM_DEBUG(dbgs() << "[AA] Access to '" << Obj << "' via '" << *Ptr
862 << "'; -> requires barrier\n");
863 return false;
864 };
865
866 const auto &UnderlyingObjsAA = A.getAAFor<AAUnderlyingObjects>(
867 QueryingAA, IRPosition::value(*Ptr), DepClassTy::OPTIONAL);
868 if (!UnderlyingObjsAA.forallUnderlyingObjects(Pred))
869 return true;
870 }
871 return false;
872}
873
874/// Return true if \p New is equal or worse than \p Old.
875static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
876 if (!Old.isIntAttribute())
877 return true;
878
879 return Old.getValueAsInt() >= New.getValueAsInt();
880}
881
882/// Return true if the information provided by \p Attr was added to the
883/// attribute list \p Attrs. This is only the case if it was not already present
/// in \p Attrs at the position described by \p PK and \p AttrIdx.
885static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
886 AttributeList &Attrs, int AttrIdx,
887 bool ForceReplace = false) {
888
889 if (Attr.isEnumAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
892 if (!ForceReplace &&
893 isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
894 return false;
895 Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
896 return true;
897 }
898 if (Attr.isStringAttribute()) {
899 StringRef Kind = Attr.getKindAsString();
900 if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
901 if (!ForceReplace &&
902 isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
903 return false;
904 Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
905 return true;
906 }
907 if (Attr.isIntAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
910 if (!ForceReplace &&
911 isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
912 return false;
913 Attrs = Attrs.removeAttributeAtIndex(Ctx, AttrIdx, Kind);
914 Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
915 return true;
916 }
917
918 llvm_unreachable("Expected enum or string attribute!");
919}
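// Example of the "equal or worse" logic above for integer attributes (an
// illustrative note): an existing `dereferenceable(16)` is not replaced by a
// deduced `dereferenceable(8)` because 16 >= 8, whereas a deduced
// `dereferenceable(32)` is added since it strictly improves the old value.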
920
Argument *IRPosition::getAssociatedArgument() const {
  if (getPositionKind() == IRP_ARGUMENT)
    return cast<Argument>(&getAnchorValue());
924
925 // Not an Argument and no argument number means this is not a call site
926 // argument, thus we cannot find a callback argument to return.
927 int ArgNo = getCallSiteArgNo();
928 if (ArgNo < 0)
929 return nullptr;
930
931 // Use abstract call sites to make the connection between the call site
932 // values and the ones in callbacks. If a callback was found that makes use
933 // of the underlying call site operand, we want the corresponding callback
934 // callee argument and not the direct callee argument.
935 std::optional<Argument *> CBCandidateArg;
936 SmallVector<const Use *, 4> CallbackUses;
937 const auto &CB = cast<CallBase>(getAnchorValue());
938 AbstractCallSite::getCallbackUses(CB, CallbackUses);
939 for (const Use *U : CallbackUses) {
940 AbstractCallSite ACS(U);
941 assert(ACS && ACS.isCallbackCall());
942 if (!ACS.getCalledFunction())
943 continue;
944
945 for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {
946
947 // Test if the underlying call site operand is argument number u of the
948 // callback callee.
949 if (ACS.getCallArgOperandNo(u) != ArgNo)
950 continue;
951
952 assert(ACS.getCalledFunction()->arg_size() > u &&
953 "ACS mapped into var-args arguments!");
954 if (CBCandidateArg) {
955 CBCandidateArg = nullptr;
956 break;
957 }
958 CBCandidateArg = ACS.getCalledFunction()->getArg(u);
959 }
960 }
961
962 // If we found a unique callback candidate argument, return it.
963 if (CBCandidateArg && *CBCandidateArg)
964 return *CBCandidateArg;
965
966 // If no callbacks were found, or none used the underlying call site operand
967 // exclusively, use the direct callee argument if available.
968 const Function *Callee = CB.getCalledFunction();
969 if (Callee && Callee->arg_size() > unsigned(ArgNo))
970 return Callee->getArg(ArgNo);
971
972 return nullptr;
973}
974
ChangeStatus AbstractAttribute::update(Attributor &A) {
  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  if (getState().isAtFixpoint())
978 return HasChanged;
979
980 LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
981
982 HasChanged = updateImpl(A);
983
984 LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
985 << "\n");
986
987 return HasChanged;
988}
989
992 const ArrayRef<Attribute> &DeducedAttrs,
993 bool ForceReplace) {
994 Function *ScopeFn = IRP.getAnchorScope();
996
997 // In the following some generic code that will manifest attributes in
998 // DeducedAttrs if they improve the current IR. Due to the different
999 // annotation positions we use the underlying AttributeList interface.
1000
1001 AttributeList Attrs;
1002 switch (PK) {
1009 Attrs = ScopeFn->getAttributes();
1010 break;
1014 Attrs = cast<CallBase>(IRP.getAnchorValue()).getAttributes();
1015 break;
1016 }
1017
  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
1020 for (const Attribute &Attr : DeducedAttrs) {
1021 if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx(), ForceReplace))
1022 continue;
1023
1024 HasChanged = ChangeStatus::CHANGED;
1025 }
1026
1027 if (HasChanged == ChangeStatus::UNCHANGED)
1028 return HasChanged;
1029
1030 switch (PK) {
1034 ScopeFn->setAttributes(Attrs);
1035 break;
1039 cast<CallBase>(IRP.getAnchorValue()).setAttributes(Attrs);
1040 break;
1043 break;
1044 }
1045
1046 return HasChanged;
1047}
1048
const IRPosition IRPosition::EmptyKey(DenseMapInfo<void *>::getEmptyKey());
const IRPosition
    IRPosition::TombstoneKey(DenseMapInfo<void *>::getTombstoneKey());

SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
  IRPositions.emplace_back(IRP);
1055
  // Helper to determine if operand bundles on a call site are benign or
1057 // potentially problematic. We handle only llvm.assume for now.
1058 auto CanIgnoreOperandBundles = [](const CallBase &CB) {
1059 return (isa<IntrinsicInst>(CB) &&
            cast<IntrinsicInst>(CB).getIntrinsicID() == Intrinsic::assume);
1061 };
1062
1063 const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
1064 switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_FUNCTION:
    return;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
    IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
1072 return;
  case IRPosition::IRP_CALL_SITE:
    assert(CB && "Expected call site!");
1075 // TODO: We need to look at the operand bundles similar to the redirection
1076 // in CallBase.
1077 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB))
1078 if (const Function *Callee = CB->getCalledFunction())
1079 IRPositions.emplace_back(IRPosition::function(*Callee));
1080 return;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    assert(CB && "Expected call site!");
1083 // TODO: We need to look at the operand bundles similar to the redirection
1084 // in CallBase.
1085 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1086 if (const Function *Callee = CB->getCalledFunction()) {
1087 IRPositions.emplace_back(IRPosition::returned(*Callee));
1088 IRPositions.emplace_back(IRPosition::function(*Callee));
1089 for (const Argument &Arg : Callee->args())
1090 if (Arg.hasReturnedAttr()) {
1091 IRPositions.emplace_back(
1092 IRPosition::callsite_argument(*CB, Arg.getArgNo()));
1093 IRPositions.emplace_back(
1094 IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
1095 IRPositions.emplace_back(IRPosition::argument(Arg));
1096 }
1097 }
1098 }
1099 IRPositions.emplace_back(IRPosition::callsite_function(*CB));
1100 return;
  case IRPosition::IRP_CALL_SITE_ARGUMENT: {
    assert(CB && "Expected call site!");
1103 // TODO: We need to look at the operand bundles similar to the redirection
1104 // in CallBase.
1105 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1106 const Function *Callee = CB->getCalledFunction();
1107 if (Callee) {
1108 if (Argument *Arg = IRP.getAssociatedArgument())
1109 IRPositions.emplace_back(IRPosition::argument(*Arg));
1110 IRPositions.emplace_back(IRPosition::function(*Callee));
1111 }
1112 }
1113 IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
1114 return;
1115 }
1116 }
1117}
1118
bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
                         bool IgnoreSubsumingPositions, Attributor *A) const {
  SmallVector<Attribute, 4> Attrs;
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
1123 for (Attribute::AttrKind AK : AKs)
1124 if (EquivIRP.getAttrsFromIRAttr(AK, Attrs))
1125 return true;
1126 // The first position returned by the SubsumingPositionIterator is
1127 // always the position itself. If we ignore subsuming positions we
1128 // are done after the first iteration.
1129 if (IgnoreSubsumingPositions)
1130 break;
1131 }
1132 if (A)
1133 for (Attribute::AttrKind AK : AKs)
1134 if (getAttrsFromAssumes(AK, Attrs, *A))
1135 return true;
1136 return false;
1137}
1138
void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
                          SmallVectorImpl<Attribute> &Attrs,
                          bool IgnoreSubsumingPositions, Attributor *A) const {
1142 for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
1143 for (Attribute::AttrKind AK : AKs)
1144 EquivIRP.getAttrsFromIRAttr(AK, Attrs);
1145 // The first position returned by the SubsumingPositionIterator is
1146 // always the position itself. If we ignore subsuming positions we
1147 // are done after the first iteration.
1148 if (IgnoreSubsumingPositions)
1149 break;
1150 }
1151 if (A)
1152 for (Attribute::AttrKind AK : AKs)
1153 getAttrsFromAssumes(AK, Attrs, *A);
1154}
1155
1156bool IRPosition::getAttrsFromIRAttr(Attribute::AttrKind AK,
1157 SmallVectorImpl<Attribute> &Attrs) const {
  if (getPositionKind() == IRP_INVALID || getPositionKind() == IRP_FLOAT)
    return false;
1160
1161 AttributeList AttrList;
1162 if (const auto *CB = dyn_cast<CallBase>(&getAnchorValue()))
1163 AttrList = CB->getAttributes();
1164 else
1165 AttrList = getAssociatedFunction()->getAttributes();
1166
1167 bool HasAttr = AttrList.hasAttributeAtIndex(getAttrIdx(), AK);
1168 if (HasAttr)
1169 Attrs.push_back(AttrList.getAttributeAtIndex(getAttrIdx(), AK));
1170 return HasAttr;
1171}
1172
1173bool IRPosition::getAttrsFromAssumes(Attribute::AttrKind AK,
                                     SmallVectorImpl<Attribute> &Attrs,
                                     Attributor &A) const {
1176 assert(getPositionKind() != IRP_INVALID && "Did expect a valid position!");
1177 Value &AssociatedValue = getAssociatedValue();
1178
1179 const Assume2KnowledgeMap &A2K =
1180 A.getInfoCache().getKnowledgeMap().lookup({&AssociatedValue, AK});
1181
1182 // Check if we found any potential assume use, if not we don't need to create
1183 // explorer iterators.
1184 if (A2K.empty())
1185 return false;
1186
1187 LLVMContext &Ctx = AssociatedValue.getContext();
1188 unsigned AttrsSize = Attrs.size();
  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();
1191 auto EIt = Explorer.begin(getCtxI()), EEnd = Explorer.end(getCtxI());
1192 for (const auto &It : A2K)
1193 if (Explorer.findInContextOf(It.first, EIt, EEnd))
1194 Attrs.push_back(Attribute::get(Ctx, AK, It.second.Max));
1195 return AttrsSize != Attrs.size();
1196}
1197
1198void IRPosition::verify() {
1199#ifdef EXPENSIVE_CHECKS
1200 switch (getPositionKind()) {
1201 case IRP_INVALID:
1202 assert((CBContext == nullptr) &&
1203 "Invalid position must not have CallBaseContext!");
1204 assert(!Enc.getOpaqueValue() &&
1205 "Expected a nullptr for an invalid position!");
1206 return;
1207 case IRP_FLOAT:
1208 assert((!isa<Argument>(&getAssociatedValue())) &&
1209 "Expected specialized kind for argument values!");
1210 return;
1211 case IRP_RETURNED:
1212 assert(isa<Function>(getAsValuePtr()) &&
1213 "Expected function for a 'returned' position!");
1214 assert(getAsValuePtr() == &getAssociatedValue() &&
1215 "Associated value mismatch!");
1216 return;
  case IRP_CALL_SITE_RETURNED:
    assert((CBContext == nullptr) &&
1219 "'call site returned' position must not have CallBaseContext!");
1220 assert((isa<CallBase>(getAsValuePtr())) &&
1221 "Expected call base for 'call site returned' position!");
1222 assert(getAsValuePtr() == &getAssociatedValue() &&
1223 "Associated value mismatch!");
1224 return;
1225 case IRP_CALL_SITE:
1226 assert((CBContext == nullptr) &&
1227 "'call site function' position must not have CallBaseContext!");
1228 assert((isa<CallBase>(getAsValuePtr())) &&
1229 "Expected call base for 'call site function' position!");
1230 assert(getAsValuePtr() == &getAssociatedValue() &&
1231 "Associated value mismatch!");
1232 return;
1233 case IRP_FUNCTION:
1234 assert(isa<Function>(getAsValuePtr()) &&
1235 "Expected function for a 'function' position!");
1236 assert(getAsValuePtr() == &getAssociatedValue() &&
1237 "Associated value mismatch!");
1238 return;
1239 case IRP_ARGUMENT:
1240 assert(isa<Argument>(getAsValuePtr()) &&
1241 "Expected argument for a 'argument' position!");
1242 assert(getAsValuePtr() == &getAssociatedValue() &&
1243 "Associated value mismatch!");
1244 return;
  case IRP_CALL_SITE_ARGUMENT: {
    assert((CBContext == nullptr) &&
1247 "'call site argument' position must not have CallBaseContext!");
1248 Use *U = getAsUsePtr();
1249 (void)U; // Silence unused variable warning.
1250 assert(U && "Expected use for a 'call site argument' position!");
1251 assert(isa<CallBase>(U->getUser()) &&
1252 "Expected call base user for a 'call site argument' position!");
1253 assert(cast<CallBase>(U->getUser())->isArgOperand(U) &&
1254 "Expected call base argument operand for a 'call site argument' "
1255 "position");
1256 assert(cast<CallBase>(U->getUser())->getArgOperandNo(U) ==
1257 unsigned(getCallSiteArgNo()) &&
1258 "Argument number mismatch!");
1259 assert(U->get() == &getAssociatedValue() && "Associated value mismatch!");
1260 return;
1261 }
1262 }
1263#endif
1264}
1265
1266std::optional<Constant *>
Attributor::getAssumedConstant(const IRPosition &IRP,
                               const AbstractAttribute &AA,
1269 bool &UsedAssumedInformation) {
1270 // First check all callbacks provided by outside AAs. If any of them returns
1271 // a non-null value that is different from the associated value, or
1272 // std::nullopt, we assume it's simplified.
1273 for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
1274 std::optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
1275 if (!SimplifiedV)
1276 return std::nullopt;
1277 if (isa_and_nonnull<Constant>(*SimplifiedV))
1278 return cast<Constant>(*SimplifiedV);
1279 return nullptr;
1280 }
1281 if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
1282 return C;
  SmallVector<AA::ValueAndContext> Values;
  if (getAssumedSimplifiedValues(IRP, &AA, Values,
                                 AA::ValueScope::Interprocedural,
                                 UsedAssumedInformation)) {
1287 if (Values.empty())
1288 return std::nullopt;
1289 if (auto *C = dyn_cast_or_null<Constant>(
1290 AAPotentialValues::getSingleValue(*this, AA, IRP, Values)))
1291 return C;
1292 }
1293 return nullptr;
1294}
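// Note on the return value above (and of getAssumedSimplified below), a
// descriptive summary of the code rather than new behavior:
//   std::nullopt - the position simplifies to "no value" so far (e.g. all
//                  potential values are still assumed dead),
//   nullptr      - no constant/simplified value could be determined,
//   C / V        - the simplified constant or value.
// UsedAssumedInformation is set when the answer relies on assumed (non-fixed)
// information and may therefore change in later fixpoint iterations.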
1295
std::optional<Value *> Attributor::getAssumedSimplified(
    const IRPosition &IRP, const AbstractAttribute *AA,
1298 bool &UsedAssumedInformation, AA::ValueScope S) {
1299 // First check all callbacks provided by outside AAs. If any of them returns
1300 // a non-null value that is different from the associated value, or
1301 // std::nullopt, we assume it's simplified.
1302 for (auto &CB : SimplificationCallbacks.lookup(IRP))
1303 return CB(IRP, AA, UsedAssumedInformation);

  SmallVector<AA::ValueAndContext> Values;
  if (!getAssumedSimplifiedValues(IRP, AA, Values, S, UsedAssumedInformation))
1307 return &IRP.getAssociatedValue();
1308 if (Values.empty())
1309 return std::nullopt;
1310 if (AA)
1311 if (Value *V = AAPotentialValues::getSingleValue(*this, *AA, IRP, Values))
1312 return V;
  if (IRP.getPositionKind() == IRPosition::IRP_RETURNED ||
      IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED)
    return nullptr;
1316 return &IRP.getAssociatedValue();
1317}
1318
bool Attributor::getAssumedSimplifiedValues(
    const IRPosition &IRP, const AbstractAttribute *AA,
    SmallVectorImpl<AA::ValueAndContext> &Values, AA::ValueScope S,
    bool &UsedAssumedInformation) {
1323 // First check all callbacks provided by outside AAs. If any of them returns
1324 // a non-null value that is different from the associated value, or
1325 // std::nullopt, we assume it's simplified.
1326 const auto &SimplificationCBs = SimplificationCallbacks.lookup(IRP);
1327 for (const auto &CB : SimplificationCBs) {
1328 std::optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
1329 if (!CBResult.has_value())
1330 continue;
1331 Value *V = *CBResult;
1332 if (!V)
1333 return false;
    if ((S & AA::ValueScope::Interprocedural) ||
        AA::isValidInScope(*V, IRP.getAnchorScope()))
      Values.push_back(AA::ValueAndContext{*V, nullptr});
1337 else
1338 return false;
1339 }
1340 if (!SimplificationCBs.empty())
1341 return true;
1342
1343 // If no high-level/outside simplification occurred, use AAPotentialValues.
1344 const auto &PotentialValuesAA =
1345 getOrCreateAAFor<AAPotentialValues>(IRP, AA, DepClassTy::OPTIONAL);
1346 if (!PotentialValuesAA.getAssumedSimplifiedValues(*this, Values, S))
1347 return false;
1348 UsedAssumedInformation |= !PotentialValuesAA.isAtFixpoint();
1349 return true;
1350}
1351
std::optional<Value *> Attributor::translateArgumentToCallSiteContent(
    std::optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
1354 bool &UsedAssumedInformation) {
1355 if (!V)
1356 return V;
1357 if (*V == nullptr || isa<Constant>(*V))
1358 return V;
1359 if (auto *Arg = dyn_cast<Argument>(*V))
1360 if (CB.getCalledFunction() == Arg->getParent())
1361 if (!Arg->hasPointeeInMemoryValueAttr())
1362 return getAssumedSimplified(
1363 IRPosition::callsite_argument(CB, Arg->getArgNo()), AA,
1364 UsedAssumedInformation, AA::Intraprocedural);
1365 return nullptr;
1366}
1367
Attributor::~Attributor() {
  // The abstract attributes are allocated via the BumpPtrAllocator Allocator,
1370 // thus we cannot delete them. We can, and want to, destruct them though.
1371 for (auto &It : AAMap) {
1372 AbstractAttribute *AA = It.getSecond();
1373 AA->~AbstractAttribute();
1374 }
1375}
1376
bool Attributor::isAssumedDead(const AbstractAttribute &AA,
                               const AAIsDead *FnLivenessAA,
1379 bool &UsedAssumedInformation,
1380 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1381 const IRPosition &IRP = AA.getIRPosition();
1382 if (!Functions.count(IRP.getAnchorScope()))
1383 return false;
1384 return isAssumedDead(IRP, &AA, FnLivenessAA, UsedAssumedInformation,
1385 CheckBBLivenessOnly, DepClass);
1386}
1387
bool Attributor::isAssumedDead(const Use &U,
                               const AbstractAttribute *QueryingAA,
1390 const AAIsDead *FnLivenessAA,
1391 bool &UsedAssumedInformation,
1392 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1393 Instruction *UserI = dyn_cast<Instruction>(U.getUser());
1394 if (!UserI)
1395 return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
1396 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1397
1398 if (auto *CB = dyn_cast<CallBase>(UserI)) {
1399 // For call site argument uses we can check if the argument is
1400 // unused/dead.
1401 if (CB->isArgOperand(&U)) {
1402 const IRPosition &CSArgPos =
1403 IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
1404 return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
1405 UsedAssumedInformation, CheckBBLivenessOnly,
1406 DepClass);
1407 }
1408 } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
1409 const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
1410 return isAssumedDead(RetPos, QueryingAA, FnLivenessAA,
1411 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1412 } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
1413 BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
1414 return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
1415 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1416 } else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
1417 if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
1418 const IRPosition IRP = IRPosition::inst(*SI);
1419 const AAIsDead &IsDeadAA =
1420 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1421 if (IsDeadAA.isRemovableStore()) {
1422 if (QueryingAA)
1423 recordDependence(IsDeadAA, *QueryingAA, DepClass);
1424 if (!IsDeadAA.isKnown(AAIsDead::IS_REMOVABLE))
1425 UsedAssumedInformation = true;
1426 return true;
1427 }
1428 }
1429 }
1430
1431 return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
1432 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1433}
1434
bool Attributor::isAssumedDead(const Instruction &I,
                               const AbstractAttribute *QueryingAA,
1437 const AAIsDead *FnLivenessAA,
1438 bool &UsedAssumedInformation,
1439 bool CheckBBLivenessOnly, DepClassTy DepClass,
1440 bool CheckForDeadStore) {
1441 const IRPosition::CallBaseContext *CBCtx =
1442 QueryingAA ? QueryingAA->getCallBaseContext() : nullptr;
1443
1444 if (ManifestAddedBlocks.contains(I.getParent()))
1445 return false;
1446
1447 const Function &F = *I.getFunction();
1448 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1449 FnLivenessAA = &getOrCreateAAFor<AAIsDead>(IRPosition::function(F, CBCtx),
1450 QueryingAA, DepClassTy::NONE);
1451
1452 // Don't use recursive reasoning.
1453 if (QueryingAA == FnLivenessAA)
1454 return false;
1455
1456 // If we have a context instruction and a liveness AA we use it.
1457 if (CheckBBLivenessOnly ? FnLivenessAA->isAssumedDead(I.getParent())
1458 : FnLivenessAA->isAssumedDead(&I)) {
1459 if (QueryingAA)
1460 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1461 if (!FnLivenessAA->isKnownDead(&I))
1462 UsedAssumedInformation = true;
1463 return true;
1464 }
1465
1466 if (CheckBBLivenessOnly)
1467 return false;
1468
1469 const IRPosition IRP = IRPosition::inst(I, CBCtx);
1470 const AAIsDead &IsDeadAA =
1471 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1472
1473 // Don't use recursive reasoning.
1474 if (QueryingAA == &IsDeadAA)
1475 return false;
1476
1477 if (IsDeadAA.isAssumedDead()) {
1478 if (QueryingAA)
1479 recordDependence(IsDeadAA, *QueryingAA, DepClass);
1480 if (!IsDeadAA.isKnownDead())
1481 UsedAssumedInformation = true;
1482 return true;
1483 }
1484
1485 if (CheckForDeadStore && isa<StoreInst>(I) && IsDeadAA.isRemovableStore()) {
1486 if (QueryingAA)
1487 recordDependence(IsDeadAA, *QueryingAA, DepClass);
1488 if (!IsDeadAA.isKnownDead())
1489 UsedAssumedInformation = true;
1490 return true;
1491 }
1492
1493 return false;
1494}
1495
bool Attributor::isAssumedDead(const IRPosition &IRP,
                               const AbstractAttribute *QueryingAA,
1498 const AAIsDead *FnLivenessAA,
1499 bool &UsedAssumedInformation,
1500 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1501 // Don't check liveness for constants, e.g. functions, used as (floating)
1502 // values since the context instruction and such is here meaningless.
  if (IRP.getPositionKind() == IRPosition::IRP_FLOAT &&
      isa<Constant>(IRP.getAssociatedValue())) {
1505 return false;
1506 }
1507
1508 Instruction *CtxI = IRP.getCtxI();
1509 if (CtxI &&
1510 isAssumedDead(*CtxI, QueryingAA, FnLivenessAA, UsedAssumedInformation,
1511 /* CheckBBLivenessOnly */ true,
1512 CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
1513 return true;
1514
1515 if (CheckBBLivenessOnly)
1516 return false;
1517
1518 // If we haven't succeeded we query the specific liveness info for the IRP.
1519 const AAIsDead *IsDeadAA;
  if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE)
    IsDeadAA = &getOrCreateAAFor<AAIsDead>(
        IRPosition::callsite_returned(cast<CallBase>(IRP.getAssociatedValue())),
        QueryingAA, DepClassTy::NONE);
1524 else
1525 IsDeadAA = &getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1526
1527 // Don't use recursive reasoning.
1528 if (QueryingAA == IsDeadAA)
1529 return false;
1530
1531 if (IsDeadAA->isAssumedDead()) {
1532 if (QueryingAA)
1533 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1534 if (!IsDeadAA->isKnownDead())
1535 UsedAssumedInformation = true;
1536 return true;
1537 }
1538
1539 return false;
1540}
1541
bool Attributor::isAssumedDead(const BasicBlock &BB,
                               const AbstractAttribute *QueryingAA,
1544 const AAIsDead *FnLivenessAA,
1545 DepClassTy DepClass) {
1546 const Function &F = *BB.getParent();
1547 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1548 FnLivenessAA = &getOrCreateAAFor<AAIsDead>(IRPosition::function(F),
1549 QueryingAA, DepClassTy::NONE);
1550
1551 // Don't use recursive reasoning.
1552 if (QueryingAA == FnLivenessAA)
1553 return false;
1554
1555 if (FnLivenessAA->isAssumedDead(&BB)) {
1556 if (QueryingAA)
1557 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1558 return true;
1559 }
1560
1561 return false;
1562}
1563
bool Attributor::checkForAllUses(
    function_ref<bool(const Use &, bool &)> Pred,
1566 const AbstractAttribute &QueryingAA, const Value &V,
1567 bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
1568 bool IgnoreDroppableUses,
1569 function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {
1570
1571 // Check virtual uses first.
1572 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&V))
1573 if (!CB(*this, &QueryingAA))
1574 return false;
1575
1576 // Check the trivial case first as it catches void values.
1577 if (V.use_empty())
1578 return true;
1579
  const IRPosition &IRP = QueryingAA.getIRPosition();

  SmallVector<const Use *, 16> Worklist;
  SmallPtrSet<const Use *, 16> Visited;

1584 auto AddUsers = [&](const Value &V, const Use *OldUse) {
1585 for (const Use &UU : V.uses()) {
1586 if (OldUse && EquivalentUseCB && !EquivalentUseCB(*OldUse, UU)) {
1587 LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
1588 "rejected by the equivalence call back: "
1589 << *UU << "!\n");
1590 return false;
1591 }
1592
1593 Worklist.push_back(&UU);
1594 }
1595 return true;
1596 };
1597
1598 AddUsers(V, /* OldUse */ nullptr);
1599
1600 LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
1601 << " initial uses to check\n");
1602
1603 const Function *ScopeFn = IRP.getAnchorScope();
1604 const auto *LivenessAA =
1605 ScopeFn ? &getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
                                    DepClassTy::NONE)
              : nullptr;
1608
1609 while (!Worklist.empty()) {
1610 const Use *U = Worklist.pop_back_val();
1611 if (isa<PHINode>(U->getUser()) && !Visited.insert(U).second)
1612 continue;
    LLVM_DEBUG({
      if (auto *Fn = dyn_cast<Function>(U->getUser()))
1615 dbgs() << "[Attributor] Check use: " << **U << " in " << Fn->getName()
1616 << "\n";
1617 else
1618 dbgs() << "[Attributor] Check use: " << **U << " in " << *U->getUser()
1619 << "\n";
1620 });
1621 bool UsedAssumedInformation = false;
1622 if (isAssumedDead(*U, &QueryingAA, LivenessAA, UsedAssumedInformation,
1623 CheckBBLivenessOnly, LivenessDepClass)) {
      LLVM_DEBUG(dbgs() << "[Attributor] Dead use, skip!\n");
1626 continue;
1627 }
1628 if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
      LLVM_DEBUG(dbgs() << "[Attributor] Droppable user, skip!\n");
1631 continue;
1632 }
1633
1634 if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
1635 if (&SI->getOperandUse(0) == U) {
1636 if (!Visited.insert(U).second)
1637 continue;
1638 SmallSetVector<Value *, 4> PotentialCopies;
        if (AA::getPotentialCopiesOfStoredValue(
                *this, *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
1641 /* OnlyExact */ true)) {
          LLVM_DEBUG(
              dbgs()
1644 << "[Attributor] Value is stored, continue with "
1645 << PotentialCopies.size()
1646 << " potential copies instead!\n");
1647 for (Value *PotentialCopy : PotentialCopies)
1648 if (!AddUsers(*PotentialCopy, U))
1649 return false;
1650 continue;
1651 }
1652 }
1653 }
1654
1655 bool Follow = false;
1656 if (!Pred(*U, Follow))
1657 return false;
1658 if (!Follow)
1659 continue;
1660
1661 User &Usr = *U->getUser();
1662 AddUsers(Usr, /* OldUse */ nullptr);
1663
1664 auto *RI = dyn_cast<ReturnInst>(&Usr);
1665 if (!RI)
1666 continue;
1667
1668 Function &F = *RI->getFunction();
1669 auto CallSitePred = [&](AbstractCallSite ACS) {
1670 return AddUsers(*ACS.getInstruction(), U);
1671 };
1672 if (!checkForAllCallSites(CallSitePred, F, /* RequireAllCallSites */ true,
1673 &QueryingAA, UsedAssumedInformation)) {
1674 LLVM_DEBUG(dbgs() << "[Attributor] Could not follow return instruction "
1675 "to all call sites: "
1676 << *RI << "\n");
1677 return false;
1678 }
1679 }
1680
1681 return true;
1682}
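// Illustrative use of checkForAllUses above (a sketch only; `mayEscape` is a
// placeholder predicate, not an existing helper): visit all uses of a value,
// transparently following copies made through stores, and fail on any
// escaping use.
//
//   auto UsePred = [&](const Use &U, bool &Follow) {
//     if (isa<ICmpInst>(U.getUser()))
//       return true;        // Benign use, nothing to follow.
//     Follow = true;        // Keep exploring the user's uses.
//     return !mayEscape(U);
//   };
//   if (!A.checkForAllUses(UsePred, QueryingAA, V))
//     indicatePessimisticFixpoint();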
1683
bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
                                      const AbstractAttribute &QueryingAA,
1686 bool RequireAllCallSites,
1687 bool &UsedAssumedInformation) {
  // We can try to determine information from
  // the call sites. However, this is only possible if all call sites are
  // known, hence the function has internal linkage.
1691 const IRPosition &IRP = QueryingAA.getIRPosition();
1692 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1693 if (!AssociatedFunction) {
1694 LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
1695 << "\n");
1696 return false;
1697 }
1698
1699 return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
1700 &QueryingAA, UsedAssumedInformation);
1701}
1702
bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
                                      const Function &Fn,
1705 bool RequireAllCallSites,
1706 const AbstractAttribute *QueryingAA,
1707 bool &UsedAssumedInformation,
1708 bool CheckPotentiallyDead) {
1709 if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
1710 LLVM_DEBUG(
1711 dbgs()
1712 << "[Attributor] Function " << Fn.getName()
1713 << " has no internal linkage, hence not all call sites are known\n");
1714 return false;
1715 }
1716 // Check virtual uses first.
1717 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&Fn))
1718 if (!CB(*this, QueryingAA))
1719 return false;
1720
  SmallVector<const Use *, 8> Uses(make_pointer_range(Fn.uses()));
  for (unsigned u = 0; u < Uses.size(); ++u) {
1723 const Use &U = *Uses[u];
    LLVM_DEBUG({
      if (auto *Fn = dyn_cast<Function>(U))
1726 dbgs() << "[Attributor] Check use: " << Fn->getName() << " in "
1727 << *U.getUser() << "\n";
1728 else
1729 dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
1730 << "\n";
1731 });
1732 if (!CheckPotentiallyDead &&
1733 isAssumedDead(U, QueryingAA, nullptr, UsedAssumedInformation,
1734 /* CheckBBLivenessOnly */ true)) {
      LLVM_DEBUG(dbgs() << "[Attributor] Dead use, skip!\n");
1737 continue;
1738 }
1739 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
1740 if (CE->isCast() && CE->getType()->isPointerTy()) {
        LLVM_DEBUG({
          dbgs() << "[Attributor] Use, is constant cast expression, add "
1743 << CE->getNumUses() << " uses of that expression instead!\n";
1744 });
1745 for (const Use &CEU : CE->uses())
1746 Uses.push_back(&CEU);
1747 continue;
1748 }
1749 }
1750
1751 AbstractCallSite ACS(&U);
1752 if (!ACS) {
1753 LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
1754 << " has non call site use " << *U.get() << " in "
1755 << *U.getUser() << "\n");
1756 // BlockAddress users are allowed.
1757 if (isa<BlockAddress>(U.getUser()))
1758 continue;
1759 return false;
1760 }
1761
1762 const Use *EffectiveUse =
1763 ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
1764 if (!ACS.isCallee(EffectiveUse)) {
1765 if (!RequireAllCallSites) {
1766 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1767 << " is not a call of " << Fn.getName()
1768 << ", skip use\n");
1769 continue;
1770 }
1771 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1772 << " is an invalid use of " << Fn.getName() << "\n");
1773 return false;
1774 }
1775
1776 // Make sure the arguments that can be matched between the call site and
1777 // the callee agree on their type. It is unlikely they do not, and it does
1778 // not make sense for all attributes to know/care about this.
1779 assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
1780 unsigned MinArgsParams =
1781 std::min(size_t(ACS.getNumArgOperands()), Fn.arg_size());
1782 for (unsigned u = 0; u < MinArgsParams; ++u) {
1783 Value *CSArgOp = ACS.getCallArgOperand(u);
1784 if (CSArgOp && Fn.getArg(u)->getType() != CSArgOp->getType()) {
1785 LLVM_DEBUG(
1786 dbgs() << "[Attributor] Call site / callee argument type mismatch ["
1787 << u << "@" << Fn.getName() << ": "
1788 << *Fn.getArg(u)->getType() << " vs. "
1789 << *ACS.getCallArgOperand(u)->getType() << "\n");
1790 return false;
1791 }
1792 }
1793
1794 if (Pred(ACS))
1795 continue;
1796
1797 LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
1798 << *ACS.getInstruction() << "\n");
1799 return false;
1800 }
1801
1802 return true;
1803}
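//
// Illustrative sketch (not code from this file): an abstract attribute
// typically drives the call-site check above from its update method. The
// lambda body and the querying AA (`*this`) are hypothetical; only
// checkForAllCallSites and its parameters are taken from the interface above.
//
//   auto CallSitePred = [&](AbstractCallSite ACS) {
//     // Give up if the function ever escapes into a callback call site.
//     return !ACS.isCallbackCall();
//   };
//   bool UsedAssumedInformation = false;
//   if (!A.checkForAllCallSites(CallSitePred, *this,
//                               /* RequireAllCallSites */ true,
//                               UsedAssumedInformation))
//     return indicatePessimisticFixpoint();
//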
1804
1805bool Attributor::shouldPropagateCallBaseContext(const IRPosition &IRP) {
1806 // TODO: Maintain a cache of Values that are
1807 // on the pathway from an Argument to an Instruction that would affect the
1808 // liveness/return state etc.
1809 return EnableCallSiteSpecificOptimization;
1810}
1811
1812bool Attributor::checkForAllReturnedValuesAndReturnInsts(
1813 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred,
1814 const AbstractAttribute &QueryingAA) {
1815
1816 const IRPosition &IRP = QueryingAA.getIRPosition();
1817 // Since we need to provide return instructions we have to have an exact
1818 // definition.
1819 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1820 if (!AssociatedFunction)
1821 return false;
1822
1823 // If this is a call site query we use the call site specific return values
1824 // and liveness information.
1825 // TODO: use the function scope once we have call site AAReturnedValues.
1826 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
1827 const auto &AARetVal =
1828 getAAFor<AAReturnedValues>(QueryingAA, QueryIRP, DepClassTy::REQUIRED);
1829 if (!AARetVal.getState().isValidState())
1830 return false;
1831
1832 return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
1833}
1834
1835bool Attributor::checkForAllReturnedValues(
1836 function_ref<bool(Value &)> Pred, const AbstractAttribute &QueryingAA) {
1837
1838 const IRPosition &IRP = QueryingAA.getIRPosition();
1839 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1840 if (!AssociatedFunction)
1841 return false;
1842
1843 // TODO: use the function scope once we have call site AAReturnedValues.
1844 const IRPosition &QueryIRP = IRPosition::function(
1845 *AssociatedFunction, QueryingAA.getCallBaseContext());
1846 const auto &AARetVal =
1847 getAAFor<AAReturnedValues>(QueryingAA, QueryIRP, DepClassTy::REQUIRED);
1848 if (!AARetVal.getState().isValidState())
1849 return false;
1850
1851 return AARetVal.checkForAllReturnedValuesAndReturnInsts(
1852 [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
1853 return Pred(RV);
1854 });
1855}
1856
1857static bool checkForAllInstructionsImpl(
1858 Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap,
1859 function_ref<bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA,
1860 const AAIsDead *LivenessAA, const ArrayRef<unsigned> &Opcodes,
1861 bool &UsedAssumedInformation, bool CheckBBLivenessOnly = false,
1862 bool CheckPotentiallyDead = false) {
1863 for (unsigned Opcode : Opcodes) {
1864 // Check if we have instructions with this opcode at all first.
1865 auto *Insts = OpcodeInstMap.lookup(Opcode);
1866 if (!Insts)
1867 continue;
1868
1869 for (Instruction *I : *Insts) {
1870 // Skip dead instructions.
1871 if (A && !CheckPotentiallyDead &&
1872 A->isAssumedDead(IRPosition::inst(*I), QueryingAA, LivenessAA,
1873 UsedAssumedInformation, CheckBBLivenessOnly)) {
1875 dbgs() << "[Attributor] Instruction " << *I
1876 << " is potentially dead, skip!\n";);
1877 continue;
1878 }
1879
1880 if (!Pred(*I))
1881 return false;
1882 }
1883 }
1884 return true;
1885}
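//
// Illustrative sketch (assumption, not part of this file): abstract attributes
// reach the helper above through Attributor::checkForAllInstructions with an
// opcode filter, e.g. to visit only call-like instructions of the associated
// function. The predicate body is hypothetical.
//
//   auto InstPred = [&](Instruction &I) {
//     return !cast<CallBase>(I).isMustTailCall();
//   };
//   bool UsedAssumedInformation = false;
//   if (!A.checkForAllInstructions(InstPred, *this,
//                                  {(unsigned)Instruction::Call,
//                                   (unsigned)Instruction::Invoke},
//                                  UsedAssumedInformation))
//     return indicatePessimisticFixpoint();
//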
1886
1887bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
1888 const Function *Fn,
1889 const AbstractAttribute &QueryingAA,
1890 const ArrayRef<unsigned> &Opcodes,
1891 bool &UsedAssumedInformation,
1892 bool CheckBBLivenessOnly,
1893 bool CheckPotentiallyDead) {
1894 // Since we need to provide instructions we have to have an exact definition.
1895 if (!Fn || Fn->isDeclaration())
1896 return false;
1897
1898 // TODO: use the function scope once we have call site AAReturnedValues.
1899 const IRPosition &QueryIRP = IRPosition::function(*Fn);
1900 const auto *LivenessAA =
1901 CheckPotentiallyDead
1902 ? nullptr
1903 : &(getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE));
1904
1905 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
1906 if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, &QueryingAA,
1907 LivenessAA, Opcodes, UsedAssumedInformation,
1908 CheckBBLivenessOnly, CheckPotentiallyDead))
1909 return false;
1910
1911 return true;
1912}
1913
1914bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
1915 const AbstractAttribute &QueryingAA,
1916 const ArrayRef<unsigned> &Opcodes,
1917 bool &UsedAssumedInformation,
1918 bool CheckBBLivenessOnly,
1919 bool CheckPotentiallyDead) {
1920 const IRPosition &IRP = QueryingAA.getIRPosition();
1921 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1922 return checkForAllInstructions(Pred, AssociatedFunction, QueryingAA, Opcodes,
1923 UsedAssumedInformation, CheckBBLivenessOnly,
1924 CheckPotentiallyDead);
1925}
1926
1927bool Attributor::checkForAllReadWriteInstructions(
1928 function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA,
1929 bool &UsedAssumedInformation) {
1930
1931 const Function *AssociatedFunction =
1932 QueryingAA.getIRPosition().getAssociatedFunction();
1933 if (!AssociatedFunction)
1934 return false;
1935
1936 // TODO: use the function scope once we have call site AAReturnedValues.
1937 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
1938 const auto &LivenessAA =
1939 getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE);
1940
1941 for (Instruction *I :
1942 InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
1943 // Skip dead instructions.
1944 if (isAssumedDead(IRPosition::inst(*I), &QueryingAA, &LivenessAA,
1945 UsedAssumedInformation))
1946 continue;
1947
1948 if (!Pred(*I))
1949 return false;
1950 }
1951
1952 return true;
1953}
1954
1955void Attributor::runTillFixpoint() {
1956 TimeTraceScope TimeScope("Attributor::runTillFixpoint");
1957 LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
1958 << DG.SyntheticRoot.Deps.size()
1959 << " abstract attributes.\n");
1960
1961 // Now that all abstract attributes are collected and initialized we start
1962 // the abstract analysis.
1963
1964 unsigned IterationCounter = 1;
1965 unsigned MaxIterations =
1966 Configuration.MaxFixpointIterations.value_or(SetFixpointIterations);
1967
1968 SmallVector<AbstractAttribute *, 32> ChangedAAs;
1969 SetVector<AbstractAttribute *> Worklist, InvalidAAs;
1970 Worklist.insert(DG.SyntheticRoot.begin(), DG.SyntheticRoot.end());
1971
1972 do {
1973 // Remember the size to determine new attributes.
1974 size_t NumAAs = DG.SyntheticRoot.Deps.size();
1975 LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
1976 << ", Worklist size: " << Worklist.size() << "\n");
1977
1978 // For invalid AAs we can fix dependent AAs that have a required dependence,
1979 // thereby folding long dependence chains in a single step without the need
1980 // to run updates.
1981 for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
1982 AbstractAttribute *InvalidAA = InvalidAAs[u];
1983
1984 // Check the dependences to fast track invalidation.
1986 dbgs() << "[Attributor] InvalidAA: " << *InvalidAA
1987 << " has " << InvalidAA->Deps.size()
1988 << " required & optional dependences\n");
1989 for (auto &DepIt : InvalidAA->Deps) {
1990 AbstractAttribute *DepAA = cast<AbstractAttribute>(DepIt.getPointer());
1991 if (DepIt.getInt() == unsigned(DepClassTy::OPTIONAL)) {
1993 dbgs() << " - recompute: " << *DepAA);
1994 Worklist.insert(DepAA);
1995 continue;
1996 }
1997 LLVM_DEBUG(dbgs()
1998 << " - invalidate: " << *DepAA);
1999 DepAA->getState().indicatePessimisticFixpoint();
2000 assert(DepAA->getState().isAtFixpoint() && "Expected fixpoint state!");
2001 if (!DepAA->getState().isValidState())
2002 InvalidAAs.insert(DepAA);
2003 else
2004 ChangedAAs.push_back(DepAA);
2005 }
2006 InvalidAA->Deps.clear();
2007 }
2008
2009 // Add all abstract attributes that are potentially dependent on one that
2010 // changed to the work list.
2011 for (AbstractAttribute *ChangedAA : ChangedAAs) {
2012 for (auto &DepIt : ChangedAA->Deps)
2013 Worklist.insert(cast<AbstractAttribute>(DepIt.getPointer()));
2014 ChangedAA->Deps.clear();
2015 }
2016
2017 LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
2018 << ", Worklist+Dependent size: " << Worklist.size()
2019 << "\n");
2020
2021 // Reset the changed and invalid set.
2022 ChangedAAs.clear();
2023 InvalidAAs.clear();
2024
2025 // Update all abstract attributes in the work list and record the ones
2026 // that changed.
2027 for (AbstractAttribute *AA : Worklist) {
2028 const auto &AAState = AA->getState();
2029 if (!AAState.isAtFixpoint())
2030 if (updateAA(*AA) == ChangeStatus::CHANGED)
2031 ChangedAAs.push_back(AA);
2032
2033 // Use the InvalidAAs vector to quickly propagate invalid states
2034 // transitively without requiring updates.
2035 if (!AAState.isValidState())
2036 InvalidAAs.insert(AA);
2037 }
2038
2039 // Add attributes to the changed set if they have been created in the last
2040 // iteration.
2041 ChangedAAs.append(DG.SyntheticRoot.begin() + NumAAs,
2042 DG.SyntheticRoot.end());
2043
2044 // Reset the work list and repopulate with the changed abstract attributes.
2045 // Note that dependent ones are added above.
2046 Worklist.clear();
2047 Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
2048 Worklist.insert(QueryAAsAwaitingUpdate.begin(),
2049 QueryAAsAwaitingUpdate.end());
2050 QueryAAsAwaitingUpdate.clear();
2051
2052 } while (!Worklist.empty() &&
2053 (IterationCounter++ < MaxIterations || VerifyMaxFixpointIterations));
2054
2055 if (IterationCounter > MaxIterations && !Functions.empty()) {
2056 auto Remark = [&](OptimizationRemarkMissed ORM) {
2057 return ORM << "Attributor did not reach a fixpoint after "
2058 << ore::NV("Iterations", MaxIterations) << " iterations.";
2059 };
2060 Function *F = Functions.front();
2061 emitRemark<OptimizationRemarkMissed>(F, "FixedPoint", Remark);
2062 }
2063
2064 LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
2065 << IterationCounter << "/" << MaxIterations
2066 << " iterations\n");
2067
2068 // Reset abstract attributes not settled in a sound fixpoint by now. This
2069 // happens when we stopped the fixpoint iteration early. Note that only the
2070 // ones marked as "changed" *and* the ones transitively depending on them
2071 // need to be reverted to a pessimistic state. Others might not be in a
2072 // fixpoint state but we can use the optimistic results for them anyway.
2073 SmallPtrSet<AbstractAttribute *, 32> Visited;
2074 for (unsigned u = 0; u < ChangedAAs.size(); u++) {
2075 AbstractAttribute *ChangedAA = ChangedAAs[u];
2076 if (!Visited.insert(ChangedAA).second)
2077 continue;
2078
2079 AbstractState &State = ChangedAA->getState();
2080 if (!State.isAtFixpoint()) {
2081 State.indicatePessimisticFixpoint();
2082
2083 NumAttributesTimedOut++;
2084 }
2085
2086 for (auto &DepIt : ChangedAA->Deps)
2087 ChangedAAs.push_back(cast<AbstractAttribute>(DepIt.getPointer()));
2088 ChangedAA->Deps.clear();
2089 }
2090
2091 LLVM_DEBUG({
2092 if (!Visited.empty())
2093 dbgs() << "\n[Attributor] Finalized " << Visited.size()
2094 << " abstract attributes.\n";
2095 });
2096
2097 if (VerifyMaxFixpointIterations && IterationCounter != MaxIterations) {
2098 errs() << "\n[Attributor] Fixpoint iteration done after: "
2099 << IterationCounter << "/" << MaxIterations << " iterations\n";
2100 llvm_unreachable("The fixpoint was not reached with exactly the number of "
2101 "specified iterations!");
2102 }
2103}
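//
// Conceptual sketch of the scheduling scheme above (simplified, not the actual
// implementation): attributes are only re-queued when something they depend on
// changed, so the loop is a classic chaotic-iteration fixpoint.
//
//   Worklist = all abstract attributes;
//   while (!Worklist.empty() && Iteration++ < MaxIterations) {
//     Changed.clear();
//     for (AbstractAttribute *AA : Worklist)
//       if (updateAA(*AA) == ChangeStatus::CHANGED)
//         Changed.push_back(AA);
//     Worklist = all AAs that registered a dependence on an AA in Changed;
//   }
//   // Anything not at a fixpoint afterwards is forced to a pessimistic state.
//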
2104
2106 assert(AA.isQueryAA() &&
2107 "Non-query AAs should not be required to register for updates!");
2108 QueryAAsAwaitingUpdate.insert(&AA);
2109}
2110
2111ChangeStatus Attributor::manifestAttributes() {
2112 TimeTraceScope TimeScope("Attributor::manifestAttributes");
2113 size_t NumFinalAAs = DG.SyntheticRoot.Deps.size();
2114
2115 unsigned NumManifested = 0;
2116 unsigned NumAtFixpoint = 0;
2117 ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
2118 for (auto &DepAA : DG.SyntheticRoot.Deps) {
2119 AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
2120 AbstractState &State = AA->getState();
2121
2122 // If there is not already a fixpoint reached, we can now take the
2123 // optimistic state. This is correct because we enforced a pessimistic one
2124 // on abstract attributes that were transitively dependent on a changed one
2125 // already above.
2126 if (!State.isAtFixpoint())
2127 State.indicateOptimisticFixpoint();
2128
2129 // We must not manifest Attributes that use CallBase info.
2130 if (AA->hasCallBaseContext())
2131 continue;
2132 // If the state is invalid, we do not try to manifest it.
2133 if (!State.isValidState())
2134 continue;
2135
2136 if (AA->getCtxI() && !isRunOn(*AA->getAnchorScope()))
2137 continue;
2138
2139 // Skip dead code.
2140 bool UsedAssumedInformation = false;
2141 if (isAssumedDead(*AA, nullptr, UsedAssumedInformation,
2142 /* CheckBBLivenessOnly */ true))
2143 continue;
2144 // Use the manifest debug counter to allow skipping the manifestation of
2145 // AAs on request.
2146 if (!DebugCounter::shouldExecute(ManifestDBGCounter))
2147 continue;
2148 // Manifest the state and record if we changed the IR.
2149 ChangeStatus LocalChange = AA->manifest(*this);
2150 if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
2151 AA->trackStatistics();
2152 LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
2153 << "\n");
2154
2155 ManifestChange = ManifestChange | LocalChange;
2156
2157 NumAtFixpoint++;
2158 NumManifested += (LocalChange == ChangeStatus::CHANGED);
2159 }
2160
2161 (void)NumManifested;
2162 (void)NumAtFixpoint;
2163 LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
2164 << " arguments while " << NumAtFixpoint
2165 << " were in a valid fixpoint state\n");
2166
2167 NumAttributesManifested += NumManifested;
2168 NumAttributesValidFixpoint += NumAtFixpoint;
2169
2170 (void)NumFinalAAs;
2171 if (NumFinalAAs != DG.SyntheticRoot.Deps.size()) {
2172 auto DepIt = DG.SyntheticRoot.Deps.begin();
2173 for (unsigned u = 0; u < NumFinalAAs; ++u)
2174 ++DepIt;
2175 for (unsigned u = NumFinalAAs; u < DG.SyntheticRoot.Deps.size();
2176 ++u, ++DepIt) {
2177 errs() << "Unexpected abstract attribute: "
2178 << cast<AbstractAttribute>(DepIt->getPointer()) << " :: "
2179 << cast<AbstractAttribute>(DepIt->getPointer())
2180 ->getIRPosition()
2181 .getAssociatedValue()
2182 << "\n";
2183 }
2184 llvm_unreachable("Expected the final number of abstract attributes to "
2185 "remain unchanged!");
2186 }
2187 return ManifestChange;
2188}
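//
// Note on the accounting above (minimal sketch): ChangeStatus combines like a
// saturating "or", so a single manifested attribute marks the whole phase as
// changed.
//
//   ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
//   ManifestChange = ManifestChange | ChangeStatus::CHANGED;
//   assert(ManifestChange == ChangeStatus::CHANGED);
//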
2189
2190void Attributor::identifyDeadInternalFunctions() {
2191 // Early exit if we don't intend to delete functions.
2192 if (!Configuration.DeleteFns)
2193 return;
2194
2195 // To avoid triggering an assertion in the lazy call graph we will not delete
2196 // any internal library functions. We should modify the assertion though and
2197 // allow internals to be deleted.
2198 const auto *TLI =
2199 isModulePass()
2200 ? nullptr
2202 LibFunc LF;
2203
2204 // Identify dead internal functions and delete them. This happens outside
2205 // the other fixpoint analysis as we might treat potentially dead functions
2206 // as live to lower the number of iterations. If they happen to be dead, the
2207 // below fixpoint loop will identify and eliminate them.
2208
2209 SmallVector<Function *, 8> InternalFns;
2210 for (Function *F : Functions)
2211 if (F->hasLocalLinkage() && (isModulePass() || !TLI->getLibFunc(*F, LF)))
2212 InternalFns.push_back(F);
2213
2214 SmallPtrSet<Function *, 8> LiveInternalFns;
2215 bool FoundLiveInternal = true;
2216 while (FoundLiveInternal) {
2217 FoundLiveInternal = false;
2218 for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
2219 Function *F = InternalFns[u];
2220 if (!F)
2221 continue;
2222
2223 bool UsedAssumedInformation = false;
2225 [&](AbstractCallSite ACS) {
2227 return ToBeDeletedFunctions.count(Callee) ||
2228 (Functions.count(Callee) && Callee->hasLocalLinkage() &&
2229 !LiveInternalFns.count(Callee));
2230 },
2231 *F, true, nullptr, UsedAssumedInformation)) {
2232 continue;
2233 }
2234
2235 LiveInternalFns.insert(F);
2236 InternalFns[u] = nullptr;
2237 FoundLiveInternal = true;
2238 }
2239 }
2240
2241 for (unsigned u = 0, e = InternalFns.size(); u < e; ++u)
2242 if (Function *F = InternalFns[u])
2243 ToBeDeletedFunctions.insert(F);
2244}
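//
// Worked example for the loop above (hypothetical module): assume internal
// functions @a and @b where @a is only called from @b and @b is called from a
// live function outside the analyzed set. One round marks @b live because its
// caller is not a deletable internal function; a later round marks @a live
// because its caller @b is now in LiveInternalFns; the loop then terminates
// with neither function in ToBeDeletedFunctions. If @b had no callers at all,
// both functions would be scheduled for deletion.
//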
2245
2246ChangeStatus Attributor::cleanupIR() {
2247 TimeTraceScope TimeScope("Attributor::cleanupIR");
2248 // Delete stuff at the end to avoid invalid references and to get a nice order.
2249 LLVM_DEBUG(dbgs() << "\n[Attributor] Delete/replace at least "
2250 << ToBeDeletedFunctions.size() << " functions and "
2251 << ToBeDeletedBlocks.size() << " blocks and "
2252 << ToBeDeletedInsts.size() << " instructions and "
2253 << ToBeChangedValues.size() << " values and "
2254 << ToBeChangedUses.size() << " uses. To insert "
2255 << ToBeChangedToUnreachableInsts.size()
2256 << " unreachables.\n"
2257 << "Preserve manifest added " << ManifestAddedBlocks.size()
2258 << " blocks\n");
2259
2261 SmallVector<Instruction *, 32> TerminatorsToFold;
2262
2263 auto ReplaceUse = [&](Use *U, Value *NewV) {
2264 Value *OldV = U->get();
2265
2266 // If we plan to replace NewV we need to update it at this point.
2267 do {
2268 const auto &Entry = ToBeChangedValues.lookup(NewV);
2269 if (!get<0>(Entry))
2270 break;
2271 NewV = get<0>(Entry);
2272 } while (true);
2273
2274 Instruction *I = dyn_cast<Instruction>(U->getUser());
2275 assert((!I || isRunOn(*I->getFunction())) &&
2276 "Cannot replace an instruction outside the current SCC!");
2277
2278 // Do not replace uses in returns if the value is a must-tail call that we
2279 // will not delete.
2280 if (auto *RI = dyn_cast_or_null<ReturnInst>(I)) {
2281 if (auto *CI = dyn_cast<CallInst>(OldV->stripPointerCasts()))
2282 if (CI->isMustTailCall() && !ToBeDeletedInsts.count(CI))
2283 return;
2284 // If we rewrite a return and the new value is not an argument, strip the
2285 // `returned` attribute as it is wrong now.
2286 if (!isa<Argument>(NewV))
2287 for (auto &Arg : RI->getFunction()->args())
2288 Arg.removeAttr(Attribute::Returned);
2289 }
2290
2291 LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
2292 << " instead of " << *OldV << "\n");
2293 U->set(NewV);
2294
2295 if (Instruction *I = dyn_cast<Instruction>(OldV)) {
2296 CGModifiedFunctions.insert(I->getFunction());
2297 if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
2298 isInstructionTriviallyDead(I))
2299 DeadInsts.push_back(I);
2300 }
2301 if (isa<UndefValue>(NewV) && isa<CallBase>(U->getUser())) {
2302 auto *CB = cast<CallBase>(U->getUser());
2303 if (CB->isArgOperand(U)) {
2304 unsigned Idx = CB->getArgOperandNo(U);
2305 CB->removeParamAttr(Idx, Attribute::NoUndef);
2306 Function *Fn = CB->getCalledFunction();
2307 if (Fn && Fn->arg_size() > Idx)
2308 Fn->removeParamAttr(Idx, Attribute::NoUndef);
2309 }
2310 }
2311 if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
2312 Instruction *UserI = cast<Instruction>(U->getUser());
2313 if (isa<UndefValue>(NewV)) {
2314 ToBeChangedToUnreachableInsts.insert(UserI);
2315 } else {
2316 TerminatorsToFold.push_back(UserI);
2317 }
2318 }
2319 };
2320
2321 for (auto &It : ToBeChangedUses) {
2322 Use *U = It.first;
2323 Value *NewV = It.second;
2324 ReplaceUse(U, NewV);
2325 }
2326
2327 SmallVector<Use *, 4> Uses;
2328 for (auto &It : ToBeChangedValues) {
2329 Value *OldV = It.first;
2330 auto [NewV, Done] = It.second;
2331 Uses.clear();
2332 for (auto &U : OldV->uses())
2333 if (Done || !U.getUser()->isDroppable())
2334 Uses.push_back(&U);
2335 for (Use *U : Uses) {
2336 if (auto *I = dyn_cast<Instruction>(U->getUser()))
2337 if (!isRunOn(*I->getFunction()))
2338 continue;
2339 ReplaceUse(U, NewV);
2340 }
2341 }
2342
2343 for (const auto &V : InvokeWithDeadSuccessor)
2344 if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
2345 assert(isRunOn(*II->getFunction()) &&
2346 "Cannot replace an invoke outside the current SCC!");
2347 bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
2348 bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
2349 bool Invoke2CallAllowed =
2350 !AAIsDead::mayCatchAsynchronousExceptions(*II->getFunction());
2351 assert((UnwindBBIsDead || NormalBBIsDead) &&
2352 "Invoke does not have dead successors!");
2353 BasicBlock *BB = II->getParent();
2354 BasicBlock *NormalDestBB = II->getNormalDest();
2355 if (UnwindBBIsDead) {
2356 Instruction *NormalNextIP = &NormalDestBB->front();
2357 if (Invoke2CallAllowed) {
2358 changeToCall(II);
2359 NormalNextIP = BB->getTerminator();
2360 }
2361 if (NormalBBIsDead)
2362 ToBeChangedToUnreachableInsts.insert(NormalNextIP);
2363 } else {
2364 assert(NormalBBIsDead && "Broken invariant!");
2365 if (!NormalDestBB->getUniquePredecessor())
2366 NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2367 ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
2368 }
2369 }
2370 for (Instruction *I : TerminatorsToFold) {
2371 assert(isRunOn(*I->getFunction()) &&
2372 "Cannot replace a terminator outside the current SCC!");
2373 CGModifiedFunctions.insert(I->getFunction());
2374 ConstantFoldTerminator(I->getParent());
2375 }
2376 for (const auto &V : ToBeChangedToUnreachableInsts)
2377 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2378 LLVM_DEBUG(dbgs() << "[Attributor] Change to unreachable: " << *I
2379 << "\n");
2380 assert(isRunOn(*I->getFunction()) &&
2381 "Cannot replace an instruction outside the current SCC!");
2382 CGModifiedFunctions.insert(I->getFunction());
2383 changeToUnreachable(I);
2384 }
2385
2386 for (const auto &V : ToBeDeletedInsts) {
2387 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2388 if (auto *CB = dyn_cast<CallBase>(I)) {
2389 assert((isa<IntrinsicInst>(CB) || isRunOn(*I->getFunction())) &&
2390 "Cannot delete an instruction outside the current SCC!");
2391 if (!isa<IntrinsicInst>(CB))
2392 Configuration.CGUpdater.removeCallSite(*CB);
2393 }
2394 I->dropDroppableUses();
2395 CGModifiedFunctions.insert(I->getFunction());
2396 if (!I->getType()->isVoidTy())
2397 I->replaceAllUsesWith(UndefValue::get(I->getType()));
2398 if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
2399 DeadInsts.push_back(I);
2400 else
2401 I->eraseFromParent();
2402 }
2403 }
2404
2405 llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) { return !I; });
2406
2407 LLVM_DEBUG({
2408 dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n";
2409 for (auto &I : DeadInsts)
2410 if (I)
2411 dbgs() << " - " << *I << "\n";
2412 });
2413
2414 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
2415
2416 if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
2417 SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
2418 ToBeDeletedBBs.reserve(NumDeadBlocks);
2419 for (BasicBlock *BB : ToBeDeletedBlocks) {
2420 assert(isRunOn(*BB->getParent()) &&
2421 "Cannot delete a block outside the current SCC!");
2422 CGModifiedFunctions.insert(BB->getParent());
2423 // Do not delete BBs added during manifests of AAs.
2424 if (ManifestAddedBlocks.contains(BB))
2425 continue;
2426 ToBeDeletedBBs.push_back(BB);
2427 }
2428 // We do not actually delete the blocks but squash them into a single
2429 // unreachable instruction; untangling branches that jump here is something
2430 // we still need to do in a more generic way.
2431 detachDeadBlocks(ToBeDeletedBBs, nullptr);
2432 }
2433
2434 identifyDeadInternalFunctions();
2435
2436 // Rewrite the functions as requested during manifest.
2437 ChangeStatus ManifestChange = rewriteFunctionSignatures(CGModifiedFunctions);
2438
2439 for (Function *Fn : CGModifiedFunctions)
2440 if (!ToBeDeletedFunctions.count(Fn) && Functions.count(Fn))
2441 Configuration.CGUpdater.reanalyzeFunction(*Fn);
2442
2443 for (Function *Fn : ToBeDeletedFunctions) {
2444 if (!Functions.count(Fn))
2445 continue;
2446 Configuration.CGUpdater.removeFunction(*Fn);
2447 }
2448
2449 if (!ToBeChangedUses.empty())
2450 ManifestChange = ChangeStatus::CHANGED;
2451
2452 if (!ToBeChangedToUnreachableInsts.empty())
2453 ManifestChange = ChangeStatus::CHANGED;
2454
2455 if (!ToBeDeletedFunctions.empty())
2456 ManifestChange = ChangeStatus::CHANGED;
2457
2458 if (!ToBeDeletedBlocks.empty())
2459 ManifestChange = ChangeStatus::CHANGED;
2460
2461 if (!ToBeDeletedInsts.empty())
2462 ManifestChange = ChangeStatus::CHANGED;
2463
2464 if (!InvokeWithDeadSuccessor.empty())
2465 ManifestChange = ChangeStatus::CHANGED;
2466
2467 if (!DeadInsts.empty())
2468 ManifestChange = ChangeStatus::CHANGED;
2469
2470 NumFnDeleted += ToBeDeletedFunctions.size();
2471
2472 LLVM_DEBUG(dbgs() << "[Attributor] Deleted " << ToBeDeletedFunctions.size()
2473 << " functions after manifest.\n");
2474
2475#ifdef EXPENSIVE_CHECKS
2476 for (Function *F : Functions) {
2477 if (ToBeDeletedFunctions.count(F))
2478 continue;
2479 assert(!verifyFunction(*F, &errs()) && "Module verification failed!");
2480 }
2481#endif
2482
2483 return ManifestChange;
2484}
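//
// Illustrative sketch (assumption about the surrounding Attributor API, not
// code from this function): the work lists consumed above are filled during
// the manifest phase, e.g. an AA that proved a load yields a known value would
// record roughly
//
//   A.changeAfterManifest(IRPosition::inst(LoadI), *KnownValue);
//   A.deleteAfterManifest(LoadI);
//
// and cleanupIR() later performs all replacements and deletions in one batch,
// so the IR is only mutated once per Attributor::run() invocation.
//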
2485
2487 TimeTraceScope TimeScope("Attributor::run");
2488 AttributorCallGraph ACallGraph(*this);
2489
2490 if (PrintCallGraph)
2491 ACallGraph.populateAll();
2492
2493 Phase = AttributorPhase::UPDATE;
2494 runTillFixpoint();
2495
2496 // dump graphs on demand
2497 if (DumpDepGraph)
2498 DG.dumpGraph();
2499
2500 if (ViewDepGraph)
2501 DG.viewGraph();
2502
2503 if (PrintDependencies)
2504 DG.print();
2505
2506 Phase = AttributorPhase::MANIFEST;
2507 ChangeStatus ManifestChange = manifestAttributes();
2508
2509 Phase = AttributorPhase::CLEANUP;
2510 ChangeStatus CleanupChange = cleanupIR();
2511
2512 if (PrintCallGraph)
2513 ACallGraph.print();
2514
2515 return ManifestChange | CleanupChange;
2516}
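//
// Minimal usage sketch (assumption; the real setup lives in the pass drivers
// at the end of this file): seed default abstract attributes for every
// function, then let run() perform the update, manifest, and cleanup phases.
//
//   SetVector<Function *> Functions;           // functions to analyze
//   for (Function &F : M)
//     Functions.insert(&F);
//   BumpPtrAllocator Allocator;
//   InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
//   AttributorConfig AC(CGUpdater);
//   Attributor A(Functions, InfoCache, AC);
//   for (Function *F : Functions)
//     A.identifyDefaultAbstractAttributes(*F);
//   ChangeStatus Changed = A.run();
//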
2517
2518ChangeStatus Attributor::updateAA(AbstractAttribute &AA) {
2519 TimeTraceScope TimeScope("updateAA", [&]() {
2520 return AA.getName() + std::to_string(AA.getIRPosition().getPositionKind());
2521 });
2522 assert(Phase == AttributorPhase::UPDATE &&
2523 "We can update AA only in the update stage!");
2524
2525 // Use a new dependence vector for this update.
2526 DependenceVector DV;
2527 DependenceStack.push_back(&DV);
2528
2529 auto &AAState = AA.getState();
2530 ChangeStatus CS = ChangeStatus::UNCHANGED;
2531 bool UsedAssumedInformation = false;
2532 if (!isAssumedDead(AA, nullptr, UsedAssumedInformation,
2533 /* CheckBBLivenessOnly */ true))
2534 CS = AA.update(*this);
2535
2536 if (!AA.isQueryAA() && DV.empty() && !AA.getState().isAtFixpoint()) {
2537 // If the AA did not rely on outside information but changed, we run it
2538 // again to see if it found a fixpoint. Most AAs do but we don't require
2539 // them to. Hence, it might take the AA multiple iterations to get to a
2540 // fixpoint even if it does not rely on outside information, which is fine.
2541 ChangeStatus RerunCS = ChangeStatus::UNCHANGED;
2542 if (CS == ChangeStatus::CHANGED)
2543 RerunCS = AA.update(*this);
2544
2545 // If the attribute did not change during the run or rerun, and it still did
2546 // not query any non-fix information, the state will not change and we can
2547 // indicate that right at this point.
2548 if (RerunCS == ChangeStatus::UNCHANGED && !AA.isQueryAA() && DV.empty())
2549 AAState.indicateOptimisticFixpoint();
2550 }
2551
2552 if (!AAState.isAtFixpoint())
2553 rememberDependences();
2554
2555 // Verify the stack was used properly, that is we pop the dependence vector we
2556 // put there earlier.
2557 DependenceVector *PoppedDV = DependenceStack.pop_back_val();
2558 (void)PoppedDV;
2559 assert(PoppedDV == &DV && "Inconsistent usage of the dependence stack!");
2560
2561 return CS;
2562}
2563
2564void Attributor::createShallowWrapper(Function &F) {
2565 assert(!F.isDeclaration() && "Cannot create a wrapper around a declaration!");
2566
2567 Module &M = *F.getParent();
2568 LLVMContext &Ctx = M.getContext();
2569 FunctionType *FnTy = F.getFunctionType();
2570
2571 Function *Wrapper =
2572 Function::Create(FnTy, F.getLinkage(), F.getAddressSpace(), F.getName());
2573 F.setName(""); // make the wrapped function anonymous
2574 M.getFunctionList().insert(F.getIterator(), Wrapper);
2575
2576 F.setLinkage(GlobalValue::InternalLinkage);
2577
2578 F.replaceAllUsesWith(Wrapper);
2579 assert(F.use_empty() && "Uses remained after wrapper was created!");
2580
2581 // Move the COMDAT section to the wrapper.
2582 // TODO: Check if we need to keep it for F as well.
2583 Wrapper->setComdat(F.getComdat());
2584 F.setComdat(nullptr);
2585
2586 // Copy all metadata and attributes but keep them on F as well.
2587 SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2588 F.getAllMetadata(MDs);
2589 for (auto MDIt : MDs)
2590 Wrapper->addMetadata(MDIt.first, *MDIt.second);
2591 Wrapper->setAttributes(F.getAttributes());
2592
2593 // Create the call in the wrapper.
2594 BasicBlock *EntryBB = BasicBlock::Create(Ctx, "entry", Wrapper);
2595
2596 SmallVector<Value *, 8> Args;
2597 Argument *FArgIt = F.arg_begin();
2598 for (Argument &Arg : Wrapper->args()) {
2599 Args.push_back(&Arg);
2600 Arg.setName((FArgIt++)->getName());
2601 }
2602
2603 CallInst *CI = CallInst::Create(&F, Args, "", EntryBB);
2604 CI->setTailCall(true);
2605 CI->addFnAttr(Attribute::NoInline);
2606 ReturnInst::Create(Ctx, CI->getType()->isVoidTy() ? nullptr : CI, EntryBB);
2607
2608 NumFnShallowWrappersCreated++;
2609}
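//
// Shape of the result (illustrative IR; the inner name is invented since the
// wrapped function is made anonymous above): for a non-exact definition such
// as `define linkonce_odr void @foo(i32 %x)` the wrapper keeps the original
// name, linkage, attributes and comdat, while the body is only reachable
// through a noinline tail call:
//
//   define linkonce_odr void @foo(i32 %x) {
//   entry:
//     tail call void @0(i32 %x)    ; call is marked noinline
//     ret void
//   }
//   define internal void @0(i32 %x) { ... original body of @foo ... }
//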
2610
2611bool Attributor::isInternalizable(Function &F) {
2612 if (F.isDeclaration() || F.hasLocalLinkage() ||
2613 GlobalValue::isInterposableLinkage(F.getLinkage()))
2614 return false;
2615 return true;
2616}
2617
2618Function *Attributor::internalizeFunction(Function &F, bool Force) {
2619 if (!AllowDeepWrapper && !Force)
2620 return nullptr;
2621 if (!isInternalizable(F))
2622 return nullptr;
2623
2624 SmallPtrSet<Function *, 2> FnSet = {&F};
2625 DenseMap<Function *, Function *> InternalizedFns;
2626 internalizeFunctions(FnSet, InternalizedFns);
2627
2628 return InternalizedFns[&F];
2629}
2630
2631bool Attributor::internalizeFunctions(SmallPtrSetImpl<Function *> &FnSet,
2632 DenseMap<Function *, Function *> &FnMap) {
2633 for (Function *F : FnSet)
2634 if (!isInternalizable(*F))
2635 return false;
2636
2637 FnMap.clear();
2638 // Generate the internalized version of each function.
2639 for (Function *F : FnSet) {
2640 Module &M = *F->getParent();
2641 FunctionType *FnTy = F->getFunctionType();
2642
2643 // Create a copy of the current function
2644 Function *Copied =
2645 Function::Create(FnTy, F->getLinkage(), F->getAddressSpace(),
2646 F->getName() + ".internalized");
2647 ValueToValueMapTy VMap;
2648 auto *NewFArgIt = Copied->arg_begin();
2649 for (auto &Arg : F->args()) {
2650 auto ArgName = Arg.getName();
2651 NewFArgIt->setName(ArgName);
2652 VMap[&Arg] = &(*NewFArgIt++);
2653 }
2654 SmallVector<ReturnInst *, 8> Returns;
2655
2656 // Copy the body of the original function to the new one
2657 CloneFunctionInto(Copied, F, VMap,
2658 CloneFunctionChangeType::LocalChangesOnly, Returns);
2659
2660 // Set the linkage and visibility late as CloneFunctionInto has some
2661 // implicit requirements.
2662 Copied->setVisibility(GlobalValue::DefaultVisibility);
2663 Copied->setLinkage(GlobalValue::PrivateLinkage);
2664
2665 // Copy metadata
2666 SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2667 F->getAllMetadata(MDs);
2668 for (auto MDIt : MDs)
2669 if (!Copied->hasMetadata())
2670 Copied->addMetadata(MDIt.first, *MDIt.second);
2671
2672 M.getFunctionList().insert(F->getIterator(), Copied);
2673 Copied->setDSOLocal(true);
2674 FnMap[F] = Copied;
2675 }
2676
2677 // Replace all uses of the old function with the new internalized function
2678 // unless the caller is a function that was just internalized.
2679 for (Function *F : FnSet) {
2680 auto &InternalizedFn = FnMap[F];
2681 auto IsNotInternalized = [&](Use &U) -> bool {
2682 if (auto *CB = dyn_cast<CallBase>(U.getUser()))
2683 return !FnMap.lookup(CB->getCaller());
2684 return false;
2685 };
2686 F->replaceUsesWithIf(InternalizedFn, IsNotInternalized);
2687 }
2688
2689 return true;
2690}
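//
// Usage sketch (hedged; `F` stands for any internalizable function): the
// single-function entry point above is the common way to obtain a private
// copy the Attributor may specialize without affecting external callers.
//
//   if (Function *Copy = A.internalizeFunction(F, /* Force */ true)) {
//     // Copy has private linkage, the name "<F>.internalized", and a clone
//     // of F's body; external callers still see the original F.
//   }
//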
2691
2692bool Attributor::isValidFunctionSignatureRewrite(
2693 Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
2694
2695 if (!Configuration.RewriteSignatures)
2696 return false;
2697
2698 Function *Fn = Arg.getParent();
2699 auto CallSiteCanBeChanged = [Fn](AbstractCallSite ACS) {
2700 // Forbid call sites that cast the function return type. If we needed to
2701 // rewrite such functions we would have to re-create a cast for the new
2702 // call site (if the old one had uses).
2703 if (!ACS.getCalledFunction() ||
2704 ACS.getInstruction()->getType() !=
2705 ACS.getCalledFunction()->getReturnType())
2706 return false;
2707 if (ACS.getCalledOperand()->getType() != Fn->getType())
2708 return false;
2709 // Forbid must-tail calls for now.
2710 return !ACS.isCallbackCall() && !ACS.getInstruction()->isMustTailCall();
2711 };
2712
2713 // Avoid var-arg functions for now.
2714 if (Fn->isVarArg()) {
2715 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
2716 return false;
2717 }
2718
2719 // Avoid functions with complicated argument passing semantics.
2720 AttributeList FnAttributeList = Fn->getAttributes();
2721 if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
2722 FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
2723 FnAttributeList.hasAttrSomewhere(Attribute::InAlloca) ||
2724 FnAttributeList.hasAttrSomewhere(Attribute::Preallocated)) {
2725 LLVM_DEBUG(
2726 dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
2727 return false;
2728 }
2729
2730 // Avoid callbacks for now.
2731 bool UsedAssumedInformation = false;
2732 if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
2733 UsedAssumedInformation)) {
2734 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
2735 return false;
2736 }
2737
2738 auto InstPred = [](Instruction &I) {
2739 if (auto *CI = dyn_cast<CallInst>(&I))
2740 return !CI->isMustTailCall();
2741 return true;
2742 };
2743
2744 // Forbid must-tail calls for now.
2745 // TODO:
2746 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2747 if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
2748 nullptr, {Instruction::Call},
2749 UsedAssumedInformation)) {
2750 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
2751 return false;
2752 }
2753
2754 return true;
2755}
2756
2758 Argument &Arg, ArrayRef<Type *> ReplacementTypes,
2761 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2762 << Arg.getParent()->getName() << " with "
2763 << ReplacementTypes.size() << " replacements\n");
2764 assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
2765 "Cannot register an invalid rewrite");
2766
2767 Function *Fn = Arg.getParent();
2768 SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2769 ArgumentReplacementMap[Fn];
2770 if (ARIs.empty())
2771 ARIs.resize(Fn->arg_size());
2772
2773 // If we already have a replacement that introduces no more new arguments
2774 // than the requested one, ignore this request.
2775 std::unique_ptr<ArgumentReplacementInfo> &ARI = ARIs[Arg.getArgNo()];
2776 if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
2777 LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
2778 return false;
2779 }
2780
2781 // If we have a replacement already but we like the new one better, delete
2782 // the old.
2783 ARI.reset();
2784
2785 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2786 << Arg.getParent()->getName() << " with "
2787 << ReplacementTypes.size() << " replacements\n");
2788
2789 // Remember the replacement.
2790 ARI.reset(new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
2791 std::move(CalleeRepairCB),
2792 std::move(ACSRepairCB)));
2793
2794 return true;
2795}
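//
// Illustrative sketch (assumption, mirroring how argument-promotion style AAs
// use this API): replacing a pointer argument by the value it points to
// registers one replacement type plus two repair callbacks. All lambda bodies
// and names below are hypothetical.
//
//   Type *EltTy = ...;        // type of the value loaded from the pointer
//   Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
//       [](const Attributor::ArgumentReplacementInfo &ARI, Function &NewFn,
//          Function::arg_iterator NewArgIt) { /* rewrite uses in NewFn */ };
//   Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
//       [](const Attributor::ArgumentReplacementInfo &ARI,
//          AbstractCallSite ACS, SmallVectorImpl<Value *> &NewArgOps) {
//         /* load the pointee at the call site and pass it instead */ };
//   A.registerFunctionSignatureRewrite(Arg, {EltTy}, std::move(FnRepairCB),
//                                      std::move(ACSRepairCB));
//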
2796
2797bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) {
2798 bool Result = true;
2799#ifndef NDEBUG
2800 if (SeedAllowList.size() != 0)
2802 Function *Fn = AA.getAnchorScope();
2803 if (FunctionSeedAllowList.size() != 0 && Fn)
2805#endif
2806 return Result;
2807}
2808
2809ChangeStatus Attributor::rewriteFunctionSignatures(
2810 SmallSetVector<Function *, 8> &ModifiedFns) {
2811 ChangeStatus Changed = ChangeStatus::UNCHANGED;
2812
2813 for (auto &It : ArgumentReplacementMap) {
2814 Function *OldFn = It.getFirst();
2815
2816 // Deleted functions do not require rewrites.
2817 if (!Functions.count(OldFn) || ToBeDeletedFunctions.count(OldFn))
2818 continue;
2819
2820 const SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2821 It.getSecond();
2822 assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");
2823
2824 SmallVector<Type *, 16> NewArgumentTypes;
2825 SmallVector<AttributeSet, 16> NewArgumentAttributes;
2826
2827 // Collect replacement argument types and copy over existing attributes.
2828 AttributeList OldFnAttributeList = OldFn->getAttributes();
2829 for (Argument &Arg : OldFn->args()) {
2830 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2831 ARIs[Arg.getArgNo()]) {
2832 NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
2833 ARI->ReplacementTypes.end());
2834 NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
2835 AttributeSet());
2836 } else {
2837 NewArgumentTypes.push_back(Arg.getType());
2838 NewArgumentAttributes.push_back(
2839 OldFnAttributeList.getParamAttrs(Arg.getArgNo()));
2840 }
2841 }
2842
2843 uint64_t LargestVectorWidth = 0;
2844 for (auto *I : NewArgumentTypes)
2845 if (auto *VT = dyn_cast<llvm::VectorType>(I))
2846 LargestVectorWidth =
2847 std::max(LargestVectorWidth,
2848 VT->getPrimitiveSizeInBits().getKnownMinValue());
2849
2850 FunctionType *OldFnTy = OldFn->getFunctionType();
2851 Type *RetTy = OldFnTy->getReturnType();
2852
2853 // Construct the new function type using the new arguments types.
2854 FunctionType *NewFnTy =
2855 FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());
2856
2857 LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
2858 << "' from " << *OldFn->getFunctionType() << " to "
2859 << *NewFnTy << "\n");
2860
2861 // Create the new function body and insert it into the module.
2862 Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
2863 OldFn->getAddressSpace(), "");
2864 Functions.insert(NewFn);
2865 OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
2866 NewFn->takeName(OldFn);
2867 NewFn->copyAttributesFrom(OldFn);
2868
2869 // Patch the pointer to LLVM function in debug info descriptor.
2870 NewFn->setSubprogram(OldFn->getSubprogram());
2871 OldFn->setSubprogram(nullptr);
2872
2873 // Recompute the parameter attributes list based on the new arguments for
2874 // the function.
2875 LLVMContext &Ctx = OldFn->getContext();
2876 NewFn->setAttributes(AttributeList::get(
2877 Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(),
2878 NewArgumentAttributes));
2879 AttributeFuncs::updateMinLegalVectorWidthAttr(*NewFn, LargestVectorWidth);
2880
2881 // Since we have now created the new function, splice the body of the old
2882 // function right into the new function, leaving the old rotting hulk of the
2883 // function empty.
2884 NewFn->splice(NewFn->begin(), OldFn);
2885
2886 // Fixup block addresses to reference new function.
2887 SmallVector<BlockAddress *, 8u> BlockAddresses;
2888 for (User *U : OldFn->users())
2889 if (auto *BA = dyn_cast<BlockAddress>(U))
2890 BlockAddresses.push_back(BA);
2891 for (auto *BA : BlockAddresses)
2892 BA->replaceAllUsesWith(BlockAddress::get(NewFn, BA->getBasicBlock()));
2893
2894 // Set of all "call-like" instructions that invoke the old function mapped
2895 // to their new replacements.
2896 SmallVector<std::pair<CallBase *, CallBase *>, 8> CallSitePairs;
2897
2898 // Callback to create a new "call-like" instruction for a given one.
2899 auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
2900 CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
2901 const AttributeList &OldCallAttributeList = OldCB->getAttributes();
2902
2903 // Collect the new argument operands for the replacement call site.
2904 SmallVector<Value *, 16> NewArgOperands;
2905 SmallVector<AttributeSet, 16> NewArgOperandAttributes;
2906 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
2907 unsigned NewFirstArgNum = NewArgOperands.size();
2908 (void)NewFirstArgNum; // only used inside assert.
2909 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2910 ARIs[OldArgNum]) {
2911 if (ARI->ACSRepairCB)
2912 ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
2913 assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
2914 NewArgOperands.size() &&
2915 "ACS repair callback did not provide as many operand as new "
2916 "types were registered!");
2917 // TODO: Expose the attribute set to the ACS repair callback
2918 NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
2919 AttributeSet());
2920 } else {
2921 NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
2922 NewArgOperandAttributes.push_back(
2923 OldCallAttributeList.getParamAttrs(OldArgNum));
2924 }
2925 }
2926
2927 assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
2928 "Mismatch # argument operands vs. # argument operand attributes!");
2929 assert(NewArgOperands.size() == NewFn->arg_size() &&
2930 "Mismatch # argument operands vs. # function arguments!");
2931
2932 SmallVector<OperandBundleDef, 4> OperandBundleDefs;
2933 OldCB->getOperandBundlesAsDefs(OperandBundleDefs);
2934
2935 // Create a new call or invoke instruction to replace the old one.
2936 CallBase *NewCB;
2937 if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
2938 NewCB =
2939 InvokeInst::Create(NewFn, II->getNormalDest(), II->getUnwindDest(),
2940 NewArgOperands, OperandBundleDefs, "", OldCB);
2941 } else {
2942 auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
2943 "", OldCB);
2944 NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
2945 NewCB = NewCI;
2946 }
2947
2948 // Copy over various properties and the new attributes.
2949 NewCB->copyMetadata(*OldCB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
2950 NewCB->setCallingConv(OldCB->getCallingConv());
2951 NewCB->takeName(OldCB);
2952 NewCB->setAttributes(AttributeList::get(
2953 Ctx, OldCallAttributeList.getFnAttrs(),
2954 OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));
2955
2956 AttributeFuncs::updateMinLegalVectorWidthAttr(*NewCB->getCaller(),
2957 LargestVectorWidth);
2958
2959 CallSitePairs.push_back({OldCB, NewCB});
2960 return true;
2961 };
2962
2963 // Use the CallSiteReplacementCreator to create replacement call sites.
2964 bool UsedAssumedInformation = false;
2965 bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
2966 true, nullptr, UsedAssumedInformation,
2967 /* CheckPotentiallyDead */ true);
2968 (void)Success;
2969 assert(Success && "Assumed call site replacement to succeed!");
2970
2971 // Rewire the arguments.
2972 Argument *OldFnArgIt = OldFn->arg_begin();
2973 Argument *NewFnArgIt = NewFn->arg_begin();
2974 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
2975 ++OldArgNum, ++OldFnArgIt) {
2976 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2977 ARIs[OldArgNum]) {
2978 if (ARI->CalleeRepairCB)
2979 ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
2980 if (ARI->ReplacementTypes.empty())
2981 OldFnArgIt->replaceAllUsesWith(
2982 PoisonValue::get(OldFnArgIt->getType()));
2983 NewFnArgIt += ARI->ReplacementTypes.size();
2984 } else {
2985 NewFnArgIt->takeName(&*OldFnArgIt);
2986 OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
2987 ++NewFnArgIt;
2988 }
2989 }
2990
2991 // Eliminate the instructions *after* we visited all of them.
2992 for (auto &CallSitePair : CallSitePairs) {
2993 CallBase &OldCB = *CallSitePair.first;
2994 CallBase &NewCB = *CallSitePair.second;
2995 assert(OldCB.getType() == NewCB.getType() &&
2996 "Cannot handle call sites with different types!");
2997 ModifiedFns.insert(OldCB.getFunction());
2998 Configuration.CGUpdater.replaceCallSite(OldCB, NewCB);
2999 OldCB.replaceAllUsesWith(&NewCB);
3000 OldCB.eraseFromParent();
3001 }
3002
3003 // Replace the function in the call graph (if any).
3004 Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);
3005
3006 // If the old function was modified and needed to be reanalyzed, the new one
3007 // does now.
3008 if (ModifiedFns.remove(OldFn))
3009 ModifiedFns.insert(NewFn);
3010
3011 Changed = ChangeStatus::CHANGED;
3012 }
3013
3014 return Changed;
3015}
3016
3017void InformationCache::initializeInformationCache(const Function &CF,
3018 FunctionInfo &FI) {
3019 // As we do not modify the function here we can remove the const
3020 // without breaking implicit assumptions. At the end of the day, we could
3021 // initialize the cache eagerly, which would look the same to the users.
3022 Function &F = const_cast<Function &>(CF);
3023
3024 // Walk all instructions to find interesting instructions that might be
3025 // queried by abstract attributes during their initialization or update.
3026 // This has to happen before we create attributes.
3027
3029
3030 // Add \p V to the assume uses map which tracks the number of uses outside
3031 // of "visited" assumes. If no outside uses are left, the value is added to
3032 // the assume-only use vector.
3033 auto AddToAssumeUsesMap = [&](const Value &V) -> void {
3034 SmallVector<const Instruction *, 4> Worklist;
3035 if (auto *I = dyn_cast<Instruction>(&V))
3036 Worklist.push_back(I);
3037 while (!Worklist.empty()) {
3038 const Instruction *I = Worklist.pop_back_val();
3039 std::optional<short> &NumUses = AssumeUsesMap[I];
3040 if (!NumUses)
3041 NumUses = I->getNumUses();
3042 NumUses = *NumUses - /* this assume */ 1;
3043 if (*NumUses != 0)
3044 continue;
3045 AssumeOnlyValues.insert(I);
3046 for (const Value *Op : I->operands())
3047 if (auto *OpI = dyn_cast<Instruction>(Op))
3048 Worklist.push_back(OpI);
3049 }
3050 };
3051
3052 for (Instruction &I : instructions(&F)) {
3053 bool IsInterestingOpcode = false;
3054
3055 // To allow easy access to all instructions in a function with a given
3056 // opcode we store them in the InfoCache. As not all opcodes are interesting
3057 // to concrete attributes we only cache the ones that are as identified in
3058 // the following switch.
3059 // Note: There are no concrete attributes now so this is initially empty.
3060 switch (I.getOpcode()) {
3061 default:
3062 assert(!isa<CallBase>(&I) &&
3063 "New call base instruction type needs to be known in the "
3064 "Attributor.");
3065 break;
3066 case Instruction::Call:
3067 // Calls are interesting on their own, additionally:
3068 // For `llvm.assume` calls we also fill the KnowledgeMap as we find them.
3069 // For `must-tail` calls we remember the caller and callee.
3070 if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
3071 AssumeOnlyValues.insert(Assume);
3072 fillMapFromAssume(*Assume, KnowledgeMap);
3073 AddToAssumeUsesMap(*Assume->getArgOperand(0));
3074 } else if (cast<CallInst>(I).isMustTailCall()) {
3075 FI.ContainsMustTailCall = true;
3076 if (const Function *Callee = cast<CallInst>(I).getCalledFunction())
3077 getFunctionInfo(*Callee).CalledViaMustTail = true;
3078 }
3079 [[fallthrough]];
3080 case Instruction::CallBr:
3081 case Instruction::Invoke:
3082 case Instruction::CleanupRet:
3083 case Instruction::CatchSwitch:
3084 case Instruction::AtomicRMW:
3085 case Instruction::AtomicCmpXchg:
3086 case Instruction::Br:
3087 case Instruction::Resume:
3088 case Instruction::Ret:
3089 case Instruction::Load:
3090 // The alignment of a pointer is interesting for loads.
3091 case Instruction::Store:
3092 // The alignment of a pointer is interesting for stores.
3093 case Instruction::Alloca:
3094 case Instruction::AddrSpaceCast:
3095 IsInterestingOpcode = true;
3096 }
3097 if (IsInterestingOpcode) {
3098 auto *&Insts = FI.OpcodeInstMap[I.getOpcode()];
3099 if (!Insts)
3100 Insts = new (Allocator) InstructionVectorTy();
3101 Insts->push_back(&I);
3102 }
3103 if (I.mayReadOrWriteMemory())
3104 FI.RWInsts.push_back(&I);
3105 }
3106
3107 if (F.hasFnAttribute(Attribute::AlwaysInline) &&
3108 isInlineViable(F).isSuccess())
3109 InlineableFunctions.insert(&F);
3110}
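//
// Illustrative sketch (assumption): the per-opcode instruction lists cached
// above are exactly what checkForAllInstructionsImpl iterates over later,
// avoiding repeated walks over the whole function body:
//
//   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
//   if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
//     for (Instruction *I : *Insts)
//       ; // visits every cached return instruction of F
//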
3111
3113 return AG.getAnalysis<AAManager>(F);
3114}
3115
3116InformationCache::FunctionInfo::~FunctionInfo() {
3117 // The instruction vectors are allocated using a BumpPtrAllocator, we need to
3118 // manually destroy them.
3119 for (auto &It : OpcodeInstMap)
3120 It.getSecond()->~InstructionVectorTy();
3121}
3122
3123void Attributor::recordDependence(const AbstractAttribute &FromAA,
3124 const AbstractAttribute &ToAA,
3125 DepClassTy DepClass) {
3126 if (DepClass == DepClassTy::NONE)
3127 return;
3128 // If we are outside of an update, thus before the actual fixpoint iteration
3129 // started (= when we create AAs), we do not track dependences because we will
3130 // put all AAs into the initial worklist anyway.
3131 if (DependenceStack.empty())
3132 return;
3133 if (FromAA.getState().isAtFixpoint())
3134 return;
3135 DependenceStack.back()->push_back({&FromAA, &ToAA, DepClass});
3136}
3137
3138void Attributor::rememberDependences() {
3139 assert(!DependenceStack.empty() && "No dependences to remember!");
3140
3141 for (DepInfo &DI : *DependenceStack.back()) {
3142 assert((DI.DepClass == DepClassTy::REQUIRED ||
3143 DI.DepClass == DepClassTy::OPTIONAL) &&
3144 "Expected required or optional dependence (1 bit)!");
3145 auto &DepAAs = const_cast<AbstractAttribute &>(*DI.FromAA).Deps;
3146 DepAAs.insert(AbstractAttribute::DepTy(
3147 const_cast<AbstractAttribute *>(DI.ToAA), unsigned(DI.DepClass)));
3148 }
3149}
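//
// Example of how dependences come into being (sketch; QueryingAA and IRP are
// placeholders): when an AA asks for another AA during its update, e.g.
//
//   const auto &NoRecurseAA =
//       A.getAAFor<AANoRecurse>(QueryingAA, IRP, DepClassTy::REQUIRED);
//
// recordDependence() pushes {&NoRecurseAA, &QueryingAA, REQUIRED} onto the
// current DependenceVector and rememberDependences() persists it, so a later
// change (or invalidation) of NoRecurseAA re-schedules (or invalidates)
// QueryingAA in runTillFixpoint().
//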
3150
3151void Attributor::identifyDefaultAbstractAttributes(Function &F) {
3152 if (!VisitedFunctions.insert(&F).second)
3153 return;
3154 if (F.isDeclaration())
3155 return;
3156
3157 // In non-module runs we need to look at the call sites of a function to
3158 // determine if it is part of a must-tail call edge. This will influence what
3159 // attributes we can derive.
3160 InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F);
3161 if (!isModulePass() && !FI.CalledViaMustTail) {
3162 for (const Use &U : F.uses())
3163 if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
3164 if (CB->isCallee(&U) && CB->isMustTailCall())
3165 FI.CalledViaMustTail = true;
3166 }
3167
3168 IRPosition FPos = IRPosition::function(F);
3169
3170 // Check for dead BasicBlocks in every function.
3171 // We need dead instruction detection because we do not want to deal with
3172 // broken IR in which SSA rules do not apply.
3173 getOrCreateAAFor<AAIsDead>(FPos);
3174
3175 // Every function might be "will-return".
3176 getOrCreateAAFor<AAWillReturn>(FPos);
3177
3178 // Every function might contain instructions that cause "undefined behavior".
3179 getOrCreateAAFor<AAUndefinedBehavior>(FPos);
3180
3181 // Every function can be nounwind.
3182 getOrCreateAAFor<AANoUnwind>(FPos);
3183
3184 // Every function might be marked "nosync"
3185 getOrCreateAAFor<AANoSync>(FPos);
3186
3187 // Every function might be "no-free".
3188 getOrCreateAAFor<AANoFree>(FPos);
3189
3190 // Every function might be "no-return".
3191 getOrCreateAAFor<AANoReturn>(FPos);
3192
3193 // Every function might be "no-recurse".
3194 getOrCreateAAFor<AANoRecurse>(FPos);
3195
3196 // Every function might be "readnone/readonly/writeonly/...".
3197 getOrCreateAAFor<AAMemoryBehavior>(FPos);
3198
3199 // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
3200 getOrCreateAAFor<AAMemoryLocation>(FPos);
3201
3202 // Every function can track active assumptions.
3203 getOrCreateAAFor<AAAssumptionInfo>(FPos);
3204
3205 // Every function might be applicable for Heap-To-Stack conversion.
3206 if (EnableHeapToStack)
3207 getOrCreateAAFor<AAHeapToStack>(FPos);
3208
3209 // Return attributes are only appropriate if the return type is non void.
3210 Type *ReturnType = F.getReturnType();
3211 if (!ReturnType->isVoidTy()) {
3212 // Argument attribute "returned" --- Create only one per function even
3213 // though it is an argument attribute.
3214 getOrCreateAAFor<AAReturnedValues>(FPos);
3215
3216 IRPosition RetPos = IRPosition::returned(F);
3217
3218 // Every returned value might be dead.
3219 getOrCreateAAFor<AAIsDead>(RetPos);
3220
3221 // Every function might be simplified.
3222 bool UsedAssumedInformation = false;
3223 getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation,
3225
3226 // Every returned value might be marked noundef.
3227 getOrCreateAAFor<AANoUndef>(RetPos);
3228
3229 if (ReturnType->isPointerTy()) {
3230
3231 // Every function with pointer return type might be marked align.
3232 getOrCreateAAFor<AAAlign>(RetPos);
3233
3234 // Every function with pointer return type might be marked nonnull.
3235 getOrCreateAAFor<AANonNull>(RetPos);
3236
3237 // Every function with pointer return type might be marked noalias.
3238 getOrCreateAAFor<AANoAlias>(RetPos);
3239
3240 // Every function with pointer return type might be marked
3241 // dereferenceable.
3242 getOrCreateAAFor<AADereferenceable>(RetPos);
3243 } else if (AttributeFuncs::isNoFPClassCompatibleType(ReturnType)) {
3244 getOrCreateAAFor<AANoFPClass>(RetPos);
3245 }
3246 }
3247
3248 for (Argument &Arg : F.args()) {
3249 IRPosition ArgPos = IRPosition::argument(Arg);
3250
3251 // Every argument might be simplified. We have to go through the Attributor
3252 // interface though as outside AAs can register custom simplification
3253 // callbacks.
3254 bool UsedAssumedInformation = false;
3255 getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation,
3257
3258 // Every argument might be dead.
3259 getOrCreateAAFor<AAIsDead>(ArgPos);
3260
3261 // Every argument might be marked noundef.
3262 getOrCreateAAFor<AANoUndef>(ArgPos);
3263
3264 if (Arg.getType()->isPointerTy()) {
3265 // Every argument with pointer type might be marked nonnull.
3266 getOrCreateAAFor<AANonNull>(ArgPos);
3267
3268 // Every argument with pointer type might be marked noalias.
3269 getOrCreateAAFor<AANoAlias>(ArgPos);
3270
3271 // Every argument with pointer type might be marked dereferenceable.
3272 getOrCreateAAFor<AADereferenceable>(ArgPos);
3273
3274 // Every argument with pointer type might be marked align.
3275 getOrCreateAAFor<AAAlign>(ArgPos);
3276
3277 // Every argument with pointer type might be marked nocapture.
3278 getOrCreateAAFor<AANoCapture>(ArgPos);
3279
3280 // Every argument with pointer type might be marked
3281 // "readnone/readonly/writeonly/..."
3282 getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
3283
3284 // Every argument with pointer type might be marked nofree.
3285 getOrCreateAAFor<AANoFree>(ArgPos);
3286
3287 // Every argument with pointer type might be privatizable (or promotable)
3288 getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
3289 } else if (AttributeFuncs::isNoFPClassCompatibleType(Arg.getType())) {
3290 getOrCreateAAFor<AANoFPClass>(ArgPos);
3291 }
3292 }
3293
3294 auto CallSitePred = [&](Instruction &I) -> bool {
3295 auto &CB = cast<CallBase>(I);
3296 IRPosition CBInstPos = IRPosition::inst(CB);
3297 IRPosition CBFnPos = IRPosition::callsite_function(CB);
3298
3299 // Call sites might be dead if they do not have side effects and no live
3300 // users. The return value might be dead if there are no live users.
3301 getOrCreateAAFor<AAIsDead>(CBInstPos);
3302
3303 Function *Callee = CB.getCalledFunction();
3304 // TODO: Even if the callee is not known now we might be able to simplify
3305 // the call/callee.
3306 if (!Callee)
3307 return true;
3308
3309 // Every call site can track active assumptions.
3310 getOrCreateAAFor<AAAssumptionInfo>(CBFnPos);
3311
3312 // Skip declarations except if annotations on their call sites were
3313 // explicitly requested.
3314 if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
3315 !Callee->hasMetadata(LLVMContext::MD_callback))
3316 return true;
3317
3318 if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) {
3319 IRPosition CBRetPos = IRPosition::callsite_returned(CB);
3320 bool UsedAssumedInformation = false;
3321 getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation,
3323
3325 getOrCreateAAFor<AANoFPClass>(CBInstPos);
3326 }
3327
3328 for (int I = 0, E = CB.arg_size(); I < E; ++I) {
3329
3330 IRPosition CBArgPos = IRPosition::callsite_argument(CB, I);
3331
3332 // Every call site argument might be dead.
3333 getOrCreateAAFor<AAIsDead>(CBArgPos);
3334
3335 // Call site argument might be simplified. We have to go through the
3336 // Attributor interface though as outside AAs can register custom
3337 // simplification callbacks.
3338 bool UsedAssumedInformation = false;
3339 getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation,
3341
3342 // Every call site argument might be marked "noundef".
3343 getOrCreateAAFor<AANoUndef>(CBArgPos);
3344
3345 Type *ArgTy = CB.getArgOperand(I)->getType();
3346
3347 if (!ArgTy->isPointerTy()) {
3349 getOrCreateAAFor<AANoFPClass>(CBArgPos);
3350
3351 continue;
3352 }
3353
3354 // Call site argument attribute "non-null".
3355 getOrCreateAAFor<AANonNull>(CBArgPos);
3356
3357 // Call site argument attribute "nocapture".
3358 getOrCreateAAFor<AANoCapture>(CBArgPos);
3359
3360 // Call site argument attribute "no-alias".
3361 getOrCreateAAFor<AANoAlias>(CBArgPos);
3362
3363 // Call site argument attribute "dereferenceable".
3364 getOrCreateAAFor<AADereferenceable>(CBArgPos);
3365
3366 // Call site argument attribute "align".
3367 getOrCreateAAFor<AAAlign>(CBArgPos);
3368
3369 // Call site argument attribute
3370 // "readnone/readonly/writeonly/..."
3371 getOrCreateAAFor<AAMemoryBehavior>(CBArgPos);
3372
3373 // Call site argument attribute "nofree".
3374 getOrCreateAAFor<AANoFree>(CBArgPos);
3375 }
3376 return true;
3377 };
3378
3379 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
3380 bool Success;
3381 bool UsedAssumedInformation = false;
3382 Success = checkForAllInstructionsImpl(
3383 nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
3384 {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
3385 (unsigned)Instruction::Call},
3386 UsedAssumedInformation);
3387 (void)Success;
3388 assert(Success && "Expected the check call to be successful!");
3389
3390 auto LoadStorePred = [&](Instruction &I) -> bool {
3391 if (isa<LoadInst>(I)) {
3392 getOrCreateAAFor<AAAlign>(
3393 IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
3394 if (SimplifyAllLoads)
3396 UsedAssumedInformation, AA::Intraprocedural);
3397 } else {
3398 auto &SI = cast<StoreInst>(I);
3399 getOrCreateAAFor<AAIsDead>(IRPosition::inst(I));
3400 getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
3401 UsedAssumedInformation, AA::Intraprocedural);
3402 getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
3403 }
3404 return true;
3405 };
3406 Success = checkForAllInstructionsImpl(
3407 nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
3408 {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
3409 UsedAssumedInformation);
3410 (void)Success;
3411 assert(Success && "Expected the check call to be successful!");
3412}
3413
3414/// Helpers to ease debugging through output streams and print calls.
3415///
3416///{
3417raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
3418 return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
3419}
3420
3421raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
3422 switch (AP) {
3423 case IRPosition::IRP_INVALID:
3424 return OS << "inv";
3425 case IRPosition::IRP_FLOAT:
3426 return OS << "flt";
3427 case IRPosition::IRP_RETURNED:
3428 return OS << "fn_ret";
3429 case IRPosition::IRP_CALL_SITE_RETURNED:
3430 return OS << "cs_ret";
3431 case IRPosition::IRP_FUNCTION:
3432 return OS << "fn";
3433 case IRPosition::IRP_CALL_SITE:
3434 return OS << "cs";
3435 case IRPosition::IRP_ARGUMENT:
3436 return OS << "arg";
3437 case IRPosition::IRP_CALL_SITE_ARGUMENT:
3438 return OS << "cs_arg";
3439 }
3440 llvm_unreachable("Unknown attribute position!");
3441}
3442
3443raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
3444 const Value &AV = Pos.getAssociatedValue();
3445 OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
3446 << Pos.getAnchorValue().getName() << "@" << Pos.getCallSiteArgNo() << "]";
3447
3448 if (Pos.hasCallBaseContext())
3449 OS << "[cb_context:" << *Pos.getCallBaseContext() << "]";
3450 return OS << "}";
3451}
3452
3453raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
3454 OS << "range-state(" << S.getBitWidth() << ")<";
3455 S.getKnown().print(OS);
3456 OS << " / ";
3457 S.getAssumed().print(OS);
3458 OS << ">";
3459
3460 return OS << static_cast<const AbstractState &>(S);
3461}
3462
3463raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
3464 return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
3465}
3466
3467raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
3468 AA.print(OS);
3469 return OS;
3470}
3471
3472raw_ostream &llvm::operator<<(raw_ostream &OS,
3473 const PotentialConstantIntValuesState &S) {
3474 OS << "set-state(< {";
3475 if (!S.isValidState())
3476 OS << "full-set";
3477 else {
3478 for (const auto &It : S.getAssumedSet())
3479 OS << It << ", ";
3480 if (S.undefIsContained())
3481 OS << "undef ";
3482 }
3483 OS << "} >)";
3484
3485 return OS;
3486}
3487
3488raw_ostream &llvm::operator<<(raw_ostream &OS,
3489 const PotentialLLVMValuesState &S) {
3490 OS << "set-state(< {";
3491 if (!S.isValidState())
3492 OS << "full-set";
3493 else {
3494 for (const auto &It : S.getAssumedSet()) {
3495 if (auto *F = dyn_cast<Function>(It.first.getValue()))
3496 OS << "@" << F->getName() << "[" << int(It.second) << "], ";
3497 else
3498 OS << *It.first.getValue() << "[" << int(It.second) << "], ";
3499 }
3500 if (S.undefIsContained())
3501 OS << "undef ";
3502 }
3503 OS << "} >)";
3504
3505 return OS;
3506}
3507
3508void AbstractAttribute::print(raw_ostream &OS) const {
3509 OS << "[";
3510 OS << getName();
3511 OS << "] for CtxI ";
3512
3513 if (auto *I = getCtxI()) {
3514 OS << "'";
3515 I->print(OS);
3516 OS << "'";
3517 } else
3518 OS << "<<null inst>>";
3519
3520 OS << " at position " << getIRPosition() << " with state " << getAsStr()
3521 << '\n';
3522}
3523
3524void AbstractAttribute::printWithDeps(raw_ostream &OS) const {
3525 print(OS);
3526
3527 for (const auto &DepAA : Deps) {
3528 auto *AA = DepAA.getPointer();
3529 OS << " updates ";
3530 AA->print(OS);
3531 }
3532
3533 OS << '\n';
3534}
3535
3536raw_ostream &llvm::operator<<(raw_ostream &OS,
3537 const AAPointerInfo::Access &Acc) {
3538 OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst();
3539 if (Acc.getLocalInst() != Acc.getRemoteInst())
3540 OS << " via " << *Acc.getLocalInst();
3541 if (Acc.getContent()) {
3542 if (*Acc.getContent())
3543 OS << " [" << **Acc.getContent() << "]";
3544 else
3545 OS << " [ <unknown> ]";
3546 }
3547 return OS;
3548}
3549///}
3550
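The stream operators above exist purely for debugging. As a hedged illustration (not part of Attributor.cpp; the helper name debugPrintAA is made up), they can be combined to dump an abstract attribute together with its position and its dependencies:

#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO/Attributor.h"

// Hedged sketch: print one abstract attribute, its IR position, and the
// attributes it would trigger updates for, using the operators defined above.
static void debugPrintAA(const llvm::AbstractAttribute &AA) {
  llvm::dbgs() << AA << " at " << AA.getIRPosition() << "\n";
  AA.printWithDeps(llvm::dbgs());
}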
3551/// ----------------------------------------------------------------------------
3552/// Pass (Manager) Boilerplate
3553/// ----------------------------------------------------------------------------
3554
3555static bool runAttributorOnFunctions(InformationCache &InfoCache,
3556 SetVector<Function *> &Functions,
3557 AnalysisGetter &AG,
3558 CallGraphUpdater &CGUpdater,
3559 bool DeleteFns, bool IsModulePass) {
3560 if (Functions.empty())
3561 return false;
3562
3563 LLVM_DEBUG({
3564 dbgs() << "[Attributor] Run on module with " << Functions.size()
3565 << " functions:\n";
3566 for (Function *Fn : Functions)
3567 dbgs() << " - " << Fn->getName() << "\n";
3568 });
3569
3570 // Create an Attributor and initially empty information cache that is filled
3571 // while we identify default attribute opportunities.
3572 AttributorConfig AC(CGUpdater);
3573 AC.IsModulePass = IsModulePass;
3574 AC.DeleteFns = DeleteFns;
3575 Attributor A(Functions, InfoCache, AC);
3576
3577 // Create shallow wrappers for all functions that are not IPO amendable
3578 if (AllowShallowWrappers)
3579 for (Function *F : Functions)
3580 if (!A.isFunctionIPOAmendable(*F))
3581 Attributor::createShallowWrapper(*F);
3582
3583 // Internalize non-exact functions
3584 // TODO: for now we eagerly internalize functions without calculating the
3585 // cost, we need a cost interface to determine whether internalizing
3586 // a function is "beneficial"
3587 if (AllowDeepWrapper) {
3588 unsigned FunSize = Functions.size();
3589 for (unsigned u = 0; u < FunSize; u++) {
3590 Function *F = Functions[u];
3591 if (!F->isDeclaration() && !F->isDefinitionExact() && F->getNumUses() &&
3592 !GlobalValue::isInterposableLinkage(F->getLinkage())) {
3593 Function *NewF = Attributor::internalizeFunction(*F);
3594 assert(NewF && "Could not internalize function.");
3595 Functions.insert(NewF);
3596
3597 // Update call graph
3598 CGUpdater.replaceFunctionWith(*F, *NewF);
3599 for (const Use &U : NewF->uses())
3600 if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
3601 auto *CallerF = CB->getCaller();
3602 CGUpdater.reanalyzeFunction(*CallerF);
3603 }
3604 }
3605 }
3606 }
3607
3608 for (Function *F : Functions) {
3609 if (F->hasExactDefinition())
3610 NumFnWithExactDefinition++;
3611 else
3612 NumFnWithoutExactDefinition++;
3613
3614 // We look at internal functions only on-demand but if any use is not a
3615 // direct call or outside the current set of analyzed functions, we have
3616 // to do it eagerly.
3617 if (F->hasLocalLinkage()) {
3618 if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
3619 const auto *CB = dyn_cast<CallBase>(U.getUser());
3620 return CB && CB->isCallee(&U) &&
3621 Functions.count(const_cast<Function *>(CB->getCaller()));
3622 }))
3623 continue;
3624 }
3625
3626 // Populate the Attributor with abstract attribute opportunities in the
3627 // function and the information cache with IR information.
3628 A.identifyDefaultAbstractAttributes(*F);
3629 }
3630
3631 ChangeStatus Changed = A.run();
3632
3633 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
3634 << " functions, result: " << Changed << ".\n");
3635 return Changed == ChangeStatus::CHANGED;
3636}
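runAttributorOnFunctions above uses the default AttributorConfig knobs. A hedged sketch (not from this file; runWithCustomConfig is an illustrative name) shows the same driver pattern with a customized configuration, assuming the caller already has the function set, information cache, and call graph updater:

#include "llvm/ADT/SetVector.h"
#include "llvm/Transforms/IPO/Attributor.h"

// Hedged sketch: build an Attributor with a non-default configuration and run
// it, mirroring the seeding loop in runAttributorOnFunctions.
static llvm::ChangeStatus
runWithCustomConfig(llvm::SetVector<llvm::Function *> &Functions,
                    llvm::InformationCache &InfoCache,
                    llvm::CallGraphUpdater &CGUpdater) {
  llvm::AttributorConfig AC(CGUpdater);
  AC.IsModulePass = true;
  AC.DeleteFns = false;            // keep dead functions around
  AC.MaxFixpointIterations = 16;   // tighter bound than the cl::opt default
  llvm::Attributor A(Functions, InfoCache, AC);
  for (llvm::Function *F : Functions)
    A.identifyDefaultAbstractAttributes(*F);
  return A.run();
}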
3637
3638void AADepGraph::viewGraph() { llvm::ViewGraph(this, "Dependency Graph"); }
3639
3640void AADepGraph::dumpGraph() {
3641 static std::atomic<int> CallTimes;
3642 std::string Prefix;
3643
3644 if (!DepGraphDotFileNamePrefix.empty())
3645 Prefix = DepGraphDotFileNamePrefix;
3646 else
3647 Prefix = "dep_graph";
3648 std::string Filename =
3649 Prefix + "_" + std::to_string(CallTimes.load()) + ".dot";
3650
3651 outs() << "Dependency graph dump to " << Filename << ".\n";
3652
3653 std::error_code EC;
3654
3655 raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
3656 if (!EC)
3657 llvm::WriteGraph(File, this);
3658
3659 CallTimes++;
3660}
3661
3662void AADepGraph::print() {
3663 for (auto DepAA : SyntheticRoot.Deps)
3664 cast<AbstractAttribute>(DepAA.getPointer())->printWithDeps(outs());
3665}
3666
3667PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
3668 FunctionAnalysisManager &FAM =
3669 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
3670 AnalysisGetter AG(FAM);
3671
3672 SetVector<Function *> Functions;
3673 for (Function &F : M)
3674 Functions.insert(&F);
3675
3676 CallGraphUpdater CGUpdater;
3677 BumpPtrAllocator Allocator;
3678 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
3679 if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
3680 /* DeleteFns */ true, /* IsModulePass */ true)) {
3681 // FIXME: Think about passes we will preserve and add them here.
3682 return PreservedAnalyses::none();
3683 }
3684 return PreservedAnalyses::all();
3685}
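For completeness, a hedged sketch (not part of this file; runAttributorOnModule is an illustrative name) of driving the module pass above through the new pass manager. The analysis-manager setup is the usual PassBuilder boilerplate; the same pipeline is reachable from the command line via opt -passes=attributor.

#include "llvm/IR/Module.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Transforms/IPO/Attributor.h"

// Hedged sketch: run AttributorPass over a module M owned by the caller.
static void runAttributorOnModule(llvm::Module &M) {
  llvm::PassBuilder PB;
  llvm::LoopAnalysisManager LAM;
  llvm::FunctionAnalysisManager FAM;
  llvm::CGSCCAnalysisManager CGAM;
  llvm::ModuleAnalysisManager MAM;
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  llvm::ModulePassManager MPM;
  MPM.addPass(llvm::AttributorPass());
  MPM.run(M, MAM);
}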
3686
3687PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
3688 CGSCCAnalysisManager &AM,
3689 LazyCallGraph &CG,
3690 CGSCCUpdateResult &UR) {
3691 FunctionAnalysisManager &FAM =
3692 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
3693 AnalysisGetter AG(FAM);
3694
3695 SetVector<Function *> Functions;
3696 for (LazyCallGraph::Node &N : C)
3697 Functions.insert(&N.getFunction());
3698
3699 if (Functions.empty())
3700 return PreservedAnalyses::all();
3701
3702 Module &M = *Functions.back()->getParent();
3703 CallGraphUpdater CGUpdater;
3704 CGUpdater.initialize(CG, C, AM, UR);
3705 BumpPtrAllocator Allocator;
3706 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
3707 if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
3708 /* DeleteFns */ false,
3709 /* IsModulePass */ false)) {
3710 // FIXME: Think about passes we will preserve and add them here.
3711 PreservedAnalyses PA;
3712 PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
3713 return PA;
3714 }
3715 return PreservedAnalyses::all();
3716}
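Similarly, a hedged sketch (addAttributorCGSCC is an illustrative name) of scheduling the CGSCC flavor from a module pipeline through the post-order CGSCC adaptor; from opt the pass is typically requested as -passes='cgscc(attributor-cgscc)'.

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/IPO/Attributor.h"

// Hedged sketch: wrap AttributorCGSCCPass so it runs SCC by SCC in post order
// as part of a module pass pipeline.
static void addAttributorCGSCC(llvm::ModulePassManager &MPM) {
  MPM.addPass(llvm::createModuleToPostOrderCGSCCPassAdaptor(
      llvm::AttributorCGSCCPass()));
}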
3717
3718namespace llvm {
3719
3720template <> struct GraphTraits<AADepGraphNode *> {
3721 using NodeRef = AADepGraphNode *;
3722 using DepTy = PointerIntPair<AADepGraphNode *, 1>;
3723 using EdgeRef = PointerIntPair<AADepGraphNode *, 1>;
3724
3725 static NodeRef getEntryNode(AADepGraphNode *DGN) { return DGN; }
3726 static NodeRef DepGetVal(const DepTy &DT) { return DT.getPointer(); }
3727
3728 using ChildIteratorType =
3729 mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;
3730 using ChildEdgeIteratorType = AADepGraphNode::DepSetTy::iterator;
3731
3732 static ChildIteratorType child_begin(NodeRef N) { return N->child_begin(); }
3733
3734 static ChildIteratorType child_end(NodeRef N) { return N->child_end(); }
3735};
3736
3737template <>
3738struct GraphTraits<AADepGraph *> : public GraphTraits<AADepGraphNode *> {
3739 static NodeRef getEntryNode(AADepGraph *DG) { return DG->GetEntryNode(); }
3740
3741 using nodes_iterator =
3742 mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;
3743
3744 static nodes_iterator nodes_begin(AADepGraph *DG) { return DG->begin(); }
3745
3746 static nodes_iterator nodes_end(AADepGraph *DG) { return DG->end(); }
3747};
3748
3749template <> struct DOTGraphTraits<AADepGraph *> : public DefaultDOTGraphTraits {
3750 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
3751
3752 static std::string getNodeLabel(const AADepGraphNode *Node,
3753 const AADepGraph *DG) {
3754 std::string AAString;
3755 raw_string_ostream O(AAString);
3756 Node->print(O);
3757 return AAString;
3758 }
3759};
3760
3761} // end namespace llvm
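The GraphTraits specializations above are what let WriteGraph and ViewGraph render the dependency graph; they also make LLVM's generic graph iterators applicable to it. A hedged sketch (walkDepGraph is an illustrative name) of a depth-first walk over the same structure:

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO/Attributor.h"

// Hedged sketch: visit every node reachable from the synthetic root and print
// it, similar in spirit to AADepGraph::print() above.
static void walkDepGraph(llvm::AADepGraph &DG) {
  for (llvm::AADepGraphNode *N : llvm::depth_first(DG.GetEntryNode()))
    N->print(llvm::outs());
}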
#define Success
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
amdgpu Simplify well known AMD library false FunctionCallee Callee
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Rewrite undef for PHI
SmallPtrSet< MachineInstr *, 2 > Uses
This file contains the simple types necessary to represent the attributes associated with functions a...
static cl::opt< bool > AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden, cl::desc("Allow the Attributor to create shallow " "wrappers for non-exact definitions."), cl::init(false))
static cl::opt< bool > VerifyMaxFixpointIterations("attributor-max-iterations-verify", cl::Hidden, cl::desc("Verify that max-iterations is a tight bound for a fixpoint"), cl::init(false))
static bool checkForAllInstructionsImpl(Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap, function_ref< bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA, const AAIsDead *LivenessAA, const ArrayRef< unsigned > &Opcodes, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, bool CheckPotentiallyDead=false)
#define VERBOSE_DEBUG_TYPE
Definition: Attributor.cpp:62
static cl::opt< bool > EnableHeapToStack("enable-heap-to-stack-conversion", cl::init(true), cl::Hidden)
static bool runAttributorOnFunctions(InformationCache &InfoCache, SetVector< Function * > &Functions, AnalysisGetter &AG, CallGraphUpdater &CGUpdater, bool DeleteFns, bool IsModulePass)
}
static bool isPotentiallyReachable(Attributor &A, const Instruction &FromI, const Instruction *ToI, const Function &ToFn, const AbstractAttribute &QueryingAA, const AA::InstExclusionSetTy *ExclusionSet, std::function< bool(const Function &F)> GoBackwardsCB)
Definition: Attributor.cpp:591
static bool getPotentialCopiesOfMemoryValue(Attributor &A, Ty &I, SmallSetVector< Value *, 4 > &PotentialCopies, SmallSetVector< Instruction *, 4 > &PotentialValueOrigins, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact)
Definition: Attributor.cpp:333
static cl::list< std::string > FunctionSeedAllowList("attributor-function-seed-allow-list", cl::Hidden, cl::desc("Comma seperated list of function names that are " "allowed to be seeded."), cl::CommaSeparated)
static cl::opt< unsigned, true > MaxInitializationChainLengthX("attributor-max-initialization-chain-length", cl::Hidden, cl::desc("Maximal number of chained initializations (to avoid stack overflows)"), cl::location(MaxInitializationChainLength), cl::init(1024))
static cl::opt< bool > SimplifyAllLoads("attributor-simplify-all-loads", cl::Hidden, cl::desc("Try to simplify all loads."), cl::init(true))
static cl::opt< bool > ViewDepGraph("attributor-view-dep-graph", cl::Hidden, cl::desc("View the dependency graph."), cl::init(false))
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old)
Return true if New is equal or worse than Old.
Definition: Attributor.cpp:875
static cl::opt< bool > AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden, cl::desc("Allow the Attributor to use IP information " "derived from non-exact functions via cloning"), cl::init(false))
static cl::opt< bool > DumpDepGraph("attributor-dump-dep-graph", cl::Hidden, cl::desc("Dump the dependency graph to dot files."), cl::init(false))
static cl::opt< bool > PrintCallGraph("attributor-print-call-graph", cl::Hidden, cl::desc("Print Attributor's internal call graph"), cl::init(false))
static cl::opt< bool > PrintDependencies("attributor-print-dep", cl::Hidden, cl::desc("Print attribute dependencies"), cl::init(false))
static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool RequireReadNone, bool &IsKnown)
Definition: Attributor.cpp:549
static cl::opt< std::string > DepGraphDotFileNamePrefix("attributor-depgraph-dot-filename-prefix", cl::Hidden, cl::desc("The prefix used for the CallGraph dot file names."))
static cl::opt< bool > AnnotateDeclarationCallSites("attributor-annotate-decl-cs", cl::Hidden, cl::desc("Annotate call sites of function declarations."), cl::init(false))
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr, AttributeList &Attrs, int AttrIdx, bool ForceReplace=false)
Return true if the information provided by Attr was added to the attribute list Attrs.
Definition: Attributor.cpp:885
static cl::opt< unsigned > SetFixpointIterations("attributor-max-iterations", cl::Hidden, cl::desc("Maximal number of fixpoint iterations."), cl::init(32))
static cl::list< std::string > SeedAllowList("attributor-seed-allow-list", cl::Hidden, cl::desc("Comma seperated list of attribute names that are " "allowed to be seeded."), cl::CommaSeparated)
static cl::opt< bool > EnableCallSiteSpecific("attributor-enable-call-site-specific-deduction", cl::Hidden, cl::desc("Allow the Attributor to do call site specific analysis"), cl::init(false))
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
Definition: DebugCounter.h:182
#define LLVM_DEBUG(X)
Definition: Debug.h:101
#define DEBUG_WITH_TYPE(TYPE, X)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition: Debug.h:64
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:236
IRTranslator LLVM IR MI
static bool isMustTailCall(Value *V)
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
static const Function * getCalledFunction(const Value *V, bool &IsNoBuiltin)
print must be executed print the must be executed context for all instructions
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
FunctionAnalysisManager FAM
This file defines the PointerIntPair class.
static StringRef getName(Value *V)
Basic Register Allocator
@ SI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isSimple(Instruction *I)
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
A manager for alias analyses.
Class for arbitrary precision integers.
Definition: APInt.h:75
AbstractCallSite.
CallBase * getInstruction() const
Return the underlying instruction.
bool isCallbackCall() const
Return true if this ACS represents a callback call.
const Use & getCalleeUseForCallback() const
Return the use of the callee value in the underlying instruction.
static void getCallbackUses(const CallBase &CB, SmallVectorImpl< const Use * > &CallbackUses)
Add operand uses of CB that represent callback uses into CallbackUses.
bool isCallee(Value::const_user_iterator UI) const
Return true if UI is the use that defines the callee of this ACS.
Value * getCallArgOperand(Argument &Arg) const
Return the operand of the underlying instruction associated with Arg.
int getCallArgOperandNo(Argument &Arg) const
Return the operand index of the underlying instruction associated with Arg.
Value * getCalledOperand() const
Return the pointer to function that is being called.
unsigned getNumArgOperands() const
Return the number of parameters of the callee.
Function * getCalledFunction() const
Return the function being called if this is a direct call, otherwise return null (if it's an indirect...
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:620
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:774
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:163
bool hasAttributeAtIndex(unsigned Index, Attribute::AttrKind Kind) const
Return true if the attribute exists at the given index.
AttributeSet getFnAttrs() const
The function attributes are returned.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
Attribute getAttributeAtIndex(unsigned Index, Attribute::AttrKind Kind) const
Return the attribute object that exists at the given index.
AttributeSet getAttributes(unsigned Index) const
The attributes for the specified index are returned.
AttributeSet getParamAttrs(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
bool isStringAttribute() const
Return true if the attribute is a string (target-dependent) attribute.
Definition: Attributes.cpp:281
bool isEnumAttribute() const
Return true if the attribute is an Attribute::AttrKind type.
Definition: Attributes.cpp:273
bool isIntAttribute() const
Return true if the attribute is an integer attribute.
Definition: Attributes.cpp:277
uint64_t getValueAsInt() const
Return the attribute's value as an integer.
Definition: Attributes.cpp:296
StringRef getKindAsString() const
Return the attribute's kind as a string.
Definition: Attributes.cpp:310
static Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
Definition: Attributes.cpp:91
Attribute::AttrKind getKindAsEnum() const
Return the attribute's kind as an enum (Attribute::AttrKind).
Definition: Attributes.cpp:289
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:87
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
const Instruction & front() const
Definition: BasicBlock.h:326
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:105
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
Definition: BasicBlock.cpp:292
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:112
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:127
static BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
Definition: Constants.cpp:1777
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:66
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1186
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1471
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1518
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1408
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1467
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
void setAttributes(AttributeList A)
Set the parameter attributes for this call.
Definition: InstrTypes.h:1490
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1486
Function * getCaller()
Helper to get the caller (the parent function).
Wrapper to unify "old style" CallGraph and "new style" LazyCallGraph.
void removeFunction(Function &Fn)
Remove Fn from the call graph.
void removeCallSite(CallBase &CS)
Remove the call site CS from the call graph.
void replaceFunctionWith(Function &OldFn, Function &NewFn)
Replace OldFn in the call graph (and SCC) with NewFn.
void reanalyzeFunction(Function &Fn)
After an CGSCC pass changes a function in ways that affect the call graph, this method can be called ...
bool replaceCallSite(CallBase &OldCS, CallBase &NewCS)
Replace OldCS with the new call site NewCS.
void initialize(CallGraph &CG, CallGraphSCC &SCC)
Initializers for usage outside of a CGSCC pass, inside a CGSCC pass in the old and new pass manager (...
This class represents a function call, abstracting a target machine's calling convention.
void setTailCall(bool IsTc=true)
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:998
static Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
Definition: Constants.cpp:2040
static Constant * getFPTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2132
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2090
void print(raw_ostream &OS) const
Print out the bounds to a stream.
This is an important base class in LLVM.
Definition: Constant.h:41
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:356
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
static bool shouldExecute(unsigned CounterName)
Definition: DebugCounter.h:72
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:202
bool empty() const
Definition: DenseMap.h:98
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:166
A proxy from a FunctionAnalysisManager to an SCC.
Class to represent function types.
Definition: DerivedTypes.h:103
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void setSubprogram(DISubprogram *SP)
Set the attached subprogram.
Definition: Metadata.cpp:1621
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition: Function.h:136
void splice(Function::iterator ToIt, Function *FromF)
Transfer all blocks from FromF to this function at ToIt.
Definition: Function.h:692
const BasicBlock & getEntryBlock() const
Definition: Function.h:740
void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
removes the attribute from the list of attributes.
Definition: Function.cpp:626
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:174
iterator_range< arg_iterator > args()
Definition: Function.h:795
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1625
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:313
iterator begin()
Definition: Function.h:756
arg_iterator arg_begin()
Definition: Function.h:771
void setAttributes(AttributeList Attrs)
Set the attribute list for this Function.
Definition: Function.h:316
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:319
size_t arg_size() const
Definition: Function.h:804
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:179
Argument * getArg(unsigned i) const
Definition: Function.h:789
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition: Function.h:187
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:644
void copyAttributesFrom(const Function *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a Function) from the ...
Definition: Function.cpp:743
bool hasMetadata() const
Return true if this value has any metadata attached to it.
Definition: Value.h:585
void addMetadata(unsigned KindID, MDNode &MD)
Add a metadata attachment.
Definition: Metadata.cpp:1359
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:275
LinkageTypes getLinkage() const
Definition: GlobalValue.h:541
bool hasLocalLinkage() const
Definition: GlobalValue.h:523
void setLinkage(LinkageTypes LT)
Definition: GlobalValue.h:532
unsigned getAddressSpace() const
Definition: GlobalValue.h:201
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:652
void setDSOLocal(bool Local)
Definition: GlobalValue.h:299
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:290
@ DefaultVisibility
The GV is visible.
Definition: GlobalValue.h:63
void setVisibility(VisibilityTypes V)
Definition: GlobalValue.h:250
static bool isInterposableLinkage(LinkageTypes Linkage)
Whether the definition of this global may be replaced by something non-equivalent at link time.
Definition: GlobalValue.h:420
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition: GlobalValue.h:56
@ InternalLinkage
Rename collisions when linking (static functions).
Definition: GlobalValue.h:55
bool isSuccess() const
Definition: InlineCost.h:188
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
Definition: PassManager.h:933
const BasicBlock * getParent() const
Definition: Instruction.h:90
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:74
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this',...
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:82
void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, Instruction *InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
A node in the call graph.
An SCC of the call graph.
A lazily constructed view of the call graph of a module.
An instruction for reading from memory.
Definition: Instructions.h:177
This is the common base class for memset/memcpy/memmove.
This class wraps the llvm.memcpy/memmove intrinsics.
static MemoryLocation getForSource(const MemTransferInst *MTI)
Return a location representing the source of a memory transfer.
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
static std::optional< MemoryLocation > getOrNone(const Instruction *Inst)
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
const FunctionListType & getFunctionList() const
Get the Module's list of functions (constant).
Definition: Module.h:579
Diagnostic information for missed-optimization remarks.
PointerIntPair - This class implements a pair of a pointer and small integer.
void * getOpaqueValue() const
PointerTy getPointer() const
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1758
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:152
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: PassManager.h:155
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:158
void preserve()
Mark an analysis as preserved.
Definition: PassManager.h:173
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, Instruction *InsertBefore=nullptr)
A vector that has set insertion semantics.
Definition: SetVector.h:40
size_type size() const
Determine the number of elements in the SetVector.
Definition: SetVector.h:77
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition: SetVector.h:208
bool remove(const value_type &X)
Remove an item from the set vector.
Definition: SetVector.h:157
ArrayRef< T > getArrayRef() const
Definition: SetVector.h:63
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:141
const T & front() const
Return the first element of the SetVector.
Definition: SetVector.h:122
void clear()
Completely clear the SetVector.
Definition: SetVector.h:213
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:72
const T & back() const
Return the last element of the SetVector.
Definition: SetVector.h:128
typename vector_type::const_iterator iterator
Definition: SetVector.h:48
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:82
size_type size() const
Definition: SmallPtrSet.h:93
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:344
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:383
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:365
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:301
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
void reserve(size_type N)
Definition: SmallVector.h:667
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:687
void resize(size_type N)
Definition: SmallVector.h:642
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
An instruction for storing to memory.
Definition: Instructions.h:301
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
A visitor class for IR positions.
Definition: Attributor.h:1091
SubsumingPositionIterator(const IRPosition &IRP)
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:258
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:185
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:231
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1739
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:532
iterator_range< user_iterator > users()
Definition: Value.h:421
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:685
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:994
iterator_range< use_iterator > uses()
Definition: Value.h:376
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:308
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:381
Value handle that is nullable, but tries to track the Value.
Definition: ValueHandle.h:204
An efficient, type-erasing, non-owning reference to a callable.
self_iterator getIterator()
Definition: ilist_node.h:82
iterator insert(iterator where, pointer New)
Definition: ilist.h:229
A raw_ostream that writes to a file descriptor.
Definition: raw_ostream.h:454
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:642
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isAssumedReadNone(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool &IsKnown)
Return true if IRP is readnone.
Definition: Attributor.cpp:584
bool isAssumedReadOnly(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool &IsKnown)
Return true if IRP is readonly.
Definition: Attributor.cpp:579
std::optional< Value * > combineOptionalValuesInAAValueLatice(const std::optional< Value * > &A, const std::optional< Value * > &B, Type *Ty)
Return the combination of A and B such that the result is a possible value of both.
Definition: Attributor.cpp:308
bool isValidAtPosition(const ValueAndContext &VAC, InformationCache &InfoCache)
Return true if the value of VAC is a valid at the position of VAC, that is a constant,...
Definition: Attributor.cpp:259
bool isAssumedThreadLocalObject(Attributor &A, Value &Obj, const AbstractAttribute &QueryingAA)
Return true if Obj is assumed to be a thread local object.
Definition: Attributor.cpp:766
Constant * getInitialValueForObj(Value &Obj, Type &Ty, const TargetLibraryInfo *TLI, const DataLayout &DL, RangeTy *RangePtr=nullptr)
Return the initial value of Obj with type Ty if that is a constant.
Definition: Attributor.cpp:225
bool isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA, const Value &V, bool ForAnalysisOnly=true)
Return true if V is dynamically unique, that is, there are no two "instances" of V at runtime with di...
Definition: Attributor.cpp:215
bool getPotentialCopiesOfStoredValue(Attributor &A, StoreInst &SI, SmallSetVector< Value *, 4 > &PotentialCopies, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact=false)
Collect all potential values of the one stored by SI into PotentialCopies.
Definition: Attributor.cpp:539
bool isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I, const AbstractAttribute &QueryingAA)
Return true if I is potentially affected by a barrier.
Definition: Attributor.cpp:819
ValueScope
Flags to distinguish intra-procedural queries from potentially inter-procedural queries.
Definition: Attributor.h:169
@ Intraprocedural
Definition: Attributor.h:170
@ Interprocedural
Definition: Attributor.h:171
bool isValidInScope(const Value &V, const Function *Scope)
Return true if V is a valid value in Scope, that is a constant or an instruction/argument of Scope.
Definition: Attributor.cpp:249
bool isPotentiallyReachable(Attributor &A, const Instruction &FromI, const Instruction &ToI, const AbstractAttribute &QueryingAA, const AA::InstExclusionSetTy *ExclusionSet=nullptr, std::function< bool(const Function &F)> GoBackwardsCB=nullptr)
Return true if ToI is potentially reachable from FromI without running into any instruction in Exclus...
Definition: Attributor.cpp:747
bool isNoSyncInst(Attributor &A, const Instruction &I, const AbstractAttribute &QueryingAA)
Return true if I is a nosync instruction.
Definition: Attributor.cpp:190
bool getPotentiallyLoadedValues(Attributor &A, LoadInst &LI, SmallSetVector< Value *, 4 > &PotentialValues, SmallSetVector< Instruction *, 4 > &PotentialValueOrigins, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact=false)
Collect all potential values LI could read into PotentialValues.
Definition: Attributor.cpp:529
Value * getWithType(Value &V, Type &Ty)
Try to convert V to type Ty without introducing new instructions.
Definition: Attributor.cpp:285
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isNoFPClassCompatibleType(Type *Ty)
Returns true if this is a type legal for the 'nofpclass' attribute.
void updateMinLegalVectorWidthAttr(Function &Fn, uint64_t Width)
Update min-legal-vector-width if it is in Attribute and less than Width.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:445
@ CommaSeparated
Definition: CommandLine.h:164
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:465
DiagnosticInfoOptimizationBase::Argument NV
@ OF_TextWithCRLF
The file should be opened in text mode and use a carriage linefeed '\r '.
Definition: FileSystem.h:770
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:406
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1782
Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition: Local.cpp:537
unsigned MaxInitializationChainLength
The value passed to the line option that defines the maximal initialization chain length.
Definition: Attributor.cpp:98
bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions=false, const TargetLibraryInfo *TLI=nullptr, DomTreeUpdater *DTU=nullptr)
If a terminator instruction is predicated on a constant value, convert it into an unconditional branc...
Definition: Local.cpp:126
APInt operator&(APInt a, const APInt &b)
Definition: APInt.h:2057
void detachDeadBlocks(ArrayRef< BasicBlock * > BBs, SmallVectorImpl< DominatorTree::UpdateType > *Updates, bool KeepOneInputPHIs=false)
Replace contents of every block in BBs with single unreachable instruction.
@ Done
Definition: Threading.h:61
bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
Definition: Verifier.cpp:6465
CallInst * changeToCall(InvokeInst *II, DomTreeUpdater *DTU=nullptr)
This function converts the specified invoke into a normal call.
Definition: Local.cpp:2308
raw_fd_ostream & outs()
This returns a reference to a raw_fd_ostream for standard output.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
raw_ostream & WriteGraph(raw_ostream &O, const GraphType &G, bool ShortNames=false, const Twine &Title="")
Definition: GraphWriter.h:359
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
InlineResult isInlineViable(Function &Callee)
Minimal filter to detect invalid constructs for inlining.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1789
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:398
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:2140
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool AreStatisticsEnabled()
Check if statistics are enabled.
Definition: Statistic.cpp:139
Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition: Local.cpp:2243
Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all pois...
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
BasicBlock * SplitBlockPredecessors(BasicBlock *BB, ArrayRef< BasicBlock * > Preds, const char *Suffix, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, bool PreserveLCSSA=false)
This method introduces at least one new basic block into the function and moves some of the predecess...
bool operator&=(SparseBitVector< ElementSize > *LHS, const SparseBitVector< ElementSize > &RHS)
void ViewGraph(const GraphType &G, const Twine &Name, bool ShortNames=false, const Twine &Title="", GraphProgram::Name Program=GraphProgram::DOT)
ViewGraph - Emit a dot graph, run 'dot', run gv on the postscript file, then cleanup.
Definition: GraphWriter.h:427
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Definition: APFixedPoint.h:292
void CloneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, CloneFunctionChangeType Changes, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr)
Clone OldFunc into NewFunc, transforming the old arguments into references to VMap values.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:2076
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition: iterator.h:363
bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates or reallocates memory (eith...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1939
ChangeStatus
{
Definition: Attributor.h:466
void fillMapFromAssume(AssumeInst &Assume, RetainedKnowledgeMap &Result)
Insert into the map all the informations contained in the operand bundles of the llvm....
bool operator|=(SparseBitVector< ElementSize > &LHS, const SparseBitVector< ElementSize > *RHS)
DepClassTy
Definition: Attributor.h:476
@ OPTIONAL
The target may be valid if the source is not.
@ NONE
Do not track a dependence between source and target.
@ REQUIRED
The target cannot be valid if the source is not.
APInt operator|(APInt a, const APInt &b)
Definition: APInt.h:2077
#define N
DepSetTy Deps
Set of dependency graph nodes which should be updated if this one is updated.
Definition: Attributor.h:493
aaiterator begin()
Definition: Attributor.h:507
aaiterator end()
Definition: Attributor.h:508
The data structure for the dependency graph.
Definition: Attributor.h:524
iterator begin()
Definition: Attributor.h:539
AADepGraphNode SyntheticRoot
There is no root node for the dependency graph.
Definition: Attributor.h:536
void print()
Print dependency graph.
iterator end()
Definition: Attributor.h:540
void dumpGraph()
Dump graph to file.
AADepGraphNode * GetEntryNode()
Definition: Attributor.h:537
An abstract interface to track if a value leaves it's defining function instance.
Definition: Attributor.h:3939
bool isAssumedUniqueForAnalysis() const
Return true if we assume that the underlying value is unique in its scope wrt.
Definition: Attributor.h:3953
An abstract Attribute for computing reachability between functions.
Definition: Attributor.h:5108
An abstract interface to determine reachability of point A to B.
Definition: Attributor.h:3493
An abstract interface for liveness abstract attribute.
Definition: Attributor.h:3616
virtual bool isKnownDead() const =0
Returns true if the underlying value is known dead.
virtual bool isAssumedDead() const =0
The query functions are protected such that other attributes need to go through the Attributor interf...
virtual bool isRemovableStore() const
Return true if the underlying value is a store that is known to be removable.
Definition: Attributor.h:3652
static bool mayCatchAsynchronousExceptions(const Function &F)
Determine if F might catch asynchronous exceptions.
Definition: Attributor.h:3673
An abstract interface for memory access kind related attributes (readnone/readonly/writeonly).
Definition: Attributor.h:4229
An abstract interface for all memory location attributes (readnone/argmemonly/inaccessiblememonly/ina...
Definition: Attributor.h:4292
An abstract interface for all nocapture attributes.
Definition: Attributor.h:3979
bool isAssumedNoSync() const
Returns true if "nosync" is assumed.
Definition: Attributor.h:3325
static bool isNonRelaxedAtomic(const Instruction *I)
Helper function used to determine whether an instruction is non-relaxed atomic.
static bool isNoSyncIntrinsic(const Instruction *I)
Helper function specific for intrinsics which are potentially volatile.
An access description.
Definition: Attributor.h:5327
bool isWrittenValueUnknown() const
Return true if the value written cannot be determined at all.
Definition: Attributor.h:5433
std::optional< Value * > getContent() const
Return the written value which can be llvm::null if it is not yet determined.
Definition: Attributor.h:5452
bool isWriteOrAssumption() const
Return true if this is a write access.
Definition: Attributor.h:5403
bool isRead() const
Return true if this is a read access.
Definition: Attributor.h:5397
Value * getWrittenValue() const
Return the value writen, if any.
Definition: Attributor.h:5444
Instruction * getLocalInst() const
Return the instruction that causes the access with respect to the local scope of the associated attri...
Definition: Attributor.h:5424
Instruction * getRemoteInst() const
Return the actual instruction that causes the access.
Definition: Attributor.h:5427
bool isWrittenValueYetUndetermined() const
Return true if the value written is not known yet.
Definition: Attributor.h:5430
AccessKind getKind() const
Return the access kind.
Definition: Attributor.h:5394
An abstract interface for struct information.
Definition: Attributor.h:5148
static Value * getSingleValue(Attributor &A, const AbstractAttribute &AA, const IRPosition &IRP, SmallVectorImpl< AA::ValueAndContext > &Values)
Extract the single value in Values if any.
An abstract attribute for getting all assumption underlying objects.
Definition: Attributor.h:5570
Helper to represent an access offset and size, with logic to deal with uncertainty and check for over...
Definition: Attributor.h:226
bool offsetOrSizeAreUnknown() const
Return true if offset or size are unknown.
Definition: Attributor.h:235
Value * getValue() const
Definition: Attributor.h:181
const Instruction * getCtxI() const
Definition: Attributor.h:182
Base struct for all "concrete attribute" deductions.
Definition: Attributor.h:3108
ChangeStatus update(Attributor &A)
Hook for the Attributor to trigger an update of the internal state.
Definition: Attributor.cpp:975
virtual ChangeStatus manifest(Attributor &A)
Hook for the Attributor to trigger the manifestation of the information represented by the abstract a...
Definition: Attributor.h:3182
virtual void printWithDeps(raw_ostream &OS) const
virtual StateType & getState()=0
Return the internal abstract state for inspection.
virtual const std::string getName() const =0
This function should return the name of the AbstractAttribute.
virtual ~AbstractAttribute()=default
Virtual destructor.
virtual const std::string getAsStr() const =0
This function should return the "summarized" assumed state as string.
void print(raw_ostream &OS) const override
Helper functions, for debug purposes only.
virtual bool isQueryAA() const
A query AA is always scheduled as long as we do updates because it does lazy computation that cannot ...
Definition: Attributor.h:3140
virtual ChangeStatus updateImpl(Attributor &A)=0
The actual update/transfer function which has to be implemented by the derived classes.
virtual void trackStatistics() const =0
Hook to enable custom statistic tracking, called after manifest that resulted in a change if statisti...
const IRPosition & getIRPosition() const
Return an IR position, see struct IRPosition.
Definition: Attributor.h:3147
An interface to query the internal state of an abstract attribute.
Definition: Attributor.h:2428
virtual ChangeStatus indicatePessimisticFixpoint()=0
Indicate that the abstract state should converge to the pessimistic state.
virtual bool isAtFixpoint() const =0
Return if this abstract state is fixed, thus does not need to be updated if information changes as it...
virtual bool isValidState() const =0
Return if this abstract state is in a valid state.
virtual ChangeStatus indicateOptimisticFixpoint()=0
Indicate that the abstract state should converge to the optimistic state.
Wrapper for FunctionAnalysisManager.
Definition: Attributor.h:1102
Analysis::Result * getAnalysis(const Function &F)
Definition: Attributor.h:1117
PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &CG, CGSCCUpdateResult &UR)
void populateAll() const
Force populate the entire call graph.
Definition: Attributor.h:4979
Configuration for the Attributor.
Definition: Attributor.h:1399
std::optional< unsigned > MaxFixpointIterations
Maximum number of iterations to run until fixpoint.
Definition: Attributor.h:1433
bool RewriteSignatures
Flag to determine if we rewrite function signatures.
Definition: Attributor.h:1416
bool DeleteFns
Flag to determine if we can delete functions or keep dead ones around.
Definition: Attributor.h:1413
CallGraphUpdater & CGUpdater
Helper to update an underlying call graph and to delete functions.
Definition: Attributor.h:1427
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Helper struct used in the communication between an abstract attribute (AA) that wants to change the s...
Definition: Attributor.h:2033
std::function< void(const ArgumentReplacementInfo &, AbstractCallSite, SmallVectorImpl< Value * > &)> ACSRepairCBTy
Abstract call site (ACS) repair callback type.
Definition: Attributor.h:2056
std::function< void(const ArgumentReplacementInfo &, Function &, Function::arg_iterator)> CalleeRepairCBTy
Callee repair callback type.
Definition: Attributor.h:2042
The fixpoint analysis framework that orchestrates the attribute deduction.
Definition: Attributor.h:1473
bool registerFunctionSignatureRewrite(Argument &Arg, ArrayRef< Type * > ReplacementTypes, ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB, ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB)
Register a rewrite for a function signature.
bool isModulePass() const
Return true if this is a module pass, false otherwise.
Definition: Attributor.h:1715
bool isValidFunctionSignatureRewrite(Argument &Arg, ArrayRef< Type * > ReplacementTypes)
Check if we can rewrite a function signature.
bool checkForAllInstructions(function_ref< bool(Instruction &)> Pred, const Function *Fn, const AbstractAttribute &QueryingAA, const ArrayRef< unsigned > &Opcodes, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, bool CheckPotentiallyDead=false)
Check Pred on all instructions in Fn with an opcode present in Opcodes.
static bool isInternalizable(Function &F)
Returns true if the function F can be internalized.
bool isRunOn(Function &Fn) const
Return true if we derive attributes for Fn.
Definition: Attributor.h:1718
bool isAssumedDead(const AbstractAttribute &AA, const AAIsDead *LivenessAA, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, DepClassTy DepClass=DepClassTy::OPTIONAL)
Return true if AA (or its context instruction) is assumed dead.
void recordDependence(const AbstractAttribute &FromAA, const AbstractAttribute &ToAA, DepClassTy DepClass)
Explicitly record a dependence from FromAA to ToAA, that is if FromAA changes ToAA should be updated ...
static void createShallowWrapper(Function &F)
Create a shallow wrapper for F such that F has internal linkage afterwards.
bool checkForAllReturnedValuesAndReturnInsts(function_ref< bool(Value &, const SmallSetVector< ReturnInst *, 4 > &)> Pred, const AbstractAttribute &QueryingAA)
Check Pred on all values potentially returned by F.
std::optional< Value * > getAssumedSimplified(const IRPosition &IRP, const AbstractAttribute &AA, bool &UsedAssumedInformation, AA::ValueScope S)
If V is assumed simplified, return it, if it is unclear yet, return std::nullopt, otherwise return nu...
Definition: Attributor.h:1846
static Function * internalizeFunction(Function &F, bool Force=false)
Make another copy of the function F such that the copied version has internal linkage afterwards and ...
bool checkForAllReadWriteInstructions(function_ref< bool(Instruction &)> Pred, AbstractAttribute &QueryingAA, bool &UsedAssumedInformation)
Check Pred on all Read/Write instructions.
std::optional< Constant * > getAssumedConstant(const IRPosition &IRP, const AbstractAttribute &AA, bool &UsedAssumedInformation)
If IRP is assumed to be a constant, return it, if it is unclear yet, return std::nullopt,...
InformationCache & getInfoCache()
Return the internal information cache.
Definition: Attributor.h:1712
std::optional< Value * > translateArgumentToCallSiteContent(std::optional< Value * > V, CallBase &CB, const AbstractAttribute &AA, bool &UsedAssumedInformation)
Translate V from the callee context into the call site context.
bool checkForAllReturnedValues(function_ref< bool(Value &)> Pred, const AbstractAttribute &QueryingAA)
Check Pred on all values potentially returned by the function associated with QueryingAA.
bool checkForAllUses(function_ref< bool(const Use &, bool &)> Pred, const AbstractAttribute &QueryingAA, const Value &V, bool CheckBBLivenessOnly=false, DepClassTy LivenessDepClass=DepClassTy::OPTIONAL, bool IgnoreDroppableUses=true, function_ref< bool(const Use &OldU, const Use &NewU)> EquivalentUseCB=nullptr)
Check Pred on all (transitive) uses of V.
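A sketch of the checkForAllUses visitor pattern, assuming A is the Attributor, *this the querying attribute, and V a pointer whose uses should be restricted to loads and GEP-derived loads; setting the Follow flag requests that the user's own uses be visited as well.
// Sketch: accept only loads of V and loads reached through GEPs of V.
auto UsePred = [&](const Use &U, bool &Follow) {
  User *Usr = U.getUser();
  if (isa<LoadInst>(Usr))
    return true;                 // A load of V (or of a GEP of V) is fine.
  if (isa<GetElementPtrInst>(Usr)) {
    Follow = true;               // Also visit the uses of the GEP itself.
    return true;
  }
  return false;                  // Any other user defeats the property.
};
if (!A.checkForAllUses(UsePred, *this, V))
  return indicatePessimisticFixpoint();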
void registerForUpdate(AbstractAttribute &AA)
Allows a query AA to request an update if a new query was received.
bool getAssumedSimplifiedValues(const IRPosition &IRP, const AbstractAttribute *AA, SmallVectorImpl< AA::ValueAndContext > &Values, AA::ValueScope S, bool &UsedAssumedInformation)
Try to simplify IRP in the scope S.
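A sketch of getAssumedSimplifiedValues, under the same assumptions as the earlier simplification example, that gathers every value the position may take in the intraprocedural scope.
// Sketch: collect all potential values of V in the intraprocedural scope.
SmallVector<AA::ValueAndContext> Values;
bool UsedAssumedInformation = false;
if (!A.getAssumedSimplifiedValues(IRPosition::value(V), this, Values,
                                  AA::Intraprocedural, UsedAssumedInformation))
  return indicatePessimisticFixpoint();
for (const AA::ValueAndContext &VAC : Values) {
  // VAC.getValue() is one potential value of V, at context VAC.getCtxI().
}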
void identifyDefaultAbstractAttributes(Function &F)
Determine opportunities to derive 'default' attributes in F and create abstract attribute objects for...
std::function< bool(Attributor &, const AbstractAttribute *)> VirtualUseCallbackTy
Definition: Attributor.h:1899
ChangeStatus run()
Run the analyses until a fixpoint is reached or enforced (timeout).
static bool internalizeFunctions(SmallPtrSetImpl< Function * > &FnSet, DenseMap< Function *, Function * > &FnMap)
Make copies of each function in the set FnSet such that the copied version has internal linkage after...
bool checkForAllCallSites(function_ref< bool(AbstractCallSite)> Pred, const AbstractAttribute &QueryingAA, bool RequireAllCallSites, bool &UsedAssumedInformation)
Check Pred on all function call sites.
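A hedged checkForAllCallSites example that requires every visible call site of the associated function to pass a known, non-null constant as its first argument (A and *this as before; the argument index 0 is arbitrary).
// Sketch: demand a non-null constant first argument at all call sites.
bool UsedAssumedInformation = false;
auto CallSitePred = [&](AbstractCallSite ACS) {
  Value *ArgOp = ACS.getCallArgOperand(0);
  return ArgOp && isa<Constant>(ArgOp) && !isa<ConstantPointerNull>(ArgOp);
};
if (!A.checkForAllCallSites(CallSitePred, *this,
                            /*RequireAllCallSites=*/true,
                            UsedAssumedInformation))
  return indicatePessimisticFixpoint();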
bool isKnown(base_t BitsEncoding) const
Return true if the bits set in BitsEncoding are "known bits".
Definition: Attributor.h:2576
Support structure for SCC passes to communicate updates to the call graph back to the CGSCC pass manager...
static std::string getNodeLabel(const AADepGraphNode *Node, const AADepGraph *DG)
DOTGraphTraits - Template class that can be specialized to customize how graphs are converted to 'dot...
DefaultDOTGraphTraits - This class provides the default implementations of all of the DOTGraphTraits ...
An information struct used to provide DenseMap with the various necessary components for a given valu...
Definition: DenseMapInfo.h:51
static NodeRef DepGetVal(const DepTy &DT)
static ChildIteratorType child_end(NodeRef N)
static NodeRef getEntryNode(AADepGraphNode *DGN)
static ChildIteratorType child_begin(NodeRef N)
AADepGraphNode::DepSetTy::iterator ChildEdgeIteratorType
static NodeRef getEntryNode(AADepGraph *DG)
static nodes_iterator nodes_begin(AADepGraph *DG)
static nodes_iterator nodes_end(AADepGraph *DG)
static ChangeStatus manifestAttrs(Attributor &A, const IRPosition &IRP, const ArrayRef< Attribute > &DeducedAttrs, bool ForceReplace=false)
Definition: Attributor.cpp:991
Helper to describe and deal with positions in the LLVM-IR.
Definition: Attributor.h:560
Function * getAssociatedFunction() const
Return the associated function, if any.
Definition: Attributor.h:691
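A small sketch of how positions are created and queried. F (a Function) and CB (a CallBase) are assumptions, and the IRPosition::function factory is assumed to exist alongside the callsite_returned factory listed further down; getAssociatedFunction is the accessor documented above.
// Sketch: name two positions and ask for the function behind one of them.
const IRPosition FnPos = IRPosition::function(F);
const IRPosition RetPos = IRPosition::callsite_returned(CB);
if (Function *Callee = RetPos.getAssociatedFunction()) {
  // For a call-site-returned position the associated function is the callee.
}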
unsigned getAttrIdx() const
Return the index in the attribute list for this position.
Definition: Attributor.h:780
bool hasCallBaseContext() const
Check if the position has any call base context.
Definition: Attributor.h:900
static const IRPosition callsite_returned(const CallBase &CB)
Create a position describing the returned value of CB.