1//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements an interprocedural pass that deduces and/or propagates
10// attributes. This is done in an abstract interpretation style fixpoint
11// iteration. See the Attributor.h file comment and the class descriptions in
12// that file for more information.
13//
14//===----------------------------------------------------------------------===//
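//
// The deduction below is driven as a fixpoint iteration: abstract attributes
// (AAs) are seeded for IR positions, updated until they stabilize or the
// iteration budget (see -attributor-max-iterations below) is exhausted, and
// finally manifested into IR attributes. The pass is exposed to the new pass
// manager as 'attributor' (module) and 'attributor-cgscc' (CGSCC), e.g. via
// `opt -passes=attributor`.
//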
15
16#include "llvm/Transforms/IPO/Attributor.h"
17
18#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/Statistic.h"
29#include "llvm/IR/Attributes.h"
30#include "llvm/IR/Constant.h"
32#include "llvm/IR/Constants.h"
33#include "llvm/IR/DataLayout.h"
34#include "llvm/IR/GlobalValue.h"
36#include "llvm/IR/Instruction.h"
39#include "llvm/IR/LLVMContext.h"
40#include "llvm/IR/ValueHandle.h"
43#include "llvm/Support/Debug.h"
47#include "llvm/Support/ModRef.h"
52#include <cstdint>
53#include <memory>
54
55#ifdef EXPENSIVE_CHECKS
56#include "llvm/IR/Verifier.h"
57#endif
58
59#include <cassert>
60#include <optional>
61#include <string>
62
63using namespace llvm;
64
65#define DEBUG_TYPE "attributor"
66#define VERBOSE_DEBUG_TYPE DEBUG_TYPE "-verbose"
67
68DEBUG_COUNTER(ManifestDBGCounter, "attributor-manifest",
69 "Determine what attributes are manifested in the IR");
70
71STATISTIC(NumFnDeleted, "Number of function deleted");
72STATISTIC(NumFnWithExactDefinition,
73 "Number of functions with exact definitions");
74STATISTIC(NumFnWithoutExactDefinition,
75 "Number of functions without exact definitions");
76STATISTIC(NumFnShallowWrappersCreated, "Number of shallow wrappers created");
77STATISTIC(NumAttributesTimedOut,
78 "Number of abstract attributes timed out before fixpoint");
79STATISTIC(NumAttributesValidFixpoint,
80 "Number of abstract attributes in a valid fixpoint state");
81STATISTIC(NumAttributesManifested,
82 "Number of abstract attributes manifested in IR");
83
84// TODO: Determine a good default value.
85//
86// In the LLVM test suite and SPEC2006, 32 does not seem to induce compile
87// time overhead (when run with the first 5 abstract attributes). The results also indicate
88// that we never reach 32 iterations but always find a fixpoint sooner.
89//
90// This will become more evolved once we perform two interleaved fixpoint
91// iterations: bottom-up and top-down.
92static cl::opt<unsigned>
93 SetFixpointIterations("attributor-max-iterations", cl::Hidden,
94 cl::desc("Maximal number of fixpoint iterations."),
95 cl::init(32));
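// For example, to lower the iteration budget on the command line (assuming
// the pass is run through `opt`):
//   opt -passes=attributor -attributor-max-iterations=16 input.ll -S -o out.ll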
96
97static cl::opt<unsigned>
98 MaxSpecializationPerCB("attributor-max-specializations-per-call-base",
99 cl::Hidden,
100 cl::desc("Maximal number of callees specialized for "
101 "a call base"),
102 cl::init(UINT32_MAX));
103
105 "attributor-max-initialization-chain-length", cl::Hidden,
106 cl::desc(
107 "Maximal number of chained initializations (to avoid stack overflows)"),
108 cl::location(MaxInitializationChainLength), cl::init(1024));
109unsigned llvm::MaxInitializationChainLength;
110
111static cl::opt<bool> AnnotateDeclarationCallSites(
112 "attributor-annotate-decl-cs", cl::Hidden,
113 cl::desc("Annotate call sites of function declarations."), cl::init(false));
114
115static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
116 cl::init(true), cl::Hidden);
117
118static cl::opt<bool>
119 AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden,
120 cl::desc("Allow the Attributor to create shallow "
121 "wrappers for non-exact definitions."),
122 cl::init(false));
123
124static cl::opt<bool>
125 AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden,
126 cl::desc("Allow the Attributor to use IP information "
127 "derived from non-exact functions via cloning"),
128 cl::init(false));
129
130// These options can only be used in debug builds.
131#ifndef NDEBUG
132static cl::list<std::string>
133 SeedAllowList("attributor-seed-allow-list", cl::Hidden,
134 cl::desc("Comma separated list of attribute names that are "
135 "allowed to be seeded."),
136 cl::CommaSeparated);
137
138static cl::list<std::string> FunctionSeedAllowList(
139 "attributor-function-seed-allow-list", cl::Hidden,
140 cl::desc("Comma separated list of function names that are "
141 "allowed to be seeded."),
142 cl::CommaSeparated);
143#endif
144
145static cl::opt<bool>
146 DumpDepGraph("attributor-dump-dep-graph", cl::Hidden,
147 cl::desc("Dump the dependency graph to dot files."),
148 cl::init(false));
149
151 "attributor-depgraph-dot-filename-prefix", cl::Hidden,
152 cl::desc("The prefix used for the CallGraph dot file names."));
153
154static cl::opt<bool> ViewDepGraph("attributor-view-dep-graph", cl::Hidden,
155 cl::desc("View the dependency graph."),
156 cl::init(false));
157
158static cl::opt<bool> PrintDependencies("attributor-print-dep", cl::Hidden,
159 cl::desc("Print attribute dependencies"),
160 cl::init(false));
161
163 "attributor-enable-call-site-specific-deduction", cl::Hidden,
164 cl::desc("Allow the Attributor to do call site specific analysis"),
165 cl::init(false));
166
167static cl::opt<bool>
168 PrintCallGraph("attributor-print-call-graph", cl::Hidden,
169 cl::desc("Print Attributor's internal call graph"),
170 cl::init(false));
171
172static cl::opt<bool> SimplifyAllLoads("attributor-simplify-all-loads",
173 cl::Hidden,
174 cl::desc("Try to simplify all loads."),
175 cl::init(true));
176
178 "attributor-assume-closed-world", cl::Hidden,
179 cl::desc("Should a closed world be assumed, or not. Default if not set."));
180
181/// Logic operators for the change status enum class.
182///
183///{
184ChangeStatus llvm::operator|(ChangeStatus L, ChangeStatus R) {
185 return L == ChangeStatus::CHANGED ? L : R;
186}
187ChangeStatus &llvm::operator|=(ChangeStatus &L, ChangeStatus R) {
188 L = L | R;
189 return L;
190}
191ChangeStatus llvm::operator&(ChangeStatus L, ChangeStatus R) {
192 return L == ChangeStatus::UNCHANGED ? L : R;
193}
194ChangeStatus &llvm::operator&=(ChangeStatus &L, ChangeStatus R) {
195 L = L & R;
196 return L;
197}
198///}
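// Note: CHANGED is absorbing for operator| and UNCHANGED is absorbing for
// operator&, i.e. (CHANGED | UNCHANGED) == CHANGED and
// (CHANGED & UNCHANGED) == UNCHANGED. Update routines typically accumulate
// their result via |= so that any changed sub-step marks the whole step as
// changed.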
199
200bool AA::isGPU(const Module &M) {
201 Triple T(M.getTargetTriple());
202 return T.isAMDGPU() || T.isNVPTX();
203}
204
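/// Return true if \p I cannot be used to communicate with or synchronize
/// another thread: call bases are nosync if they carry the attribute, are
/// non-convergent and do not touch memory, are known nosync intrinsics, or
/// their call site position is assumed nosync; other instructions are nosync
/// unless they are volatile or non-relaxed atomics.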
205bool AA::isNoSyncInst(Attributor &A, const Instruction &I,
206 const AbstractAttribute &QueryingAA) {
207 // We are looking for volatile instructions or non-relaxed atomics.
208 if (const auto *CB = dyn_cast<CallBase>(&I)) {
209 if (CB->hasFnAttr(Attribute::NoSync))
210 return true;
211
212 // Non-convergent and readnone imply nosync.
213 if (!CB->isConvergent() && !CB->mayReadOrWriteMemory())
214 return true;
215
216 if (AANoSync::isNoSyncIntrinsic(&I))
217 return true;
218
219 bool IsKnownNoSync;
220 return AA::hasAssumedIRAttr<Attribute::NoSync>(
221 A, &QueryingAA, IRPosition::callsite_function(*CB),
222 DepClassTy::OPTIONAL, IsKnownNoSync);
223 }
224
225 if (!I.mayReadOrWriteMemory())
226 return true;
227
228 return !I.isVolatile() && !AANoSync::isNonRelaxedAtomic(&I);
229}
230
231bool AA::isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
232 const Value &V, bool ForAnalysisOnly) {
233 // TODO: See the AAInstanceInfo class comment.
234 if (!ForAnalysisOnly)
235 return false;
236 auto *InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
237 QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
238 return InstanceInfoAA && InstanceInfoAA->isAssumedUniqueForAnalysis();
239}
240
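/// Determine the value \p Obj is known to contain before any store: undef for
/// allocas, the initial value of recognized allocations (via TLI), or a
/// constant folded from a global variable's initializer, optionally
/// restricted to the byte range described by \p RangePtr. Returns nullptr if
/// no such value can be established.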
241Constant *
242AA::getInitialValueForObj(Attributor &A, const AbstractAttribute &QueryingAA,
243 Value &Obj, Type &Ty, const TargetLibraryInfo *TLI,
244 const DataLayout &DL, AA::RangeTy *RangePtr) {
245 if (isa<AllocaInst>(Obj))
246 return UndefValue::get(&Ty);
247 if (Constant *Init = getInitialValueOfAllocation(&Obj, TLI, &Ty))
248 return Init;
249 auto *GV = dyn_cast<GlobalVariable>(&Obj);
250 if (!GV)
251 return nullptr;
252
253 bool UsedAssumedInformation = false;
254 Constant *Initializer = nullptr;
255 if (A.hasGlobalVariableSimplificationCallback(*GV)) {
256 auto AssumedGV = A.getAssumedInitializerFromCallBack(
257 *GV, &QueryingAA, UsedAssumedInformation);
258 Initializer = *AssumedGV;
259 if (!Initializer)
260 return nullptr;
261 } else {
262 if (!GV->hasLocalLinkage() &&
263 (GV->isInterposable() || !(GV->isConstant() && GV->hasInitializer())))
264 return nullptr;
265 if (!GV->hasInitializer())
266 return UndefValue::get(&Ty);
267
268 if (!Initializer)
269 Initializer = GV->getInitializer();
270 }
271
272 if (RangePtr && !RangePtr->offsetOrSizeAreUnknown()) {
273 APInt Offset = APInt(64, RangePtr->Offset);
274 return ConstantFoldLoadFromConst(Initializer, &Ty, Offset, DL);
275 }
276
277 return ConstantFoldLoadFromUniformValue(Initializer, &Ty, DL);
278}
279
280bool AA::isValidInScope(const Value &V, const Function *Scope) {
281 if (isa<Constant>(V))
282 return true;
283 if (auto *I = dyn_cast<Instruction>(&V))
284 return I->getFunction() == Scope;
285 if (auto *A = dyn_cast<Argument>(&V))
286 return A->getParent() == Scope;
287 return false;
288}
289
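/// Return true if the value in \p VAC is usable at its context instruction:
/// constants always are, arguments must live in the context function, and
/// instructions must additionally dominate the context (falling back to a
/// same-block forward scan if no dominator tree is cached).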
290bool AA::isValidAtPosition(const AA::ValueAndContext &VAC,
291 InformationCache &InfoCache) {
292 if (isa<Constant>(VAC.getValue()) || VAC.getValue() == VAC.getCtxI())
293 return true;
294 const Function *Scope = nullptr;
295 const Instruction *CtxI = VAC.getCtxI();
296 if (CtxI)
297 Scope = CtxI->getFunction();
298 if (auto *A = dyn_cast<Argument>(VAC.getValue()))
299 return A->getParent() == Scope;
300 if (auto *I = dyn_cast<Instruction>(VAC.getValue())) {
301 if (I->getFunction() == Scope) {
302 if (const DominatorTree *DT =
303 InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
304 *Scope))
305 return DT->dominates(I, CtxI);
306 // Local dominance check mostly for the old PM passes.
307 if (CtxI && I->getParent() == CtxI->getParent())
308 return llvm::any_of(
309 make_range(I->getIterator(), I->getParent()->end()),
310 [&](const Instruction &AfterI) { return &AfterI == CtxI; });
311 }
312 }
313 return false;
314}
315
316Value *AA::getWithType(Value &V, Type &Ty) {
317 if (V.getType() == &Ty)
318 return &V;
319 if (isa<PoisonValue>(V))
320 return PoisonValue::get(&Ty);
321 if (isa<UndefValue>(V))
322 return UndefValue::get(&Ty);
323 if (auto *C = dyn_cast<Constant>(&V)) {
324 if (C->isNullValue())
325 return Constant::getNullValue(&Ty);
326 if (C->getType()->isPointerTy() && Ty.isPointerTy())
327 return ConstantExpr::getPointerCast(C, &Ty);
328 if (C->getType()->getPrimitiveSizeInBits() >= Ty.getPrimitiveSizeInBits()) {
329 if (C->getType()->isIntegerTy() && Ty.isIntegerTy())
330 return ConstantExpr::getTrunc(C, &Ty, /* OnlyIfReduced */ true);
331 if (C->getType()->isFloatingPointTy() && Ty.isFloatingPointTy())
332 return ConstantFoldCastInstruction(Instruction::FPTrunc, C, &Ty);
333 }
334 }
335 return nullptr;
336}
337
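/// Combine two elements of the simplified-value lattice used by
/// AAPotentialValues-style queries: std::nullopt means "no value yet"
/// (optimistic), nullptr means "too many values" (pessimistic), undef is
/// compatible with anything, and two concrete values only combine if they
/// agree up to a type adjustment via AA::getWithType.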
338std::optional<Value *>
339AA::combineOptionalValuesInAAValueLatice(const std::optional<Value *> &A,
340 const std::optional<Value *> &B,
341 Type *Ty) {
342 if (A == B)
343 return A;
344 if (!B)
345 return A;
346 if (*B == nullptr)
347 return nullptr;
348 if (!A)
349 return Ty ? getWithType(**B, *Ty) : nullptr;
350 if (*A == nullptr)
351 return nullptr;
352 if (!Ty)
353 Ty = (*A)->getType();
354 if (isa_and_nonnull<UndefValue>(*A))
355 return getWithType(**B, *Ty);
356 if (isa<UndefValue>(*B))
357 return A;
358 if (*A && *B && *A == getWithType(**B, *Ty))
359 return A;
360 return nullptr;
361}
362
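/// Collect the values a load may observe (IsLoad == true) or the instructions
/// that may read a stored value (IsLoad == false) for the memory instruction
/// \p I. All underlying objects of the pointer operand are visited and their
/// interfering accesses are enumerated through AAPointerInfo; the function
/// bails out (returns false) as soon as an object or access cannot be handled
/// precisely, so the provided containers are only modified on success.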
363template <bool IsLoad, typename Ty>
364static bool getPotentialCopiesOfMemoryValue(
365 Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
366 SmallSetVector<Instruction *, 4> *PotentialValueOrigins,
367 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
368 bool OnlyExact) {
369 LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
370 << " (only exact: " << OnlyExact << ")\n";);
371
372 Value &Ptr = *I.getPointerOperand();
373 // Containers to remember the pointer infos and new copies while we are not
374 // sure that we can find all of them. If we abort we want to avoid spurious
375 // dependences and potential copies in the provided container.
376 SmallVector<const AAPointerInfo *> PIs;
377 SmallSetVector<Value *, 8> NewCopies;
378 SmallSetVector<Instruction *, 8> NewCopyOrigins;
379
380 const auto *TLI =
381 A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());
382
383 auto Pred = [&](Value &Obj) {
384 LLVM_DEBUG(dbgs() << "Visit underlying object " << Obj << "\n");
385 if (isa<UndefValue>(&Obj))
386 return true;
387 if (isa<ConstantPointerNull>(&Obj)) {
388 // A null pointer access can be undefined but any offset from null may
389 // be OK. We do not try to optimize the latter.
390 if (!NullPointerIsDefined(I.getFunction(),
391 Ptr.getType()->getPointerAddressSpace()) &&
392 A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation,
393 AA::Interprocedural) == &Obj)
394 return true;
395 LLVM_DEBUG(
396 dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
397 return false;
398 }
399 // TODO: Use assumed noalias return.
400 if (!isa<AllocaInst>(&Obj) && !isa<GlobalVariable>(&Obj) &&
401 !(IsLoad ? isAllocationFn(&Obj, TLI) : isNoAliasCall(&Obj))) {
402 LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << Obj
403 << "\n";);
404 return false;
405 }
406 if (auto *GV = dyn_cast<GlobalVariable>(&Obj))
407 if (!GV->hasLocalLinkage() &&
408 !(GV->isConstant() && GV->hasInitializer())) {
409 LLVM_DEBUG(dbgs() << "Underlying object is global with external "
410 "linkage, not supported yet: "
411 << Obj << "\n";);
412 return false;
413 }
414
415 bool NullOnly = true;
416 bool NullRequired = false;
417 auto CheckForNullOnlyAndUndef = [&](std::optional<Value *> V,
418 bool IsExact) {
419 if (!V || *V == nullptr)
420 NullOnly = false;
421 else if (isa<UndefValue>(*V))
422 /* No op */;
423 else if (isa<Constant>(*V) && cast<Constant>(*V)->isNullValue())
424 NullRequired = !IsExact;
425 else
426 NullOnly = false;
427 };
428
429 auto AdjustWrittenValueType = [&](const AAPointerInfo::Access &Acc,
430 Value &V) {
431 Value *AdjV = AA::getWithType(V, *I.getType());
432 if (!AdjV) {
433 LLVM_DEBUG(dbgs() << "Underlying object written but stored value "
434 "cannot be converted to read type: "
435 << *Acc.getRemoteInst() << " : " << *I.getType()
436 << "\n";);
437 }
438 return AdjV;
439 };
440
441 auto SkipCB = [&](const AAPointerInfo::Access &Acc) {
442 if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
443 return true;
444 if (IsLoad) {
445 if (Acc.isWrittenValueYetUndetermined())
446 return true;
447 if (PotentialValueOrigins && !isa<AssumeInst>(Acc.getRemoteInst()))
448 return false;
449 if (!Acc.isWrittenValueUnknown())
450 if (Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue()))
451 if (NewCopies.count(V)) {
452 NewCopyOrigins.insert(Acc.getRemoteInst());
453 return true;
454 }
455 if (auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst()))
456 if (Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand()))
457 if (NewCopies.count(V)) {
458 NewCopyOrigins.insert(Acc.getRemoteInst());
459 return true;
460 }
461 }
462 return false;
463 };
464
465 auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
466 if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
467 return true;
468 if (IsLoad && Acc.isWrittenValueYetUndetermined())
469 return true;
470 CheckForNullOnlyAndUndef(Acc.getContent(), IsExact);
471 if (OnlyExact && !IsExact && !NullOnly &&
472 !isa_and_nonnull<UndefValue>(Acc.getWrittenValue())) {
473 LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
474 << ", abort!\n");
475 return false;
476 }
477 if (NullRequired && !NullOnly) {
478 LLVM_DEBUG(dbgs() << "Required all `null` accesses due to non exact "
479 "one, however found non-null one: "
480 << *Acc.getRemoteInst() << ", abort!\n");
481 return false;
482 }
483 if (IsLoad) {
484 assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
485 if (!Acc.isWrittenValueUnknown()) {
486 Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue());
487 if (!V)
488 return false;
489 NewCopies.insert(V);
490 if (PotentialValueOrigins)
491 NewCopyOrigins.insert(Acc.getRemoteInst());
492 return true;
493 }
494 auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
495 if (!SI) {
496 LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
497 "instruction not supported yet: "
498 << *Acc.getRemoteInst() << "\n";);
499 return false;
500 }
501 Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand());
502 if (!V)
503 return false;
504 NewCopies.insert(V);
505 if (PotentialValueOrigins)
506 NewCopyOrigins.insert(SI);
507 } else {
508 assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
509 auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
510 if (!LI && OnlyExact) {
511 LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
512 "instruction not supported yet: "
513 << *Acc.getRemoteInst() << "\n";);
514 return false;
515 }
516 NewCopies.insert(Acc.getRemoteInst());
517 }
518 return true;
519 };
520
521 // If the value has been written to we don't need the initial value of the
522 // object.
523 bool HasBeenWrittenTo = false;
524
524
525 AA::RangeTy Range;
526 auto *PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(Obj),
527 DepClassTy::NONE);
528 if (!PI || !PI->forallInterferingAccesses(
529 A, QueryingAA, I,
530 /* FindInterferingWrites */ IsLoad,
531 /* FindInterferingReads */ !IsLoad, CheckAccess,
532 HasBeenWrittenTo, Range, SkipCB)) {
533 LLVM_DEBUG(
534 dbgs()
535 << "Failed to verify all interfering accesses for underlying object: "
536 << Obj << "\n");
537 return false;
538 }
539
540 if (IsLoad && !HasBeenWrittenTo && !Range.isUnassigned()) {
541 const DataLayout &DL = A.getDataLayout();
542 Value *InitialValue = AA::getInitialValueForObj(
543 A, QueryingAA, Obj, *I.getType(), TLI, DL, &Range);
544 if (!InitialValue) {
545 LLVM_DEBUG(dbgs() << "Could not determine required initial value of "
546 "underlying object, abort!\n");
547 return false;
548 }
549 CheckForNullOnlyAndUndef(InitialValue, /* IsExact */ true);
550 if (NullRequired && !NullOnly) {
551 LLVM_DEBUG(dbgs() << "Non exact access but initial value that is not "
552 "null or undef, abort!\n");
553 return false;
554 }
555
556 NewCopies.insert(InitialValue);
557 if (PotentialValueOrigins)
558 NewCopyOrigins.insert(nullptr);
559 }
560
561 PIs.push_back(PI);
562
563 return true;
564 };
565
566 const auto *AAUO = A.getAAFor<AAUnderlyingObjects>(
567 QueryingAA, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
568 if (!AAUO || !AAUO->forallUnderlyingObjects(Pred)) {
569 LLVM_DEBUG(
570 dbgs() << "Underlying objects stored into could not be determined\n";);
571 return false;
572 }
573
574 // Only if we were successful collecting all potential copies do we record
575 // dependences (on not-yet-fixed AAPointerInfo AAs). We also only then modify the
576 // given PotentialCopies container.
577 for (const auto *PI : PIs) {
578 if (!PI->getState().isAtFixpoint())
579 UsedAssumedInformation = true;
580 A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
581 }
582 PotentialCopies.insert(NewCopies.begin(), NewCopies.end());
583 if (PotentialValueOrigins)
584 PotentialValueOrigins->insert(NewCopyOrigins.begin(), NewCopyOrigins.end());
585
586 return true;
587}
588
589bool AA::getPotentiallyLoadedValues(
590 Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
591 SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
592 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
593 bool OnlyExact) {
594 return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
595 A, LI, PotentialValues, &PotentialValueOrigins, QueryingAA,
596 UsedAssumedInformation, OnlyExact);
597}
598
599bool AA::getPotentialCopiesOfStoredValue(
600 Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
601 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
602 bool OnlyExact) {
603 return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
604 A, SI, PotentialCopies, nullptr, QueryingAA, UsedAssumedInformation,
605 OnlyExact);
606}
607
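/// Shared helper for AA::isAssumedReadOnly and AA::isAssumedReadNone: first
/// consult the IR attributes (readnone/readonly), then AAMemoryLocation and
/// AAMemoryBehavior. \p IsKnown is set to true only if the answer does not
/// depend on assumed (not yet fixed) information.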
608static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP,
609 const AbstractAttribute &QueryingAA,
610 bool RequireReadNone, bool &IsKnown) {
611 if (RequireReadNone) {
612 if (AA::hasAssumedIRAttr<Attribute::ReadNone>(
613 A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
614 /* IgnoreSubsumingPositions */ true))
615 return true;
616 } else if (AA::hasAssumedIRAttr<Attribute::ReadOnly>(
617 A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
618 /* IgnoreSubsumingPositions */ true))
619 return true;
620
621 if (IRP.getPositionKind() == IRPosition::IRP_FUNCTION ||
622 IRP.getPositionKind() == IRPosition::IRP_CALL_SITE) {
623 const auto *MemLocAA =
624 A.getAAFor<AAMemoryLocation>(QueryingAA, IRP, DepClassTy::NONE);
625 if (MemLocAA && MemLocAA->isAssumedReadNone()) {
626 IsKnown = MemLocAA->isKnownReadNone();
627 if (!IsKnown)
628 A.recordDependence(*MemLocAA, QueryingAA, DepClassTy::OPTIONAL);
629 return true;
630 }
631 }
632
633 const auto *MemBehaviorAA =
634 A.getAAFor<AAMemoryBehavior>(QueryingAA, IRP, DepClassTy::NONE);
635 if (MemBehaviorAA &&
636 (MemBehaviorAA->isAssumedReadNone() ||
637 (!RequireReadNone && MemBehaviorAA->isAssumedReadOnly()))) {
638 IsKnown = RequireReadNone ? MemBehaviorAA->isKnownReadNone()
639 : MemBehaviorAA->isKnownReadOnly();
640 if (!IsKnown)
641 A.recordDependence(*MemBehaviorAA, QueryingAA, DepClassTy::OPTIONAL);
642 return true;
643 }
644
645 return false;
646}
647
648bool AA::isAssumedReadOnly(Attributor &A, const IRPosition &IRP,
649 const AbstractAttribute &QueryingAA, bool &IsKnown) {
650 return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
651 /* RequireReadNone */ false, IsKnown);
652}
653bool AA::isAssumedReadNone(Attributor &A, const IRPosition &IRP,
654 const AbstractAttribute &QueryingAA, bool &IsKnown) {
655 return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
656 /* RequireReadNone */ true, IsKnown);
657}
658
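/// Shared implementation for the AA::isPotentiallyReachable overloads below:
/// determine whether \p ToI (or, if it is null, any point in \p ToFn) might
/// be reached when execution starts at \p FromI, using intra- and
/// inter-procedural reachability AAs and, if \p GoBackwardsCB permits it,
/// stepping backwards through the call sites of the current function.
/// Conservatively answers "reachable" whenever this cannot be disproven.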
659static bool
660isPotentiallyReachable(Attributor &A, const Instruction &FromI,
661 const Instruction *ToI, const Function &ToFn,
662 const AbstractAttribute &QueryingAA,
663 const AA::InstExclusionSetTy *ExclusionSet,
664 std::function<bool(const Function &F)> GoBackwardsCB) {
665 LLVM_DEBUG({
666 dbgs() << "[AA] isPotentiallyReachable @" << ToFn.getName() << " from "
667 << FromI << " [GBCB: " << bool(GoBackwardsCB) << "][#ExS: "
668 << (ExclusionSet ? std::to_string(ExclusionSet->size()) : "none")
669 << "]\n";
670 if (ExclusionSet)
671 for (auto *ES : *ExclusionSet)
672 dbgs() << *ES << "\n";
673 });
674
675 // We know kernels (generally) cannot be called from within the module. Thus,
676 // for reachability we would need to step back from a kernel which would allow
677 // us to reach anything anyway. Even if a kernel is invoked from another
678 // kernel, values like allocas and shared memory are not accessible. We
679 // implicitly check for this situation to avoid costly lookups.
680 if (GoBackwardsCB && &ToFn != FromI.getFunction() &&
681 !GoBackwardsCB(*FromI.getFunction()) && ToFn.hasFnAttribute("kernel") &&
682 FromI.getFunction()->hasFnAttribute("kernel")) {
683 LLVM_DEBUG(dbgs() << "[AA] assume kernel cannot be reached from within the "
684 "module; success\n";);
685 return false;
686 }
687
688 // If we can go arbitrarily backwards we will eventually reach an entry point
689 // that can reach ToI. Only if a set of blocks through which we cannot go is
690 // provided, or once we track internal functions not accessible from the
691 // outside, it makes sense to perform backwards analysis in the absence of a
692 // GoBackwardsCB.
693 if (!GoBackwardsCB && !ExclusionSet) {
694 LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
695 << " is not checked backwards and does not have an "
696 "exclusion set, abort\n");
697 return true;
698 }
699
700 SmallPtrSet<const Instruction *, 8> Visited;
701 SmallVector<const Instruction *, 16> Worklist;
702 Worklist.push_back(&FromI);
703
704 while (!Worklist.empty()) {
705 const Instruction *CurFromI = Worklist.pop_back_val();
706 if (!Visited.insert(CurFromI).second)
707 continue;
708
709 const Function *FromFn = CurFromI->getFunction();
710 if (FromFn == &ToFn) {
711 if (!ToI)
712 return true;
713 LLVM_DEBUG(dbgs() << "[AA] check " << *ToI << " from " << *CurFromI
714 << " intraprocedurally\n");
715 const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
716 QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
717 bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
718 A, *CurFromI, *ToI, ExclusionSet);
719 LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " "
720 << (Result ? "can potentially " : "cannot ") << "reach "
721 << *ToI << " [Intra]\n");
722 if (Result)
723 return true;
724 }
725
726 bool Result = true;
727 if (!ToFn.isDeclaration() && ToI) {
728 const auto *ToReachabilityAA = A.getAAFor<AAIntraFnReachability>(
729 QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
730 const Instruction &EntryI = ToFn.getEntryBlock().front();
731 Result = !ToReachabilityAA || ToReachabilityAA->isAssumedReachable(
732 A, EntryI, *ToI, ExclusionSet);
733 LLVM_DEBUG(dbgs() << "[AA] Entry " << EntryI << " of @" << ToFn.getName()
734 << " " << (Result ? "can potentially " : "cannot ")
735 << "reach @" << *ToI << " [ToFn]\n");
736 }
737
738 if (Result) {
739 // The entry of the ToFn can reach the instruction ToI. Check if the
740 // current instruction is already known to reach the ToFn.
741 const auto *FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
742 QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
743 Result = !FnReachabilityAA || FnReachabilityAA->instructionCanReach(
744 A, *CurFromI, ToFn, ExclusionSet);
745 LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
746 << " " << (Result ? "can potentially " : "cannot ")
747 << "reach @" << ToFn.getName() << " [FromFn]\n");
748 if (Result)
749 return true;
750 }
751
752 // TODO: Check assumed nounwind.
753 const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
754 QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
755 auto ReturnInstCB = [&](Instruction &Ret) {
756 bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
757 A, *CurFromI, Ret, ExclusionSet);
758 LLVM_DEBUG(dbgs() << "[AA][Ret] " << *CurFromI << " "
759 << (Result ? "can potentially " : "cannot ") << "reach "
760 << Ret << " [Intra]\n");
761 return !Result;
762 };
763
764 // Check if we can reach returns.
765 bool UsedAssumedInformation = false;
766 if (A.checkForAllInstructions(ReturnInstCB, FromFn, &QueryingAA,
767 {Instruction::Ret}, UsedAssumedInformation)) {
768 LLVM_DEBUG(dbgs() << "[AA] No return is reachable, done\n");
769 continue;
770 }
771
772 if (!GoBackwardsCB) {
773 LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
774 << " is not checked backwards, abort\n");
775 return true;
776 }
777
778 // If we do not go backwards from the FromFn we are done here and so far we
779 // could not find a way to reach ToFn/ToI.
780 if (!GoBackwardsCB(*FromFn))
781 continue;
782
783 LLVM_DEBUG(dbgs() << "Stepping backwards to the call sites of @"
784 << FromFn->getName() << "\n");
785
786 auto CheckCallSite = [&](AbstractCallSite ACS) {
787 CallBase *CB = ACS.getInstruction();
788 if (!CB)
789 return false;
790
791 if (isa<InvokeInst>(CB))
792 return false;
793
794 Instruction *Inst = CB->getNextNonDebugInstruction();
795 Worklist.push_back(Inst);
796 return true;
797 };
798
799 Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
800 /* RequireAllCallSites */ true,
801 &QueryingAA, UsedAssumedInformation);
802 if (Result) {
803 LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
804 << " in @" << FromFn->getName()
805 << " failed, give up\n");
806 return true;
807 }
808
809 LLVM_DEBUG(dbgs() << "[AA] stepped back to call sites from " << *CurFromI
810 << " in @" << FromFn->getName()
811 << " worklist size is: " << Worklist.size() << "\n");
812 }
813 return false;
814}
815
816bool AA::isPotentiallyReachable(
817 Attributor &A, const Instruction &FromI, const Instruction &ToI,
818 const AbstractAttribute &QueryingAA,
819 const AA::InstExclusionSetTy *ExclusionSet,
820 std::function<bool(const Function &F)> GoBackwardsCB) {
821 const Function *ToFn = ToI.getFunction();
822 return ::isPotentiallyReachable(A, FromI, &ToI, *ToFn, QueryingAA,
823 ExclusionSet, GoBackwardsCB);
824}
825
826bool AA::isPotentiallyReachable(
827 Attributor &A, const Instruction &FromI, const Function &ToFn,
828 const AbstractAttribute &QueryingAA,
829 const AA::InstExclusionSetTy *ExclusionSet,
830 std::function<bool(const Function &F)> GoBackwardsCB) {
831 return ::isPotentiallyReachable(A, FromI, /* ToI */ nullptr, ToFn, QueryingAA,
832 ExclusionSet, GoBackwardsCB);
833}
834
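/// Return true if \p Obj is assumed to be visible only to the executing
/// thread: undef values, stack objects that are not captured (or stacks that
/// are not accessible by other threads at all), constant or thread_local
/// globals, and GPU local/constant address space memory qualify.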
835bool AA::isAssumedThreadLocalObject(Attributor &A, Value &Obj,
836 const AbstractAttribute &QueryingAA) {
837 if (isa<UndefValue>(Obj))
838 return true;
839 if (isa<AllocaInst>(Obj)) {
840 InformationCache &InfoCache = A.getInfoCache();
841 if (!InfoCache.stackIsAccessibleByOtherThreads()) {
842 LLVM_DEBUG(
843 dbgs() << "[AA] Object '" << Obj
844 << "' is thread local; stack objects are thread local.\n");
845 return true;
846 }
847 bool IsKnownNoCapture;
848 bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
849 A, &QueryingAA, IRPosition::value(Obj), DepClassTy::OPTIONAL,
850 IsKnownNoCapture);
851 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is "
852 << (IsAssumedNoCapture ? "" : "not") << " thread local; "
853 << (IsAssumedNoCapture ? "non-" : "")
854 << "captured stack object.\n");
855 return IsAssumedNoCapture;
856 }
857 if (auto *GV = dyn_cast<GlobalVariable>(&Obj)) {
858 if (GV->isConstant()) {
859 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
860 << "' is thread local; constant global\n");
861 return true;
862 }
863 if (GV->isThreadLocal()) {
864 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
865 << "' is thread local; thread local global\n");
866 return true;
867 }
868 }
869
870 if (A.getInfoCache().targetIsGPU()) {
871 if (Obj.getType()->getPointerAddressSpace() ==
872 (int)AA::GPUAddressSpace::Local) {
873 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
874 << "' is thread local; GPU local memory\n");
875 return true;
876 }
877 if (Obj.getType()->getPointerAddressSpace() ==
878 (int)AA::GPUAddressSpace::Constant) {
879 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
880 << "' is thread local; GPU constant memory\n");
881 return true;
882 }
883 }
884
885 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is not thread local\n");
886 return false;
887}
888
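/// Return true if the instruction \p I touches memory that other threads
/// might also access, i.e. memory whose accesses could be ordered by a
/// barrier. Locations whose underlying objects are all assumed thread-local
/// are not affected; unknown locations conservatively are. A second overload
/// below answers the same question for an explicit list of pointers.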
889bool AA::isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I,
890 const AbstractAttribute &QueryingAA) {
891 if (!I.mayHaveSideEffects() && !I.mayReadFromMemory())
892 return false;
893
894 SmallSetVector<const Value *, 4> Ptrs;
895
896 auto AddLocationPtr = [&](std::optional<MemoryLocation> Loc) {
897 if (!Loc || !Loc->Ptr) {
898 LLVM_DEBUG(
899 dbgs() << "[AA] Access to unknown location; -> requires barriers\n");
900 return false;
901 }
902 Ptrs.insert(Loc->Ptr);
903 return true;
904 };
905
906 if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&I)) {
907 if (!AddLocationPtr(MemoryLocation::getForDest(MI)))
908 return true;
909 if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(&I))
910 if (!AddLocationPtr(MemoryLocation::getForSource(MTI)))
911 return true;
912 } else if (!AddLocationPtr(MemoryLocation::getOrNone(&I)))
913 return true;
914
915 return isPotentiallyAffectedByBarrier(A, Ptrs.getArrayRef(), QueryingAA, &I);
916}
917
918bool AA::isPotentiallyAffectedByBarrier(Attributor &A,
919 ArrayRef<const Value *> Ptrs,
920 const AbstractAttribute &QueryingAA,
921 const Instruction *CtxI) {
922 for (const Value *Ptr : Ptrs) {
923 if (!Ptr) {
924 LLVM_DEBUG(dbgs() << "[AA] nullptr; -> requires barriers\n");
925 return true;
926 }
927
928 auto Pred = [&](Value &Obj) {
929 if (AA::isAssumedThreadLocalObject(A, Obj, QueryingAA))
930 return true;
931 LLVM_DEBUG(dbgs() << "[AA] Access to '" << Obj << "' via '" << *Ptr
932 << "'; -> requires barrier\n");
933 return false;
934 };
935
936 const auto *UnderlyingObjsAA = A.getAAFor<AAUnderlyingObjects>(
937 QueryingAA, IRPosition::value(*Ptr), DepClassTy::OPTIONAL);
938 if (!UnderlyingObjsAA || !UnderlyingObjsAA->forallUnderlyingObjects(Pred))
939 return true;
940 }
941 return false;
942}
943
944/// Return true if \p New is equal or worse than \p Old.
945static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
946 if (!Old.isIntAttribute())
947 return true;
948
949 return Old.getValueAsInt() >= New.getValueAsInt();
950}
951
952/// Return true if the information provided by \p Attr was added to the
953/// attribute set \p AttrSet. This is only the case if it was not already
954/// present in \p AttrSet.
955static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
956 AttributeSet AttrSet, bool ForceReplace,
957 AttrBuilder &AB) {
958
959 if (Attr.isEnumAttribute()) {
960 Attribute::AttrKind Kind = Attr.getKindAsEnum();
961 if (AttrSet.hasAttribute(Kind))
962 return false;
963 AB.addAttribute(Kind);
964 return true;
965 }
966 if (Attr.isStringAttribute()) {
967 StringRef Kind = Attr.getKindAsString();
968 if (AttrSet.hasAttribute(Kind)) {
969 if (!ForceReplace)
970 return false;
971 }
972 AB.addAttribute(Kind, Attr.getValueAsString());
973 return true;
974 }
975 if (Attr.isIntAttribute()) {
976 Attribute::AttrKind Kind = Attr.getKindAsEnum();
977 if (!ForceReplace && Kind == Attribute::Memory) {
978 MemoryEffects ME = Attr.getMemoryEffects() & AttrSet.getMemoryEffects();
979 if (ME == AttrSet.getMemoryEffects())
980 return false;
981 AB.addMemoryAttr(ME);
982 return true;
983 }
984 if (AttrSet.hasAttribute(Kind)) {
985 if (!ForceReplace && isEqualOrWorse(Attr, AttrSet.getAttribute(Kind)))
986 return false;
987 }
988 AB.addAttribute(Attr);
989 return true;
990 }
991
992 llvm_unreachable("Expected enum or string attribute!");
993}
994
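/// Return the Argument associated with this position, if any. For call site
/// arguments this prefers the argument of a callback callee (determined via
/// abstract call sites, e.g. !callback metadata) when exactly one callback
/// uses the underlying operand, and otherwise falls back to the directly
/// called function's argument.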
995Argument *IRPosition::getAssociatedArgument() const {
996 if (getPositionKind() == IRP_ARGUMENT)
997 return cast<Argument>(&getAnchorValue());
998
999 // Not an Argument and no argument number means this is not a call site
1000 // argument, thus we cannot find a callback argument to return.
1001 int ArgNo = getCallSiteArgNo();
1002 if (ArgNo < 0)
1003 return nullptr;
1004
1005 // Use abstract call sites to make the connection between the call site
1006 // values and the ones in callbacks. If a callback was found that makes use
1007 // of the underlying call site operand, we want the corresponding callback
1008 // callee argument and not the direct callee argument.
1009 std::optional<Argument *> CBCandidateArg;
1010 SmallVector<const Use *, 4> CallbackUses;
1011 const auto &CB = cast<CallBase>(getAnchorValue());
1012 AbstractCallSite::getCallbackUses(CB, CallbackUses);
1013 for (const Use *U : CallbackUses) {
1014 AbstractCallSite ACS(U);
1015 assert(ACS && ACS.isCallbackCall());
1016 if (!ACS.getCalledFunction())
1017 continue;
1018
1019 for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {
1020
1021 // Test if the underlying call site operand is argument number u of the
1022 // callback callee.
1023 if (ACS.getCallArgOperandNo(u) != ArgNo)
1024 continue;
1025
1026 assert(ACS.getCalledFunction()->arg_size() > u &&
1027 "ACS mapped into var-args arguments!");
1028 if (CBCandidateArg) {
1029 CBCandidateArg = nullptr;
1030 break;
1031 }
1032 CBCandidateArg = ACS.getCalledFunction()->getArg(u);
1033 }
1034 }
1035
1036 // If we found a unique callback candidate argument, return it.
1037 if (CBCandidateArg && *CBCandidateArg)
1038 return *CBCandidateArg;
1039
1040 // If no callbacks were found, or none used the underlying call site operand
1041 // exclusively, use the direct callee argument if available.
1042 auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
1043 if (Callee && Callee->arg_size() > unsigned(ArgNo))
1044 return Callee->getArg(ArgNo);
1045
1046 return nullptr;
1047}
1048
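/// Perform one update step for this abstract attribute: attributes that are
/// already at a fixpoint are skipped, everything else is delegated to
/// updateImpl and the resulting ChangeStatus is reported back to the
/// Attributor driver.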
1049ChangeStatus AbstractAttribute::update(Attributor &A) {
1050 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
1051 if (getState().isAtFixpoint())
1052 return HasChanged;
1053
1054 LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
1055
1056 HasChanged = updateImpl(A);
1057
1058 LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
1059 << "\n");
1060
1061 return HasChanged;
1062}
1063
1064Attributor::Attributor(SetVector<Function *> &Functions,
1065 InformationCache &InfoCache,
1066 AttributorConfig Configuration)
1067 : Allocator(InfoCache.Allocator), Functions(Functions),
1068 InfoCache(InfoCache), Configuration(Configuration) {
1069 if (!isClosedWorldModule())
1070 return;
1071 for (Function *Fn : Functions)
1072 if (Fn->hasAddressTaken(/*PutOffender=*/nullptr,
1073 /*IgnoreCallbackUses=*/false,
1074 /*IgnoreAssumeLikeCalls=*/true,
1075 /*IgnoreLLVMUsed=*/true,
1076 /*IgnoreARCAttachedCall=*/false,
1077 /*IgnoreCastedDirectCall=*/true))
1078 InfoCache.IndirectlyCallableFunctions.push_back(Fn);
1079}
1080
1085 "Did expect a valid position!");
1088 if (!Explorer)
1089 return false;
1090
1091 Value &AssociatedValue = IRP.getAssociatedValue();
1092
1093 const Assume2KnowledgeMap &A2K =
1094 getInfoCache().getKnowledgeMap().lookup({&AssociatedValue, AK});
1095
1096 // Check if we found any potential assume use, if not we don't need to create
1097 // explorer iterators.
1098 if (A2K.empty())
1099 return false;
1100
1101 LLVMContext &Ctx = AssociatedValue.getContext();
1102 unsigned AttrsSize = Attrs.size();
1103 auto EIt = Explorer->begin(IRP.getCtxI()),
1104 EEnd = Explorer->end(IRP.getCtxI());
1105 for (const auto &It : A2K)
1106 if (Explorer->findInContextOf(It.first, EIt, EEnd))
1107 Attrs.push_back(Attribute::get(Ctx, AK, It.second.Max));
1108 return AttrsSize != Attrs.size();
1109}
1110
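/// Shared worker for the attribute query/update routines below: look up the
/// (possibly already locally modified) AttributeList for \p IRP, let \p CB
/// stage additions in an AttrBuilder and removals in an AttributeMask for
/// every entry of \p AttrDescs, and write the updated list back into AttrsMap
/// only if something actually changed.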
1111template <typename DescTy>
1112ChangeStatus
1113Attributor::updateAttrMap(const IRPosition &IRP, ArrayRef<DescTy> AttrDescs,
1114 function_ref<bool(const DescTy &, AttributeSet,
1115 AttributeMask &, AttrBuilder &)>
1116 CB) {
1117 if (AttrDescs.empty())
1118 return ChangeStatus::UNCHANGED;
1119 switch (IRP.getPositionKind()) {
1120 case IRPosition::IRP_INVALID:
1121 case IRPosition::IRP_FLOAT:
1122 return ChangeStatus::UNCHANGED;
1123 default:
1124 break;
1125 };
1126
1127 AttributeList AL;
1128 Value *AttrListAnchor = IRP.getAttrListAnchor();
1129 auto It = AttrsMap.find(AttrListAnchor);
1130 if (It == AttrsMap.end())
1131 AL = IRP.getAttrList();
1132 else
1133 AL = It->getSecond();
1134
1135 LLVMContext &Ctx = IRP.getAnchorValue().getContext();
1136 auto AttrIdx = IRP.getAttrIdx();
1137 AttributeSet AS = AL.getAttributes(AttrIdx);
1138 AttributeMask AM;
1139 AttrBuilder AB(Ctx);
1140
1141 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
1142 for (const DescTy &AttrDesc : AttrDescs)
1143 if (CB(AttrDesc, AS, AM, AB))
1144 HasChanged = ChangeStatus::CHANGED;
1145
1146 if (HasChanged == ChangeStatus::UNCHANGED)
1147 return ChangeStatus::UNCHANGED;
1148
1149 AL = AL.removeAttributesAtIndex(Ctx, AttrIdx, AM);
1150 AL = AL.addAttributesAtIndex(Ctx, AttrIdx, AB);
1151 AttrsMap[AttrListAnchor] = AL;
1152 return ChangeStatus::CHANGED;
1153}
1154
1155bool Attributor::hasAttr(const IRPosition &IRP,
1156 ArrayRef<Attribute::AttrKind> AttrKinds,
1157 bool IgnoreSubsumingPositions,
1158 Attribute::AttrKind ImpliedAttributeKind) {
1159 bool Implied = false;
1160 bool HasAttr = false;
1161 auto HasAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
1162 AttributeMask &, AttrBuilder &) {
1163 if (AttrSet.hasAttribute(Kind)) {
1164 Implied |= Kind != ImpliedAttributeKind;
1165 HasAttr = true;
1166 }
1167 return false;
1168 };
1169 for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
1170 updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, HasAttrCB);
1171 if (HasAttr)
1172 break;
1173 // The first position returned by the SubsumingPositionIterator is
1174 // always the position itself. If we ignore subsuming positions we
1175 // are done after the first iteration.
1176 if (IgnoreSubsumingPositions)
1177 break;
1178 Implied = true;
1179 }
1180 if (!HasAttr) {
1181 Implied = true;
1182 SmallVector<Attribute> Attrs;
1183 for (Attribute::AttrKind AK : AttrKinds)
1184 if (getAttrsFromAssumes(IRP, AK, Attrs)) {
1185 HasAttr = true;
1186 break;
1187 }
1188 }
1189
1190 // Check if we should manifest the implied attribute kind at the IRP.
1191 if (ImpliedAttributeKind != Attribute::None && HasAttr && Implied)
1192 manifestAttrs(IRP, {Attribute::get(IRP.getAnchorValue().getContext(),
1193 ImpliedAttributeKind)});
1194 return HasAttr;
1195}
1196
1197void Attributor::getAttrs(const IRPosition &IRP,
1198 ArrayRef<Attribute::AttrKind> AttrKinds,
1199 SmallVectorImpl<Attribute> &Attrs,
1200 bool IgnoreSubsumingPositions) {
1201 auto CollectAttrCB = [&](const Attribute::AttrKind &Kind,
1202 AttributeSet AttrSet, AttributeMask &,
1203 AttrBuilder &) {
1204 if (AttrSet.hasAttribute(Kind))
1205 Attrs.push_back(AttrSet.getAttribute(Kind));
1206 return false;
1207 };
1208 for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
1209 updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, CollectAttrCB);
1210 // The first position returned by the SubsumingPositionIterator is
1211 // always the position itself. If we ignore subsuming positions we
1212 // are done after the first iteration.
1213 if (IgnoreSubsumingPositions)
1214 break;
1215 }
1216 for (Attribute::AttrKind AK : AttrKinds)
1217 getAttrsFromAssumes(IRP, AK, Attrs);
1218}
1219
1220ChangeStatus Attributor::removeAttrs(const IRPosition &IRP,
1221 ArrayRef<Attribute::AttrKind> AttrKinds) {
1222 auto RemoveAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
1223 AttributeMask &AM, AttrBuilder &) {
1224 if (!AttrSet.hasAttribute(Kind))
1225 return false;
1226 AM.addAttribute(Kind);
1227 return true;
1228 };
1229 return updateAttrMap<Attribute::AttrKind>(IRP, AttrKinds, RemoveAttrCB);
1230}
1231
1232ChangeStatus Attributor::removeAttrs(const IRPosition &IRP,
1233 ArrayRef<StringRef> Attrs) {
1234 auto RemoveAttrCB = [&](StringRef Attr, AttributeSet AttrSet,
1235 AttributeMask &AM, AttrBuilder &) -> bool {
1236 if (!AttrSet.hasAttribute(Attr))
1237 return false;
1238 AM.addAttribute(Attr);
1239 return true;
1240 };
1241
1242 return updateAttrMap<StringRef>(IRP, Attrs, RemoveAttrCB);
1243}
1244
1245ChangeStatus Attributor::manifestAttrs(const IRPosition &IRP,
1246 ArrayRef<Attribute> Attrs,
1247 bool ForceReplace) {
1248 LLVMContext &Ctx = IRP.getAnchorValue().getContext();
1249 auto AddAttrCB = [&](const Attribute &Attr, AttributeSet AttrSet,
1250 AttributeMask &, AttrBuilder &AB) {
1251 return addIfNotExistent(Ctx, Attr, AttrSet, ForceReplace, AB);
1252 };
1253 return updateAttrMap<Attribute>(IRP, Attrs, AddAttrCB);
1254}
1255
1256const IRPosition IRPosition::EmptyKey(DenseMapInfo<void *>::getEmptyKey());
1257const IRPosition
1258 IRPosition::TombstoneKey(DenseMapInfo<void *>::getTombstoneKey());
1259
1260SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
1261 IRPositions.emplace_back(IRP);
1262
1263 // Helper to determine if operand bundles on a call site are benign or
1264 // potentially problematic. We handle only llvm.assume for now.
1265 auto CanIgnoreOperandBundles = [](const CallBase &CB) {
1266 return (isa<IntrinsicInst>(CB) &&
1267 cast<IntrinsicInst>(CB).getIntrinsicID() == Intrinsic ::assume);
1268 };
1269
1270 const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
1271 switch (IRP.getPositionKind()) {
1272 case IRPosition::IRP_INVALID:
1273 case IRPosition::IRP_FLOAT:
1274 case IRPosition::IRP_FUNCTION:
1275 return;
1276 case IRPosition::IRP_ARGUMENT:
1277 case IRPosition::IRP_RETURNED:
1278 IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
1279 return;
1280 case IRPosition::IRP_CALL_SITE:
1281 assert(CB && "Expected call site!");
1282 // TODO: We need to look at the operand bundles similar to the redirection
1283 // in CallBase.
1284 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB))
1285 if (auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand()))
1286 IRPositions.emplace_back(IRPosition::function(*Callee));
1287 return;
1288 case IRPosition::IRP_CALL_SITE_RETURNED:
1289 assert(CB && "Expected call site!");
1290 // TODO: We need to look at the operand bundles similar to the redirection
1291 // in CallBase.
1292 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1293 if (auto *Callee =
1294 dyn_cast_if_present<Function>(CB->getCalledOperand())) {
1295 IRPositions.emplace_back(IRPosition::returned(*Callee));
1296 IRPositions.emplace_back(IRPosition::function(*Callee));
1297 for (const Argument &Arg : Callee->args())
1298 if (Arg.hasReturnedAttr()) {
1299 IRPositions.emplace_back(
1300 IRPosition::callsite_argument(*CB, Arg.getArgNo()));
1301 IRPositions.emplace_back(
1302 IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
1303 IRPositions.emplace_back(IRPosition::argument(Arg));
1304 }
1305 }
1306 }
1307 IRPositions.emplace_back(IRPosition::callsite_function(*CB));
1308 return;
1309 case IRPosition::IRP_CALL_SITE_ARGUMENT: {
1310 assert(CB && "Expected call site!");
1311 // TODO: We need to look at the operand bundles similar to the redirection
1312 // in CallBase.
1313 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1314 auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
1315 if (Callee) {
1316 if (Argument *Arg = IRP.getAssociatedArgument())
1317 IRPositions.emplace_back(IRPosition::argument(*Arg));
1318 IRPositions.emplace_back(IRPosition::function(*Callee));
1319 }
1320 }
1321 IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
1322 return;
1323 }
1324 }
1325}
1326
1327void IRPosition::verify() {
1328#ifdef EXPENSIVE_CHECKS
1329 switch (getPositionKind()) {
1330 case IRP_INVALID:
1331 assert((CBContext == nullptr) &&
1332 "Invalid position must not have CallBaseContext!");
1333 assert(!Enc.getOpaqueValue() &&
1334 "Expected a nullptr for an invalid position!");
1335 return;
1336 case IRP_FLOAT:
1337 assert((!isa<Argument>(&getAssociatedValue())) &&
1338 "Expected specialized kind for argument values!");
1339 return;
1340 case IRP_RETURNED:
1341 assert(isa<Function>(getAsValuePtr()) &&
1342 "Expected function for a 'returned' position!");
1343 assert(getAsValuePtr() == &getAssociatedValue() &&
1344 "Associated value mismatch!");
1345 return;
1346 case IRP_CALL_SITE_RETURNED:
1347 assert((CBContext == nullptr) &&
1348 "'call site returned' position must not have CallBaseContext!");
1349 assert((isa<CallBase>(getAsValuePtr())) &&
1350 "Expected call base for 'call site returned' position!");
1351 assert(getAsValuePtr() == &getAssociatedValue() &&
1352 "Associated value mismatch!");
1353 return;
1354 case IRP_CALL_SITE:
1355 assert((CBContext == nullptr) &&
1356 "'call site function' position must not have CallBaseContext!");
1357 assert((isa<CallBase>(getAsValuePtr())) &&
1358 "Expected call base for 'call site function' position!");
1359 assert(getAsValuePtr() == &getAssociatedValue() &&
1360 "Associated value mismatch!");
1361 return;
1362 case IRP_FUNCTION:
1363 assert(isa<Function>(getAsValuePtr()) &&
1364 "Expected function for a 'function' position!");
1365 assert(getAsValuePtr() == &getAssociatedValue() &&
1366 "Associated value mismatch!");
1367 return;
1368 case IRP_ARGUMENT:
1369 assert(isa<Argument>(getAsValuePtr()) &&
1370 "Expected argument for a 'argument' position!");
1371 assert(getAsValuePtr() == &getAssociatedValue() &&
1372 "Associated value mismatch!");
1373 return;
1374 case IRP_CALL_SITE_ARGUMENT: {
1375 assert((CBContext == nullptr) &&
1376 "'call site argument' position must not have CallBaseContext!");
1377 Use *U = getAsUsePtr();
1378 (void)U; // Silence unused variable warning.
1379 assert(U && "Expected use for a 'call site argument' position!");
1380 assert(isa<CallBase>(U->getUser()) &&
1381 "Expected call base user for a 'call site argument' position!");
1382 assert(cast<CallBase>(U->getUser())->isArgOperand(U) &&
1383 "Expected call base argument operand for a 'call site argument' "
1384 "position");
1385 assert(cast<CallBase>(U->getUser())->getArgOperandNo(U) ==
1386 unsigned(getCallSiteArgNo()) &&
1387 "Argument number mismatch!");
1388 assert(U->get() == &getAssociatedValue() && "Associated value mismatch!");
1389 return;
1390 }
1391 }
1392#endif
1393}
1394
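/// Try to simplify \p IRP to a single Constant. Registered simplification
/// callbacks take precedence; otherwise AAPotentialValues is queried through
/// getAssumedSimplifiedValues. std::nullopt means "no value yet" (the
/// optimistic state), nullptr means no constant could be determined.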
1395std::optional<Constant *>
1396Attributor::getAssumedConstant(const IRPosition &IRP,
1397 const AbstractAttribute &AA,
1398 bool &UsedAssumedInformation) {
1399 // First check all callbacks provided by outside AAs. If any of them returns
1400 // a non-null value that is different from the associated value, or
1401 // std::nullopt, we assume it's simplified.
1402 for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
1403 std::optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
1404 if (!SimplifiedV)
1405 return std::nullopt;
1406 if (isa_and_nonnull<Constant>(*SimplifiedV))
1407 return cast<Constant>(*SimplifiedV);
1408 return nullptr;
1409 }
1410 if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
1411 return C;
1412 SmallVector<AA::ValueAndContext> Values;
1413 if (getAssumedSimplifiedValues(IRP, &AA, Values,
1414 AA::Interprocedural,
1415 UsedAssumedInformation)) {
1416 if (Values.empty())
1417 return std::nullopt;
1418 if (auto *C = dyn_cast_or_null<Constant>(
1419 AAPotentialValues::getSingleValue(*this, AA, IRP, Values)))
1420 return C;
1421 }
1422 return nullptr;
1423}
1424
1425std::optional<Value *> Attributor::getAssumedSimplified(
1426 const IRPosition &IRP, const AbstractAttribute *AA,
1427 bool &UsedAssumedInformation, AA::ValueScope S) {
1428 // First check all callbacks provided by outside AAs. If any of them returns
1429 // a non-null value that is different from the associated value, or
1430 // std::nullopt, we assume it's simplified.
1431 for (auto &CB : SimplificationCallbacks.lookup(IRP))
1432 return CB(IRP, AA, UsedAssumedInformation);
1433
1434 SmallVector<AA::ValueAndContext> Values;
1435 if (!getAssumedSimplifiedValues(IRP, AA, Values, S, UsedAssumedInformation))
1436 return &IRP.getAssociatedValue();
1437 if (Values.empty())
1438 return std::nullopt;
1439 if (AA)
1440 if (Value *V = AAPotentialValues::getSingleValue(*this, *AA, IRP, Values))
1441 return V;
1442 if (IRP.getPositionKind() == IRPosition::IRP_RETURNED ||
1443 IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED)
1444 return nullptr;
1445 return &IRP.getAssociatedValue();
1446}
1447
1448bool Attributor::getAssumedSimplifiedValues(
1449 const IRPosition &InitialIRP, const AbstractAttribute *AA,
1450 SmallVectorImpl<AA::ValueAndContext> &Values, AA::ValueScope S,
1451 bool &UsedAssumedInformation, bool RecurseForSelectAndPHI) {
1452 SmallPtrSet<Value *, 8> Seen;
1453 SmallVector<IRPosition, 8> Worklist;
1454 Worklist.push_back(InitialIRP);
1455 while (!Worklist.empty()) {
1456 const IRPosition &IRP = Worklist.pop_back_val();
1457
1458 // First check all callbacks provided by outside AAs. If any of them returns
1459 // a non-null value that is different from the associated value, or
1460 // std::nullopt, we assume it's simplified.
1461 int NV = Values.size();
1462 const auto &SimplificationCBs = SimplificationCallbacks.lookup(IRP);
1463 for (const auto &CB : SimplificationCBs) {
1464 std::optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
1465 if (!CBResult.has_value())
1466 continue;
1467 Value *V = *CBResult;
1468 if (!V)
1469 return false;
1470 if ((S & AA::Interprocedural) ||
1471 AA::isValidInScope(*V, IRP.getAnchorScope()))
1472 Values.push_back(AA::ValueAndContext{*V, nullptr});
1473 else
1474 return false;
1475 }
1476 if (SimplificationCBs.empty()) {
1477 // If no high-level/outside simplification occurred, use
1478 // AAPotentialValues.
1479 const auto *PotentialValuesAA =
1480 getOrCreateAAFor<AAPotentialValues>(IRP, AA, DepClassTy::OPTIONAL);
1481 if (PotentialValuesAA && PotentialValuesAA->getAssumedSimplifiedValues(*this, Values, S)) {
1482 UsedAssumedInformation |= !PotentialValuesAA->isAtFixpoint();
1483 } else if (IRP.getPositionKind() != IRPosition::IRP_RETURNED) {
1484 Values.push_back({IRP.getAssociatedValue(), IRP.getCtxI()});
1485 } else {
1486 // TODO: We could visit all returns and add the operands.
1487 return false;
1488 }
1489 }
1490
1491 if (!RecurseForSelectAndPHI)
1492 break;
1493
1494 for (int I = NV, E = Values.size(); I < E; ++I) {
1495 Value *V = Values[I].getValue();
1496 if (!isa<PHINode>(V) && !isa<SelectInst>(V))
1497 continue;
1498 if (!Seen.insert(V).second)
1499 continue;
1500 // Move the last element to this slot.
1501 Values[I] = Values[E - 1];
1502 // Eliminate the last slot, adjust the indices.
1503 Values.pop_back();
1504 --E;
1505 --I;
1506 // Add a new value (select or phi) to the worklist.
1507 Worklist.push_back(IRPosition::value(*V));
1508 }
1509 }
1510 return true;
1511}
1512
1513std::optional<Value *> Attributor::translateArgumentToCallSiteContent(
1514 std::optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
1515 bool &UsedAssumedInformation) {
1516 if (!V)
1517 return V;
1518 if (*V == nullptr || isa<Constant>(*V))
1519 return V;
1520 if (auto *Arg = dyn_cast<Argument>(*V))
1521 if (CB.getCalledOperand() == Arg->getParent() &&
1522 CB.arg_size() > Arg->getArgNo())
1523 if (!Arg->hasPointeeInMemoryValueAttr())
1524 return getAssumedSimplified(
1525 IRPosition::callsite_argument(CB, Arg->getArgNo()), AA,
1526 UsedAssumedInformation, AA::Intraprocedural);
1527 return nullptr;
1528}
1529
1530Attributor::~Attributor() {
1531 // The abstract attributes are allocated via the BumpPtrAllocator Allocator,
1532 // thus we cannot delete them. We can, and want to, destruct them though.
1533 for (auto &It : AAMap) {
1534 AbstractAttribute *AA = It.getSecond();
1535 AA->~AbstractAttribute();
1536 }
1537}
1538
1539bool Attributor::isAssumedDead(const AbstractAttribute &AA,
1540 const AAIsDead *FnLivenessAA,
1541 bool &UsedAssumedInformation,
1542 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1543 if (!Configuration.UseLiveness)
1544 return false;
1545 const IRPosition &IRP = AA.getIRPosition();
1546 if (!Functions.count(IRP.getAnchorScope()))
1547 return false;
1548 return isAssumedDead(IRP, &AA, FnLivenessAA, UsedAssumedInformation,
1549 CheckBBLivenessOnly, DepClass);
1550}
1551
1552bool Attributor::isAssumedDead(const Use &U,
1553 const AbstractAttribute *QueryingAA,
1554 const AAIsDead *FnLivenessAA,
1555 bool &UsedAssumedInformation,
1556 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1557 if (!Configuration.UseLiveness)
1558 return false;
1559 Instruction *UserI = dyn_cast<Instruction>(U.getUser());
1560 if (!UserI)
1561 return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
1562 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1563
1564 if (auto *CB = dyn_cast<CallBase>(UserI)) {
1565 // For call site argument uses we can check if the argument is
1566 // unused/dead.
1567 if (CB->isArgOperand(&U)) {
1568 const IRPosition &CSArgPos =
1569 IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
1570 return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
1571 UsedAssumedInformation, CheckBBLivenessOnly,
1572 DepClass);
1573 }
1574 } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
1575 const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
1576 return isAssumedDead(RetPos, QueryingAA, FnLivenessAA,
1577 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1578 } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
1579 BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
1580 return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
1581 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1582 } else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
1583 if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
1584 const IRPosition IRP = IRPosition::inst(*SI);
1585 const AAIsDead *IsDeadAA =
1586 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1587 if (IsDeadAA && IsDeadAA->isRemovableStore()) {
1588 if (QueryingAA)
1589 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1590 if (!IsDeadAA->isKnown(AAIsDead::IS_REMOVABLE))
1591 UsedAssumedInformation = true;
1592 return true;
1593 }
1594 }
1595 }
1596
1597 return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
1598 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1599}
1600
1601bool Attributor::isAssumedDead(const Instruction &I,
1602 const AbstractAttribute *QueryingAA,
1603 const AAIsDead *FnLivenessAA,
1604 bool &UsedAssumedInformation,
1605 bool CheckBBLivenessOnly, DepClassTy DepClass,
1606 bool CheckForDeadStore) {
1607 if (!Configuration.UseLiveness)
1608 return false;
1609 const IRPosition::CallBaseContext *CBCtx =
1610 QueryingAA ? QueryingAA->getCallBaseContext() : nullptr;
1611
1612 if (ManifestAddedBlocks.contains(I.getParent()))
1613 return false;
1614
1615 const Function &F = *I.getFunction();
1616 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1617 FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRPosition::function(F, CBCtx),
1618 QueryingAA, DepClassTy::NONE);
1619
1620 // Don't use recursive reasoning.
1621 if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1622 return false;
1623
1624 // If we have a context instruction and a liveness AA we use it.
1625 if (CheckBBLivenessOnly ? FnLivenessAA->isAssumedDead(I.getParent())
1626 : FnLivenessAA->isAssumedDead(&I)) {
1627 if (QueryingAA)
1628 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1629 if (!FnLivenessAA->isKnownDead(&I))
1630 UsedAssumedInformation = true;
1631 return true;
1632 }
1633
1634 if (CheckBBLivenessOnly)
1635 return false;
1636
1637 const IRPosition IRP = IRPosition::inst(I, CBCtx);
1638 const AAIsDead *IsDeadAA =
1639 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1640
1641 // Don't use recursive reasoning.
1642 if (!IsDeadAA || QueryingAA == IsDeadAA)
1643 return false;
1644
1645 if (IsDeadAA->isAssumedDead()) {
1646 if (QueryingAA)
1647 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1648 if (!IsDeadAA->isKnownDead())
1649 UsedAssumedInformation = true;
1650 return true;
1651 }
1652
1653 if (CheckForDeadStore && isa<StoreInst>(I) && IsDeadAA->isRemovableStore()) {
1654 if (QueryingAA)
1655 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1656 if (!IsDeadAA->isKnownDead())
1657 UsedAssumedInformation = true;
1658 return true;
1659 }
1660
1661 return false;
1662}
1663
1664bool Attributor::isAssumedDead(const IRPosition &IRP,
1665 const AbstractAttribute *QueryingAA,
1666 const AAIsDead *FnLivenessAA,
1667 bool &UsedAssumedInformation,
1668 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1669 if (!Configuration.UseLiveness)
1670 return false;
1671 // Don't check liveness for constants, e.g. functions, used as (floating)
1672 // values since the context instruction and such is here meaningless.
1673 if (IRP.getPositionKind() == IRPosition::IRP_FLOAT &&
1674 isa<Constant>(IRP.getAssociatedValue())) {
1675 return false;
1676 }
1677
1678 Instruction *CtxI = IRP.getCtxI();
1679 if (CtxI &&
1680 isAssumedDead(*CtxI, QueryingAA, FnLivenessAA, UsedAssumedInformation,
1681 /* CheckBBLivenessOnly */ true,
1682 CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
1683 return true;
1684
1685 if (CheckBBLivenessOnly)
1686 return false;
1687
1688 // If we haven't succeeded we query the specific liveness info for the IRP.
1689 const AAIsDead *IsDeadAA;
1690 if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE)
1691 IsDeadAA = getOrCreateAAFor<AAIsDead>(
1692 IRPosition::callsite_returned(cast<CallBase>(IRP.getAssociatedValue())),
1693 QueryingAA, DepClassTy::NONE);
1694 else
1695 IsDeadAA = getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1696
1697 // Don't use recursive reasoning.
1698 if (!IsDeadAA || QueryingAA == IsDeadAA)
1699 return false;
1700
1701 if (IsDeadAA->isAssumedDead()) {
1702 if (QueryingAA)
1703 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1704 if (!IsDeadAA->isKnownDead())
1705 UsedAssumedInformation = true;
1706 return true;
1707 }
1708
1709 return false;
1710}
1711
1712bool Attributor::isAssumedDead(const BasicBlock &BB,
1713 const AbstractAttribute *QueryingAA,
1714 const AAIsDead *FnLivenessAA,
1715 DepClassTy DepClass) {
1716 if (!Configuration.UseLiveness)
1717 return false;
1718 const Function &F = *BB.getParent();
1719 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1720 FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRPosition::function(F),
1721 QueryingAA, DepClassTy::NONE);
1722
1723 // Don't use recursive reasoning.
1724 if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1725 return false;
1726
1727 if (FnLivenessAA->isAssumedDead(&BB)) {
1728 if (QueryingAA)
1729 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1730 return true;
1731 }
1732
1733 return false;
1734}
1735
1736bool Attributor::checkForAllCallees(
1737 function_ref<bool(ArrayRef<const Function *>)> Pred,
1738 const AbstractAttribute &QueryingAA, const CallBase &CB) {
1739 if (const Function *Callee = dyn_cast<Function>(CB.getCalledOperand()))
1740 return Pred(Callee);
1741
1742 const auto *CallEdgesAA = getAAFor<AACallEdges>(
1743 QueryingAA, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL);
1744 if (!CallEdgesAA || CallEdgesAA->hasUnknownCallee())
1745 return false;
1746
1747 const auto &Callees = CallEdgesAA->getOptimisticEdges();
1748 return Pred(Callees.getArrayRef());
1749}
1750
1751bool canMarkAsVisited(const User *Usr) {
1752 return isa<PHINode>(Usr) || !isa<Instruction>(Usr);
1753}
1754
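/// Visit all uses of \p V (transitively, if \p Pred requests to follow a
/// user) while skipping uses that are assumed dead or droppable. Values
/// written to memory are followed into their potential copies when the copies
/// can be determined exactly; virtual-use callbacks registered for \p V are
/// consulted first.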
1755bool Attributor::checkForAllUses(
1756 function_ref<bool(const Use &, bool &)> Pred,
1757 const AbstractAttribute &QueryingAA, const Value &V,
1758 bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
1759 bool IgnoreDroppableUses,
1760 function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {
1761
1762 // Check virtual uses first.
1763 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&V))
1764 if (!CB(*this, &QueryingAA))
1765 return false;
1766
1767 // Check the trivial case first as it catches void values.
1768 if (V.use_empty())
1769 return true;
1770
1771 const IRPosition &IRP = QueryingAA.getIRPosition();
1774
1775 auto AddUsers = [&](const Value &V, const Use *OldUse) {
1776 for (const Use &UU : V.uses()) {
1777 if (OldUse && EquivalentUseCB && !EquivalentUseCB(*OldUse, UU)) {
1778 LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
1779 "rejected by the equivalence call back: "
1780 << *UU << "!\n");
1781 return false;
1782 }
1783
1784 Worklist.push_back(&UU);
1785 }
1786 return true;
1787 };
1788
1789 AddUsers(V, /* OldUse */ nullptr);
1790
1791 LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
1792 << " initial uses to check\n");
1793
1794 const Function *ScopeFn = IRP.getAnchorScope();
1795 const auto *LivenessAA =
1796 ScopeFn ? getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
1798 : nullptr;
1799
1800 while (!Worklist.empty()) {
1801 const Use *U = Worklist.pop_back_val();
1802 if (canMarkAsVisited(U->getUser()) && !Visited.insert(U).second)
1803 continue;
1805 if (auto *Fn = dyn_cast<Function>(U->getUser()))
1806 dbgs() << "[Attributor] Check use: " << **U << " in " << Fn->getName()
1807 << "\n";
1808 else
1809 dbgs() << "[Attributor] Check use: " << **U << " in " << *U->getUser()
1810 << "\n";
1811 });
1812 bool UsedAssumedInformation = false;
1813 if (isAssumedDead(*U, &QueryingAA, LivenessAA, UsedAssumedInformation,
1814 CheckBBLivenessOnly, LivenessDepClass)) {
1816 dbgs() << "[Attributor] Dead use, skip!\n");
1817 continue;
1818 }
1819 if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
1821 dbgs() << "[Attributor] Droppable user, skip!\n");
1822 continue;
1823 }
1824
1825 if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
1826 if (&SI->getOperandUse(0) == U) {
1827 if (!Visited.insert(U).second)
1828 continue;
1829 SmallSetVector<Value *, 4> PotentialCopies;
1831 *this, *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
1832 /* OnlyExact */ true)) {
1834 dbgs()
1835 << "[Attributor] Value is stored, continue with "
1836 << PotentialCopies.size()
1837 << " potential copies instead!\n");
1838 for (Value *PotentialCopy : PotentialCopies)
1839 if (!AddUsers(*PotentialCopy, U))
1840 return false;
1841 continue;
1842 }
1843 }
1844 }
1845
1846 bool Follow = false;
1847 if (!Pred(*U, Follow))
1848 return false;
1849 if (!Follow)
1850 continue;
1851
1852 User &Usr = *U->getUser();
1853 AddUsers(Usr, /* OldUse */ nullptr);
1854 }
1855
1856 return true;
1857}
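// [Editor's note] A sketch of how an abstract attribute might drive
// checkForAllUses. The helper and its predicate are hypothetical; the trailing
// parameters are assumed to keep the defaults declared in Attributor.h, and
// llvm/IR/InstrTypes.h / llvm/IR/Instructions.h provide CastInst / ICmpInst.
static bool usedOnlyInComparisons(Attributor &A,
                                  const AbstractAttribute &QueryingAA,
                                  const Value &V) {
  auto UsePred = [](const Use &U, bool &Follow) {
    User *Usr = U.getUser();
    if (isa<ICmpInst>(Usr))
      return true;
    if (isa<CastInst>(Usr)) {
      // Look through casts by also visiting the uses of the cast (Follow).
      Follow = true;
      return true;
    }
    // Any other user defeats the (hypothetical) property we are checking.
    return false;
  };
  // Dead and droppable uses are skipped internally; stores are followed into
  // their potential copies as implemented above.
  return A.checkForAllUses(UsePred, QueryingAA, V);
}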
1858
1859bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1860 const AbstractAttribute &QueryingAA,
1861 bool RequireAllCallSites,
1862 bool &UsedAssumedInformation) {
1863 // We can try to determine information from
1864 // the call sites. However, this is only possible if all call sites are known,
1865 // hence the function must have internal linkage.
1866 const IRPosition &IRP = QueryingAA.getIRPosition();
1867 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1868 if (!AssociatedFunction) {
1869 LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
1870 << "\n");
1871 return false;
1872 }
1873
1874 return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
1875 &QueryingAA, UsedAssumedInformation);
1876}
1877
1878bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1879 const Function &Fn,
1880 bool RequireAllCallSites,
1881 const AbstractAttribute *QueryingAA,
1882 bool &UsedAssumedInformation,
1883 bool CheckPotentiallyDead) {
1884 if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
1885 LLVM_DEBUG(
1886 dbgs()
1887 << "[Attributor] Function " << Fn.getName()
1888 << " has no internal linkage, hence not all call sites are known\n");
1889 return false;
1890 }
1891 // Check virtual uses first.
1892 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&Fn))
1893 if (!CB(*this, QueryingAA))
1894 return false;
1895
1897 for (unsigned u = 0; u < Uses.size(); ++u) {
1898 const Use &U = *Uses[u];
1900 if (auto *Fn = dyn_cast<Function>(U))
1901 dbgs() << "[Attributor] Check use: " << Fn->getName() << " in "
1902 << *U.getUser() << "\n";
1903 else
1904 dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
1905 << "\n";
1906 });
1907 if (!CheckPotentiallyDead &&
1908 isAssumedDead(U, QueryingAA, nullptr, UsedAssumedInformation,
1909 /* CheckBBLivenessOnly */ true)) {
1911 dbgs() << "[Attributor] Dead use, skip!\n");
1912 continue;
1913 }
1914 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
1915 if (CE->isCast() && CE->getType()->isPointerTy()) {
1917 dbgs() << "[Attributor] Use, is constant cast expression, add "
1918 << CE->getNumUses() << " uses of that expression instead!\n";
1919 });
1920 for (const Use &CEU : CE->uses())
1921 Uses.push_back(&CEU);
1922 continue;
1923 }
1924 }
1925
1926 AbstractCallSite ACS(&U);
1927 if (!ACS) {
1928 LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
1929 << " has non call site use " << *U.get() << " in "
1930 << *U.getUser() << "\n");
1931 // BlockAddress users are allowed.
1932 if (isa<BlockAddress>(U.getUser()))
1933 continue;
1934 return false;
1935 }
1936
1937 const Use *EffectiveUse =
1938 ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
1939 if (!ACS.isCallee(EffectiveUse)) {
1940 if (!RequireAllCallSites) {
1941 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1942 << " is not a call of " << Fn.getName()
1943 << ", skip use\n");
1944 continue;
1945 }
1946 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1947 << " is an invalid use of " << Fn.getName() << "\n");
1948 return false;
1949 }
1950
1951 // Make sure the arguments that can be matched between the call site and the
1952 // callee agree on their type. It is unlikely they do not, and it doesn't
1953 // make sense for all attributes to know/care about this.
1954 assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
1955 unsigned MinArgsParams =
1956 std::min(size_t(ACS.getNumArgOperands()), Fn.arg_size());
1957 for (unsigned u = 0; u < MinArgsParams; ++u) {
1958 Value *CSArgOp = ACS.getCallArgOperand(u);
1959 if (CSArgOp && Fn.getArg(u)->getType() != CSArgOp->getType()) {
1960 LLVM_DEBUG(
1961 dbgs() << "[Attributor] Call site / callee argument type mismatch ["
1962 << u << "@" << Fn.getName() << ": "
1963 << *Fn.getArg(u)->getType() << " vs. "
1964 << *ACS.getCallArgOperand(u)->getType() << "\n");
1965 return false;
1966 }
1967 }
1968
1969 if (Pred(ACS))
1970 continue;
1971
1972 LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
1973 << *ACS.getInstruction() << "\n");
1974 return false;
1975 }
1976
1977 return true;
1978}
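// [Editor's note] A sketch of a call-site query built on checkForAllCallSites.
// The helper is hypothetical; RequireAllCallSites = true means the walk fails
// unless the function has local linkage (all call sites known), mirroring the
// check at the top of the function above.
static bool allCallSitesPassPointer(Attributor &A, const Function &Fn,
                                    const AbstractAttribute *QueryingAA,
                                    unsigned ArgNo) {
  auto CallSitePred = [&](AbstractCallSite ACS) {
    if (ArgNo >= ACS.getNumArgOperands())
      return false;
    Value *ArgOp = ACS.getCallArgOperand(ArgNo);
    return ArgOp && ArgOp->getType()->isPointerTy();
  };
  bool UsedAssumedInformation = false;
  return A.checkForAllCallSites(CallSitePred, Fn,
                                /* RequireAllCallSites */ true, QueryingAA,
                                UsedAssumedInformation);
}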
1979
1980bool Attributor::shouldPropagateCallBaseContext(const IRPosition &IRP) {
1981 // TODO: Maintain a cache of Values that are
1982 // on the pathway from an Argument to an Instruction that would affect the
1983 // liveness/return state, etc.
1985}
1986
1988 const AbstractAttribute &QueryingAA,
1990 bool RecurseForSelectAndPHI) {
1991
1992 const IRPosition &IRP = QueryingAA.getIRPosition();
1993 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1994 if (!AssociatedFunction)
1995 return false;
1996
1997 bool UsedAssumedInformation = false;
2000 IRPosition::returned(*AssociatedFunction), &QueryingAA, Values, S,
2001 UsedAssumedInformation, RecurseForSelectAndPHI))
2002 return false;
2003
2004 return llvm::all_of(Values, [&](const AA::ValueAndContext &VAC) {
2005 return Pred(*VAC.getValue());
2006 });
2007}
2008
2009static bool checkForAllInstructionsImpl(
2010 Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap,
2011 function_ref<bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA,
2012 const AAIsDead *LivenessAA, ArrayRef<unsigned> Opcodes,
2013 bool &UsedAssumedInformation, bool CheckBBLivenessOnly = false,
2014 bool CheckPotentiallyDead = false) {
2015 for (unsigned Opcode : Opcodes) {
2016 // Check if we have instructions with this opcode at all first.
2017 auto *Insts = OpcodeInstMap.lookup(Opcode);
2018 if (!Insts)
2019 continue;
2020
2021 for (Instruction *I : *Insts) {
2022 // Skip dead instructions.
2023 if (A && !CheckPotentiallyDead &&
2024 A->isAssumedDead(IRPosition::inst(*I), QueryingAA, LivenessAA,
2025 UsedAssumedInformation, CheckBBLivenessOnly)) {
2027 dbgs() << "[Attributor] Instruction " << *I
2028 << " is potentially dead, skip!\n";);
2029 continue;
2030 }
2031
2032 if (!Pred(*I))
2033 return false;
2034 }
2035 }
2036 return true;
2037}
2038
2039bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
2040 const Function *Fn,
2041 const AbstractAttribute *QueryingAA,
2042 ArrayRef<unsigned> Opcodes,
2043 bool &UsedAssumedInformation,
2044 bool CheckBBLivenessOnly,
2045 bool CheckPotentiallyDead) {
2046 // Since we need to provide instructions we have to have an exact definition.
2047 if (!Fn || Fn->isDeclaration())
2048 return false;
2049
2050 const IRPosition &QueryIRP = IRPosition::function(*Fn);
2051 const auto *LivenessAA =
2052 CheckPotentiallyDead && QueryingAA
2053 ? (getAAFor<AAIsDead>(*QueryingAA, QueryIRP, DepClassTy::NONE))
2054 : nullptr;
2055
2056 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2057 if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, QueryingAA,
2058 LivenessAA, Opcodes, UsedAssumedInformation,
2059 CheckBBLivenessOnly, CheckPotentiallyDead))
2060 return false;
2061
2062 return true;
2063}
2064
2065bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
2066 const AbstractAttribute &QueryingAA,
2067 ArrayRef<unsigned> Opcodes,
2068 bool &UsedAssumedInformation,
2069 bool CheckBBLivenessOnly,
2070 bool CheckPotentiallyDead) {
2071 const IRPosition &IRP = QueryingAA.getIRPosition();
2072 const Function *AssociatedFunction = IRP.getAssociatedFunction();
2073 return checkForAllInstructions(Pred, AssociatedFunction, &QueryingAA, Opcodes,
2074 UsedAssumedInformation, CheckBBLivenessOnly,
2075 CheckPotentiallyDead);
2076}
2077
2078bool Attributor::checkForAllReadWriteInstructions(
2079 function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA,
2080 bool &UsedAssumedInformation) {
2081 TimeTraceScope TS("checkForAllReadWriteInstructions");
2082
2083 const Function *AssociatedFunction =
2084 QueryingAA.getIRPosition().getAssociatedFunction();
2085 if (!AssociatedFunction)
2086 return false;
2087
2088 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
2089 const auto *LivenessAA =
2090 getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE);
2091
2092 for (Instruction *I :
2093 InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
2094 // Skip dead instructions.
2095 if (isAssumedDead(IRPosition::inst(*I), &QueryingAA, LivenessAA,
2096 UsedAssumedInformation))
2097 continue;
2098
2099 if (!Pred(*I))
2100 return false;
2101 }
2102
2103 return true;
2104}
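// [Editor's note] A sketch of an opcode-filtered instruction walk. The helper
// is hypothetical and assumes the defaults for the trailing liveness flags;
// only opcodes cached per function in the InformationCache (see
// initializeInformationCache below) can be enumerated this way.
static bool hasNoMustTailCalls(Attributor &A,
                               const AbstractAttribute &QueryingAA) {
  auto InstPred = [](Instruction &I) {
    auto *CI = dyn_cast<CallInst>(&I);
    return !CI || !CI->isMustTailCall();
  };
  bool UsedAssumedInformation = false;
  // Dead calls are skipped automatically unless CheckPotentiallyDead is set.
  return A.checkForAllInstructions(InstPred, QueryingAA,
                                   {(unsigned)Instruction::Call},
                                   UsedAssumedInformation);
}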
2105
2106void Attributor::runTillFixpoint() {
2107 TimeTraceScope TimeScope("Attributor::runTillFixpoint");
2108 LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
2109 << DG.SyntheticRoot.Deps.size()
2110 << " abstract attributes.\n");
2111
2112 // Now that all abstract attributes are collected and initialized we start
2113 // the abstract analysis.
2114
2115 unsigned IterationCounter = 1;
2116 unsigned MaxIterations =
2117 Configuration.MaxFixpointIterations.value_or(SetFixpointIterations);
2118
2120 SetVector<AbstractAttribute *> Worklist, InvalidAAs;
2121 Worklist.insert(DG.SyntheticRoot.begin(), DG.SyntheticRoot.end());
2122
2123 do {
2124 // Remember the size to determine new attributes.
2125 size_t NumAAs = DG.SyntheticRoot.Deps.size();
2126 LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
2127 << ", Worklist size: " << Worklist.size() << "\n");
2128
2129 // For invalid AAs we can fix dependent AAs that have a required dependence,
2130 // thereby folding long dependence chains in a single step without the need
2131 // to run updates.
2132 for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
2133 AbstractAttribute *InvalidAA = InvalidAAs[u];
2134
2135 // Check the dependences to fast track invalidation.
2137 dbgs() << "[Attributor] InvalidAA: " << *InvalidAA
2138 << " has " << InvalidAA->Deps.size()
2139 << " required & optional dependences\n");
2140 for (auto &DepIt : InvalidAA->Deps) {
2141 AbstractAttribute *DepAA = cast<AbstractAttribute>(DepIt.getPointer());
2142 if (DepIt.getInt() == unsigned(DepClassTy::OPTIONAL)) {
2144 dbgs() << " - recompute: " << *DepAA);
2145 Worklist.insert(DepAA);
2146 continue;
2147 }
2149 << " - invalidate: " << *DepAA);
2151 assert(DepAA->getState().isAtFixpoint() && "Expected fixpoint state!");
2152 if (!DepAA->getState().isValidState())
2153 InvalidAAs.insert(DepAA);
2154 else
2155 ChangedAAs.push_back(DepAA);
2156 }
2157 InvalidAA->Deps.clear();
2158 }
2159
2160 // Add all abstract attributes that are potentially dependent on one that
2161 // changed to the work list.
2162 for (AbstractAttribute *ChangedAA : ChangedAAs) {
2163 for (auto &DepIt : ChangedAA->Deps)
2164 Worklist.insert(cast<AbstractAttribute>(DepIt.getPointer()));
2165 ChangedAA->Deps.clear();
2166 }
2167
2168 LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
2169 << ", Worklist+Dependent size: " << Worklist.size()
2170 << "\n");
2171
2172 // Reset the changed and invalid set.
2173 ChangedAAs.clear();
2174 InvalidAAs.clear();
2175
2176 // Update all abstract attributes in the work list and record the ones that
2177 // changed.
2178 for (AbstractAttribute *AA : Worklist) {
2179 const auto &AAState = AA->getState();
2180 if (!AAState.isAtFixpoint())
2181 if (updateAA(*AA) == ChangeStatus::CHANGED)
2182 ChangedAAs.push_back(AA);
2183
2184 // Use the InvalidAAs vector to quickly propagate invalid states transitively
2185 // without requiring updates.
2186 if (!AAState.isValidState())
2187 InvalidAAs.insert(AA);
2188 }
2189
2190 // Add attributes to the changed set if they have been created in the last
2191 // iteration.
2192 ChangedAAs.append(DG.SyntheticRoot.begin() + NumAAs,
2193 DG.SyntheticRoot.end());
2194
2195 // Reset the work list and repopulate with the changed abstract attributes.
2196 // Note that dependent ones are added above.
2197 Worklist.clear();
2198 Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
2199 Worklist.insert(QueryAAsAwaitingUpdate.begin(),
2200 QueryAAsAwaitingUpdate.end());
2201 QueryAAsAwaitingUpdate.clear();
2202
2203 } while (!Worklist.empty() && (IterationCounter++ < MaxIterations));
2204
2205 if (IterationCounter > MaxIterations && !Functions.empty()) {
2206 auto Remark = [&](OptimizationRemarkMissed ORM) {
2207 return ORM << "Attributor did not reach a fixpoint after "
2208 << ore::NV("Iterations", MaxIterations) << " iterations.";
2209 };
2210 Function *F = Functions.front();
2211 emitRemark<OptimizationRemarkMissed>(F, "FixedPoint", Remark);
2212 }
2213
2214 LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
2215 << IterationCounter << "/" << MaxIterations
2216 << " iterations\n");
2217
2218 // Reset abstract attributes not settled in a sound fixpoint by now. This
2219 // happens when we stopped the fixpoint iteration early. Note that only the
2220 // ones marked as "changed" *and* the ones transitively depending on them
2221 // need to be reverted to a pessimistic state. Others might not be in a
2222 // fixpoint state but we can use the optimistic results for them anyway.
2224 for (unsigned u = 0; u < ChangedAAs.size(); u++) {
2225 AbstractAttribute *ChangedAA = ChangedAAs[u];
2226 if (!Visited.insert(ChangedAA).second)
2227 continue;
2228
2229 AbstractState &State = ChangedAA->getState();
2230 if (!State.isAtFixpoint()) {
2231 State.indicatePessimisticFixpoint();
2232
2233 NumAttributesTimedOut++;
2234 }
2235
2236 for (auto &DepIt : ChangedAA->Deps)
2237 ChangedAAs.push_back(cast<AbstractAttribute>(DepIt.getPointer()));
2238 ChangedAA->Deps.clear();
2239 }
2240
2241 LLVM_DEBUG({
2242 if (!Visited.empty())
2243 dbgs() << "\n[Attributor] Finalized " << Visited.size()
2244 << " abstract attributes.\n";
2245 });
2246}
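// [Editor's note] The loop above is a dependence-driven worklist fixpoint:
// only attributes whose inputs changed are re-run, and invalid states are
// propagated along required dependences without extra updates. A
// self-contained sketch of the underlying idea (hypothetical node type, not
// the Attributor's data structures; requires <functional>, llvm/ADT/SetVector.h
// and llvm/ADT/SmallVector.h):
namespace {
struct FixpointNode {
  SmallVector<FixpointNode *, 4> Dependents; // Revisit these if we change.
  std::function<bool()> Update;              // Returns true iff state changed.
};
void runWorklistFixpoint(ArrayRef<FixpointNode *> All, unsigned MaxIterations) {
  SetVector<FixpointNode *> Worklist(All.begin(), All.end());
  unsigned Iteration = 0;
  while (!Worklist.empty() && Iteration++ < MaxIterations) {
    SetVector<FixpointNode *> Next;
    for (FixpointNode *N : Worklist)
      if (N->Update())
        Next.insert(N->Dependents.begin(), N->Dependents.end());
    Worklist = std::move(Next);
  }
}
} // namespace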
2247
2248void Attributor::registerForUpdate(AbstractAttribute &AA) {
2249 assert(AA.isQueryAA() &&
2250 "Non-query AAs should not be required to register for updates!");
2251 QueryAAsAwaitingUpdate.insert(&AA);
2252}
2253
2254ChangeStatus Attributor::manifestAttributes() {
2255 TimeTraceScope TimeScope("Attributor::manifestAttributes");
2256 size_t NumFinalAAs = DG.SyntheticRoot.Deps.size();
2257
2258 unsigned NumManifested = 0;
2259 unsigned NumAtFixpoint = 0;
2260 ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
2261 for (auto &DepAA : DG.SyntheticRoot.Deps) {
2262 AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
2263 AbstractState &State = AA->getState();
2264
2265 // If there is not already a fixpoint reached, we can now take the
2266 // optimistic state. This is correct because we enforced a pessimistic one
2267 // on abstract attributes that were transitively dependent on a changed one
2268 // already above.
2269 if (!State.isAtFixpoint())
2270 State.indicateOptimisticFixpoint();
2271
2272 // We must not manifest attributes that use CallBase info.
2273 if (AA->hasCallBaseContext())
2274 continue;
2275 // If the state is invalid, we do not try to manifest it.
2276 if (!State.isValidState())
2277 continue;
2278
2279 if (AA->getCtxI() && !isRunOn(*AA->getAnchorScope()))
2280 continue;
2281
2282 // Skip dead code.
2283 bool UsedAssumedInformation = false;
2284 if (isAssumedDead(*AA, nullptr, UsedAssumedInformation,
2285 /* CheckBBLivenessOnly */ true))
2286 continue;
2287 // Check the manifest debug counter that allows skipping the manifestation
2288 // of AAs.
2289 if (!DebugCounter::shouldExecute(ManifestDBGCounter))
2290 continue;
2291 // Manifest the state and record if we changed the IR.
2292 ChangeStatus LocalChange = AA->manifest(*this);
2293 if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
2294 AA->trackStatistics();
2295 LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
2296 << "\n");
2297
2298 ManifestChange = ManifestChange | LocalChange;
2299
2300 NumAtFixpoint++;
2301 NumManifested += (LocalChange == ChangeStatus::CHANGED);
2302 }
2303
2304 (void)NumManifested;
2305 (void)NumAtFixpoint;
2306 LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
2307 << " arguments while " << NumAtFixpoint
2308 << " were in a valid fixpoint state\n");
2309
2310 NumAttributesManifested += NumManifested;
2311 NumAttributesValidFixpoint += NumAtFixpoint;
2312
2313 (void)NumFinalAAs;
2314 if (NumFinalAAs != DG.SyntheticRoot.Deps.size()) {
2315 auto DepIt = DG.SyntheticRoot.Deps.begin();
2316 for (unsigned u = 0; u < NumFinalAAs; ++u)
2317 ++DepIt;
2318 for (unsigned u = NumFinalAAs; u < DG.SyntheticRoot.Deps.size();
2319 ++u, ++DepIt) {
2320 errs() << "Unexpected abstract attribute: "
2321 << cast<AbstractAttribute>(DepIt->getPointer()) << " :: "
2322 << cast<AbstractAttribute>(DepIt->getPointer())
2323 ->getIRPosition()
2324 .getAssociatedValue()
2325 << "\n";
2326 }
2327 llvm_unreachable("Expected the final number of abstract attributes to "
2328 "remain unchanged!");
2329 }
2330
2331 for (auto &It : AttrsMap) {
2332 AttributeList &AL = It.getSecond();
2333 const IRPosition &IRP =
2334 isa<Function>(It.getFirst())
2335 ? IRPosition::function(*cast<Function>(It.getFirst()))
2336 : IRPosition::callsite_function(*cast<CallBase>(It.getFirst()));
2337 IRP.setAttrList(AL);
2338 }
2339
2340 return ManifestChange;
2341}
2342
2343void Attributor::identifyDeadInternalFunctions() {
2344 // Early exit if we don't intend to delete functions.
2345 if (!Configuration.DeleteFns)
2346 return;
2347
2348 // To avoid triggering an assertion in the lazy call graph we will not delete
2349 // any internal library functions. We should modify the assertion though and
2350 // allow internals to be deleted.
2351 const auto *TLI =
2352 isModulePass()
2353 ? nullptr
2355 LibFunc LF;
2356
2357 // Identify dead internal functions and delete them. This happens outside
2358 // the other fixpoint analysis as we might treat potentially dead functions
2359 // as live to lower the number of iterations. If they happen to be dead, the
2360 // below fixpoint loop will identify and eliminate them.
2361
2362 SmallVector<Function *, 8> InternalFns;
2363 for (Function *F : Functions)
2364 if (F->hasLocalLinkage() && (isModulePass() || !TLI->getLibFunc(*F, LF)))
2365 InternalFns.push_back(F);
2366
2367 SmallPtrSet<Function *, 8> LiveInternalFns;
2368 bool FoundLiveInternal = true;
2369 while (FoundLiveInternal) {
2370 FoundLiveInternal = false;
2371 for (Function *&F : InternalFns) {
2372 if (!F)
2373 continue;
2374
2375 bool UsedAssumedInformation = false;
2377 [&](AbstractCallSite ACS) {
2379 return ToBeDeletedFunctions.count(Callee) ||
2380 (Functions.count(Callee) && Callee->hasLocalLinkage() &&
2381 !LiveInternalFns.count(Callee));
2382 },
2383 *F, true, nullptr, UsedAssumedInformation)) {
2384 continue;
2385 }
2386
2387 LiveInternalFns.insert(F);
2388 F = nullptr;
2389 FoundLiveInternal = true;
2390 }
2391 }
2392
2393 for (Function *F : InternalFns)
2394 if (F)
2395 ToBeDeletedFunctions.insert(F);
2396}
2397
2398ChangeStatus Attributor::cleanupIR() {
2399 TimeTraceScope TimeScope("Attributor::cleanupIR");
2400 // Delete stuff at the end to avoid invalid references and to get a nice order.
2401 LLVM_DEBUG(dbgs() << "\n[Attributor] Delete/replace at least "
2402 << ToBeDeletedFunctions.size() << " functions and "
2403 << ToBeDeletedBlocks.size() << " blocks and "
2404 << ToBeDeletedInsts.size() << " instructions and "
2405 << ToBeChangedValues.size() << " values and "
2406 << ToBeChangedUses.size() << " uses. To insert "
2407 << ToBeChangedToUnreachableInsts.size()
2408 << " unreachables.\n"
2409 << "Preserve manifest added " << ManifestAddedBlocks.size()
2410 << " blocks\n");
2411
2413 SmallVector<Instruction *, 32> TerminatorsToFold;
2414
2415 auto ReplaceUse = [&](Use *U, Value *NewV) {
2416 Value *OldV = U->get();
2417
2418 // If we plan to replace NewV we need to update it at this point.
2419 do {
2420 const auto &Entry = ToBeChangedValues.lookup(NewV);
2421 if (!get<0>(Entry))
2422 break;
2423 NewV = get<0>(Entry);
2424 } while (true);
2425
2426 Instruction *I = dyn_cast<Instruction>(U->getUser());
2427 assert((!I || isRunOn(*I->getFunction())) &&
2428 "Cannot replace an instruction outside the current SCC!");
2429
2430 // Do not replace uses in returns if the value is a must-tail call we will
2431 // not delete.
2432 if (auto *RI = dyn_cast_or_null<ReturnInst>(I)) {
2433 if (auto *CI = dyn_cast<CallInst>(OldV->stripPointerCasts()))
2434 if (CI->isMustTailCall() && !ToBeDeletedInsts.count(CI))
2435 return;
2436 // If we rewrite a return and the new value is not an argument, strip the
2437 // `returned` attribute as it is wrong now.
2438 if (!isa<Argument>(NewV))
2439 for (auto &Arg : RI->getFunction()->args())
2440 Arg.removeAttr(Attribute::Returned);
2441 }
2442
2443 LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
2444 << " instead of " << *OldV << "\n");
2445 U->set(NewV);
2446
2447 if (Instruction *I = dyn_cast<Instruction>(OldV)) {
2448 CGModifiedFunctions.insert(I->getFunction());
2449 if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
2451 DeadInsts.push_back(I);
2452 }
2453 if (isa<UndefValue>(NewV) && isa<CallBase>(U->getUser())) {
2454 auto *CB = cast<CallBase>(U->getUser());
2455 if (CB->isArgOperand(U)) {
2456 unsigned Idx = CB->getArgOperandNo(U);
2457 CB->removeParamAttr(Idx, Attribute::NoUndef);
2458 auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
2459 if (Callee && Callee->arg_size() > Idx)
2460 Callee->removeParamAttr(Idx, Attribute::NoUndef);
2461 }
2462 }
2463 if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
2464 Instruction *UserI = cast<Instruction>(U->getUser());
2465 if (isa<UndefValue>(NewV)) {
2466 ToBeChangedToUnreachableInsts.insert(UserI);
2467 } else {
2468 TerminatorsToFold.push_back(UserI);
2469 }
2470 }
2471 };
2472
2473 for (auto &It : ToBeChangedUses) {
2474 Use *U = It.first;
2475 Value *NewV = It.second;
2476 ReplaceUse(U, NewV);
2477 }
2478
2480 for (auto &It : ToBeChangedValues) {
2481 Value *OldV = It.first;
2482 auto [NewV, Done] = It.second;
2483 Uses.clear();
2484 for (auto &U : OldV->uses())
2485 if (Done || !U.getUser()->isDroppable())
2486 Uses.push_back(&U);
2487 for (Use *U : Uses) {
2488 if (auto *I = dyn_cast<Instruction>(U->getUser()))
2489 if (!isRunOn(*I->getFunction()))
2490 continue;
2491 ReplaceUse(U, NewV);
2492 }
2493 }
2494
2495 for (const auto &V : InvokeWithDeadSuccessor)
2496 if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
2497 assert(isRunOn(*II->getFunction()) &&
2498 "Cannot replace an invoke outside the current SCC!");
2499 bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
2500 bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
2501 bool Invoke2CallAllowed =
2503 assert((UnwindBBIsDead || NormalBBIsDead) &&
2504 "Invoke does not have dead successors!");
2505 BasicBlock *BB = II->getParent();
2506 BasicBlock *NormalDestBB = II->getNormalDest();
2507 if (UnwindBBIsDead) {
2508 Instruction *NormalNextIP = &NormalDestBB->front();
2509 if (Invoke2CallAllowed) {
2511 NormalNextIP = BB->getTerminator();
2512 }
2513 if (NormalBBIsDead)
2514 ToBeChangedToUnreachableInsts.insert(NormalNextIP);
2515 } else {
2516 assert(NormalBBIsDead && "Broken invariant!");
2517 if (!NormalDestBB->getUniquePredecessor())
2518 NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2519 ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
2520 }
2521 }
2522 for (Instruction *I : TerminatorsToFold) {
2523 assert(isRunOn(*I->getFunction()) &&
2524 "Cannot replace a terminator outside the current SCC!");
2525 CGModifiedFunctions.insert(I->getFunction());
2526 ConstantFoldTerminator(I->getParent());
2527 }
2528 for (const auto &V : ToBeChangedToUnreachableInsts)
2529 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2530 LLVM_DEBUG(dbgs() << "[Attributor] Change to unreachable: " << *I
2531 << "\n");
2532 assert(isRunOn(*I->getFunction()) &&
2533 "Cannot replace an instruction outside the current SCC!");
2534 CGModifiedFunctions.insert(I->getFunction());
2536 }
2537
2538 for (const auto &V : ToBeDeletedInsts) {
2539 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2540 assert((!isa<CallBase>(I) || isa<IntrinsicInst>(I) ||
2541 isRunOn(*I->getFunction())) &&
2542 "Cannot delete an instruction outside the current SCC!");
2543 I->dropDroppableUses();
2544 CGModifiedFunctions.insert(I->getFunction());
2545 if (!I->getType()->isVoidTy())
2546 I->replaceAllUsesWith(UndefValue::get(I->getType()));
2547 if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
2548 DeadInsts.push_back(I);
2549 else
2550 I->eraseFromParent();
2551 }
2552 }
2553
2554 llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) { return !I; });
2555
2556 LLVM_DEBUG({
2557 dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n";
2558 for (auto &I : DeadInsts)
2559 if (I)
2560 dbgs() << " - " << *I << "\n";
2561 });
2562
2564
2565 if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
2566 SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
2567 ToBeDeletedBBs.reserve(NumDeadBlocks);
2568 for (BasicBlock *BB : ToBeDeletedBlocks) {
2569 assert(isRunOn(*BB->getParent()) &&
2570 "Cannot delete a block outside the current SCC!");
2571 CGModifiedFunctions.insert(BB->getParent());
2572 // Do not delete BBs added during manifests of AAs.
2573 if (ManifestAddedBlocks.contains(BB))
2574 continue;
2575 ToBeDeletedBBs.push_back(BB);
2576 }
2577 // Actually, we do not delete the blocks but squash them into a single
2578 // unreachable instruction; untangling branches that jump here is something
2579 // we need to do in a more generic way.
2580 detachDeadBlocks(ToBeDeletedBBs, nullptr);
2581 }
2582
2583 identifyDeadInternalFunctions();
2584
2585 // Rewrite the functions as requested during manifest.
2586 ChangeStatus ManifestChange = rewriteFunctionSignatures(CGModifiedFunctions);
2587
2588 for (Function *Fn : CGModifiedFunctions)
2589 if (!ToBeDeletedFunctions.count(Fn) && Functions.count(Fn))
2590 Configuration.CGUpdater.reanalyzeFunction(*Fn);
2591
2592 for (Function *Fn : ToBeDeletedFunctions) {
2593 if (!Functions.count(Fn))
2594 continue;
2595 Configuration.CGUpdater.removeFunction(*Fn);
2596 }
2597
2598 if (!ToBeChangedUses.empty())
2599 ManifestChange = ChangeStatus::CHANGED;
2600
2601 if (!ToBeChangedToUnreachableInsts.empty())
2602 ManifestChange = ChangeStatus::CHANGED;
2603
2604 if (!ToBeDeletedFunctions.empty())
2605 ManifestChange = ChangeStatus::CHANGED;
2606
2607 if (!ToBeDeletedBlocks.empty())
2608 ManifestChange = ChangeStatus::CHANGED;
2609
2610 if (!ToBeDeletedInsts.empty())
2611 ManifestChange = ChangeStatus::CHANGED;
2612
2613 if (!InvokeWithDeadSuccessor.empty())
2614 ManifestChange = ChangeStatus::CHANGED;
2615
2616 if (!DeadInsts.empty())
2617 ManifestChange = ChangeStatus::CHANGED;
2618
2619 NumFnDeleted += ToBeDeletedFunctions.size();
2620
2621 LLVM_DEBUG(dbgs() << "[Attributor] Deleted " << ToBeDeletedFunctions.size()
2622 << " functions after manifest.\n");
2623
2624#ifdef EXPENSIVE_CHECKS
2625 for (Function *F : Functions) {
2626 if (ToBeDeletedFunctions.count(F))
2627 continue;
2628 assert(!verifyFunction(*F, &errs()) && "Module verification failed!");
2629 }
2630#endif
2631
2632 return ManifestChange;
2633}
2634
2635ChangeStatus Attributor::run() {
2636 TimeTraceScope TimeScope("Attributor::run");
2637 AttributorCallGraph ACallGraph(*this);
2638
2639 if (PrintCallGraph)
2640 ACallGraph.populateAll();
2641
2642 Phase = AttributorPhase::UPDATE;
2643 runTillFixpoint();
2644
2645 // dump graphs on demand
2646 if (DumpDepGraph)
2647 DG.dumpGraph();
2648
2649 if (ViewDepGraph)
2650 DG.viewGraph();
2651
2653 DG.print();
2654
2655 Phase = AttributorPhase::MANIFEST;
2656 ChangeStatus ManifestChange = manifestAttributes();
2657
2658 Phase = AttributorPhase::CLEANUP;
2659 ChangeStatus CleanupChange = cleanupIR();
2660
2661 if (PrintCallGraph)
2662 ACallGraph.print();
2663
2664 return ManifestChange | CleanupChange;
2665}
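// [Editor's note] A rough sketch of how a client drives the pipeline above
// (update -> manifest -> cleanup). Hypothetical driver; the constructor
// arguments shown are the commonly used subset and other AttributorConfig /
// InformationCache parameters are assumed to keep their defaults.
static bool runAttributorOnModule(Module &M, AnalysisGetter &AG,
                                  CallGraphUpdater &CGUpdater) {
  SetVector<Function *> Functions;
  for (Function &F : M)
    if (!F.isDeclaration())
      Functions.insert(&F);

  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);

  AttributorConfig AC(CGUpdater);
  Attributor A(Functions, InfoCache, AC);

  // Seed the default abstract attributes for every function, then iterate.
  for (Function *F : Functions)
    A.identifyDefaultAbstractAttributes(*F);

  return A.run() == ChangeStatus::CHANGED;
}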
2666
2667ChangeStatus Attributor::updateAA(AbstractAttribute &AA) {
2668 TimeTraceScope TimeScope("updateAA", [&]() {
2669 return AA.getName() + std::to_string(AA.getIRPosition().getPositionKind());
2670 });
2671 assert(Phase == AttributorPhase::UPDATE &&
2672 "We can update AA only in the update stage!");
2673
2674 // Use a new dependence vector for this update.
2675 DependenceVector DV;
2676 DependenceStack.push_back(&DV);
2677
2678 auto &AAState = AA.getState();
2679 ChangeStatus CS = ChangeStatus::UNCHANGED;
2680 bool UsedAssumedInformation = false;
2681 if (!isAssumedDead(AA, nullptr, UsedAssumedInformation,
2682 /* CheckBBLivenessOnly */ true))
2683 CS = AA.update(*this);
2684
2685 if (!AA.isQueryAA() && DV.empty() && !AA.getState().isAtFixpoint()) {
2686 // If the AA did not rely on outside information but changed, we run it
2687 // again to see if it found a fixpoint. Most AAs do but we don't require
2688 // them to. Hence, it might take the AA multiple iterations to get to a
2689 // fixpoint even if it does not rely on outside information, which is fine.
2690 ChangeStatus RerunCS = ChangeStatus::UNCHANGED;
2691 if (CS == ChangeStatus::CHANGED)
2692 RerunCS = AA.update(*this);
2693
2694 // If the attribute did not change during the run or rerun, and it still did
2695 // not query any non-fix information, the state will not change and we can
2696 // indicate that right at this point.
2697 if (RerunCS == ChangeStatus::UNCHANGED && !AA.isQueryAA() && DV.empty())
2698 AAState.indicateOptimisticFixpoint();
2699 }
2700
2701 if (!AAState.isAtFixpoint())
2702 rememberDependences();
2703
2704 // Verify the stack was used properly, that is we pop the dependence vector we
2705 // put there earlier.
2706 DependenceVector *PoppedDV = DependenceStack.pop_back_val();
2707 (void)PoppedDV;
2708 assert(PoppedDV == &DV && "Inconsistent usage of the dependence stack!");
2709
2710 return CS;
2711}
2712
2713void Attributor::createShallowWrapper(Function &F) {
2714 assert(!F.isDeclaration() && "Cannot create a wrapper around a declaration!");
2715
2716 Module &M = *F.getParent();
2717 LLVMContext &Ctx = M.getContext();
2718 FunctionType *FnTy = F.getFunctionType();
2719
2720 Function *Wrapper =
2721 Function::Create(FnTy, F.getLinkage(), F.getAddressSpace(), F.getName());
2722 F.setName(""); // make the wrapped (inner) function anonymous
2723 M.getFunctionList().insert(F.getIterator(), Wrapper);
2724 // Flag whether the function is using new-debug-info or not.
2725 Wrapper->IsNewDbgInfoFormat = M.IsNewDbgInfoFormat;
2726
2727 F.setLinkage(GlobalValue::InternalLinkage);
2728
2729 F.replaceAllUsesWith(Wrapper);
2730 assert(F.use_empty() && "Uses remained after wrapper was created!");
2731
2732 // Move the COMDAT section to the wrapper.
2733 // TODO: Check if we need to keep it for F as well.
2734 Wrapper->setComdat(F.getComdat());
2735 F.setComdat(nullptr);
2736
2737 // Copy all metadata and attributes but keep them on F as well.
2739 F.getAllMetadata(MDs);
2740 for (auto MDIt : MDs)
2741 Wrapper->addMetadata(MDIt.first, *MDIt.second);
2742 Wrapper->setAttributes(F.getAttributes());
2743
2744 // Create the call in the wrapper.
2745 BasicBlock *EntryBB = BasicBlock::Create(Ctx, "entry", Wrapper);
2746
2748 Argument *FArgIt = F.arg_begin();
2749 for (Argument &Arg : Wrapper->args()) {
2750 Args.push_back(&Arg);
2751 Arg.setName((FArgIt++)->getName());
2752 }
2753
2754 CallInst *CI = CallInst::Create(&F, Args, "", EntryBB);
2755 CI->setTailCall(true);
2756 CI->addFnAttr(Attribute::NoInline);
2757 ReturnInst::Create(Ctx, CI->getType()->isVoidTy() ? nullptr : CI, EntryBB);
2758
2759 NumFnShallowWrappersCreated++;
2760}
2761
2762bool Attributor::isInternalizable(Function &F) {
2763 if (F.isDeclaration() || F.hasLocalLinkage() ||
2765 return false;
2766 return true;
2767}
2768
2769Function *Attributor::internalizeFunction(Function &F, bool Force) {
2770 if (!AllowDeepWrapper && !Force)
2771 return nullptr;
2772 if (!isInternalizable(F))
2773 return nullptr;
2774
2775 SmallPtrSet<Function *, 2> FnSet = {&F};
2776 DenseMap<Function *, Function *> InternalizedFns;
2777 internalizeFunctions(FnSet, InternalizedFns);
2778
2779 return InternalizedFns[&F];
2780}
2781
2782bool Attributor::internalizeFunctions(SmallPtrSetImpl<Function *> &FnSet,
2783 DenseMap<Function *, Function *> &FnMap) {
2784 for (Function *F : FnSet)
2785 if (!isInternalizable(*F))
2786 return false;
2787
2788 FnMap.clear();
2789 // Generate the internalized version of each function.
2790 for (Function *F : FnSet) {
2791 Module &M = *F->getParent();
2792 FunctionType *FnTy = F->getFunctionType();
2793
2794 // Create a copy of the current function
2795 Function *Copied =
2796 Function::Create(FnTy, F->getLinkage(), F->getAddressSpace(),
2797 F->getName() + ".internalized");
2798 ValueToValueMapTy VMap;
2799 auto *NewFArgIt = Copied->arg_begin();
2800 for (auto &Arg : F->args()) {
2801 auto ArgName = Arg.getName();
2802 NewFArgIt->setName(ArgName);
2803 VMap[&Arg] = &(*NewFArgIt++);
2804 }
2806 // Flag whether the function is using new-debug-info or not.
2807 Copied->IsNewDbgInfoFormat = F->IsNewDbgInfoFormat;
2808
2809 // Copy the body of the original function to the new one
2810 CloneFunctionInto(Copied, F, VMap,
2812
2813 // Set the linkage and visibility late as CloneFunctionInto has some
2814 // implicit requirements.
2817
2818 // Copy metadata
2820 F->getAllMetadata(MDs);
2821 for (auto MDIt : MDs)
2822 if (!Copied->hasMetadata())
2823 Copied->addMetadata(MDIt.first, *MDIt.second);
2824
2825 M.getFunctionList().insert(F->getIterator(), Copied);
2826 Copied->setDSOLocal(true);
2827 FnMap[F] = Copied;
2828 }
2829
2830 // Replace all uses of the old function with the new internalized function
2831 // unless the caller is a function that was just internalized.
2832 for (Function *F : FnSet) {
2833 auto &InternalizedFn = FnMap[F];
2834 auto IsNotInternalized = [&](Use &U) -> bool {
2835 if (auto *CB = dyn_cast<CallBase>(U.getUser()))
2836 return !FnMap.lookup(CB->getCaller());
2837 return false;
2838 };
2839 F->replaceUsesWithIf(InternalizedFn, IsNotInternalized);
2840 }
2841
2842 return true;
2843}
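// [Editor's note] A minimal sketch of using the internalization helpers above
// to obtain a copy that can be analyzed under exact-definition assumptions.
// Hypothetical helper; it assumes isInternalizable/internalizeFunction are the
// static Attributor helpers declared in Attributor.h, and Force = true
// bypasses the AllowDeepWrapper guard checked in internalizeFunction.
static Function *getAnalyzableCopy(Function &F) {
  if (!Attributor::isInternalizable(F))
    return &F; // Declaration, local linkage, or otherwise not internalizable.
  if (Function *Copy = Attributor::internalizeFunction(F, /* Force */ true))
    return Copy; // Private ".internalized" clone; outside callers now use it.
  return &F;
}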
2844
2845bool Attributor::isValidFunctionSignatureRewrite(
2846 Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
2847
2848 if (!Configuration.RewriteSignatures)
2849 return false;
2850
2851 Function *Fn = Arg.getParent();
2852 auto CallSiteCanBeChanged = [Fn](AbstractCallSite ACS) {
2853 // Forbid the call site to cast the function return type. If we need to
2854 // rewrite these functions we need to re-create a cast for the new call site
2855 // (if the old had uses).
2856 if (!ACS.getCalledFunction() ||
2857 ACS.getInstruction()->getType() !=
2859 return false;
2860 if (cast<CallBase>(ACS.getInstruction())->getCalledOperand()->getType() !=
2861 Fn->getType())
2862 return false;
2863 if (ACS.getNumArgOperands() != Fn->arg_size())
2864 return false;
2865 // Forbid must-tail calls for now.
2866 return !ACS.isCallbackCall() && !ACS.getInstruction()->isMustTailCall();
2867 };
2868
2869 // Avoid var-arg functions for now.
2870 if (Fn->isVarArg()) {
2871 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
2872 return false;
2873 }
2874
2875 // Avoid functions with complicated argument passing semantics.
2876 AttributeList FnAttributeList = Fn->getAttributes();
2877 if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
2878 FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
2879 FnAttributeList.hasAttrSomewhere(Attribute::InAlloca) ||
2880 FnAttributeList.hasAttrSomewhere(Attribute::Preallocated)) {
2881 LLVM_DEBUG(
2882 dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
2883 return false;
2884 }
2885
2886 // Avoid callbacks for now.
2887 bool UsedAssumedInformation = false;
2888 if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
2889 UsedAssumedInformation,
2890 /* CheckPotentiallyDead */ true)) {
2891 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
2892 return false;
2893 }
2894
2895 auto InstPred = [](Instruction &I) {
2896 if (auto *CI = dyn_cast<CallInst>(&I))
2897 return !CI->isMustTailCall();
2898 return true;
2899 };
2900
2901 // Forbid must-tail calls for now.
2902 // TODO:
2903 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2904 if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
2905 nullptr, {Instruction::Call},
2906 UsedAssumedInformation)) {
2907 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
2908 return false;
2909 }
2910
2911 return true;
2912}
2913
2914bool Attributor::registerFunctionSignatureRewrite(
2915 Argument &Arg, ArrayRef<Type *> ReplacementTypes,
2916 ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
2917 ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
2918 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2919 << Arg.getParent()->getName() << " with "
2920 << ReplacementTypes.size() << " replacements\n");
2921 assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
2922 "Cannot register an invalid rewrite");
2923
2924 Function *Fn = Arg.getParent();
2926 ArgumentReplacementMap[Fn];
2927 if (ARIs.empty())
2928 ARIs.resize(Fn->arg_size());
2929
2930 // If we already have a replacement that introduces no more new arguments
2931 // than this request, ignore this request.
2932 std::unique_ptr<ArgumentReplacementInfo> &ARI = ARIs[Arg.getArgNo()];
2933 if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
2934 LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
2935 return false;
2936 }
2937
2938 // If we have a replacement already but we like the new one better, delete
2939 // the old.
2940 ARI.reset();
2941
2942 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2943 << Arg.getParent()->getName() << " with "
2944 << ReplacementTypes.size() << " replacements\n");
2945
2946 // Remember the replacement.
2947 ARI.reset(new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
2948 std::move(CalleeRepairCB),
2949 std::move(ACSRepairCB)));
2950
2951 return true;
2952}
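// [Editor's note] A sketch of registering a rewrite that drops a dead
// argument: with an empty replacement type list nothing is materialized and
// empty repair callbacks are fine, since rewriteFunctionSignatures below only
// invokes them when present. Hypothetical helper.
static bool dropDeadArgument(Attributor &A, Argument &Arg) {
  // No replacement types: the argument simply disappears from the signature.
  SmallVector<Type *, 1> ReplacementTypes;
  if (!A.isValidFunctionSignatureRewrite(Arg, ReplacementTypes))
    return false;
  return A.registerFunctionSignatureRewrite(Arg, ReplacementTypes,
                                            /* CalleeRepairCB */ {},
                                            /* ACSRepairCB */ {});
}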
2953
2954bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) {
2955 bool Result = true;
2956#ifndef NDEBUG
2957 if (SeedAllowList.size() != 0)
2959 Function *Fn = AA.getAnchorScope();
2960 if (FunctionSeedAllowList.size() != 0 && Fn)
2962#endif
2963 return Result;
2964}
2965
2966ChangeStatus Attributor::rewriteFunctionSignatures(
2967 SmallSetVector<Function *, 8> &ModifiedFns) {
2968 ChangeStatus Changed = ChangeStatus::UNCHANGED;
2969
2970 for (auto &It : ArgumentReplacementMap) {
2971 Function *OldFn = It.getFirst();
2972
2973 // Deleted functions do not require rewrites.
2974 if (!Functions.count(OldFn) || ToBeDeletedFunctions.count(OldFn))
2975 continue;
2976
2978 It.getSecond();
2979 assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");
2980
2981 SmallVector<Type *, 16> NewArgumentTypes;
2982 SmallVector<AttributeSet, 16> NewArgumentAttributes;
2983
2984 // Collect replacement argument types and copy over existing attributes.
2985 AttributeList OldFnAttributeList = OldFn->getAttributes();
2986 for (Argument &Arg : OldFn->args()) {
2987 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2988 ARIs[Arg.getArgNo()]) {
2989 NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
2990 ARI->ReplacementTypes.end());
2991 NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
2992 AttributeSet());
2993 } else {
2994 NewArgumentTypes.push_back(Arg.getType());
2995 NewArgumentAttributes.push_back(
2996 OldFnAttributeList.getParamAttrs(Arg.getArgNo()));
2997 }
2998 }
2999
3000 uint64_t LargestVectorWidth = 0;
3001 for (auto *I : NewArgumentTypes)
3002 if (auto *VT = dyn_cast<llvm::VectorType>(I))
3003 LargestVectorWidth =
3004 std::max(LargestVectorWidth,
3005 VT->getPrimitiveSizeInBits().getKnownMinValue());
3006
3007 FunctionType *OldFnTy = OldFn->getFunctionType();
3008 Type *RetTy = OldFnTy->getReturnType();
3009
3010 // Construct the new function type using the new arguments types.
3011 FunctionType *NewFnTy =
3012 FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());
3013
3014 LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
3015 << "' from " << *OldFn->getFunctionType() << " to "
3016 << *NewFnTy << "\n");
3017
3018 // Create the new function body and insert it into the module.
3019 Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
3020 OldFn->getAddressSpace(), "");
3021 Functions.insert(NewFn);
3022 OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
3023 NewFn->takeName(OldFn);
3024 NewFn->copyAttributesFrom(OldFn);
3025 // Flag whether the function is using new-debug-info or not.
3026 NewFn->IsNewDbgInfoFormat = OldFn->IsNewDbgInfoFormat;
3027
3028 // Patch the pointer to LLVM function in debug info descriptor.
3029 NewFn->setSubprogram(OldFn->getSubprogram());
3030 OldFn->setSubprogram(nullptr);
3031
3032 // Recompute the parameter attributes list based on the new arguments for
3033 // the function.
3034 LLVMContext &Ctx = OldFn->getContext();
3036 Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(),
3037 NewArgumentAttributes));
3038 AttributeFuncs::updateMinLegalVectorWidthAttr(*NewFn, LargestVectorWidth);
3039
3040 // Remove argmem from the memory effects if we have no more pointer
3041 // arguments, or they are readnone.
3042 MemoryEffects ME = NewFn->getMemoryEffects();
3043 int ArgNo = -1;
3044 if (ME.doesAccessArgPointees() && all_of(NewArgumentTypes, [&](Type *T) {
3045 ++ArgNo;
3046 return !T->isPtrOrPtrVectorTy() ||
3047 NewFn->hasParamAttribute(ArgNo, Attribute::ReadNone);
3048 })) {
3050 }
3051
3052 // Since we have now created the new function, splice the body of the old
3053 // function right into the new function, leaving the old rotting hulk of the
3054 // function empty.
3055 NewFn->splice(NewFn->begin(), OldFn);
3056
3057 // Fixup block addresses to reference new function.
3058 SmallVector<BlockAddress *, 8u> BlockAddresses;
3059 for (User *U : OldFn->users())
3060 if (auto *BA = dyn_cast<BlockAddress>(U))
3061 BlockAddresses.push_back(BA);
3062 for (auto *BA : BlockAddresses)
3063 BA->replaceAllUsesWith(BlockAddress::get(NewFn, BA->getBasicBlock()));
3064
3065 // Set of all "call-like" instructions that invoke the old function mapped
3066 // to their new replacements.
3068
3069 // Callback to create a new "call-like" instruction for a given one.
3070 auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
3071 CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
3072 const AttributeList &OldCallAttributeList = OldCB->getAttributes();
3073
3074 // Collect the new argument operands for the replacement call site.
3075 SmallVector<Value *, 16> NewArgOperands;
3076 SmallVector<AttributeSet, 16> NewArgOperandAttributes;
3077 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
3078 unsigned NewFirstArgNum = NewArgOperands.size();
3079 (void)NewFirstArgNum; // only used inside assert.
3080 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
3081 ARIs[OldArgNum]) {
3082 if (ARI->ACSRepairCB)
3083 ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
3084 assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
3085 NewArgOperands.size() &&
3086 "ACS repair callback did not provide as many operand as new "
3087 "types were registered!");
3088 // TODO: Expose the attribute set to the ACS repair callback
3089 NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
3090 AttributeSet());
3091 } else {
3092 NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
3093 NewArgOperandAttributes.push_back(
3094 OldCallAttributeList.getParamAttrs(OldArgNum));
3095 }
3096 }
3097
3098 assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
3099 "Mismatch # argument operands vs. # argument operand attributes!");
3100 assert(NewArgOperands.size() == NewFn->arg_size() &&
3101 "Mismatch # argument operands vs. # function arguments!");
3102
3103 SmallVector<OperandBundleDef, 4> OperandBundleDefs;
3104 OldCB->getOperandBundlesAsDefs(OperandBundleDefs);
3105
3106 // Create a new call or invoke instruction to replace the old one.
3107 CallBase *NewCB;
3108 if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
3109 NewCB = InvokeInst::Create(NewFn, II->getNormalDest(),
3110 II->getUnwindDest(), NewArgOperands,
3111 OperandBundleDefs, "", OldCB->getIterator());
3112 } else {
3113 auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
3114 "", OldCB->getIterator());
3115 NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
3116 NewCB = NewCI;
3117 }
3118
3119 // Copy over various properties and the new attributes.
3120 NewCB->copyMetadata(*OldCB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
3121 NewCB->setCallingConv(OldCB->getCallingConv());
3122 NewCB->takeName(OldCB);
3124 Ctx, OldCallAttributeList.getFnAttrs(),
3125 OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));
3126
3128 LargestVectorWidth);
3129
3130 CallSitePairs.push_back({OldCB, NewCB});
3131 return true;
3132 };
3133
3134 // Use the CallSiteReplacementCreator to create replacement call sites.
3135 bool UsedAssumedInformation = false;
3136 bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
3137 true, nullptr, UsedAssumedInformation,
3138 /* CheckPotentiallyDead */ true);
3139 (void)Success;
3140 assert(Success && "Assumed call site replacement to succeed!");
3141
3142 // Rewire the arguments.
3143 Argument *OldFnArgIt = OldFn->arg_begin();
3144 Argument *NewFnArgIt = NewFn->arg_begin();
3145 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
3146 ++OldArgNum, ++OldFnArgIt) {
3147 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
3148 ARIs[OldArgNum]) {
3149 if (ARI->CalleeRepairCB)
3150 ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
3151 if (ARI->ReplacementTypes.empty())
3152 OldFnArgIt->replaceAllUsesWith(
3153 PoisonValue::get(OldFnArgIt->getType()));
3154 NewFnArgIt += ARI->ReplacementTypes.size();
3155 } else {
3156 NewFnArgIt->takeName(&*OldFnArgIt);
3157 OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
3158 ++NewFnArgIt;
3159 }
3160 }
3161
3162 // Eliminate the instructions *after* we visited all of them.
3163 for (auto &CallSitePair : CallSitePairs) {
3164 CallBase &OldCB = *CallSitePair.first;
3165 CallBase &NewCB = *CallSitePair.second;
3166 assert(OldCB.getType() == NewCB.getType() &&
3167 "Cannot handle call sites with different types!");
3168 ModifiedFns.insert(OldCB.getFunction());
3169 OldCB.replaceAllUsesWith(&NewCB);
3170 OldCB.eraseFromParent();
3171 }
3172
3173 // Replace the function in the call graph (if any).
3174 Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);
3175
3176 // If the old function was modified and needed to be reanalyzed, the new one
3177 // does now.
3178 if (ModifiedFns.remove(OldFn))
3179 ModifiedFns.insert(NewFn);
3180
3181 Changed = ChangeStatus::CHANGED;
3182 }
3183
3184 return Changed;
3185}
3186
3187void InformationCache::initializeInformationCache(const Function &CF,
3188 FunctionInfo &FI) {
3189 // As we do not modify the function here we can remove the const
3190 // without breaking implicit assumptions. At the end of the day, we could
3191 // initialize the cache eagerly which would look the same to the users.
3192 Function &F = const_cast<Function &>(CF);
3193
3194 // Walk all instructions to find interesting instructions that might be
3195 // queried by abstract attributes during their initialization or update.
3196 // This has to happen before we create attributes.
3197
3199
3200 // Add \p V to the assume uses map which tracks the number of uses outside of
3201 // "visited" assumes. If no outside uses are left, the value is added to the
3202 // assume-only use vector.
3203 auto AddToAssumeUsesMap = [&](const Value &V) -> void {
3205 if (auto *I = dyn_cast<Instruction>(&V))
3206 Worklist.push_back(I);
3207 while (!Worklist.empty()) {
3208 const Instruction *I = Worklist.pop_back_val();
3209 std::optional<short> &NumUses = AssumeUsesMap[I];
3210 if (!NumUses)
3211 NumUses = I->getNumUses();
3212 NumUses = *NumUses - /* this assume */ 1;
3213 if (*NumUses != 0)
3214 continue;
3215 AssumeOnlyValues.insert(I);
3216 for (const Value *Op : I->operands())
3217 if (auto *OpI = dyn_cast<Instruction>(Op))
3218 Worklist.push_back(OpI);
3219 }
3220 };
3221
3222 for (Instruction &I : instructions(&F)) {
3223 bool IsInterestingOpcode = false;
3224
3225 // To allow easy access to all instructions in a function with a given
3226 // opcode we store them in the InfoCache. As not all opcodes are interesting
3227 // to concrete attributes we only cache the ones that are as identified in
3228 // the following switch.
3229 // Note: There are no concrete attributes now so this is initially empty.
3230 switch (I.getOpcode()) {
3231 default:
3232 assert(!isa<CallBase>(&I) &&
3233 "New call base instruction type needs to be known in the "
3234 "Attributor.");
3235 break;
3236 case Instruction::Call:
3237 // Calls are interesting on their own, additionally:
3238 // For `llvm.assume` calls we also fill the KnowledgeMap as we find them.
3239 // For `must-tail` calls we remember the caller and callee.
3240 if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
3241 AssumeOnlyValues.insert(Assume);
3242 fillMapFromAssume(*Assume, KnowledgeMap);
3243 AddToAssumeUsesMap(*Assume->getArgOperand(0));
3244 } else if (cast<CallInst>(I).isMustTailCall()) {
3245 FI.ContainsMustTailCall = true;
3246 if (auto *Callee = dyn_cast_if_present<Function>(
3247 cast<CallInst>(I).getCalledOperand()))
3248 getFunctionInfo(*Callee).CalledViaMustTail = true;
3249 }
3250 [[fallthrough]];
3251 case Instruction::CallBr:
3252 case Instruction::Invoke:
3253 case Instruction::CleanupRet:
3254 case Instruction::CatchSwitch:
3255 case Instruction::AtomicRMW:
3256 case Instruction::AtomicCmpXchg:
3257 case Instruction::Br:
3258 case Instruction::Resume:
3259 case Instruction::Ret:
3260 case Instruction::Load:
3261 // The alignment of a pointer is interesting for loads.
3262 case Instruction::Store:
3263 // The alignment of a pointer is interesting for stores.
3264 case Instruction::Alloca:
3265 case Instruction::AddrSpaceCast:
3266 IsInterestingOpcode = true;
3267 }
3268 if (IsInterestingOpcode) {
3269 auto *&Insts = FI.OpcodeInstMap[I.getOpcode()];
3270 if (!Insts)
3271 Insts = new (Allocator) InstructionVectorTy();
3272 Insts->push_back(&I);
3273 }
3274 if (I.mayReadOrWriteMemory())
3275 FI.RWInsts.push_back(&I);
3276 }
3277
3278 if (F.hasFnAttribute(Attribute::AlwaysInline) &&
3280 InlineableFunctions.insert(&F);
3281}
3282
3283InformationCache::FunctionInfo::~FunctionInfo() {
3284 // The instruction vectors are allocated using a BumpPtrAllocator, we need to
3285 // manually destroy them.
3286 for (auto &It : OpcodeInstMap)
3287 It.getSecond()->~InstructionVectorTy();
3288}
3289
3292 assert(A.isClosedWorldModule() && "Cannot see all indirect callees!");
3293 return IndirectlyCallableFunctions;
3294}
3295
3296std::optional<unsigned> InformationCache::getFlatAddressSpace() const {
3297 if (TargetTriple.isAMDGPU() || TargetTriple.isNVPTX())
3298 return 0;
3299 return std::nullopt;
3300}
3301
3302void Attributor::recordDependence(const AbstractAttribute &FromAA,
3303 const AbstractAttribute &ToAA,
3304 DepClassTy DepClass) {
3305 if (DepClass == DepClassTy::NONE)
3306 return;
3307 // If we are outside of an update, thus before the actual fixpoint iteration
3308 // started (= when we create AAs), we do not track dependences because we will
3309 // put all AAs into the initial worklist anyway.
3310 if (DependenceStack.empty())
3311 return;
3312 if (FromAA.getState().isAtFixpoint())
3313 return;
3314 DependenceStack.back()->push_back({&FromAA, &ToAA, DepClass});
3315}
3316
3317void Attributor::rememberDependences() {
3318 assert(!DependenceStack.empty() && "No dependences to remember!");
3319
3320 for (DepInfo &DI : *DependenceStack.back()) {
3321 assert((DI.DepClass == DepClassTy::REQUIRED ||
3322 DI.DepClass == DepClassTy::OPTIONAL) &&
3323 "Expected required or optional dependence (1 bit)!");
3324 auto &DepAAs = const_cast<AbstractAttribute &>(*DI.FromAA).Deps;
3325 DepAAs.insert(AbstractAttribute::DepTy(
3326 const_cast<AbstractAttribute *>(DI.ToAA), unsigned(DI.DepClass)));
3327 }
3328}
3329
3330template <Attribute::AttrKind AK, typename AAType>
3331void Attributor::checkAndQueryIRAttr(const IRPosition &IRP,
3332 AttributeSet Attrs) {
3333 bool IsKnown;
3334 if (!Attrs.hasAttribute(AK))
3335 if (!Configuration.Allowed || Configuration.Allowed->count(&AAType::ID))
3336 if (!AA::hasAssumedIRAttr<AK>(*this, nullptr, IRP, DepClassTy::NONE,
3337 IsKnown))
3338 getOrCreateAAFor<AAType>(IRP);
3339}
3340
3341void Attributor::identifyDefaultAbstractAttributes(Function &F) {
3342 if (!VisitedFunctions.insert(&F).second)
3343 return;
3344 if (F.isDeclaration())
3345 return;
3346
3347 // In non-module runs we need to look at the call sites of a function to
3348 // determine if it is part of a must-tail call edge. This will influence what
3349 // attributes we can derive.
3350 InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F);
3351 if (!isModulePass() && !FI.CalledViaMustTail) {
3352 for (const Use &U : F.uses())
3353 if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
3354 if (CB->isCallee(&U) && CB->isMustTailCall())
3355 FI.CalledViaMustTail = true;
3356 }
3357
3358 IRPosition FPos = IRPosition::function(F);
3359 bool IsIPOAmendable = isFunctionIPOAmendable(F);
3360 auto Attrs = F.getAttributes();
3361 auto FnAttrs = Attrs.getFnAttrs();
3362
3363 // Check for dead BasicBlocks in every function.
3364 // We need dead instruction detection because we do not want to deal with
3365 // broken IR in which SSA rules do not apply.
3366 getOrCreateAAFor<AAIsDead>(FPos);
3367
3368 // Every function might contain instructions that cause "undefined
3369 // behavior".
3370 getOrCreateAAFor<AAUndefinedBehavior>(FPos);
3371
3372 // Every function might be applicable for Heap-To-Stack conversion.
3374 getOrCreateAAFor<AAHeapToStack>(FPos);
3375
3376 // Every function might be "must-progress".
3377 checkAndQueryIRAttr<Attribute::MustProgress, AAMustProgress>(FPos, FnAttrs);
3378
3379 // Every function might be "no-free".
3380 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(FPos, FnAttrs);
3381
3382 // Every function might be "will-return".
3383 checkAndQueryIRAttr<Attribute::WillReturn, AAWillReturn>(FPos, FnAttrs);
3384
3385 // Every function might be marked "nosync"
3386 checkAndQueryIRAttr<Attribute::NoSync, AANoSync>(FPos, FnAttrs);
3387
3388 // Everything that is visible from the outside (= function, argument, return
3389 // positions) cannot be changed if the function is not IPO amendable. We can,
3390 // however, analyse the code inside.
3391 if (IsIPOAmendable) {
3392
3393 // Every function can be nounwind.
3394 checkAndQueryIRAttr<Attribute::NoUnwind, AANoUnwind>(FPos, FnAttrs);
3395
3396 // Every function might be "no-return".
3397 checkAndQueryIRAttr<Attribute::NoReturn, AANoReturn>(FPos, FnAttrs);
3398
3399 // Every function might be "no-recurse".
3400 checkAndQueryIRAttr<Attribute::NoRecurse, AANoRecurse>(FPos, FnAttrs);
3401
3402 // Every function can be "non-convergent".
3403 if (Attrs.hasFnAttr(Attribute::Convergent))
3404 getOrCreateAAFor<AANonConvergent>(FPos);
3405
3406 // Every function might be "readnone/readonly/writeonly/...".
3407 getOrCreateAAFor<AAMemoryBehavior>(FPos);
3408
3409 // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
3410 getOrCreateAAFor<AAMemoryLocation>(FPos);
3411
3412 // Every function can track active assumptions.
3413 getOrCreateAAFor<AAAssumptionInfo>(FPos);
3414
3415 // If we're not using a dynamic mode for float, there's nothing worthwhile
3416 // to infer. This misses the edge case denormal-fp-math="dynamic" and
3417 // denormal-fp-math-f32=something, but that likely has no real world use.
3418 DenormalMode Mode = F.getDenormalMode(APFloat::IEEEsingle());
3419 if (Mode.Input == DenormalMode::Dynamic ||
3420 Mode.Output == DenormalMode::Dynamic)
3421 getOrCreateAAFor<AADenormalFPMath>(FPos);
3422
3423 // Return attributes are only appropriate if the return type is non void.
3424 Type *ReturnType = F.getReturnType();
3425 if (!ReturnType->isVoidTy()) {
3426 IRPosition RetPos = IRPosition::returned(F);
3427 AttributeSet RetAttrs = Attrs.getRetAttrs();
3428
3429 // Every returned value might be dead.
3430 getOrCreateAAFor<AAIsDead>(RetPos);
3431
3432 // Every function might be simplified.
3433 bool UsedAssumedInformation = false;
3434 getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation,
3435 AA::Intraprocedural);
3436
3437 // Every returned value might be marked noundef.
3438 checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(RetPos, RetAttrs);
3439
3440 if (ReturnType->isPointerTy()) {
3441
3442 // Every function with pointer return type might be marked align.
3443 getOrCreateAAFor<AAAlign>(RetPos);
3444
3445 // Every function with pointer return type might be marked nonnull.
3446 checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(RetPos, RetAttrs);
3447
3448 // Every function with pointer return type might be marked noalias.
3449 checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(RetPos, RetAttrs);
3450
3451 // Every function with pointer return type might be marked
3452 // dereferenceable.
3453 getOrCreateAAFor<AADereferenceable>(RetPos);
3454 } else if (AttributeFuncs::isNoFPClassCompatibleType(ReturnType)) {
3455 getOrCreateAAFor<AANoFPClass>(RetPos);
3456 }
3457 }
3458 }
3459
3460 for (Argument &Arg : F.args()) {
3461 IRPosition ArgPos = IRPosition::argument(Arg);
3462 auto ArgNo = Arg.getArgNo();
3463 AttributeSet ArgAttrs = Attrs.getParamAttrs(ArgNo);
3464
3465 if (!IsIPOAmendable) {
3466 if (Arg.getType()->isPointerTy())
3467 // Every argument with pointer type might be marked nofree.
3468 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
3469 continue;
3470 }
3471
3472 // Every argument might be simplified. We have to go through the
3473 // Attributor interface though as outside AAs can register custom
3474 // simplification callbacks.
3475 bool UsedAssumedInformation = false;
3476 getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation,
3477 AA::Intraprocedural);
3478
3479 // Every argument might be dead.
3480 getOrCreateAAFor<AAIsDead>(ArgPos);
3481
3482 // Every argument might be marked noundef.
3483 checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(ArgPos, ArgAttrs);
3484
3485 if (Arg.getType()->isPointerTy()) {
3486 // Every argument with pointer type might be marked nonnull.
3487 checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(ArgPos, ArgAttrs);
3488
3489 // Every argument with pointer type might be marked noalias.
3490 checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(ArgPos, ArgAttrs);
3491
3492 // Every argument with pointer type might be marked dereferenceable.
3493 getOrCreateAAFor<AADereferenceable>(ArgPos);
3494
3495 // Every argument with pointer type might be marked align.
3496 getOrCreateAAFor<AAAlign>(ArgPos);
3497
3498 // Every argument with pointer type might be marked nocapture.
3499 checkAndQueryIRAttr<Attribute::NoCapture, AANoCapture>(ArgPos, ArgAttrs);
3500
3501 // Every argument with pointer type might be marked
3502 // "readnone/readonly/writeonly/..."
3503 getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
3504
3505 // Every argument with pointer type might be marked nofree.
3506 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
3507
3508 // Every argument with pointer type might be privatizable (or
3509 // promotable)
3510 getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
3511 } else if (AttributeFuncs::isNoFPClassCompatibleType(Arg.getType())) {
3512 getOrCreateAAFor<AANoFPClass>(ArgPos);
3513 }
3514 }
3515
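// The CallSitePred lambda below seeds abstract attributes for every call site:
// liveness of the call and of its return value, callee resolution for indirect
// calls, assumption tracking, and per-argument attributes (noundef, nonnull,
// noalias, dereferenceable, align, nocapture, nofree, memory behavior).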
3516 auto CallSitePred = [&](Instruction &I) -> bool {
3517 auto &CB = cast<CallBase>(I);
3518 IRPosition CBInstPos = IRPosition::inst(CB);
3519 IRPosition CBFnPos = IRPosition::callsite_function(CB);
3520
3521 // Call sites might be dead if they do not have side effects and no live
3522 // users. The return value might be dead if there are no live users.
3523 getOrCreateAAFor<AAIsDead>(CBInstPos);
3524
3525 Function *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
3526 // TODO: Even if the callee is not known now we might be able to simplify
3527 // the call/callee.
3528 if (!Callee) {
3529 getOrCreateAAFor<AAIndirectCallInfo>(CBFnPos);
3530 return true;
3531 }
3532
3533 // Every call site can track active assumptions.
3534 getOrCreateAAFor<AAAssumptionInfo>(CBFnPos);
3535
3536 // Skip declarations except if annotations on their call sites were
3537 // explicitly requested.
3538 if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
3539 !Callee->hasMetadata(LLVMContext::MD_callback))
3540 return true;
3541
3542 if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) {
3543 IRPosition CBRetPos = IRPosition::callsite_returned(CB);
3544 bool UsedAssumedInformation = false;
3545 getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation,
3546 AA::Intraprocedural);
3547
3548 if (AttributeFuncs::isNoFPClassCompatibleType(Callee->getReturnType()))
3549 getOrCreateAAFor<AANoFPClass>(CBInstPos);
3550 }
3551
3552 const AttributeList &CBAttrs = CBFnPos.getAttrList();
3553 for (int I = 0, E = CB.arg_size(); I < E; ++I) {
3554
3555 IRPosition CBArgPos = IRPosition::callsite_argument(CB, I);
3556 AttributeSet CBArgAttrs = CBAttrs.getParamAttrs(I);
3557
3558 // Every call site argument might be dead.
3559 getOrCreateAAFor<AAIsDead>(CBArgPos);
3560
3561 // Call site argument might be simplified. We have to go through the
3562 // Attributor interface though as outside AAs can register custom
3563 // simplification callbacks.
3564 bool UsedAssumedInformation = false;
3565 getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation,
3566 AA::Intraprocedural);
3567
3568 // Every call site argument might be marked "noundef".
3569 checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(CBArgPos, CBArgAttrs);
3570
3571 Type *ArgTy = CB.getArgOperand(I)->getType();
3572
3573 if (!ArgTy->isPointerTy()) {
3574 if (AttributeFuncs::isNoFPClassCompatibleType(ArgTy))
3575 getOrCreateAAFor<AANoFPClass>(CBArgPos);
3576
3577 continue;
3578 }
3579
3580 // Call site argument attribute "non-null".
3581 checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(CBArgPos, CBArgAttrs);
3582
3583 // Call site argument attribute "nocapture".
3584 checkAndQueryIRAttr<Attribute::NoCapture, AANoCapture>(CBArgPos,
3585 CBArgAttrs);
3586
3587 // Call site argument attribute "no-alias".
3588 checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(CBArgPos, CBArgAttrs);
3589
3590 // Call site argument attribute "dereferenceable".
3591 getOrCreateAAFor<AADereferenceable>(CBArgPos);
3592
3593 // Call site argument attribute "align".
3594 getOrCreateAAFor<AAAlign>(CBArgPos);
3595
3596 // Call site argument attribute
3597 // "readnone/readonly/writeonly/..."
3598 if (!CBAttrs.hasParamAttr(I, Attribute::ReadNone))
3599 getOrCreateAAFor<AAMemoryBehavior>(CBArgPos);
3600
3601 // Call site argument attribute "nofree".
3602 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(CBArgPos, CBArgAttrs);
3603 }
3604 return true;
3605 };
3606
3607 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
3608 [[maybe_unused]] bool Success;
3609 bool UsedAssumedInformation = false;
3610 Success = checkForAllInstructionsImpl(
3611 nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
3612 {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
3613 (unsigned)Instruction::Call},
3614 UsedAssumedInformation);
3615 assert(Success && "Expected the check call to be successful!");
3616
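// The LoadStorePred lambda below seeds attributes for memory accesses:
// alignment and address space of the pointer operand of every load and store,
// liveness of stores, simplification of stored values, and (behind the
// attributor-simplify-all-loads option) simplification of loaded values.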
3617 auto LoadStorePred = [&](Instruction &I) -> bool {
3618 if (auto *LI = dyn_cast<LoadInst>(&I)) {
3619 getOrCreateAAFor<AAAlign>(IRPosition::value(*LI->getPointerOperand()));
3620 if (SimplifyAllLoads)
3621 getAssumedSimplified(IRPosition::value(I), nullptr,
3622 UsedAssumedInformation, AA::Intraprocedural);
3623 getOrCreateAAFor<AAAddressSpace>(
3624 IRPosition::value(*LI->getPointerOperand()));
3625 } else {
3626 auto &SI = cast<StoreInst>(I);
3627 getOrCreateAAFor<AAIsDead>(IRPosition::inst(I));
3628 getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
3629 UsedAssumedInformation, AA::Intraprocedural);
3630 getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
3631 getOrCreateAAFor<AAAddressSpace>(
3632 IRPosition::value(*SI.getPointerOperand()));
3633 }
3634 return true;
3635 };
3636 Success = checkForAllInstructionsImpl(
3637 nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
3638 {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
3639 UsedAssumedInformation);
3640 assert(Success && "Expected the check call to be successful!");
3641
3642 // Predicate seeding AAAllocationInfo for every alloca instruction.
3643 auto AAAllocationInfoPred = [&](Instruction &I) -> bool {
3644 getOrCreateAAFor<AAAllocationInfo>(IRPosition::value(I));
3645 return true;
3646 };
3647
3648 Success = checkForAllInstructionsImpl(
3649 nullptr, OpcodeInstMap, AAAllocationInfoPred, nullptr, nullptr,
3650 {(unsigned)Instruction::Alloca}, UsedAssumedInformation);
3651 assert(Success && "Expected the check call to be successful!");
3652}
3653
3654bool Attributor::isClosedWorldModule() const {
3655 if (CloseWorldAssumption.getNumOccurrences())
3656 return CloseWorldAssumption;
3657 return isModulePass() && Configuration.IsClosedWorldModule;
3658}
3659
3660/// Helpers to ease debugging through output streams and print calls.
3661///
3662///{
3663raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
3664 return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
3665}
3666
3667raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
3668 switch (AP) {
3669 case IRPosition::IRP_INVALID:
3670 return OS << "inv";
3671 case IRPosition::IRP_FLOAT:
3672 return OS << "flt";
3673 case IRPosition::IRP_RETURNED:
3674 return OS << "fn_ret";
3675 case IRPosition::IRP_CALL_SITE_RETURNED:
3676 return OS << "cs_ret";
3677 case IRPosition::IRP_FUNCTION:
3678 return OS << "fn";
3679 case IRPosition::IRP_CALL_SITE:
3680 return OS << "cs";
3681 case IRPosition::IRP_ARGUMENT:
3682 return OS << "arg";
3683 case IRPosition::IRP_CALL_SITE_ARGUMENT:
3684 return OS << "cs_arg";
3685 }
3686 llvm_unreachable("Unknown attribute position!");
3687}
3688
3689raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
3690 const Value &AV = Pos.getAssociatedValue();
3691 OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
3692 << Pos.getAnchorValue().getName() << "@" << Pos.getCallSiteArgNo() << "]";
3693
3694 if (Pos.hasCallBaseContext())
3695 OS << "[cb_context:" << *Pos.getCallBaseContext() << "]";
3696 return OS << "}";
3697}
3698
3699raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
3700 OS << "range-state(" << S.getBitWidth() << ")<";
3701 S.getKnown().print(OS);
3702 OS << " / ";
3703 S.getAssumed().print(OS);
3704 OS << ">";
3705
3706 return OS << static_cast<const AbstractState &>(S);
3707}
3708
3709raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
3710 return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
3711}
3712
3713raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
3714 AA.print(OS);
3715 return OS;
3716}
3717
3718raw_ostream &llvm::operator<<(raw_ostream &OS,
3719 const PotentialConstantIntValuesState &S) {
3720 OS << "set-state(< {";
3721 if (!S.isValidState())
3722 OS << "full-set";
3723 else {
3724 for (const auto &It : S.getAssumedSet())
3725 OS << It << ", ";
3726 if (S.undefIsContained())
3727 OS << "undef ";
3728 }
3729 OS << "} >)";
3730
3731 return OS;
3732}
3733
3734raw_ostream &llvm::operator<<(raw_ostream &OS,
3735 const PotentialLLVMValuesState &S) {
3736 OS << "set-state(< {";
3737 if (!S.isValidState())
3738 OS << "full-set";
3739 else {
3740 for (const auto &It : S.getAssumedSet()) {
3741 if (auto *F = dyn_cast<Function>(It.first.getValue()))
3742 OS << "@" << F->getName() << "[" << int(It.second) << "], ";
3743 else
3744 OS << *It.first.getValue() << "[" << int(It.second) << "], ";
3745 }
3746 if (S.undefIsContained())
3747 OS << "undef ";
3748 }
3749 OS << "} >)";
3750
3751 return OS;
3752}
3753
3754void AbstractAttribute::print(Attributor *A, raw_ostream &OS) const {
3755 OS << "[";
3756 OS << getName();
3757 OS << "] for CtxI ";
3758
3759 if (auto *I = getCtxI()) {
3760 OS << "'";
3761 I->print(OS);
3762 OS << "'";
3763 } else
3764 OS << "<<null inst>>";
3765
3766 OS << " at position " << getIRPosition() << " with state " << getAsStr(A)
3767 << '\n';
3768}
3769
3770void AbstractAttribute::printWithDeps(raw_ostream &OS) const {
3771 print(OS);
3772
3773 for (const auto &DepAA : Deps) {
3774 auto *AA = DepAA.getPointer();
3775 OS << " updates ";
3776 AA->print(OS);
3777 }
3778
3779 OS << '\n';
3780}
3781
3782raw_ostream &llvm::operator<<(raw_ostream &OS,
3783 const AAPointerInfo::Access &Acc) {
3784 OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst();
3785 if (Acc.getLocalInst() != Acc.getRemoteInst())
3786 OS << " via " << *Acc.getLocalInst();
3787 if (Acc.getContent()) {
3788 if (*Acc.getContent())
3789 OS << " [" << **Acc.getContent() << "]";
3790 else
3791 OS << " [ <unknown> ]";
3792 }
3793 return OS;
3794}
3795///}
3796
3797/// ----------------------------------------------------------------------------
3798/// Pass (Manager) Boilerplate
3799/// ----------------------------------------------------------------------------
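// The two static helpers below drive an Attributor run: they configure it,
// optionally create shallow wrappers and internalize non-exact definitions
// (full mode only), seed the default abstract attributes for each function,
// and report whether the IR changed. The pass run() entry points further down
// dispatch to them.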
3800
3801static bool runAttributorOnFunctions(InformationCache &InfoCache,
3802 SetVector<Function *> &Functions,
3803 AnalysisGetter &AG,
3804 CallGraphUpdater &CGUpdater,
3805 bool DeleteFns, bool IsModulePass) {
3806 if (Functions.empty())
3807 return false;
3808
3809 LLVM_DEBUG({
3810 dbgs() << "[Attributor] Run on module with " << Functions.size()
3811 << " functions:\n";
3812 for (Function *Fn : Functions)
3813 dbgs() << " - " << Fn->getName() << "\n";
3814 });
3815
3816 // Create an Attributor and initially empty information cache that is filled
3817 // while we identify default attribute opportunities.
3818 AttributorConfig AC(CGUpdater);
3819 AC.IsModulePass = IsModulePass;
3820 AC.DeleteFns = DeleteFns;
3821
3822 /// Tracking callback for specialization of indirect calls.
3823 DenseMap<CallBase *, std::unique_ptr<SmallPtrSet<Function *, 8>>>
3824 IndirectCalleeTrackingMap;
3825 if (MaxSpecializationPerCB.getNumOccurrences()) {
3826 AC.IndirectCalleeSpecializationCallback =
3827 [&](Attributor &, const AbstractAttribute &AA, CallBase &CB,
3828 Function &Callee, unsigned) {
3829 if (MaxSpecializationPerCB == 0)
3830 return false;
3831 auto &Set = IndirectCalleeTrackingMap[&CB];
3832 if (!Set)
3833 Set = std::make_unique<SmallPtrSet<Function *, 8>>();
3834 if (Set->size() >= MaxSpecializationPerCB)
3835 return Set->contains(&Callee);
3836 Set->insert(&Callee);
3837 return true;
3838 };
3839 }
3840
3841 Attributor A(Functions, InfoCache, AC);
3842
3843 // Create shallow wrappers for all functions that are not IPO amendable
3844 if (AllowShallowWrappers)
3845 for (Function *F : Functions)
3846 if (!A.isFunctionIPOAmendable(*F))
3847 Attributor::createShallowWrapper(*F);
3848
3849 // Internalize non-exact functions
3850 // TODO: for now we eagerly internalize functions without calculating the
3851 // cost, we need a cost interface to determine whether internalizing
3852 // a function is "beneficial"
3853 if (AllowDeepWrapper) {
3854 unsigned FunSize = Functions.size();
3855 for (unsigned u = 0; u < FunSize; u++) {
3856 Function *F = Functions[u];
3857 if (!F->isDeclaration() && !F->isDefinitionExact() && F->getNumUses() &&
3858 !GlobalValue::isInterposableLinkage(F->getLinkage())) {
3859 Function *NewF = Attributor::internalizeFunction(*F);
3860 assert(NewF && "Could not internalize function.");
3861 Functions.insert(NewF);
3862
3863 // Update call graph
3864 CGUpdater.replaceFunctionWith(*F, *NewF);
3865 for (const Use &U : NewF->uses())
3866 if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
3867 auto *CallerF = CB->getCaller();
3868 CGUpdater.reanalyzeFunction(*CallerF);
3869 }
3870 }
3871 }
3872 }
3873
3874 for (Function *F : Functions) {
3875 if (F->hasExactDefinition())
3876 NumFnWithExactDefinition++;
3877 else
3878 NumFnWithoutExactDefinition++;
3879
3880 // We look at internal functions only on-demand but if any use is not a
3881 // direct call or outside the current set of analyzed functions, we have
3882 // to do it eagerly.
3883 if (F->hasLocalLinkage()) {
3884 if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
3885 const auto *CB = dyn_cast<CallBase>(U.getUser());
3886 return CB && CB->isCallee(&U) &&
3887 Functions.count(const_cast<Function *>(CB->getCaller()));
3888 }))
3889 continue;
3890 }
3891
3892 // Populate the Attributor with abstract attribute opportunities in the
3893 // function and the information cache with IR information.
3894 A.identifyDefaultAbstractAttributes(*F);
3895 }
3896
3897 ChangeStatus Changed = A.run();
3898
3899 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
3900 << " functions, result: " << Changed << ".\n");
3901 return Changed == ChangeStatus::CHANGED;
3902}
3903
3904static bool runAttributorLightOnFunctions(InformationCache &InfoCache,
3905 SetVector<Function *> &Functions,
3906 AnalysisGetter &AG,
3907 CallGraphUpdater &CGUpdater,
3908 FunctionAnalysisManager &FAM,
3909 bool IsModulePass) {
3910 if (Functions.empty())
3911 return false;
3912
3913 LLVM_DEBUG({
3914 dbgs() << "[AttributorLight] Run on module with " << Functions.size()
3915 << " functions:\n";
3916 for (Function *Fn : Functions)
3917 dbgs() << " - " << Fn->getName() << "\n";
3918 });
3919
3920 // Create an Attributor and initially empty information cache that is filled
3921 // while we identify default attribute opportunities.
3922 AttributorConfig AC(CGUpdater);
3923 AC.IsModulePass = IsModulePass;
3924 AC.DeleteFns = false;
3925 DenseSet<const char *> Allowed(
3931 AC.Allowed = &Allowed;
3932 AC.UseLiveness = false;
3933
3934 Attributor A(Functions, InfoCache, AC);
3935
3936 for (Function *F : Functions) {
3937 if (F->hasExactDefinition())
3938 NumFnWithExactDefinition++;
3939 else
3940 NumFnWithoutExactDefinition++;
3941
3942 // We look at internal functions only on-demand but if any use is not a
3943 // direct call or outside the current set of analyzed functions, we have
3944 // to do it eagerly.
3945 if (AC.UseLiveness && F->hasLocalLinkage()) {
3946 if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
3947 const auto *CB = dyn_cast<CallBase>(U.getUser());
3948 return CB && CB->isCallee(&U) &&
3949 Functions.count(const_cast<Function *>(CB->getCaller()));
3950 }))
3951 continue;
3952 }
3953
3954 // Populate the Attributor with abstract attribute opportunities in the
3955 // function and the information cache with IR information.
3956 A.identifyDefaultAbstractAttributes(*F);
3957 }
3958
3959 ChangeStatus Changed = A.run();
3960
3961 if (Changed == ChangeStatus::CHANGED) {
3962 // Invalidate analyses for modified functions so that we don't have to
3963 // invalidate all analyses for all functions in this SCC.
3964 PreservedAnalyses FuncPA;
3965 // We haven't changed the CFG for modified functions.
3966 FuncPA.preserveSet<CFGAnalyses>();
3967 for (Function *Changed : A.getModifiedFunctions()) {
3968 FAM.invalidate(*Changed, FuncPA);
3969 // Also invalidate any direct callers of changed functions since analyses
3970 // may care about attributes of direct callees. For example, MemorySSA
3971 // cares about whether or not a call's callee modifies memory and queries
3972 // that through function attributes.
3973 for (auto *U : Changed->users()) {
3974 if (auto *Call = dyn_cast<CallBase>(U)) {
3975 if (Call->getCalledFunction() == Changed)
3976 FAM.invalidate(*Call->getFunction(), FuncPA);
3977 }
3978 }
3979 }
3980 }
3981 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
3982 << " functions, result: " << Changed << ".\n");
3983 return Changed == ChangeStatus::CHANGED;
3984}
3985
3986void AADepGraph::viewGraph() { llvm::ViewGraph(this, "Dependency Graph"); }
3987
3988void AADepGraph::dumpGraph() {
3989 static std::atomic<int> CallTimes;
3990 std::string Prefix;
3991
3992 if (!DepGraphDotFileNamePrefix.empty())
3993 Prefix = DepGraphDotFileNamePrefix;
3994 else
3995 Prefix = "dep_graph";
3996 std::string Filename =
3997 Prefix + "_" + std::to_string(CallTimes.load()) + ".dot";
3998
3999 outs() << "Dependency graph dump to " << Filename << ".\n";
4000
4001 std::error_code EC;
4002
4003 raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
4004 if (!EC)
4005 llvm::WriteGraph(File, this);
4006
4007 CallTimes++;
4008}
4009
4010void AADepGraph::print() {
4011 for (auto DepAA : SyntheticRoot.Deps)
4012 cast<AbstractAttribute>(DepAA.getPointer())->printWithDeps(outs());
4013}
4014
4015PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
4016 FunctionAnalysisManager &FAM =
4017 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
4018 AnalysisGetter AG(FAM);
4019
4020 SetVector<Function *> Functions;
4021 for (Function &F : M)
4022 Functions.insert(&F);
4023
4024 CallGraphUpdater CGUpdater;
4025 BumpPtrAllocator Allocator;
4026 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
4027 if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
4028 /* DeleteFns */ true, /* IsModulePass */ true)) {
4029 // FIXME: Think about passes we will preserve and add them here.
4030 return PreservedAnalyses::none();
4031 }
4032 return PreservedAnalyses::all();
4033}
4034
4035PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
4036 CGSCCAnalysisManager &AM,
4037 LazyCallGraph &CG,
4038 CGSCCUpdateResult &UR) {
4039 FunctionAnalysisManager &FAM =
4040 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
4041 AnalysisGetter AG(FAM);
4042
4043 SetVector<Function *> Functions;
4044 for (LazyCallGraph::Node &N : C)
4045 Functions.insert(&N.getFunction());
4046
4047 if (Functions.empty())
4048 return PreservedAnalyses::all();
4049
4050 Module &M = *Functions.back()->getParent();
4051 CallGraphUpdater CGUpdater;
4052 CGUpdater.initialize(CG, C, AM, UR);
4053 BumpPtrAllocator Allocator;
4054 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
4055 if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
4056 /* DeleteFns */ false,
4057 /* IsModulePass */ false)) {
4058 // FIXME: Think about passes we will preserve and add them here.
4059 PreservedAnalyses PA;
4060 PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
4061 return PA;
4062 }
4063 return PreservedAnalyses::all();
4064}
4065
4066PreservedAnalyses AttributorLightPass::run(Module &M,
4067 ModuleAnalysisManager &AM) {
4068 FunctionAnalysisManager &FAM =
4069 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
4070 AnalysisGetter AG(FAM, /* CachedOnly */ true);
4071
4072 SetVector<Function *> Functions;
4073 for (Function &F : M)
4074 Functions.insert(&F);
4075
4076 CallGraphUpdater CGUpdater;
4077 BumpPtrAllocator Allocator;
4078 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
4079 if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
4080 /* IsModulePass */ true)) {
4081 PreservedAnalyses PA;
4082 // We have not added or removed functions.
4083 PA.preserve<FunctionAnalysisManagerModuleProxy>();
4084 // We already invalidated all relevant function analyses above.
4085 PA.preserveSet<AllAnalysesOn<Function>>();
4086 return PA;
4087 }
4088 return PreservedAnalyses::all();
4089}
4090
4091PreservedAnalyses AttributorLightCGSCCPass::run(LazyCallGraph::SCC &C,
4092 CGSCCAnalysisManager &AM,
4093 LazyCallGraph &CG,
4094 CGSCCUpdateResult &UR) {
4095 FunctionAnalysisManager &FAM =
4096 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
4097 AnalysisGetter AG(FAM);
4098
4099 SetVector<Function *> Functions;
4100 for (LazyCallGraph::Node &N : C)
4101 Functions.insert(&N.getFunction());
4102
4103 if (Functions.empty())
4104 return PreservedAnalyses::all();
4105
4106 Module &M = *Functions.back()->getParent();
4107 CallGraphUpdater CGUpdater;
4108 CGUpdater.initialize(CG, C, AM, UR);
4109 BumpPtrAllocator Allocator;
4110 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
4111 if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
4112 /* IsModulePass */ false)) {
4113 PreservedAnalyses PA;
4114 // We have not added or removed functions.
4115 PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
4116 // We already invalidated all relevant function analyses above.
4117 PA.preserveSet<AllAnalysesOn<Function>>();
4118 return PA;
4119 }
4120 return PreservedAnalyses::all();
4121}
4122namespace llvm {
4123
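// GraphTraits and DOTGraphTraits specializations that let the generic graph
// utilities (ViewGraph, WriteGraph) used by AADepGraph::viewGraph() and
// AADepGraph::dumpGraph() traverse and render the abstract attribute
// dependency graph.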
4124template <> struct GraphTraits<AADepGraphNode *> {
4125 using NodeRef = AADepGraphNode *;
4126 using DepTy = PointerIntPair<AADepGraphNode *, 1>;
4127 using EdgeRef = PointerIntPair<AADepGraphNode *, 1>;
4128
4129 static NodeRef getEntryNode(AADepGraphNode *DGN) { return DGN; }
4130 static NodeRef DepGetVal(const DepTy &DT) { return DT.getPointer(); }
4131
4132 using ChildIteratorType =
4133 mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;
4134 using ChildEdgeIteratorType = AADepGraphNode::DepSetTy::iterator;
4135
4136 static ChildIteratorType child_begin(NodeRef N) { return N->child_begin(); }
4137
4138 static ChildIteratorType child_end(NodeRef N) { return N->child_end(); }
4139};
4140
4141template <>
4142struct GraphTraits<AADepGraph *> : public GraphTraits<AADepGraphNode *> {
4143 static NodeRef getEntryNode(AADepGraph *DG) { return DG->GetEntryNode(); }
4144
4145 using nodes_iterator =
4146 mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;
4147
4148 static nodes_iterator nodes_begin(AADepGraph *DG) { return DG->begin(); }
4149
4150 static nodes_iterator nodes_end(AADepGraph *DG) { return DG->end(); }
4151};
4152
4153template <> struct DOTGraphTraits<AADepGraph *> : public DefaultDOTGraphTraits {
4154 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
4155
4156 static std::string getNodeLabel(const AADepGraphNode *Node,
4157 const AADepGraph *DG) {
4158 std::string AAString;
4159 raw_string_ostream O(AAString);
4160 Node->print(O);
4161 return AAString;
4162 }
4163};
4164
4165} // end namespace llvm
#define Success
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
This file contains the simple types necessary to represent the attributes associated with functions a...
static cl::opt< bool > AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden, cl::desc("Allow the Attributor to create shallow " "wrappers for non-exact definitions."), cl::init(false))
bool canMarkAsVisited(const User *Usr)
#define VERBOSE_DEBUG_TYPE
Definition: Attributor.cpp:66
static cl::opt< bool > EnableHeapToStack("enable-heap-to-stack-conversion", cl::init(true), cl::Hidden)
static cl::list< std::string > SeedAllowList("attributor-seed-allow-list", cl::Hidden, cl::desc("Comma separated list of attribute names that are " "allowed to be seeded."), cl::CommaSeparated)
static bool runAttributorOnFunctions(InformationCache &InfoCache, SetVector< Function * > &Functions, AnalysisGetter &AG, CallGraphUpdater &CGUpdater, bool DeleteFns, bool IsModulePass)
}
static bool isPotentiallyReachable(Attributor &A, const Instruction &FromI, const Instruction *ToI, const Function &ToFn, const AbstractAttribute &QueryingAA, const AA::InstExclusionSetTy *ExclusionSet, std::function< bool(const Function &F)> GoBackwardsCB)
Definition: Attributor.cpp:660
static bool getPotentialCopiesOfMemoryValue(Attributor &A, Ty &I, SmallSetVector< Value *, 4 > &PotentialCopies, SmallSetVector< Instruction *, 4 > *PotentialValueOrigins, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact)
Definition: Attributor.cpp:364
static bool runAttributorLightOnFunctions(InformationCache &InfoCache, SetVector< Function * > &Functions, AnalysisGetter &AG, CallGraphUpdater &CGUpdater, FunctionAnalysisManager &FAM, bool IsModulePass)
static cl::opt< unsigned, true > MaxInitializationChainLengthX("attributor-max-initialization-chain-length", cl::Hidden, cl::desc("Maximal number of chained initializations (to avoid stack overflows)"), cl::location(MaxInitializationChainLength), cl::init(1024))
static cl::opt< unsigned > MaxSpecializationPerCB("attributor-max-specializations-per-call-base", cl::Hidden, cl::desc("Maximal number of callees specialized for " "a call base"), cl::init(UINT32_MAX))
static cl::opt< bool > SimplifyAllLoads("attributor-simplify-all-loads", cl::Hidden, cl::desc("Try to simplify all loads."), cl::init(true))
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr, AttributeSet AttrSet, bool ForceReplace, AttrBuilder &AB)
Return true if the information provided by Attr was added to the attribute set AttrSet.
Definition: Attributor.cpp:955
static cl::opt< bool > ViewDepGraph("attributor-view-dep-graph", cl::Hidden, cl::desc("View the dependency graph."), cl::init(false))
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old)
Return true if New is equal or worse than Old.
Definition: Attributor.cpp:945
static cl::opt< bool > AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden, cl::desc("Allow the Attributor to use IP information " "derived from non-exact functions via cloning"), cl::init(false))
static cl::opt< bool > DumpDepGraph("attributor-dump-dep-graph", cl::Hidden, cl::desc("Dump the dependency graph to dot files."), cl::init(false))
static cl::opt< bool > PrintCallGraph("attributor-print-call-graph", cl::Hidden, cl::desc("Print Attributor's internal call graph"), cl::init(false))
static bool checkForAllInstructionsImpl(Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap, function_ref< bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA, const AAIsDead *LivenessAA, ArrayRef< unsigned > Opcodes, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, bool CheckPotentiallyDead=false)
static cl::opt< bool > PrintDependencies("attributor-print-dep", cl::Hidden, cl::desc("Print attribute dependencies"), cl::init(false))
static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool RequireReadNone, bool &IsKnown)
Definition: Attributor.cpp:608
static cl::opt< std::string > DepGraphDotFileNamePrefix("attributor-depgraph-dot-filename-prefix", cl::Hidden, cl::desc("The prefix used for the CallGraph dot file names."))
static cl::opt< bool > AnnotateDeclarationCallSites("attributor-annotate-decl-cs", cl::Hidden, cl::desc("Annotate call sites of function declarations."), cl::init(false))
static cl::opt< unsigned > SetFixpointIterations("attributor-max-iterations", cl::Hidden, cl::desc("Maximal number of fixpoint iterations."), cl::init(32))
static cl::list< std::string > FunctionSeedAllowList("attributor-function-seed-allow-list", cl::Hidden, cl::desc("Comma separated list of function names that are " "allowed to be seeded."), cl::CommaSeparated)
static cl::opt< bool > EnableCallSiteSpecific("attributor-enable-call-site-specific-deduction", cl::Hidden, cl::desc("Allow the Attributor to do call site specific analysis"), cl::init(false))
static cl::opt< bool > CloseWorldAssumption("attributor-assume-closed-world", cl::Hidden, cl::desc("Should a closed world be assumed, or not. Default if not set."))
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
Definition: DebugCounter.h:190
#define LLVM_DEBUG(...)
Definition: Debug.h:106
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition: Debug.h:64
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:235
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
This file defines the PointerIntPair class.
static StringRef getName(Value *V)
Basic Register Allocator
Remove Loads Into Fake Uses
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isSimple(Instruction *I)
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the SmallPtrSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:166
Class for arbitrary precision integers.
Definition: APInt.h:78
AbstractCallSite.
CallBase * getInstruction() const
Return the underlying instruction.
bool isCallbackCall() const
Return true if this ACS represents a callback call.
const Use & getCalleeUseForCallback() const
Return the use of the callee value in the underlying instruction.
static void getCallbackUses(const CallBase &CB, SmallVectorImpl< const Use * > &CallbackUses)
Add operand uses of CB that represent callback uses into CallbackUses.
bool isCallee(Value::const_user_iterator UI) const
Return true if UI is the use that defines the callee of this ACS.
Value * getCallArgOperand(Argument &Arg) const
Return the operand of the underlying instruction associated with Arg.
int getCallArgOperandNo(Argument &Arg) const
Return the operand index of the underlying instruction associated with Arg.
unsigned getNumArgOperands() const
Return the number of parameters of the callee.
Function * getCalledFunction() const
Return the function being called if this is a direct call, otherwise return null (if it's an indirect...
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
Definition: Analysis.h:49
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
void invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Invalidate cached analyses for an IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:410
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
const Function * getParent() const
Definition: Argument.h:43
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
Definition: Argument.h:49
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:163
AttributeSet getFnAttrs() const
The function attributes are returned.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Return true if the attribute exists for the given argument.
Definition: Attributes.h:829
AttributeSet getParamAttrs(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
MemoryEffects getMemoryEffects() const
bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
Attribute getAttribute(Attribute::AttrKind Kind) const
Return the attribute object.
bool isStringAttribute() const
Return true if the attribute is a string (target-dependent) attribute.
Definition: Attributes.cpp:348
bool isEnumAttribute() const
Return true if the attribute is an Attribute::AttrKind type.
Definition: Attributes.cpp:340
bool isIntAttribute() const
Return true if the attribute is an integer attribute.
Definition: Attributes.cpp:344
uint64_t getValueAsInt() const
Return the attribute's value as an integer.
Definition: Attributes.cpp:371
StringRef getKindAsString() const
Return the attribute's kind as a string.
Definition: Attributes.cpp:385
static Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
Definition: Attributes.cpp:95
Attribute::AttrKind getKindAsEnum() const
Return the attribute's kind as an enum (Attribute::AttrKind).
Definition: Attributes.cpp:364
MemoryEffects getMemoryEffects() const
Returns memory effects.
Definition: Attributes.cpp:484
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:392
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:86
@ None
No attributes have been set.
Definition: Attributes.h:88
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
const Instruction & front() const
Definition: BasicBlock.h:471
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:212
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
Definition: BasicBlock.cpp:467
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
static BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
Definition: Constants.cpp:1897
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:66
Represents analyses that only rely on functions' control flow.
Definition: Analysis.h:72
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1120
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1411
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1482
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1407
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
Value * getCalledOperand() const
Definition: InstrTypes.h:1342
void setAttributes(AttributeList A)
Set the attributes for this call.
Definition: InstrTypes.h:1428
unsigned arg_size() const
Definition: InstrTypes.h:1292
AttributeList getAttributes() const
Return the attributes for this call.
Definition: InstrTypes.h:1425
Function * getCaller()
Helper to get the caller (the parent function).
Wrapper to unify "old style" CallGraph and "new style" LazyCallGraph.
void removeFunction(Function &Fn)
Remove Fn from the call graph.
void replaceFunctionWith(Function &OldFn, Function &NewFn)
Replace OldFn in the call graph (and SCC) with NewFn.
void reanalyzeFunction(Function &Fn)
After an CGSCC pass changes a function in ways that affect the call graph, this method can be called ...
void initialize(LazyCallGraph &LCG, LazyCallGraph::SCC &SCC, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR)
Initializers for usage outside of a CGSCC pass, inside a CGSCC pass in the old and new pass manager (...
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
void setTailCall(bool IsTc=true)
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:1108
static Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
Definition: Constants.cpp:2253
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2279
void print(raw_ostream &OS) const
Print out the bounds to a stream.
This is an important base class in LLVM.
Definition: Constant.h:42
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:373
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
static bool shouldExecute(unsigned CounterName)
Definition: DebugCounter.h:87
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:194
bool empty() const
Definition: DenseMap.h:98
Implements a dense probed hash-table based set.
Definition: DenseSet.h:278
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
A proxy from a FunctionAnalysisManager to an SCC.
Class to represent function types.
Definition: DerivedTypes.h:105
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void setSubprogram(DISubprogram *SP)
Set the attached subprogram.
Definition: Metadata.cpp:1870
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition: Function.h:173
void splice(Function::iterator ToIt, Function *FromF)
Transfer all blocks from FromF to this function at ToIt.
Definition: Function.h:761
const BasicBlock & getEntryBlock() const
Definition: Function.h:809
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:216
iterator_range< arg_iterator > args()
Definition: Function.h:892
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1874
MemoryEffects getMemoryEffects() const
Definition: Function.cpp:876
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const
check if an attributes is in the list of attributes.
Definition: Function.cpp:743
bool IsNewDbgInfoFormat
Is this function using intrinsics to record the position of debugging information,...
Definition: Function.h:116
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:353
iterator begin()
Definition: Function.h:853
arg_iterator arg_begin()
Definition: Function.h:868
void setAttributes(AttributeList Attrs)
Set the attribute list for this Function.
Definition: Function.h:356
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:369
size_t arg_size() const
Definition: Function.h:901
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:221
void setMemoryEffects(MemoryEffects ME)
Definition: Function.cpp:879
Argument * getArg(unsigned i) const
Definition: Function.h:886
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition: Function.h:234
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:731
void copyAttributesFrom(const Function *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a Function) from the ...
Definition: Function.cpp:860
bool hasMetadata() const
Return true if this value has any metadata attached to it.
Definition: Value.h:589
void addMetadata(unsigned KindID, MDNode &MD)
Add a metadata attachment.
Definition: Metadata.cpp:1565
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:296
LinkageTypes getLinkage() const
Definition: GlobalValue.h:546
bool hasLocalLinkage() const
Definition: GlobalValue.h:528
void setLinkage(LinkageTypes LT)
Definition: GlobalValue.h:537
unsigned getAddressSpace() const
Definition: GlobalValue.h:205
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
void setDSOLocal(bool Local)
Definition: GlobalValue.h:303
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
@ DefaultVisibility
The GV is visible.
Definition: GlobalValue.h:67
void setVisibility(VisibilityTypes V)
Definition: GlobalValue.h:254
static bool isInterposableLinkage(LinkageTypes Linkage)
Whether the definition of this global may be replaced by something non-equivalent at link time.
Definition: GlobalValue.h:425
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition: GlobalValue.h:60
@ InternalLinkage
Rename collisions when linking (static functions).
Definition: GlobalValue.h:59
bool isSuccess() const
Definition: InlineCost.h:188
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
Definition: PassManager.h:567
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:70
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this',...
void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
A node in the call graph.
An SCC of the call graph.
A lazily constructed view of the call graph of a module.
An instruction for reading from memory.
Definition: Instructions.h:176
This is the common base class for memset/memcpy/memmove.
This class wraps the llvm.memcpy/memmove intrinsics.
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access argument memory.
Definition: ModRef.h:132
bool doesAccessArgPointees() const
Whether this function may access argument memory.
Definition: ModRef.h:206
static MemoryLocation getForSource(const MemTransferInst *MTI)
Return a location representing the source of a memory transfer.
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
static std::optional< MemoryLocation > getOrNone(const Instruction *Inst)
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
const FunctionListType & getFunctionList() const
Get the Module's list of functions (constant).
Definition: Module.h:614
Diagnostic information for missed-optimization remarks.
PointerIntPair - This class implements a pair of a pointer and small integer.
void * getOpaqueValue() const
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1878
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: Analysis.h:114
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
void preserveSet()
Mark an analysis set as preserved.
Definition: Analysis.h:146
void preserve()
Mark an analysis as preserved.
Definition: Analysis.h:131
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
A vector that has set insertion semantics.
Definition: SetVector.h:57
ArrayRef< value_type > getArrayRef() const
Definition: SetVector.h:84
bool remove(const value_type &X)
Remove an item from the set vector.
Definition: SetVector.h:188
size_type size() const
Determine the number of elements in the SetVector.
Definition: SetVector.h:98
const value_type & front() const
Return the first element of the SetVector.
Definition: SetVector.h:143
const value_type & back() const
Return the last element of the SetVector.
Definition: SetVector.h:149
typename vector_type::const_iterator iterator
Definition: SetVector.h:69
iterator end()
Get an iterator to the end of the SetVector.
Definition: SetVector.h:113
void clear()
Completely clear the SetVector.
Definition: SetVector.h:273
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition: SetVector.h:264
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:93
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:103
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:162
size_type size() const
Definition: SmallPtrSet.h:94
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:363
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:452
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:384
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:370
bool empty() const
Definition: SmallVector.h:81
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void reserve(size_type N)
Definition: SmallVector.h:663
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:683
void resize(size_type N)
Definition: SmallVector.h:638
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
An instruction for storing to memory.
Definition: Instructions.h:292
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
A visitor class for IR positions.
Definition: Attributor.h:1116
SubsumingPositionIterator(const IRPosition &IRP)
Provides information about what library functions are available for the current target.
The TimeTraceScope is a helper class to call the begin and end functions of the time trace profiler.
Definition: TimeProfiler.h:180
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
bool isAMDGPU() const
Definition: Triple.h:862
bool isNVPTX() const
Tests whether the target is NVPTX (32- or 64-bit).
Definition: Triple.h:855
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:264
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:184
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:237
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1859
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
iterator_range< user_iterator > users()
Definition: Value.h:421
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:694
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
iterator_range< use_iterator > uses()
Definition: Value.h:376
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
Value handle that is nullable, but tries to track the Value.
Definition: ValueHandle.h:204
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition: DenseSet.h:95
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
iterator insert(iterator where, pointer New)
Definition: ilist.h:165
A raw_ostream that writes to a file descriptor.
Definition: raw_ostream.h:460
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:661
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isAssumedReadNone(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool &IsKnown)
Return true if IRP is readnone.
Definition: Attributor.cpp:653
bool isAssumedReadOnly(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool &IsKnown)
Return true if IRP is readonly.
Definition: Attributor.cpp:648
std::optional< Value * > combineOptionalValuesInAAValueLatice(const std::optional< Value * > &A, const std::optional< Value * > &B, Type *Ty)
Return the combination of A and B such that the result is a possible value of both.
Definition: Attributor.cpp:339
bool isValidAtPosition(const ValueAndContext &VAC, InformationCache &InfoCache)
Return true if the value of VAC is a valid at the position of VAC, that is a constant,...
Definition: Attributor.cpp:290
bool isAssumedThreadLocalObject(Attributor &A, Value &Obj, const AbstractAttribute &QueryingAA)
Return true if Obj is assumed to be a thread local object.
Definition: Attributor.cpp:835
bool isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA, const Value &V, bool ForAnalysisOnly=true)
Return true if V is dynamically unique, that is, there are no two "instances" of V at runtime with di...
Definition: Attributor.cpp:231
bool getPotentialCopiesOfStoredValue(Attributor &A, StoreInst &SI, SmallSetVector< Value *, 4 > &PotentialCopies, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact=false)
Collect all potential values of the one stored by SI into PotentialCopies.
Definition: Attributor.cpp:599
bool isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I, const AbstractAttribute &QueryingAA)
Return true if I is potentially affected by a barrier.
Definition: Attributor.cpp:889
bool isGPU(const Module &M)
Return true iff M target a GPU (and we can use GPU AS reasoning).
Definition: Attributor.cpp:200
Constant * getInitialValueForObj(Attributor &A, const AbstractAttribute &QueryingAA, Value &Obj, Type &Ty, const TargetLibraryInfo *TLI, const DataLayout &DL, RangeTy *RangePtr=nullptr)
Return the initial value of Obj with type Ty if that is a constant.
Definition: Attributor.cpp:242
ValueScope
Flags to distinguish intra-procedural queries from potentially inter-procedural queries.
Definition: Attributor.h:180
@ Intraprocedural
Definition: Attributor.h:181
@ Interprocedural
Definition: Attributor.h:182
bool isValidInScope(const Value &V, const Function *Scope)
Return true if V is a valid value in Scope, that is a constant or an instruction/argument of Scope.
Definition: Attributor.cpp:280
bool isPotentiallyReachable(Attributor &A, const Instruction &FromI, const Instruction &ToI, const AbstractAttribute &QueryingAA, const AA::InstExclusionSetTy *ExclusionSet=nullptr, std::function< bool(const Function &F)> GoBackwardsCB=nullptr)
Return true if ToI is potentially reachable from FromI without running into any instruction in Exclus...
Definition: Attributor.cpp:816
bool isNoSyncInst(Attributor &A, const Instruction &I, const AbstractAttribute &QueryingAA)
Return true if I is a nosync instruction.
Definition: Attributor.cpp:205
bool getPotentiallyLoadedValues(Attributor &A, LoadInst &LI, SmallSetVector< Value *, 4 > &PotentialValues, SmallSetVector< Instruction *, 4 > &PotentialValueOrigins, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact=false)
Collect all potential values LI could read into PotentialValues.
Definition: Attributor.cpp:589
Value * getWithType(Value &V, Type &Ty)
Try to convert V to type Ty without introducing new instructions.
Definition: Attributor.cpp:316
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isNoFPClassCompatibleType(Type *Ty)
Returns true if this is a type legal for the 'nofpclass' attribute.
void updateMinLegalVectorWidthAttr(Function &Fn, uint64_t Width)
Update min-legal-vector-width if it is in Attribute and less than Width.
@ Entry
Definition: COFF.h:844
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
@ CommaSeparated
Definition: CommandLine.h:163
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
@ Assume
Do not drop type tests (default).
DiagnosticInfoOptimizationBase::Argument NV
@ OF_TextWithCRLF
The file should be opened in text mode and use a carriage linefeed '\r '.
Definition: FileSystem.h:767
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1739
Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition: Local.cpp:546
unsigned MaxInitializationChainLength
The value passed to the line option that defines the maximal initialization chain length.
Definition: Attributor.cpp:109
bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions=false, const TargetLibraryInfo *TLI=nullptr, DomTreeUpdater *DTU=nullptr)
If a terminator instruction is predicated on a constant value, convert it into an unconditional branc...
Definition: Local.cpp:136
APInt operator&(APInt a, const APInt &b)
Definition: APInt.h:2092
void detachDeadBlocks(ArrayRef< BasicBlock * > BBs, SmallVectorImpl< DominatorTree::UpdateType > *Updates, bool KeepOneInputPHIs=false)
Replace the contents of every block in BBs with a single unreachable instruction.
@ Done
Definition: Threading.h:61
bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
Definition: Verifier.cpp:7293
CallInst * changeToCall(InvokeInst *II, DomTreeUpdater *DTU=nullptr)
This function converts the specified invoke into a normal call.
Definition: Local.cpp:2972
raw_fd_ostream & outs()
This returns a reference to a raw_fd_ostream for standard output.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
raw_ostream & WriteGraph(raw_ostream &O, const GraphType &G, bool ShortNames=false, const Twine &Title="")
Definition: GraphWriter.h:359
InlineResult isInlineViable(Function &Callee)
Minimal filter to detect invalid constructs for inlining.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1746
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:406
Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty, const DataLayout &DL)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all pois...
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:1187
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool AreStatisticsEnabled()
Check if statistics are enabled.
Definition: Statistic.cpp:139
Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition: Local.cpp:2906
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
BasicBlock * SplitBlockPredecessors(BasicBlock *BB, ArrayRef< BasicBlock * > Preds, const char *Suffix, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, bool PreserveLCSSA=false)
This method introduces at least one new basic block into the function and moves some of the predecess...
bool operator&=(SparseBitVector< ElementSize > *LHS, const SparseBitVector< ElementSize > &RHS)
void ViewGraph(const GraphType &G, const Twine &Name, bool ShortNames=false, const Twine &Title="", GraphProgram::Name Program=GraphProgram::DOT)
ViewGraph - Emit a dot graph, run 'dot', run gv on the postscript file, then cleanup.
Definition: GraphWriter.h:427
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Definition: APFixedPoint.h:303
void CloneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, CloneFunctionChangeType Changes, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr)
Clone OldFunc into NewFunc, transforming the old arguments into references to VMap values.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition: Casting.h:565
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:2099
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition: iterator.h:363
bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates or reallocates memory (eith...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1903
ChangeStatus
Definition: Attributor.h:489
void fillMapFromAssume(AssumeInst &Assume, RetainedKnowledgeMap &Result)
Insert into the map all the information contained in the operand bundles of the llvm....
bool operator|=(SparseBitVector< ElementSize > &LHS, const SparseBitVector< ElementSize > *RHS)
Constant * ConstantFoldCastInstruction(unsigned opcode, Constant *V, Type *DestTy)
DepClassTy
Definition: Attributor.h:499
@ OPTIONAL
The target may be valid if the source is not.
@ NONE
Do not track a dependence between source and target.
@ REQUIRED
The target cannot be valid if the source is not.
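
A hedged sketch of how these dependence classes are used when one abstract attribute queries another; getAAFor is not listed in this index and is assumed from Attributor.h (its exact return type differs between LLVM versions), and `A` and `*this` are assumed to be the Attributor and the querying AA inside an update.

// OPTIONAL: this AA wants to be re-run if the liveness AA changes, but can
// remain valid without it. getAnchorScope() is assumed to be non-null here.
const auto *LivenessAA = A.getAAFor<AAIsDead>(
    *this, IRPosition::function(*getAnchorScope()), DepClassTy::OPTIONAL);
if (LivenessAA && LivenessAA->isAssumedDead())
  return ChangeStatus::UNCHANGED;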
APInt operator|(APInt a, const APInt &b)
Definition: APInt.h:2112
#define N
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:5530
DepSetTy Deps
Set of dependency graph nodes which should be updated if this one is updated.
Definition: Attributor.h:516
aaiterator begin()
Definition: Attributor.h:530
aaiterator end()
Definition: Attributor.h:531
The data structure for the dependency graph.
Definition: Attributor.h:550
iterator begin()
Definition: Attributor.h:565
AADepGraphNode SyntheticRoot
There is no root node for the dependency graph.
Definition: Attributor.h:562
void print()
Print dependency graph.
iterator end()
Definition: Attributor.h:566
void dumpGraph()
Dump graph to file.
AADepGraphNode * GetEntryNode()
Definition: Attributor.h:563
An abstract interface to track if a value leaves its defining function instance.
Definition: Attributor.h:4310
bool isAssumedUniqueForAnalysis() const
Return true if we assume that the underlying value is unique in its scope wrt.
Definition: Attributor.h:4324
An abstract Attribute for computing reachability between functions.
Definition: Attributor.h:5683
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:5718
An abstract interface to determine reachability of point A to B.
Definition: Attributor.h:3816
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3844
An abstract interface for liveness abstract attribute.
Definition: Attributor.h:3976
virtual bool isKnownDead() const =0
Returns true if the underlying value is known dead.
virtual bool isAssumedDead() const =0
The query functions are protected such that other attributes need to go through the Attributor interf...
virtual bool isRemovableStore() const
Return true if the underlying value is a store that is known to be removable.
Definition: Attributor.h:4020
static bool mayCatchAsynchronousExceptions(const Function &F)
Determine if F might catch asynchronous exceptions.
Definition: Attributor.h:4041
An abstract interface for memory access kind related attributes (readnone/readonly/writeonly).
Definition: Attributor.h:4630
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4696
An abstract interface for all memory location attributes (readnone/argmemonly/inaccessiblememonly/ina...
Definition: Attributor.h:4705
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4881
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3627
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4424
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:5441
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3940
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3709
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3971
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3585
static bool isNonRelaxedAtomic(const Instruction *I)
Helper function used to determine whether an instruction is non-relaxed atomic.
static bool isNoSyncIntrinsic(const Instruction *I)
Helper function specific for intrinsics which are potentially volatile.
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3502
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3678
An access description.
Definition: Attributor.h:5988
bool isWrittenValueUnknown() const
Return true if the value written cannot be determined at all.
Definition: Attributor.h:6094
std::optional< Value * > getContent() const
Return the written value which can be llvm::null if it is not yet determined.
Definition: Attributor.h:6113
bool isWriteOrAssumption() const
Return true if this is a write access.
Definition: Attributor.h:6064
bool isRead() const
Return true if this is a read access.
Definition: Attributor.h:6058
Value * getWrittenValue() const
Return the value written, if any.
Definition: Attributor.h:6105
Instruction * getLocalInst() const
Return the instruction that causes the access with respect to the local scope of the associated attri...
Definition: Attributor.h:6085
Instruction * getRemoteInst() const
Return the actual instruction that causes the access.
Definition: Attributor.h:6088
bool isWrittenValueYetUndetermined() const
Return true if the value written is not known yet.
Definition: Attributor.h:6091
AccessKind getKind() const
Return the access kind.
Definition: Attributor.h:6055
An abstract interface for struct information.
Definition: Attributor.h:5755
static Value * getSingleValue(Attributor &A, const AbstractAttribute &AA, const IRPosition &IRP, SmallVectorImpl< AA::ValueAndContext > &Values)
Extract the single value in Values if any.
An abstract attribute for getting all assumption underlying objects.
Definition: Attributor.h:6244
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:6274
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3773
Helper to represent an access offset and size, with logic to deal with uncertainty and check for over...
Definition: Attributor.h:237
bool offsetOrSizeAreUnknown() const
Return true if offset or size are unknown.
Definition: Attributor.h:246
static const fltSemantics & IEEEsingle() LLVM_READNONE
Definition: APFloat.cpp:265
Base struct for all "concrete attribute" deductions.
Definition: Attributor.h:3284
ChangeStatus update(Attributor &A)
Hook for the Attributor to trigger an update of the internal state.
virtual ChangeStatus manifest(Attributor &A)
Hook for the Attributor to trigger the manifestation of the information represented by the abstract a...
Definition: Attributor.h:3399
virtual void printWithDeps(raw_ostream &OS) const
void print(raw_ostream &OS) const
Helper functions, for debug purposes only.
Definition: Attributor.h:3368
virtual StateType & getState()=0
Return the internal abstract state for inspection.
virtual const std::string getName() const =0
This function should return the name of the AbstractAttribute.
virtual ~AbstractAttribute()=default
Virtual destructor.
virtual const std::string getAsStr(Attributor *A) const =0
This function should return the "summarized" assumed state as string.
virtual bool isQueryAA() const
A query AA is always scheduled as long as we do updates because it does lazy computation that cannot ...
Definition: Attributor.h:3356
virtual ChangeStatus updateImpl(Attributor &A)=0
The actual update/transfer function which has to be implemented by the derived classes.
virtual void trackStatistics() const =0
Hook to enable custom statistic tracking, called after manifest that resulted in a change if statisti...
const IRPosition & getIRPosition() const
Return an IR position, see struct IRPosition.
Definition: Attributor.h:3363
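
As an illustration of the interface above, a minimal custom abstract attribute could look roughly like the sketch below. StateWrapper, BooleanState, and getIdAddr are assumed from Attributor.h; a real AA would additionally provide a createForPosition factory and an out-of-line definition of ID, and would be registered with the Attributor.

struct AAExampleSketch : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAExampleSketch(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  // Transfer function: results of queries to other AAs would be folded into
  // the state here; this sketch never learns anything new.
  ChangeStatus updateImpl(Attributor &A) override {
    return ChangeStatus::UNCHANGED;
  }
  const std::string getAsStr(Attributor *A) const override {
    return getAssumed() ? "example<assumed>" : "example<pessimistic>";
  }
  const std::string getName() const override { return "AAExampleSketch"; }
  const char *getIdAddr() const override { return &ID; }
  void trackStatistics() const override {}
  static const char ID;
};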
An interface to query the internal state of an abstract attribute.
Definition: Attributor.h:2604
virtual ChangeStatus indicatePessimisticFixpoint()=0
Indicate that the abstract state should converge to the pessimistic state.
virtual bool isAtFixpoint() const =0
Return if this abstract state is fixed, thus does not need to be updated if information changes as it...
virtual bool isValidState() const =0
Return if this abstract state is in a valid state.
virtual ChangeStatus indicateOptimisticFixpoint()=0
Indicate that the abstract state should converge to the optimistic state.
Wrapper for FunctionAnalysisManager.
Definition: Attributor.h:1127
PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &CG, CGSCCUpdateResult &UR)
void populateAll() const
Force populate the entire call graph.
Definition: Attributor.h:5547
Configuration for the Attributor.
Definition: Attributor.h:1422
bool UseLiveness
Flag to determine if we should skip all liveness checks early on.
Definition: Attributor.h:1446
std::optional< unsigned > MaxFixpointIterations
Maximum number of iterations to run until fixpoint.
Definition: Attributor.h:1469
DenseSet< const char * > * Allowed
If not null, a set limiting the attribute opportunities.
Definition: Attributor.h:1466
bool RewriteSignatures
Flag to determine if we rewrite function signatures.
Definition: Attributor.h:1439
bool DeleteFns
Flag to determine if we can delete functions or keep dead ones around.
Definition: Attributor.h:1436
bool IsClosedWorldModule
Flag to indicate if the entire world is contained in this module, that is, no outside functions exist...
Definition: Attributor.h:1450
CallGraphUpdater & CGUpdater
Helper to update an underlying call graph and to delete functions.
Definition: Attributor.h:1463
PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &CG, CGSCCUpdateResult &UR)
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Helper struct used in the communication between an abstract attribute (AA) that wants to change the s...
Definition: Attributor.h:2212
std::function< void(const ArgumentReplacementInfo &, AbstractCallSite, SmallVectorImpl< Value * > &)> ACSRepairCBTy
Abstract call site (ACS) repair callback type.
Definition: Attributor.h:2235
std::function< void(const ArgumentReplacementInfo &, Function &, Function::arg_iterator)> CalleeRepairCBTy
Callee repair callback type.
Definition: Attributor.h:2221
The fixpoint analysis framework that orchestrates the attribute deduction.
Definition: Attributor.h:1516
bool registerFunctionSignatureRewrite(Argument &Arg, ArrayRef< Type * > ReplacementTypes, ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB, ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB)
Register a rewrite for a function signature.
bool checkForAllCallees(function_ref< bool(ArrayRef< const Function * > Callees)> Pred, const AbstractAttribute &QueryingAA, const CallBase &CB)
Check Pred on all potential Callees of CB.
bool isModulePass() const
Return true if this is a module pass, false otherwise.
Definition: Attributor.h:1724
bool isValidFunctionSignatureRewrite(Argument &Arg, ArrayRef< Type * > ReplacementTypes)
Check if we can rewrite a function signature.
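
A hedged sketch of the signature-rewrite interface listed above, assuming ArgumentReplacementInfo is the nested helper shown earlier in this index and that an empty replacement-type list drops the argument; the helper name and the no-op repair callbacks are illustrative only.

static bool tryDropArgument(Attributor &A, Argument &Arg) {
  SmallVector<Type *> ReplacementTypes;  // empty: the argument is dropped
  if (!A.isValidFunctionSignatureRewrite(Arg, ReplacementTypes))
    return false;
  Attributor::ArgumentReplacementInfo::CalleeRepairCBTy CalleeRepairCB =
      [](const Attributor::ArgumentReplacementInfo &, Function &,
         Function::arg_iterator) { /* nothing to patch in the new callee */ };
  Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
      [](const Attributor::ArgumentReplacementInfo &, AbstractCallSite,
         SmallVectorImpl<Value *> &) { /* no replacement operand needed */ };
  return A.registerFunctionSignatureRewrite(
      Arg, ReplacementTypes, std::move(CalleeRepairCB), std::move(ACSRepairCB));
}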
static bool isInternalizable(Function &F)
Returns true if the function F can be internalized.
ChangeStatus removeAttrs(const IRPosition &IRP, ArrayRef< Attribute::AttrKind > AttrKinds)
Remove all AttrKinds attached to IRP.
bool isRunOn(Function &Fn) const
Return true if we derive attributes for Fn.
Definition: Attributor.h:1742
bool isAssumedDead(const AbstractAttribute &AA, const AAIsDead *LivenessAA, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, DepClassTy DepClass=DepClassTy::OPTIONAL)
Return true if AA (or its context instruction) is assumed dead.
bool checkForAllInstructions(function_ref< bool(Instruction &)> Pred, const Function *Fn, const AbstractAttribute *QueryingAA, ArrayRef< unsigned > Opcodes, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, bool CheckPotentiallyDead=false)
Check Pred on all instructions in Fn with an opcode present in Opcodes.
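
A hedged sketch of the typical checkForAllInstructions pattern, assumed to run inside an updateImpl where `A` and `*this` are available; the predicate here merely gives up on memory-writing calls and is not taken from this file.

bool UsedAssumedInformation = false;
auto CheckCall = [&](Instruction &I) {
  auto &CB = cast<CallBase>(I);
  return !CB.mayWriteToMemory();  // returning false aborts the walk
};
if (!A.checkForAllInstructions(CheckCall, getAnchorScope(), this,
                               {(unsigned)Instruction::Call},
                               UsedAssumedInformation))
  return indicatePessimisticFixpoint();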
void recordDependence(const AbstractAttribute &FromAA, const AbstractAttribute &ToAA, DepClassTy DepClass)
Explicitly record a dependence from FromAA to ToAA, that is if FromAA changes ToAA should be updated ...
static void createShallowWrapper(Function &F)
Create a shallow wrapper for F such that F has internal linkage afterwards.
std::optional< Value * > getAssumedSimplified(const IRPosition &IRP, const AbstractAttribute &AA, bool &UsedAssumedInformation, AA::ValueScope S)
If V is assumed simplified, return it, if it is unclear yet, return std::nullopt, otherwise return nu...
Definition: Attributor.h:1975
static Function * internalizeFunction(Function &F, bool Force=false)
Make another copy of the function F such that the copied version has internal linkage afterwards and ...
bool isFunctionIPOAmendable(const Function &F)
Determine whether the function F is IPO amendable.
Definition: Attributor.h:1821
bool checkForAllReadWriteInstructions(function_ref< bool(Instruction &)> Pred, AbstractAttribute &QueryingAA, bool &UsedAssumedInformation)
Check Pred on all Read/Write instructions.
bool checkForAllReturnedValues(function_ref< bool(Value &)> Pred, const AbstractAttribute &QueryingAA, AA::ValueScope S=AA::ValueScope::Intraprocedural, bool RecurseForSelectAndPHI=true)
Check Pred on all values potentially returned by the function associated with QueryingAA.
bool isClosedWorldModule() const
Return true if the module contains the whole world, thus, no outside functions exist.
std::optional< Constant * > getAssumedConstant(const IRPosition &IRP, const AbstractAttribute &AA, bool &UsedAssumedInformation)
If IRP is assumed to be a constant, return it, if it is unclear yet, return std::nullopt,...
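
The three-way result of getAssumedConstant is easiest to see in a short sketch, assumed to run inside an update with `A`, `IRP`, and the querying AA `*this` in scope.

bool UsedAssumedInformation = false;
std::optional<Constant *> C =
    A.getAssumedConstant(IRP, *this, UsedAssumedInformation);
if (!C.has_value())
  return ChangeStatus::UNCHANGED;        // simplification still unclear, wait
if (!*C)
  return indicatePessimisticFixpoint();  // proven not to be a constant
// Otherwise *C is the constant the position simplifies to, possibly only
// under assumptions, as signaled by UsedAssumedInformation.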
Attributor(SetVector< Function * > &Functions, InformationCache &InfoCache, AttributorConfig Configuration)
Constructor.
void getAttrs(const IRPosition &IRP, ArrayRef< Attribute::AttrKind > AKs, SmallVectorImpl< Attribute > &Attrs, bool IgnoreSubsumingPositions=false)
Return the attributes of any kind in AKs existing in the IR at a position that will affect this one.
InformationCache & getInfoCache()
Return the internal information cache.
Definition: Attributor.h:1721
std::optional< Value * > translateArgumentToCallSiteContent(std::optional< Value * > V, CallBase &CB, const AbstractAttribute &AA, bool &UsedAssumedInformation)
Translate V from the callee context into the call site context.
bool checkForAllUses(function_ref< bool(const Use &, bool &)> Pred, const AbstractAttribute &QueryingAA, const Value &V, bool CheckBBLivenessOnly=false, DepClassTy LivenessDepClass=DepClassTy::OPTIONAL, bool IgnoreDroppableUses=true, function_ref< bool(const Use &OldU, const Use &NewU)> EquivalentUseCB=nullptr)
Check Pred on all (transitive) uses of V.
ChangeStatus manifestAttrs(const IRPosition &IRP, ArrayRef< Attribute > DeducedAttrs, bool ForceReplace=false)
Attach DeducedAttrs to IRP, if ForceReplace is set we do this even if the same attribute kind was alr...
bool hasAttr(const IRPosition &IRP, ArrayRef< Attribute::AttrKind > AKs, bool IgnoreSubsumingPositions=false, Attribute::AttrKind ImpliedAttributeKind=Attribute::None)
Return true if any kind in AKs exists in the IR at a position that will affect this one.
void registerForUpdate(AbstractAttribute &AA)
Allows a query AA to request an update if a new query was received.
void identifyDefaultAbstractAttributes(Function &F)
Determine opportunities to derive 'default' attributes in F and create abstract attribute objects for...
bool getAssumedSimplifiedValues(const IRPosition &IRP, const AbstractAttribute *AA, SmallVectorImpl< AA::ValueAndContext > &Values, AA::ValueScope S, bool &UsedAssumedInformation, bool RecurseForSelectAndPHI=true)
Try to simplify IRP in the scope S.
std::function< bool(Attributor &, const AbstractAttribute *)> VirtualUseCallbackTy
Definition: Attributor.h:2063
ChangeStatus run()
Run the analyses until a fixpoint is reached or enforced (timeout).
static bool internalizeFunctions(SmallPtrSetImpl< Function * > &FnSet, DenseMap< Function *, Function * > &FnMap)
Make copies of each function in the set FnSet such that the copied version has internal linkage after...
bool checkForAllCallSites(function_ref< bool(AbstractCallSite)> Pred, const AbstractAttribute &QueryingAA, bool RequireAllCallSites, bool &UsedAssumedInformation)
Check Pred on all function call sites.
bool getAttrsFromAssumes(const IRPosition &IRP, Attribute::AttrKind AK, SmallVectorImpl< Attribute > &Attrs)
Return the attributes of kind AK existing in the IR as operand bundles of an llvm....
bool isKnown(base_t BitsEncoding=BestState) const
Return true if the bits set in BitsEncoding are "known bits".
Definition: Attributor.h:2752
Support structure for SCC passes to communicate updates to the call graph back to the CGSCC pass manager...
static std::string getNodeLabel(const AADepGraphNode *Node, const AADepGraph *DG)
DOTGraphTraits - Template class that can be specialized to customize how graphs are converted to 'dot...
DefaultDOTGraphTraits - This class provides the default implementations of all of the DOTGraphTraits ...
Represent subnormal handling kind for floating point instruction inputs and outputs.
@ Dynamic
Denormals have unknown treatment.
An information struct used to provide DenseMap with the various necessary components for a given valu...
Definition: DenseMapInfo.h:52
static NodeRef DepGetVal(const DepTy &DT)
static ChildIteratorType child_end(NodeRef N)
static NodeRef getEntryNode(AADepGraphNode *DGN)
static ChildIteratorType child_begin(NodeRef N)
AADepGraphNode::DepSetTy::iterator ChildEdgeIteratorType
static NodeRef getEntryNode(AADepGraph *DG)
static nodes_iterator nodes_begin(AADepGraph *DG)
static nodes_iterator nodes_end(AADepGraph *DG)
Helper to describe and deal with positions in the LLVM-IR.
Definition: Attributor.h:586
Function * getAssociatedFunction() const
Return the associated function, if any.
Definition: Attributor.h:717
void setAttrList(const AttributeList &AttrList) const
Update the attributes associated with this function or call site scope.
Definition: Attributor.h:853
unsigned getAttrIdx() const
Return the index in the attribute list for this position.
Definition: Attributor.h:818
bool hasCallBaseContext() const
Check if the position has any call base context.
Definition: Attributor.h:935
static const IRPosition callsite_returned(const CallBase &CB)
Create a position describing the returned value of CB.
Definition: Attributor.h:654
static const IRPosition returned(const Function &F, const CallBaseContext *CBContext=nullptr)
Create a position describing the returned value of F.
Definition: Attributor.h:636
Argument * getAssociatedArgument() const
Return the associated argument, if any.
Definition: Attributor.cpp:995
static const IRPosition value(const Value &V, const CallBaseContext *CBContext=nullptr)
Create a position describing the value of V.
Definition: Attributor.h:610
AttributeList getAttrList() const
Return the attributes associated with this function or call site scope.
Definition: Attributor.h:846
static const IRPosition inst(const Instruction &I, const CallBaseContext *CBContext=nullptr)
Create a position describing the instruction I.
Definition: Attributor.h:622
static const IRPosition callsite_argument(const CallBase &CB, unsigned ArgNo)
Create a position describing the argument of CB at position ArgNo.
Definition: Attributor.h:659
static const IRPosition TombstoneKey
Definition: Attributor.h:941
Kind
The positions we distinguish in the IR.
Definition: Attributor.h:592
@ IRP_ARGUMENT
An attribute for a function argument.
Definition: Attributor.h:600
@ IRP_RETURNED
An attribute for the function return value.
Definition: Attributor.h:596
@ IRP_CALL_SITE
An attribute for a call site (function scope).
Definition: Attributor.h:599
@ IRP_CALL_SITE_RETURNED
An attribute for a call site return value.
Definition: Attributor.h:597
@ IRP_FUNCTION
An attribute for a function (scope).
Definition: Attributor.h:598
@ IRP_FLOAT
A position that is not associated with a spot suitable for attributes.
Definition: Attributor.h:594
@ IRP_CALL_SITE_ARGUMENT
An attribute for a call site argument.
Definition: Attributor.h:601
@ IRP_INVALID
An invalid position.
Definition: Attributor.h:593
Instruction * getCtxI() const
Return the context instruction, if any.
Definition: Attributor.h:770
static const IRPosition argument(const Argument &Arg, const CallBaseContext *CBContext=nullptr)
Create a position describing the argument Arg.
Definition: Attributor.h:643
static const IRPosition EmptyKey
Special DenseMap key values.
Definition: Attributor.h:940
static const IRPosition function(const Function &F, const CallBaseContext *CBContext=nullptr)
Create a position describing the function scope of F.
Definition: Attributor.h:629
const CallBaseContext * getCallBaseContext() const
Get the call base context from the position.
Definition: Attributor.h:932
Value & getAssociatedValue() const
Return the value this abstract attribute is associated with.
Definition: Attributor.h:784
Value & getAnchorValue() const
Return the value this abstract attribute is anchored with.
Definition: Attributor.h:703
Value * getAttrListAnchor() const
Return the value attributes are attached to.
Definition: Attributor.h:839
int getCallSiteArgNo() const
Return the call site argument number of the associated value if it is an argument or call site argume...
Definition: Attributor.h:813
Kind getPositionKind() const
Return the associated position kind.
Definition: Attributor.h:882
static const IRPosition callsite_function(const CallBase &CB)
Create a position describing the function scope of CB.
Definition: Attributor.h:649
Function * getAnchorScope() const
Return the Function surrounding the anchor value.
Definition: Attributor.h:758
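
For orientation, the factory functions above are typically combined as in the following sketch; F, Arg, and CB are assumed to be an existing Function, Argument, and CallBase.

IRPosition FnPos    = IRPosition::function(F);               // function scope
IRPosition RetPos   = IRPosition::returned(F);               // return value
IRPosition ArgPos   = IRPosition::argument(Arg);             // formal argument
IRPosition CSArgPos = IRPosition::callsite_argument(CB, 0);  // actual argument
IRPosition CSRetPos = IRPosition::callsite_returned(CB);     // call result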
Data structure to hold cached (LLVM-IR) information.
Definition: Attributor.h:1203
bool stackIsAccessibleByOtherThreads()
Return true if the stack (llvm::Alloca) can be accessed by other threads.
Definition: Attributor.h:1332
MustBeExecutedContextExplorer * getMustBeExecutedContextExplorer()
Return MustBeExecutedContextExplorer.
Definition: Attributor.h:1281
const ArrayRef< Function * > getIndirectlyCallableFunctions(Attributor &A) const
Return all functions that might be called indirectly, only valid for closed world modules (see isClos...
TargetLibraryInfo * getTargetLibraryInfoForFunction(const Function &F)
Return TargetLibraryInfo for function F.
Definition: Attributor.h:1286
std::optional< unsigned > getFlatAddressSpace() const
Return the flat address space, if the associated target has one.
OpcodeInstMapTy & getOpcodeInstMapForFunction(const Function &F)
Return the map that relates "interesting" opcodes with all instructions with that opcode in F.
Definition: Attributor.h:1271
const RetainedKnowledgeMap & getKnowledgeMap() const
Return the map containing all the knowledge we have from llvm.assumes.
Definition: Attributor.h:1316
SmallVector< Instruction *, 8 > InstructionVectorTy
A vector type to hold instructions.
Definition: Attributor.h:1264
InstructionVectorTy & getReadOrWriteInstsForFunction(const Function &F)
Return the instructions in F that may read or write memory.
Definition: Attributor.h:1276
AP::Result * getAnalysisResultForFunction(const Function &F, bool CachedOnly=false)
Return the analysis result from a pass AP for function F.
Definition: Attributor.h:1307
State for an integer range.
Definition: Attributor.h:2930
ConstantRange getKnown() const
Return the known state encoding.
Definition: Attributor.h:2986
ConstantRange getAssumed() const
Return the assumed state encoding.
Definition: Attributor.h:2989
uint32_t getBitWidth() const
Return associated values' bit width.
Definition: Attributor.h:2963
A "must be executed context" for a given program point PP is the set of instructions,...
Definition: MustExecute.h:385
iterator & end()
Return a universal end iterator.
Definition: MustExecute.h:433
bool findInContextOf(const Instruction *I, const Instruction *PP)
Helper to look for I in the context of PP.
Definition: MustExecute.h:469
iterator & begin(const Instruction *PP)
Return an iterator to explore the context around PP.
Definition: MustExecute.h:419
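
A hedged sketch of how the explorer is usually obtained and queried; the InformationCache accessor is listed further above, `A`, `I`, and `PP` are assumed to be in scope, and a null explorer simply means the query is unavailable.

MustBeExecutedContextExplorer *Explorer =
    A.getInfoCache().getMustBeExecutedContextExplorer();
// Is instruction I executed whenever program point PP is reached?
if (Explorer && Explorer->findInContextOf(&I, &PP)) {
  // I is part of the must-be-executed context of PP.
}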
A class for a set state.
Definition: Attributor.h:4960
bool undefIsContained() const
Returns whether this state contains an undef value or not.
Definition: Attributor.h:4996
bool isValidState() const override
See AbstractState::isValidState(...)
Definition: Attributor.h:4969
const SetTy & getAssumedSet() const
Return this set.
Definition: Attributor.h:4990