Attributor.cpp
1//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements an interprocedural pass that deduces and/or propagates
10// attributes. This is done in an abstract interpretation style fixpoint
11// iteration. See the Attributor.h file comment and the class descriptions in
12// that file for more information.
13//
14//===----------------------------------------------------------------------===//
15
16#include "llvm/Transforms/IPO/Attributor.h"
17
18#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/Statistic.h"
30#include "llvm/IR/Attributes.h"
31#include "llvm/IR/Constant.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/GlobalValue.h"
37#include "llvm/IR/Instruction.h"
40#include "llvm/IR/LLVMContext.h"
41#include "llvm/IR/ValueHandle.h"
44#include "llvm/Support/Debug.h"
48#include "llvm/Support/ModRef.h"
53#include <cstdint>
54#include <memory>
55
56#ifdef EXPENSIVE_CHECKS
57#include "llvm/IR/Verifier.h"
58#endif
59
60#include <cassert>
61#include <optional>
62#include <string>
63
64using namespace llvm;
65
66#define DEBUG_TYPE "attributor"
67#define VERBOSE_DEBUG_TYPE DEBUG_TYPE "-verbose"
68
69DEBUG_COUNTER(ManifestDBGCounter, "attributor-manifest",
70 "Determine what attributes are manifested in the IR");
71
72STATISTIC(NumFnDeleted, "Number of functions deleted");
73STATISTIC(NumFnWithExactDefinition,
74 "Number of functions with exact definitions");
75STATISTIC(NumFnWithoutExactDefinition,
76 "Number of functions without exact definitions");
77STATISTIC(NumFnShallowWrappersCreated, "Number of shallow wrappers created");
78STATISTIC(NumAttributesTimedOut,
79 "Number of abstract attributes timed out before fixpoint");
80STATISTIC(NumAttributesValidFixpoint,
81 "Number of abstract attributes in a valid fixpoint state");
82STATISTIC(NumAttributesManifested,
83 "Number of abstract attributes manifested in IR");
84
85// TODO: Determine a good default value.
86//
87// In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
88// (when run with the first 5 abstract attributes). The results also indicate
89// that we never reach 32 iterations but always find a fixpoint sooner.
90//
91// This will become more involved once we perform two interleaved fixpoint
92// iterations: bottom-up and top-down.
93static cl::opt<unsigned>
94 SetFixpointIterations("attributor-max-iterations", cl::Hidden,
95 cl::desc("Maximal number of fixpoint iterations."),
96 cl::init(32));
97
98static cl::opt<unsigned>
99 MaxSpecializationPerCB("attributor-max-specializations-per-call-base",
100 cl::Hidden,
101 cl::desc("Maximal number of callees specialized for "
102 "a call base"),
103 cl::init(UINT32_MAX));
104
106 "attributor-max-initialization-chain-length", cl::Hidden,
107 cl::desc(
108 "Maximal number of chained initializations (to avoid stack overflows)"),
111
113 "attributor-annotate-decl-cs", cl::Hidden,
114 cl::desc("Annotate call sites of function declarations."), cl::init(false));
115
116static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
117 cl::init(true), cl::Hidden);
118
119static cl::opt<bool>
120 AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden,
121 cl::desc("Allow the Attributor to create shallow "
122 "wrappers for non-exact definitions."),
123 cl::init(false));
124
125static cl::opt<bool>
126 AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden,
127 cl::desc("Allow the Attributor to use IP information "
128 "derived from non-exact functions via cloning"),
129 cl::init(false));
130
131// These options can only be used in debug builds.
132#ifndef NDEBUG
133static cl::list<std::string>
134 SeedAllowList("attributor-seed-allow-list", cl::Hidden,
135 cl::desc("Comma separated list of attribute names that are "
136 "allowed to be seeded."),
137 cl::CommaSeparated);
138
140 "attributor-function-seed-allow-list", cl::Hidden,
141 cl::desc("Comma separated list of function names that are "
142 "allowed to be seeded."),
144#endif
145
146static cl::opt<bool>
147 DumpDepGraph("attributor-dump-dep-graph", cl::Hidden,
148 cl::desc("Dump the dependency graph to dot files."),
149 cl::init(false));
150
152 "attributor-depgraph-dot-filename-prefix", cl::Hidden,
153 cl::desc("The prefix used for the CallGraph dot file names."));
154
155static cl::opt<bool> ViewDepGraph("attributor-view-dep-graph", cl::Hidden,
156 cl::desc("View the dependency graph."),
157 cl::init(false));
158
159static cl::opt<bool> PrintDependencies("attributor-print-dep", cl::Hidden,
160 cl::desc("Print attribute dependencies"),
161 cl::init(false));
162
164 "attributor-enable-call-site-specific-deduction", cl::Hidden,
165 cl::desc("Allow the Attributor to do call site specific analysis"),
166 cl::init(false));
167
168static cl::opt<bool>
169 PrintCallGraph("attributor-print-call-graph", cl::Hidden,
170 cl::desc("Print Attributor's internal call graph"),
171 cl::init(false));
172
173static cl::opt<bool> SimplifyAllLoads("attributor-simplify-all-loads",
174 cl::Hidden,
175 cl::desc("Try to simplify all loads."),
176 cl::init(true));
177
179 "attributor-assume-closed-world", cl::Hidden,
180 cl::desc("Should a closed world be assumed, or not. Default if not set."));
181
182/// Logic operators for the change status enum class.
183///
184///{
185ChangeStatus llvm::operator|(ChangeStatus L, ChangeStatus R) {
186 return L == ChangeStatus::CHANGED ? L : R;
187}
188ChangeStatus &llvm::operator|=(ChangeStatus &L, ChangeStatus R) {
189 L = L | R;
190 return L;
191}
192ChangeStatus llvm::operator&(ChangeStatus L, ChangeStatus R) {
193 return L == ChangeStatus::UNCHANGED ? L : R;
194}
195ChangeStatus &llvm::operator&=(ChangeStatus &L, ChangeStatus R) {
196 L = L & R;
197 return L;
198}
199///}
200
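/// Descriptive note (added): return true if module \p M targets a GPU, i.e.,
/// its triple is AMDGPU or NVPTX.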
201bool AA::isGPU(const Module &M) {
202 Triple T(M.getTargetTriple());
203 return T.isAMDGPU() || T.isNVPTX();
204}
205
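/// Descriptive note (added): return true if \p I is known or assumed not to
/// synchronize with other threads; volatile accesses and non-relaxed atomics
/// are treated as synchronizing.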
206bool AA::isNoSyncInst(Attributor &A, const Instruction &I,
207 const AbstractAttribute &QueryingAA) {
208 // We are looking for volatile instructions or non-relaxed atomics.
209 if (const auto *CB = dyn_cast<CallBase>(&I)) {
210 if (CB->hasFnAttr(Attribute::NoSync))
211 return true;
212
213 // Non-convergent and readnone imply nosync.
214 if (!CB->isConvergent() && !CB->mayReadOrWriteMemory())
215 return true;
216
217 if (AANoSync::isNoSyncIntrinsic(&I))
218 return true;
219
220 bool IsKnownNoSync;
221 return AA::hasAssumedIRAttr<Attribute::NoSync>(
222 A, &QueryingAA, IRPosition::callsite_function(*CB),
223 DepClassTy::OPTIONAL, IsKnownNoSync);
224 }
225
226 if (!I.mayReadOrWriteMemory())
227 return true;
228
229 return !I.isVolatile() && !AANoSync::isNonRelaxedAtomic(&I);
230}
231
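/// Descriptive note (added): return true if \p V can be treated as a single
/// runtime instance for analysis purposes (see the AAInstanceInfo comment).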
232bool AA::isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
233 const Value &V, bool ForAnalysisOnly) {
234 // TODO: See the AAInstanceInfo class comment.
235 if (!ForAnalysisOnly)
236 return false;
237 auto *InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
238 QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
239 return InstanceInfoAA && InstanceInfoAA->isAssumedUniqueForAnalysis();
240}
241
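/// Descriptive note (added): determine the constant \p Obj holds before any
/// store, e.g., undef for an alloca, the allocation's initial value, or a
/// (part of a) constant global initializer; nullptr if unknown.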
242Constant *
243AA::getInitialValueForObj(Attributor &A, const AbstractAttribute &QueryingAA,
244 Value &Obj, Type &Ty, const TargetLibraryInfo *TLI,
245 const DataLayout &DL, AA::RangeTy *RangePtr) {
246 if (isa<AllocaInst>(Obj))
247 return UndefValue::get(&Ty);
248 if (Constant *Init = getInitialValueOfAllocation(&Obj, TLI, &Ty))
249 return Init;
250 auto *GV = dyn_cast<GlobalVariable>(&Obj);
251 if (!GV)
252 return nullptr;
253
254 bool UsedAssumedInformation = false;
255 Constant *Initializer = nullptr;
256 if (A.hasGlobalVariableSimplificationCallback(*GV)) {
257 auto AssumedGV = A.getAssumedInitializerFromCallBack(
258 *GV, &QueryingAA, UsedAssumedInformation);
259 Initializer = *AssumedGV;
260 if (!Initializer)
261 return nullptr;
262 } else {
263 if (!GV->hasLocalLinkage() &&
264 (GV->isInterposable() || !(GV->isConstant() && GV->hasInitializer())))
265 return nullptr;
266 if (!GV->hasInitializer())
267 return UndefValue::get(&Ty);
268
269 if (!Initializer)
270 Initializer = GV->getInitializer();
271 }
272
273 if (RangePtr && !RangePtr->offsetOrSizeAreUnknown()) {
274 APInt Offset = APInt(64, RangePtr->Offset);
275 return ConstantFoldLoadFromConst(Initializer, &Ty, Offset, DL);
276 }
277
278 return ConstantFoldLoadFromUniformValue(Initializer, &Ty, DL);
279}
280
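/// Descriptive note (added): return true if \p V can be used inside function
/// \p Scope, i.e., it is a constant or it is defined in \p Scope.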
281bool AA::isValidInScope(const Value &V, const Function *Scope) {
282 if (isa<Constant>(V))
283 return true;
284 if (auto *I = dyn_cast<Instruction>(&V))
285 return I->getFunction() == Scope;
286 if (auto *A = dyn_cast<Argument>(&V))
287 return A->getParent() == Scope;
288 return false;
289}
290
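/// Descriptive note (added): return true if the value in \p VAC is available
/// at its context instruction, using dominance or block-local ordering.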
291bool AA::isValidAtPosition(const AA::ValueAndContext &VAC,
292 InformationCache &InfoCache) {
293 if (isa<Constant>(VAC.getValue()) || VAC.getValue() == VAC.getCtxI())
294 return true;
295 const Function *Scope = nullptr;
296 const Instruction *CtxI = VAC.getCtxI();
297 if (CtxI)
298 Scope = CtxI->getFunction();
299 if (auto *A = dyn_cast<Argument>(VAC.getValue()))
300 return A->getParent() == Scope;
301 if (auto *I = dyn_cast<Instruction>(VAC.getValue())) {
302 if (I->getFunction() == Scope) {
303 if (const DominatorTree *DT =
304 InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
305 *Scope))
306 return DT->dominates(I, CtxI);
307 // Local dominance check mostly for the old PM passes.
308 if (CtxI && I->getParent() == CtxI->getParent())
309 return llvm::any_of(
310 make_range(I->getIterator(), I->getParent()->end()),
311 [&](const Instruction &AfterI) { return &AfterI == CtxI; });
312 }
313 }
314 return false;
315}
316
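/// Descriptive note (added): try to convert \p V to type \p Ty via constant
/// folding only; return nullptr if no such conversion exists.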
317Value *AA::getWithType(Value &V, Type &Ty) {
318 if (V.getType() == &Ty)
319 return &V;
320 if (isa<PoisonValue>(V))
321 return PoisonValue::get(&Ty);
322 if (isa<UndefValue>(V))
323 return UndefValue::get(&Ty);
324 if (auto *C = dyn_cast<Constant>(&V)) {
325 if (C->isNullValue())
326 return Constant::getNullValue(&Ty);
327 if (C->getType()->isPointerTy() && Ty.isPointerTy())
328 return ConstantExpr::getPointerCast(C, &Ty);
329 if (C->getType()->getPrimitiveSizeInBits() >= Ty.getPrimitiveSizeInBits()) {
330 if (C->getType()->isIntegerTy() && Ty.isIntegerTy())
331 return ConstantExpr::getTrunc(C, &Ty, /* OnlyIfReduced */ true);
332 if (C->getType()->isFloatingPointTy() && Ty.isFloatingPointTy())
333 return ConstantFoldCastInstruction(Instruction::FPTrunc, C, &Ty);
334 }
335 }
336 return nullptr;
337}
338
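/// Descriptive note (added): merge two optional simplified values in the value
/// simplification lattice; equal values, undef, and absent values combine,
/// while conflicting values yield nullptr.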
339std::optional<Value *>
340AA::combineOptionalValuesInAAValueLatice(const std::optional<Value *> &A,
341 const std::optional<Value *> &B,
342 Type *Ty) {
343 if (A == B)
344 return A;
345 if (!B)
346 return A;
347 if (*B == nullptr)
348 return nullptr;
349 if (!A)
350 return Ty ? getWithType(**B, *Ty) : nullptr;
351 if (*A == nullptr)
352 return nullptr;
353 if (!Ty)
354 Ty = (*A)->getType();
355 if (isa_and_nonnull<UndefValue>(*A))
356 return getWithType(**B, *Ty);
357 if (isa<UndefValue>(*B))
358 return A;
359 if (*A && *B && *A == getWithType(**B, *Ty))
360 return A;
361 return nullptr;
362}
363
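/// Descriptive note (added): collect the values the memory accessed by \p I (a
/// load or store) may contain or flow into, by inspecting the underlying
/// objects and their interfering accesses via AAPointerInfo. Shared
/// implementation for loads (IsLoad == true) and stores.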
364template <bool IsLoad, typename Ty>
365static bool getPotentialCopiesOfMemoryValue(
366 Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
367 SmallSetVector<Instruction *, 4> *PotentialValueOrigins,
368 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
369 bool OnlyExact) {
370 LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
371 << " (only exact: " << OnlyExact << ")\n";);
372
373 Value &Ptr = *I.getPointerOperand();
374 // Containers to remember the pointer infos and new copies while we are not
375 // sure that we can find all of them. If we abort we want to avoid spurious
376 // dependences and potential copies in the provided container.
377 SmallVector<const AAPointerInfo *> PIs;
378 SmallSetVector<Value *, 8> NewCopies;
379 SmallSetVector<Instruction *, 8> NewCopyOrigins;
380
381 const auto *TLI =
382 A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());
383
384 auto Pred = [&](Value &Obj) {
385 LLVM_DEBUG(dbgs() << "Visit underlying object " << Obj << "\n");
386 if (isa<UndefValue>(&Obj))
387 return true;
388 if (isa<ConstantPointerNull>(&Obj)) {
389 // A null pointer access can be undefined but any offset from null may
390 // be OK. We do not try to optimize the latter.
391 if (!NullPointerIsDefined(I.getFunction(),
392 Ptr.getType()->getPointerAddressSpace()) &&
393 A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation,
394 AA::Interprocedural) == &Obj)
395 return true;
396 LLVM_DEBUG(
397 dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
398 return false;
399 }
400 // TODO: Use assumed noalias return.
401 if (!isa<AllocaInst>(&Obj) && !isa<GlobalVariable>(&Obj) &&
402 !(IsLoad ? isAllocationFn(&Obj, TLI) : isNoAliasCall(&Obj))) {
403 LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << Obj
404 << "\n";);
405 return false;
406 }
407 if (auto *GV = dyn_cast<GlobalVariable>(&Obj))
408 if (!GV->hasLocalLinkage() &&
409 !(GV->isConstant() && GV->hasInitializer())) {
410 LLVM_DEBUG(dbgs() << "Underlying object is global with external "
411 "linkage, not supported yet: "
412 << Obj << "\n";);
413 return false;
414 }
415
416 bool NullOnly = true;
417 bool NullRequired = false;
418 auto CheckForNullOnlyAndUndef = [&](std::optional<Value *> V,
419 bool IsExact) {
420 if (!V || *V == nullptr)
421 NullOnly = false;
422 else if (isa<UndefValue>(*V))
423 /* No op */;
424 else if (isa<Constant>(*V) && cast<Constant>(*V)->isNullValue())
425 NullRequired = !IsExact;
426 else
427 NullOnly = false;
428 };
429
430 auto AdjustWrittenValueType = [&](const AAPointerInfo::Access &Acc,
431 Value &V) {
432 Value *AdjV = AA::getWithType(V, *I.getType());
433 if (!AdjV) {
434 LLVM_DEBUG(dbgs() << "Underlying object written but stored value "
435 "cannot be converted to read type: "
436 << *Acc.getRemoteInst() << " : " << *I.getType()
437 << "\n";);
438 }
439 return AdjV;
440 };
441
442 auto SkipCB = [&](const AAPointerInfo::Access &Acc) {
443 if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
444 return true;
445 if (IsLoad) {
446 if (Acc.isWrittenValueYetUndetermined())
447 return true;
448 if (PotentialValueOrigins && !isa<AssumeInst>(Acc.getRemoteInst()))
449 return false;
450 if (!Acc.isWrittenValueUnknown())
451 if (Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue()))
452 if (NewCopies.count(V)) {
453 NewCopyOrigins.insert(Acc.getRemoteInst());
454 return true;
455 }
456 if (auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst()))
457 if (Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand()))
458 if (NewCopies.count(V)) {
459 NewCopyOrigins.insert(Acc.getRemoteInst());
460 return true;
461 }
462 }
463 return false;
464 };
465
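// Descriptive note (added): check a single interfering access; for loads,
// record the written value as a potential copy, for stores, record the
// reading instruction, and bail out if the access cannot be handled exactly.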
466 auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
467 if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
468 return true;
469 if (IsLoad && Acc.isWrittenValueYetUndetermined())
470 return true;
471 CheckForNullOnlyAndUndef(Acc.getContent(), IsExact);
472 if (OnlyExact && !IsExact && !NullOnly &&
473 !isa_and_nonnull<UndefValue>(Acc.getWrittenValue())) {
474 LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
475 << ", abort!\n");
476 return false;
477 }
478 if (NullRequired && !NullOnly) {
479 LLVM_DEBUG(dbgs() << "Required all `null` accesses due to non exact "
480 "one, however found non-null one: "
481 << *Acc.getRemoteInst() << ", abort!\n");
482 return false;
483 }
484 if (IsLoad) {
485 assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
486 if (!Acc.isWrittenValueUnknown()) {
487 Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue());
488 if (!V)
489 return false;
490 NewCopies.insert(V);
491 if (PotentialValueOrigins)
492 NewCopyOrigins.insert(Acc.getRemoteInst());
493 return true;
494 }
495 auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
496 if (!SI) {
497 LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
498 "instruction not supported yet: "
499 << *Acc.getRemoteInst() << "\n";);
500 return false;
501 }
502 Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand());
503 if (!V)
504 return false;
505 NewCopies.insert(V);
506 if (PotentialValueOrigins)
507 NewCopyOrigins.insert(SI);
508 } else {
509 assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
510 auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
511 if (!LI && OnlyExact) {
512 LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
513 "instruction not supported yet: "
514 << *Acc.getRemoteInst() << "\n";);
515 return false;
516 }
517 NewCopies.insert(Acc.getRemoteInst());
518 }
519 return true;
520 };
521
522 // If the value has been written to we don't need the initial value of the
523 // object.
524 bool HasBeenWrittenTo = false;
525
526 AA::RangeTy Range;
527 auto *PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(Obj),
528 DepClassTy::NONE);
529 if (!PI || !PI->forallInterferingAccesses(
530 A, QueryingAA, I,
531 /* FindInterferingWrites */ IsLoad,
532 /* FindInterferingReads */ !IsLoad, CheckAccess,
533 HasBeenWrittenTo, Range, SkipCB)) {
534 LLVM_DEBUG(
535 dbgs()
536 << "Failed to verify all interfering accesses for underlying object: "
537 << Obj << "\n");
538 return false;
539 }
540
541 if (IsLoad && !HasBeenWrittenTo && !Range.isUnassigned()) {
542 const DataLayout &DL = A.getDataLayout();
543 Value *InitialValue = AA::getInitialValueForObj(
544 A, QueryingAA, Obj, *I.getType(), TLI, DL, &Range);
545 if (!InitialValue) {
546 LLVM_DEBUG(dbgs() << "Could not determine required initial value of "
547 "underlying object, abort!\n");
548 return false;
549 }
550 CheckForNullOnlyAndUndef(InitialValue, /* IsExact */ true);
551 if (NullRequired && !NullOnly) {
552 LLVM_DEBUG(dbgs() << "Non exact access but initial value that is not "
553 "null or undef, abort!\n");
554 return false;
555 }
556
557 NewCopies.insert(InitialValue);
558 if (PotentialValueOrigins)
559 NewCopyOrigins.insert(nullptr);
560 }
561
562 PIs.push_back(PI);
563
564 return true;
565 };
566
567 const auto *AAUO = A.getAAFor<AAUnderlyingObjects>(
568 QueryingAA, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
569 if (!AAUO || !AAUO->forallUnderlyingObjects(Pred)) {
570 LLVM_DEBUG(
571 dbgs() << "Underlying objects stored into could not be determined\n";);
572 return false;
573 }
574
575 // Only if we were successful in collecting all potential copies do we
576 // record dependences (on non-fixed AAPointerInfo AAs) and modify the given
577 // PotentialCopies container.
578 for (const auto *PI : PIs) {
579 if (!PI->getState().isAtFixpoint())
580 UsedAssumedInformation = true;
581 A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
582 }
583 PotentialCopies.insert(NewCopies.begin(), NewCopies.end());
584 if (PotentialValueOrigins)
585 PotentialValueOrigins->insert(NewCopyOrigins.begin(), NewCopyOrigins.end());
586
587 return true;
588}
589
590bool AA::getPotentiallyLoadedValues(
591 Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
592 SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
593 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
594 bool OnlyExact) {
595 return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
596 A, LI, PotentialValues, &PotentialValueOrigins, QueryingAA,
597 UsedAssumedInformation, OnlyExact);
598}
599
600bool AA::getPotentialCopiesOfStoredValue(
601 Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
602 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
603 bool OnlyExact) {
604 return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
605 A, SI, PotentialCopies, nullptr, QueryingAA, UsedAssumedInformation,
606 OnlyExact);
607}
608
609static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP,
610 const AbstractAttribute &QueryingAA,
611 bool RequireReadNone, bool &IsKnown) {
612 if (RequireReadNone) {
613 if (AA::hasAssumedIRAttr<Attribute::ReadNone>(
614 A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
615 /* IgnoreSubsumingPositions */ true))
616 return true;
617 } else if (AA::hasAssumedIRAttr<Attribute::ReadOnly>(
618 A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
619 /* IgnoreSubsumingPositions */ true))
620 return true;
621
622 if (IRP.getPositionKind() == IRPosition::IRP_FUNCTION ||
623 IRP.getPositionKind() == IRPosition::IRP_CALL_SITE) {
624 const auto *MemLocAA =
625 A.getAAFor<AAMemoryLocation>(QueryingAA, IRP, DepClassTy::NONE);
626 if (MemLocAA && MemLocAA->isAssumedReadNone()) {
627 IsKnown = MemLocAA->isKnownReadNone();
628 if (!IsKnown)
629 A.recordDependence(*MemLocAA, QueryingAA, DepClassTy::OPTIONAL);
630 return true;
631 }
632 }
633
634 const auto *MemBehaviorAA =
635 A.getAAFor<AAMemoryBehavior>(QueryingAA, IRP, DepClassTy::NONE);
636 if (MemBehaviorAA &&
637 (MemBehaviorAA->isAssumedReadNone() ||
638 (!RequireReadNone && MemBehaviorAA->isAssumedReadOnly()))) {
639 IsKnown = RequireReadNone ? MemBehaviorAA->isKnownReadNone()
640 : MemBehaviorAA->isKnownReadOnly();
641 if (!IsKnown)
642 A.recordDependence(*MemBehaviorAA, QueryingAA, DepClassTy::OPTIONAL);
643 return true;
644 }
645
646 return false;
647}
648
649bool AA::isAssumedReadOnly(Attributor &A, const IRPosition &IRP,
650 const AbstractAttribute &QueryingAA, bool &IsKnown) {
651 return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
652 /* RequireReadNone */ false, IsKnown);
653}
654bool AA::isAssumedReadNone(Attributor &A, const IRPosition &IRP,
655 const AbstractAttribute &QueryingAA, bool &IsKnown) {
656 return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
657 /* RequireReadNone */ true, IsKnown);
658}
659
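/// Descriptive note (added): worklist-based check whether \p FromI may reach
/// \p ToI (or function \p ToFn), combining intra- and inter-procedural
/// reachability AAs and optionally stepping backwards through call sites via
/// \p GoBackwardsCB.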
660static bool
661isPotentiallyReachable(Attributor &A, const Instruction &FromI,
662 const Instruction *ToI, const Function &ToFn,
663 const AbstractAttribute &QueryingAA,
664 const AA::InstExclusionSetTy *ExclusionSet,
665 std::function<bool(const Function &F)> GoBackwardsCB) {
666 LLVM_DEBUG({
667 dbgs() << "[AA] isPotentiallyReachable @" << ToFn.getName() << " from "
668 << FromI << " [GBCB: " << bool(GoBackwardsCB) << "][#ExS: "
669 << (ExclusionSet ? std::to_string(ExclusionSet->size()) : "none")
670 << "]\n";
671 if (ExclusionSet)
672 for (auto *ES : *ExclusionSet)
673 dbgs() << *ES << "\n";
674 });
675
676 // We know kernels (generally) cannot be called from within the module. Thus,
677 // for reachability we would need to step back from a kernel which would allow
678 // us to reach anything anyway. Even if a kernel is invoked from another
679 // kernel, values like allocas and shared memory are not accessible. We
680 // implicitly check for this situation to avoid costly lookups.
681 if (GoBackwardsCB && &ToFn != FromI.getFunction() &&
682 !GoBackwardsCB(*FromI.getFunction()) && ToFn.hasFnAttribute("kernel") &&
683 FromI.getFunction()->hasFnAttribute("kernel")) {
684 LLVM_DEBUG(dbgs() << "[AA] assume kernel cannot be reached from within the "
685 "module; success\n";);
686 return false;
687 }
688
689 // If we can go arbitrarily backwards we will eventually reach an entry point
690 // that can reach ToI. Only if a set of blocks through which we cannot go is
691 // provided, or once we track internal functions not accessible from the
692 // outside, it makes sense to perform backwards analysis in the absence of a
693 // GoBackwardsCB.
694 if (!GoBackwardsCB && !ExclusionSet) {
695 LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
696 << " is not checked backwards and does not have an "
697 "exclusion set, abort\n");
698 return true;
699 }
700
701 SmallVector<const Instruction *, 16> Worklist;
702 SmallPtrSet<const Instruction *, 16> Visited;
703 Worklist.push_back(&FromI);
704
705 while (!Worklist.empty()) {
706 const Instruction *CurFromI = Worklist.pop_back_val();
707 if (!Visited.insert(CurFromI).second)
708 continue;
709
710 const Function *FromFn = CurFromI->getFunction();
711 if (FromFn == &ToFn) {
712 if (!ToI)
713 return true;
714 LLVM_DEBUG(dbgs() << "[AA] check " << *ToI << " from " << *CurFromI
715 << " intraprocedurally\n");
716 const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
717 QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
718 bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
719 A, *CurFromI, *ToI, ExclusionSet);
720 LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " "
721 << (Result ? "can potentially " : "cannot ") << "reach "
722 << *ToI << " [Intra]\n");
723 if (Result)
724 return true;
725 }
726
727 bool Result = true;
728 if (!ToFn.isDeclaration() && ToI) {
729 const auto *ToReachabilityAA = A.getAAFor<AAIntraFnReachability>(
730 QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
731 const Instruction &EntryI = ToFn.getEntryBlock().front();
732 Result = !ToReachabilityAA || ToReachabilityAA->isAssumedReachable(
733 A, EntryI, *ToI, ExclusionSet);
734 LLVM_DEBUG(dbgs() << "[AA] Entry " << EntryI << " of @" << ToFn.getName()
735 << " " << (Result ? "can potentially " : "cannot ")
736 << "reach @" << *ToI << " [ToFn]\n");
737 }
738
739 if (Result) {
740 // The entry of the ToFn can reach the instruction ToI. Now check if the
741 // current instruction is already known to reach the ToFn.
742 const auto *FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
743 QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
744 Result = !FnReachabilityAA || FnReachabilityAA->instructionCanReach(
745 A, *CurFromI, ToFn, ExclusionSet);
746 LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
747 << " " << (Result ? "can potentially " : "cannot ")
748 << "reach @" << ToFn.getName() << " [FromFn]\n");
749 if (Result)
750 return true;
751 }
752
753 // TODO: Check assumed nounwind.
754 const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
755 QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
756 auto ReturnInstCB = [&](Instruction &Ret) {
757 bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
758 A, *CurFromI, Ret, ExclusionSet);
759 LLVM_DEBUG(dbgs() << "[AA][Ret] " << *CurFromI << " "
760 << (Result ? "can potentially " : "cannot ") << "reach "
761 << Ret << " [Intra]\n");
762 return !Result;
763 };
764
765 // Check if we can reach returns.
766 bool UsedAssumedInformation = false;
767 if (A.checkForAllInstructions(ReturnInstCB, FromFn, &QueryingAA,
768 {Instruction::Ret}, UsedAssumedInformation)) {
769 LLVM_DEBUG(dbgs() << "[AA] No return is reachable, done\n");
770 continue;
771 }
772
773 if (!GoBackwardsCB) {
774 LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
775 << " is not checked backwards, abort\n");
776 return true;
777 }
778
779 // If we do not go backwards from the FromFn we are done here and so far we
780 // could not find a way to reach ToFn/ToI.
781 if (!GoBackwardsCB(*FromFn))
782 continue;
783
784 LLVM_DEBUG(dbgs() << "Stepping backwards to the call sites of @"
785 << FromFn->getName() << "\n");
786
787 auto CheckCallSite = [&](AbstractCallSite ACS) {
788 CallBase *CB = ACS.getInstruction();
789 if (!CB)
790 return false;
791
792 if (isa<InvokeInst>(CB))
793 return false;
794
795 const Instruction *Inst = CB->getNextNonDebugInstruction();
796 Worklist.push_back(Inst);
797 return true;
798 };
799
800 Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
801 /* RequireAllCallSites */ true,
802 &QueryingAA, UsedAssumedInformation);
803 if (Result) {
804 LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
805 << " in @" << FromFn->getName()
806 << " failed, give up\n");
807 return true;
808 }
809
810 LLVM_DEBUG(dbgs() << "[AA] stepped back to call sites from " << *CurFromI
811 << " in @" << FromFn->getName()
812 << " worklist size is: " << Worklist.size() << "\n");
813 }
814 return false;
815}
816
818 Attributor &A, const Instruction &FromI, const Instruction &ToI,
819 const AbstractAttribute &QueryingAA,
820 const AA::InstExclusionSetTy *ExclusionSet,
821 std::function<bool(const Function &F)> GoBackwardsCB) {
822 const Function *ToFn = ToI.getFunction();
823 return ::isPotentiallyReachable(A, FromI, &ToI, *ToFn, QueryingAA,
824 ExclusionSet, GoBackwardsCB);
825}
826
828 Attributor &A, const Instruction &FromI, const Function &ToFn,
829 const AbstractAttribute &QueryingAA,
830 const AA::InstExclusionSetTy *ExclusionSet,
831 std::function<bool(const Function &F)> GoBackwardsCB) {
832 return ::isPotentiallyReachable(A, FromI, /* ToI */ nullptr, ToFn, QueryingAA,
833 ExclusionSet, GoBackwardsCB);
834}
835
836bool AA::isAssumedThreadLocalObject(Attributor &A, Value &Obj,
837 const AbstractAttribute &QueryingAA) {
838 if (isa<UndefValue>(Obj))
839 return true;
840 if (isa<AllocaInst>(Obj)) {
841 InformationCache &InfoCache = A.getInfoCache();
842 if (!InfoCache.stackIsAccessibleByOtherThreads()) {
843 LLVM_DEBUG(
844 dbgs() << "[AA] Object '" << Obj
845 << "' is thread local; stack objects are thread local.\n");
846 return true;
847 }
848 bool IsKnownNoCapture;
849 bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
850 A, &QueryingAA, IRPosition::value(Obj), DepClassTy::OPTIONAL,
851 IsKnownNoCapture);
852 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is "
853 << (IsAssumedNoCapture ? "" : "not") << " thread local; "
854 << (IsAssumedNoCapture ? "non-" : "")
855 << "captured stack object.\n");
856 return IsAssumedNoCapture;
857 }
858 if (auto *GV = dyn_cast<GlobalVariable>(&Obj)) {
859 if (GV->isConstant()) {
860 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
861 << "' is thread local; constant global\n");
862 return true;
863 }
864 if (GV->isThreadLocal()) {
865 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
866 << "' is thread local; thread local global\n");
867 return true;
868 }
869 }
870
871 if (A.getInfoCache().targetIsGPU()) {
872 if (Obj.getType()->getPointerAddressSpace() ==
873 (int)AA::GPUAddressSpace::Local) {
874 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
875 << "' is thread local; GPU local memory\n");
876 return true;
877 }
878 if (Obj.getType()->getPointerAddressSpace() ==
879 (int)AA::GPUAddressSpace::Constant) {
880 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
881 << "' is thread local; GPU constant memory\n");
882 return true;
883 }
884 }
885
886 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is not thread local\n");
887 return false;
888}
889
890bool AA::isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I,
891 const AbstractAttribute &QueryingAA) {
892 if (!I.mayHaveSideEffects() && !I.mayReadFromMemory())
893 return false;
894
895 SmallSetVector<const Value *, 8> Ptrs;
896
897 auto AddLocationPtr = [&](std::optional<MemoryLocation> Loc) {
898 if (!Loc || !Loc->Ptr) {
899 LLVM_DEBUG(
900 dbgs() << "[AA] Access to unknown location; -> requires barriers\n");
901 return false;
902 }
903 Ptrs.insert(Loc->Ptr);
904 return true;
905 };
906
907 if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&I)) {
908 if (!AddLocationPtr(MemoryLocation::getForDest(MI)))
909 return true;
910 if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(&I))
911 if (!AddLocationPtr(MemoryLocation::getForSource(MTI)))
912 return true;
913 } else if (!AddLocationPtr(MemoryLocation::getOrNone(&I)))
914 return true;
915
916 return isPotentiallyAffectedByBarrier(A, Ptrs.getArrayRef(), QueryingAA, &I);
917}
918
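/// Descriptive note (added): return true if any pointer in \p Ptrs may point
/// at memory that is not assumed thread-local and could therefore be affected
/// by a barrier.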
919bool AA::isPotentiallyAffectedByBarrier(Attributor &A,
920 ArrayRef<const Value *> Ptrs,
921 const AbstractAttribute &QueryingAA,
922 const Instruction *CtxI) {
923 for (const Value *Ptr : Ptrs) {
924 if (!Ptr) {
925 LLVM_DEBUG(dbgs() << "[AA] nullptr; -> requires barriers\n");
926 return true;
927 }
928
929 auto Pred = [&](Value &Obj) {
930 if (AA::isAssumedThreadLocalObject(A, Obj, QueryingAA))
931 return true;
932 LLVM_DEBUG(dbgs() << "[AA] Access to '" << Obj << "' via '" << *Ptr
933 << "'; -> requires barrier\n");
934 return false;
935 };
936
937 const auto *UnderlyingObjsAA = A.getAAFor<AAUnderlyingObjects>(
938 QueryingAA, IRPosition::value(*Ptr), DepClassTy::OPTIONAL);
939 if (!UnderlyingObjsAA || !UnderlyingObjsAA->forallUnderlyingObjects(Pred))
940 return true;
941 }
942 return false;
943}
944
945/// Return true if \p New is equal or worse than \p Old.
946static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
947 if (!Old.isIntAttribute())
948 return true;
949
950 return Old.getValueAsInt() >= New.getValueAsInt();
951}
952
953/// Return true if the information provided by \p Attr was added to the
954/// attribute set \p AttrSet. This is only the case if it was not already
955/// present in \p AttrSet.
956static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
957 AttributeSet AttrSet, bool ForceReplace,
958 AttrBuilder &AB) {
959
960 if (Attr.isEnumAttribute()) {
961 Attribute::AttrKind Kind = Attr.getKindAsEnum();
962 if (AttrSet.hasAttribute(Kind))
963 return false;
964 AB.addAttribute(Kind);
965 return true;
966 }
967 if (Attr.isStringAttribute()) {
968 StringRef Kind = Attr.getKindAsString();
969 if (AttrSet.hasAttribute(Kind)) {
970 if (!ForceReplace)
971 return false;
972 }
973 AB.addAttribute(Kind, Attr.getValueAsString());
974 return true;
975 }
976 if (Attr.isIntAttribute()) {
977 Attribute::AttrKind Kind = Attr.getKindAsEnum();
978 if (!ForceReplace && Kind == Attribute::Memory) {
979 MemoryEffects ME = Attr.getMemoryEffects() & AttrSet.getMemoryEffects();
980 if (ME == AttrSet.getMemoryEffects())
981 return false;
982 AB.addMemoryAttr(ME);
983 return true;
984 }
985 if (AttrSet.hasAttribute(Kind)) {
986 if (!ForceReplace && isEqualOrWorse(Attr, AttrSet.getAttribute(Kind)))
987 return false;
988 }
989 AB.addAttribute(Attr);
990 return true;
991 }
992
993 llvm_unreachable("Expected enum or string attribute!");
994}
995
996Argument *IRPosition::getAssociatedArgument() const {
997 if (getPositionKind() == IRP_ARGUMENT)
998 return cast<Argument>(&getAnchorValue());
999
1000 // Not an Argument and no argument number means this is not a call site
1001 // argument, thus we cannot find a callback argument to return.
1002 int ArgNo = getCallSiteArgNo();
1003 if (ArgNo < 0)
1004 return nullptr;
1005
1006 // Use abstract call sites to make the connection between the call site
1007 // values and the ones in callbacks. If a callback was found that makes use
1008 // of the underlying call site operand, we want the corresponding callback
1009 // callee argument and not the direct callee argument.
1010 std::optional<Argument *> CBCandidateArg;
1011 SmallVector<const Use *, 4> CallbackUses;
1012 const auto &CB = cast<CallBase>(getAnchorValue());
1013 AbstractCallSite::getCallbackUses(CB, CallbackUses);
1014 for (const Use *U : CallbackUses) {
1015 AbstractCallSite ACS(U);
1016 assert(ACS && ACS.isCallbackCall());
1017 if (!ACS.getCalledFunction())
1018 continue;
1019
1020 for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {
1021
1022 // Test if the underlying call site operand is argument number u of the
1023 // callback callee.
1024 if (ACS.getCallArgOperandNo(u) != ArgNo)
1025 continue;
1026
1027 assert(ACS.getCalledFunction()->arg_size() > u &&
1028 "ACS mapped into var-args arguments!");
1029 if (CBCandidateArg) {
1030 CBCandidateArg = nullptr;
1031 break;
1032 }
1033 CBCandidateArg = ACS.getCalledFunction()->getArg(u);
1034 }
1035 }
1036
1037 // If we found a unique callback candidate argument, return it.
1038 if (CBCandidateArg && *CBCandidateArg)
1039 return *CBCandidateArg;
1040
1041 // If no callbacks were found, or none used the underlying call site operand
1042 // exclusively, use the direct callee argument if available.
1043 auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
1044 if (Callee && Callee->arg_size() > unsigned(ArgNo))
1045 return Callee->getArg(ArgNo);
1046
1047 return nullptr;
1048}
1049
1050ChangeStatus AbstractAttribute::update(Attributor &A) {
1051 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
1052 if (getState().isAtFixpoint())
1053 return HasChanged;
1054
1055 LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
1056
1057 HasChanged = updateImpl(A);
1058
1059 LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
1060 << "\n");
1061
1062 return HasChanged;
1063}
1064
1065Attributor::Attributor(SetVector<Function *> &Functions,
1066 InformationCache &InfoCache,
1067 AttributorConfig Configuration)
1068 : Allocator(InfoCache.Allocator), Functions(Functions),
1069 InfoCache(InfoCache), Configuration(Configuration) {
1070 if (!isClosedWorldModule())
1071 return;
1072 for (Function *Fn : Functions)
1073 if (Fn->hasAddressTaken(/*PutOffender=*/nullptr,
1074 /*IgnoreCallbackUses=*/false,
1075 /*IgnoreAssumeLikeCalls=*/true,
1076 /*IgnoreLLVMUsed=*/true,
1077 /*IgnoreARCAttachedCall=*/false,
1078 /*IgnoreCastedDirectCall=*/true))
1079 InfoCache.IndirectlyCallableFunctions.push_back(Fn);
1080}
1081
1086 "Did expect a valid position!");
1089 if (!Explorer)
1090 return false;
1091
1092 Value &AssociatedValue = IRP.getAssociatedValue();
1093
1094 const Assume2KnowledgeMap &A2K =
1095 getInfoCache().getKnowledgeMap().lookup({&AssociatedValue, AK});
1096
1097 // Check if we found any potential assume use, if not we don't need to create
1098 // explorer iterators.
1099 if (A2K.empty())
1100 return false;
1101
1102 LLVMContext &Ctx = AssociatedValue.getContext();
1103 unsigned AttrsSize = Attrs.size();
1104 auto EIt = Explorer->begin(IRP.getCtxI()),
1105 EEnd = Explorer->end(IRP.getCtxI());
1106 for (const auto &It : A2K)
1107 if (Explorer->findInContextOf(It.first, EIt, EEnd))
1108 Attrs.push_back(Attribute::get(Ctx, AK, It.second.Max));
1109 return AttrsSize != Attrs.size();
1110}
1111
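/// Descriptive note (added): apply \p CB to every attribute description in
/// \p AttrDescs for the attribute set at \p IRP, cache the resulting attribute
/// list in AttrsMap, and report whether anything changed.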
1112template <typename DescTy>
1113ChangeStatus
1114Attributor::updateAttrMap(const IRPosition &IRP, ArrayRef<DescTy> AttrDescs,
1115 function_ref<bool(const DescTy &, AttributeSet,
1116 AttributeMask &, AttrBuilder &)>
1117 CB) {
1118 if (AttrDescs.empty())
1119 return ChangeStatus::UNCHANGED;
1120 switch (IRP.getPositionKind()) {
1121 case IRPosition::IRP_INVALID:
1122 case IRPosition::IRP_FLOAT:
1123 return ChangeStatus::UNCHANGED;
1124 default:
1125 break;
1126 };
1127
1128 AttributeList AL;
1129 Value *AttrListAnchor = IRP.getAttrListAnchor();
1130 auto It = AttrsMap.find(AttrListAnchor);
1131 if (It == AttrsMap.end())
1132 AL = IRP.getAttrList();
1133 else
1134 AL = It->getSecond();
1135
1136 LLVMContext &Ctx = IRP.getAnchorValue().getContext();
1137 auto AttrIdx = IRP.getAttrIdx();
1138 AttributeSet AS = AL.getAttributes(AttrIdx);
1139 AttributeMask AM;
1140 AttrBuilder AB(Ctx);
1141
1142 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
1143 for (const DescTy &AttrDesc : AttrDescs)
1144 if (CB(AttrDesc, AS, AM, AB))
1145 HasChanged = ChangeStatus::CHANGED;
1146
1147 if (HasChanged == ChangeStatus::UNCHANGED)
1148 return ChangeStatus::UNCHANGED;
1149
1150 AL = AL.removeAttributesAtIndex(Ctx, AttrIdx, AM);
1151 AL = AL.addAttributesAtIndex(Ctx, AttrIdx, AB);
1152 AttrsMap[AttrListAnchor] = AL;
1153 return ChangeStatus::CHANGED;
1154}
1155
1156bool Attributor::hasAttr(const IRPosition &IRP,
1157 ArrayRef<Attribute::AttrKind> AttrKinds,
1158 bool IgnoreSubsumingPositions,
1159 Attribute::AttrKind ImpliedAttributeKind) {
1160 bool Implied = false;
1161 bool HasAttr = false;
1162 auto HasAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
1163 AttributeMask &, AttrBuilder &) {
1164 if (AttrSet.hasAttribute(Kind)) {
1165 Implied |= Kind != ImpliedAttributeKind;
1166 HasAttr = true;
1167 }
1168 return false;
1169 };
1170 for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
1171 updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, HasAttrCB);
1172 if (HasAttr)
1173 break;
1174 // The first position returned by the SubsumingPositionIterator is
1175 // always the position itself. If we ignore subsuming positions we
1176 // are done after the first iteration.
1177 if (IgnoreSubsumingPositions)
1178 break;
1179 Implied = true;
1180 }
1181 if (!HasAttr) {
1182 Implied = true;
1183 SmallVector<Attribute> Attrs;
1184 for (Attribute::AttrKind AK : AttrKinds)
1185 if (getAttrsFromAssumes(IRP, AK, Attrs)) {
1186 HasAttr = true;
1187 break;
1188 }
1189 }
1190
1191 // Check if we should manifest the implied attribute kind at the IRP.
1192 if (ImpliedAttributeKind != Attribute::None && HasAttr && Implied)
1193 manifestAttrs(IRP, {Attribute::get(IRP.getAnchorValue().getContext(),
1194 ImpliedAttributeKind)});
1195 return HasAttr;
1196}
1197
1198void Attributor::getAttrs(const IRPosition &IRP,
1199 ArrayRef<Attribute::AttrKind> AttrKinds,
1200 SmallVectorImpl<Attribute> &Attrs,
1201 bool IgnoreSubsumingPositions) {
1202 auto CollectAttrCB = [&](const Attribute::AttrKind &Kind,
1203 AttributeSet AttrSet, AttributeMask &,
1204 AttrBuilder &) {
1205 if (AttrSet.hasAttribute(Kind))
1206 Attrs.push_back(AttrSet.getAttribute(Kind));
1207 return false;
1208 };
1209 for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
1210 updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, CollectAttrCB);
1211 // The first position returned by the SubsumingPositionIterator is
1212 // always the position itself. If we ignore subsuming positions we
1213 // are done after the first iteration.
1214 if (IgnoreSubsumingPositions)
1215 break;
1216 }
1217 for (Attribute::AttrKind AK : AttrKinds)
1218 getAttrsFromAssumes(IRP, AK, Attrs);
1219}
1220
1221ChangeStatus Attributor::removeAttrs(const IRPosition &IRP,
1222 ArrayRef<Attribute::AttrKind> AttrKinds) {
1223 auto RemoveAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
1224 AttributeMask &AM, AttrBuilder &) {
1225 if (!AttrSet.hasAttribute(Kind))
1226 return false;
1227 AM.addAttribute(Kind);
1228 return true;
1229 };
1230 return updateAttrMap<Attribute::AttrKind>(IRP, AttrKinds, RemoveAttrCB);
1231}
1232
1233ChangeStatus Attributor::removeAttrs(const IRPosition &IRP,
1234 ArrayRef<StringRef> Attrs) {
1235 auto RemoveAttrCB = [&](StringRef Attr, AttributeSet AttrSet,
1236 AttributeMask &AM, AttrBuilder &) -> bool {
1237 if (!AttrSet.hasAttribute(Attr))
1238 return false;
1239 AM.addAttribute(Attr);
1240 return true;
1241 };
1242
1243 return updateAttrMap<StringRef>(IRP, Attrs, RemoveAttrCB);
1244}
1245
1246ChangeStatus Attributor::manifestAttrs(const IRPosition &IRP,
1247 ArrayRef<Attribute> Attrs,
1248 bool ForceReplace) {
1249 LLVMContext &Ctx = IRP.getAnchorValue().getContext();
1250 auto AddAttrCB = [&](const Attribute &Attr, AttributeSet AttrSet,
1251 AttributeMask &, AttrBuilder &AB) {
1252 return addIfNotExistent(Ctx, Attr, AttrSet, ForceReplace, AB);
1253 };
1254 return updateAttrMap<Attribute>(IRP, Attrs, AddAttrCB);
1255}
1256
1257const IRPosition IRPosition::EmptyKey(DenseMapInfo<void *>::getEmptyKey());
1258const IRPosition
1259 IRPosition::TombstoneKey(DenseMapInfo<void *>::getTombstoneKey());
1260
1261SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
1262 IRPositions.emplace_back(IRP);
1263
1264 // Helper to determine if operand bundles on a call site are benign or
1265 // potentially problematic. We handle only llvm.assume for now.
1266 auto CanIgnoreOperandBundles = [](const CallBase &CB) {
1267 return (isa<IntrinsicInst>(CB) &&
1268 cast<IntrinsicInst>(CB).getIntrinsicID() == Intrinsic ::assume);
1269 };
1270
1271 const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
1272 switch (IRP.getPositionKind()) {
1273 case IRPosition::IRP_INVALID:
1274 case IRPosition::IRP_FLOAT:
1275 case IRPosition::IRP_FUNCTION:
1276 return;
1277 case IRPosition::IRP_ARGUMENT:
1278 case IRPosition::IRP_RETURNED:
1279 IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
1280 return;
1281 case IRPosition::IRP_CALL_SITE:
1282 assert(CB && "Expected call site!");
1283 // TODO: We need to look at the operand bundles similar to the redirection
1284 // in CallBase.
1285 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB))
1286 if (auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand()))
1287 IRPositions.emplace_back(IRPosition::function(*Callee));
1288 return;
1289 case IRPosition::IRP_CALL_SITE_RETURNED:
1290 assert(CB && "Expected call site!");
1291 // TODO: We need to look at the operand bundles similar to the redirection
1292 // in CallBase.
1293 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1294 if (auto *Callee =
1295 dyn_cast_if_present<Function>(CB->getCalledOperand())) {
1296 IRPositions.emplace_back(IRPosition::returned(*Callee));
1297 IRPositions.emplace_back(IRPosition::function(*Callee));
1298 for (const Argument &Arg : Callee->args())
1299 if (Arg.hasReturnedAttr()) {
1300 IRPositions.emplace_back(
1301 IRPosition::callsite_argument(*CB, Arg.getArgNo()));
1302 IRPositions.emplace_back(
1303 IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
1304 IRPositions.emplace_back(IRPosition::argument(Arg));
1305 }
1306 }
1307 }
1308 IRPositions.emplace_back(IRPosition::callsite_function(*CB));
1309 return;
1310 case IRPosition::IRP_CALL_SITE_ARGUMENT: {
1311 assert(CB && "Expected call site!");
1312 // TODO: We need to look at the operand bundles similar to the redirection
1313 // in CallBase.
1314 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1315 auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
1316 if (Callee) {
1317 if (Argument *Arg = IRP.getAssociatedArgument())
1318 IRPositions.emplace_back(IRPosition::argument(*Arg));
1319 IRPositions.emplace_back(IRPosition::function(*Callee));
1320 }
1321 }
1322 IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
1323 return;
1324 }
1325 }
1326}
1327
1328void IRPosition::verify() {
1329#ifdef EXPENSIVE_CHECKS
1330 switch (getPositionKind()) {
1331 case IRP_INVALID:
1332 assert((CBContext == nullptr) &&
1333 "Invalid position must not have CallBaseContext!");
1334 assert(!Enc.getOpaqueValue() &&
1335 "Expected a nullptr for an invalid position!");
1336 return;
1337 case IRP_FLOAT:
1338 assert((!isa<Argument>(&getAssociatedValue())) &&
1339 "Expected specialized kind for argument values!");
1340 return;
1341 case IRP_RETURNED:
1342 assert(isa<Function>(getAsValuePtr()) &&
1343 "Expected function for a 'returned' position!");
1344 assert(getAsValuePtr() == &getAssociatedValue() &&
1345 "Associated value mismatch!");
1346 return;
1347 case IRP_CALL_SITE_RETURNED:
1348 assert((CBContext == nullptr) &&
1349 "'call site returned' position must not have CallBaseContext!");
1350 assert((isa<CallBase>(getAsValuePtr())) &&
1351 "Expected call base for 'call site returned' position!");
1352 assert(getAsValuePtr() == &getAssociatedValue() &&
1353 "Associated value mismatch!");
1354 return;
1355 case IRP_CALL_SITE:
1356 assert((CBContext == nullptr) &&
1357 "'call site function' position must not have CallBaseContext!");
1358 assert((isa<CallBase>(getAsValuePtr())) &&
1359 "Expected call base for 'call site function' position!");
1360 assert(getAsValuePtr() == &getAssociatedValue() &&
1361 "Associated value mismatch!");
1362 return;
1363 case IRP_FUNCTION:
1364 assert(isa<Function>(getAsValuePtr()) &&
1365 "Expected function for a 'function' position!");
1366 assert(getAsValuePtr() == &getAssociatedValue() &&
1367 "Associated value mismatch!");
1368 return;
1369 case IRP_ARGUMENT:
1370 assert(isa<Argument>(getAsValuePtr()) &&
1371 "Expected argument for a 'argument' position!");
1372 assert(getAsValuePtr() == &getAssociatedValue() &&
1373 "Associated value mismatch!");
1374 return;
1375 case IRP_CALL_SITE_ARGUMENT: {
1376 assert((CBContext == nullptr) &&
1377 "'call site argument' position must not have CallBaseContext!");
1378 Use *U = getAsUsePtr();
1379 (void)U; // Silence unused variable warning.
1380 assert(U && "Expected use for a 'call site argument' position!");
1381 assert(isa<CallBase>(U->getUser()) &&
1382 "Expected call base user for a 'call site argument' position!");
1383 assert(cast<CallBase>(U->getUser())->isArgOperand(U) &&
1384 "Expected call base argument operand for a 'call site argument' "
1385 "position");
1386 assert(cast<CallBase>(U->getUser())->getArgOperandNo(U) ==
1387 unsigned(getCallSiteArgNo()) &&
1388 "Argument number mismatch!");
1389 assert(U->get() == &getAssociatedValue() && "Associated value mismatch!");
1390 return;
1391 }
1392 }
1393#endif
1394}
1395
1396std::optional<Constant *>
1397Attributor::getAssumedConstant(const IRPosition &IRP,
1398 const AbstractAttribute &AA,
1399 bool &UsedAssumedInformation) {
1400 // First check all callbacks provided by outside AAs. If any of them returns
1401 // a non-null value that is different from the associated value, or
1402 // std::nullopt, we assume it's simplified.
1403 for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
1404 std::optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
1405 if (!SimplifiedV)
1406 return std::nullopt;
1407 if (isa_and_nonnull<Constant>(*SimplifiedV))
1408 return cast<Constant>(*SimplifiedV);
1409 return nullptr;
1410 }
1411 if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
1412 return C;
1413 SmallVector<AA::ValueAndContext> Values;
1414 if (getAssumedSimplifiedValues(IRP, &AA, Values,
1415 AA::ValueScope::Interprocedural,
1416 UsedAssumedInformation)) {
1417 if (Values.empty())
1418 return std::nullopt;
1419 if (auto *C = dyn_cast_or_null<Constant>(
1420 AAPotentialValues::getSingleValue(*this, AA, IRP, Values)))
1421 return C;
1422 }
1423 return nullptr;
1424}
1425
1426std::optional<Value *> Attributor::getAssumedSimplified(
1427 const IRPosition &IRP, const AbstractAttribute *AA,
1428 bool &UsedAssumedInformation, AA::ValueScope S) {
1429 // First check all callbacks provided by outside AAs. If any of them returns
1430 // a non-null value that is different from the associated value, or
1431 // std::nullopt, we assume it's simplified.
1432 for (auto &CB : SimplificationCallbacks.lookup(IRP))
1433 return CB(IRP, AA, UsedAssumedInformation);
1434
1435 SmallVector<AA::ValueAndContext> Values;
1436 if (!getAssumedSimplifiedValues(IRP, AA, Values, S, UsedAssumedInformation))
1437 return &IRP.getAssociatedValue();
1438 if (Values.empty())
1439 return std::nullopt;
1440 if (AA)
1441 if (Value *V = AAPotentialValues::getSingleValue(*this, *AA, IRP, Values))
1442 return V;
1443 if (IRP.getPositionKind() == IRPosition::IRP_RETURNED ||
1444 IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED)
1445 return nullptr;
1446 return &IRP.getAssociatedValue();
1447}
1448
1449bool Attributor::getAssumedSimplifiedValues(
1450 const IRPosition &InitialIRP, const AbstractAttribute *AA,
1451 SmallVectorImpl<AA::ValueAndContext> &Values, AA::ValueScope S,
1452 bool &UsedAssumedInformation, bool RecurseForSelectAndPHI) {
1453 SmallPtrSet<Value *, 8> Seen;
1454 SmallVector<IRPosition, 8> Worklist;
1455 Worklist.push_back(InitialIRP);
1456 while (!Worklist.empty()) {
1457 const IRPosition &IRP = Worklist.pop_back_val();
1458
1459 // First check all callbacks provided by outside AAs. If any of them returns
1460 // a non-null value that is different from the associated value, or
1461 // std::nullopt, we assume it's simplified.
1462 int NV = Values.size();
1463 const auto &SimplificationCBs = SimplificationCallbacks.lookup(IRP);
1464 for (const auto &CB : SimplificationCBs) {
1465 std::optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
1466 if (!CBResult.has_value())
1467 continue;
1468 Value *V = *CBResult;
1469 if (!V)
1470 return false;
1473 Values.push_back(AA::ValueAndContext{*V, nullptr});
1474 else
1475 return false;
1476 }
1477 if (SimplificationCBs.empty()) {
1478 // If no high-level/outside simplification occurred, use
1479 // AAPotentialValues.
1480 const auto *PotentialValuesAA =
1481 getOrCreateAAFor<AAPotentialValues>(IRP, AA, DepClassTy::OPTIONAL);
1482 if (PotentialValuesAA && PotentialValuesAA->getAssumedSimplifiedValues(*this, Values, S)) {
1483 UsedAssumedInformation |= !PotentialValuesAA->isAtFixpoint();
1484 } else if (IRP.getPositionKind() != IRPosition::IRP_RETURNED) {
1485 Values.push_back({IRP.getAssociatedValue(), IRP.getCtxI()});
1486 } else {
1487 // TODO: We could visit all returns and add the operands.
1488 return false;
1489 }
1490 }
1491
1492 if (!RecurseForSelectAndPHI)
1493 break;
1494
1495 for (int I = NV, E = Values.size(); I < E; ++I) {
1496 Value *V = Values[I].getValue();
1497 if (!isa<PHINode>(V) && !isa<SelectInst>(V))
1498 continue;
1499 if (!Seen.insert(V).second)
1500 continue;
1501 // Move the last element to this slot.
1502 Values[I] = Values[E - 1];
1503 // Eliminate the last slot, adjust the indices.
1504 Values.pop_back();
1505 --E;
1506 --I;
1507 // Add a new value (select or phi) to the worklist.
1508 Worklist.push_back(IRPosition::value(*V));
1509 }
1510 }
1511 return true;
1512}
1513
1514std::optional<Value *> Attributor::translateArgumentToCallSiteContent(
1515 std::optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
1516 bool &UsedAssumedInformation) {
1517 if (!V)
1518 return V;
1519 if (*V == nullptr || isa<Constant>(*V))
1520 return V;
1521 if (auto *Arg = dyn_cast<Argument>(*V))
1522 if (CB.getCalledOperand() == Arg->getParent() &&
1523 CB.arg_size() > Arg->getArgNo())
1524 if (!Arg->hasPointeeInMemoryValueAttr())
1525 return getAssumedSimplified(
1526 IRPosition::callsite_argument(CB, Arg->getArgNo()), AA,
1527 UsedAssumedInformation, AA::Intraprocedural);
1528 return nullptr;
1529}
1530
1531Attributor::~Attributor() {
1532 // The abstract attributes are allocated via the BumpPtrAllocator Allocator,
1533 // thus we cannot delete them. We can, and want to, destruct them though.
1534 for (auto &It : AAMap) {
1535 AbstractAttribute *AA = It.getSecond();
1536 AA->~AbstractAttribute();
1537 }
1538}
1539
1540bool Attributor::isAssumedDead(const AbstractAttribute &AA,
1541 const AAIsDead *FnLivenessAA,
1542 bool &UsedAssumedInformation,
1543 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1544 if (!Configuration.UseLiveness)
1545 return false;
1546 const IRPosition &IRP = AA.getIRPosition();
1547 if (!Functions.count(IRP.getAnchorScope()))
1548 return false;
1549 return isAssumedDead(IRP, &AA, FnLivenessAA, UsedAssumedInformation,
1550 CheckBBLivenessOnly, DepClass);
1551}
1552
1553bool Attributor::isAssumedDead(const Use &U,
1554 const AbstractAttribute *QueryingAA,
1555 const AAIsDead *FnLivenessAA,
1556 bool &UsedAssumedInformation,
1557 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1558 if (!Configuration.UseLiveness)
1559 return false;
1560 Instruction *UserI = dyn_cast<Instruction>(U.getUser());
1561 if (!UserI)
1562 return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
1563 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1564
1565 if (auto *CB = dyn_cast<CallBase>(UserI)) {
1566 // For call site argument uses we can check if the argument is
1567 // unused/dead.
1568 if (CB->isArgOperand(&U)) {
1569 const IRPosition &CSArgPos =
1570 IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
1571 return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
1572 UsedAssumedInformation, CheckBBLivenessOnly,
1573 DepClass);
1574 }
1575 } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
1576 const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
1577 return isAssumedDead(RetPos, QueryingAA, FnLivenessAA,
1578 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1579 } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
1580 BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
1581 return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
1582 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1583 } else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
1584 if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
1585 const IRPosition IRP = IRPosition::inst(*SI);
1586 const AAIsDead *IsDeadAA =
1587 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1588 if (IsDeadAA && IsDeadAA->isRemovableStore()) {
1589 if (QueryingAA)
1590 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1591 if (!IsDeadAA->isKnown(AAIsDead::IS_REMOVABLE))
1592 UsedAssumedInformation = true;
1593 return true;
1594 }
1595 }
1596 }
1597
1598 return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
1599 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1600}
1601
1602bool Attributor::isAssumedDead(const Instruction &I,
1603 const AbstractAttribute *QueryingAA,
1604 const AAIsDead *FnLivenessAA,
1605 bool &UsedAssumedInformation,
1606 bool CheckBBLivenessOnly, DepClassTy DepClass,
1607 bool CheckForDeadStore) {
1608 if (!Configuration.UseLiveness)
1609 return false;
1610 const IRPosition::CallBaseContext *CBCtx =
1611 QueryingAA ? QueryingAA->getCallBaseContext() : nullptr;
1612
1613 if (ManifestAddedBlocks.contains(I.getParent()))
1614 return false;
1615
1616 const Function &F = *I.getFunction();
1617 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1618 FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRPosition::function(F, CBCtx),
1619 QueryingAA, DepClassTy::NONE);
1620
1621 // Don't use recursive reasoning.
1622 if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1623 return false;
1624
1625 // If we have a context instruction and a liveness AA we use it.
1626 if (CheckBBLivenessOnly ? FnLivenessAA->isAssumedDead(I.getParent())
1627 : FnLivenessAA->isAssumedDead(&I)) {
1628 if (QueryingAA)
1629 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1630 if (!FnLivenessAA->isKnownDead(&I))
1631 UsedAssumedInformation = true;
1632 return true;
1633 }
1634
1635 if (CheckBBLivenessOnly)
1636 return false;
1637
1638 const IRPosition IRP = IRPosition::inst(I, CBCtx);
1639 const AAIsDead *IsDeadAA =
1640 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1641
1642 // Don't use recursive reasoning.
1643 if (!IsDeadAA || QueryingAA == IsDeadAA)
1644 return false;
1645
1646 if (IsDeadAA->isAssumedDead()) {
1647 if (QueryingAA)
1648 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1649 if (!IsDeadAA->isKnownDead())
1650 UsedAssumedInformation = true;
1651 return true;
1652 }
1653
1654 if (CheckForDeadStore && isa<StoreInst>(I) && IsDeadAA->isRemovableStore()) {
1655 if (QueryingAA)
1656 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1657 if (!IsDeadAA->isKnownDead())
1658 UsedAssumedInformation = true;
1659 return true;
1660 }
1661
1662 return false;
1663}
1664
1665bool Attributor::isAssumedDead(const IRPosition &IRP,
1666 const AbstractAttribute *QueryingAA,
1667 const AAIsDead *FnLivenessAA,
1668 bool &UsedAssumedInformation,
1669 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1670 if (!Configuration.UseLiveness)
1671 return false;
1672 // Don't check liveness for constants, e.g. functions, used as (floating)
1673 // values since the context instruction and such is here meaningless.
1674 if (IRP.getPositionKind() == IRPosition::IRP_FLOAT &&
1675 isa<Constant>(IRP.getAssociatedValue())) {
1676 return false;
1677 }
1678
1679 Instruction *CtxI = IRP.getCtxI();
1680 if (CtxI &&
1681 isAssumedDead(*CtxI, QueryingAA, FnLivenessAA, UsedAssumedInformation,
1682 /* CheckBBLivenessOnly */ true,
1683 CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
1684 return true;
1685
1686 if (CheckBBLivenessOnly)
1687 return false;
1688
1689 // If we haven't succeeded we query the specific liveness info for the IRP.
1690 const AAIsDead *IsDeadAA;
1691 if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE)
1692 IsDeadAA = getOrCreateAAFor<AAIsDead>(
1693 IRPosition::callsite_returned(cast<CallBase>(IRP.getAssociatedValue())),
1694 QueryingAA, DepClassTy::NONE);
1695 else
1696 IsDeadAA = getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1697
1698 // Don't use recursive reasoning.
1699 if (!IsDeadAA || QueryingAA == IsDeadAA)
1700 return false;
1701
1702 if (IsDeadAA->isAssumedDead()) {
1703 if (QueryingAA)
1704 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1705 if (!IsDeadAA->isKnownDead())
1706 UsedAssumedInformation = true;
1707 return true;
1708 }
1709
1710 return false;
1711}
1712
1714 const AbstractAttribute *QueryingAA,
1715 const AAIsDead *FnLivenessAA,
1716 DepClassTy DepClass) {
1717 if (!Configuration.UseLiveness)
1718 return false;
1719 const Function &F = *BB.getParent();
1720 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1721 FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRPosition::function(F),
1722 QueryingAA, DepClassTy::NONE);
1723
1724 // Don't use recursive reasoning.
1725 if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1726 return false;
1727
1728 if (FnLivenessAA->isAssumedDead(&BB)) {
1729 if (QueryingAA)
1730 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1731 return true;
1732 }
1733
1734 return false;
1735}
1736
1737bool Attributor::checkForAllCallees(
1738 function_ref<bool(ArrayRef<const Function *>)> Pred,
1739 const AbstractAttribute &QueryingAA, const CallBase &CB) {
1740 if (const Function *Callee = dyn_cast<Function>(CB.getCalledOperand()))
1741 return Pred(Callee);
1742
1743 const auto *CallEdgesAA = getAAFor<AACallEdges>(
1744 QueryingAA, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL);
1745 if (!CallEdgesAA || CallEdgesAA->hasUnknownCallee())
1746 return false;
1747
1748 const auto &Callees = CallEdgesAA->getOptimisticEdges();
1749 return Pred(Callees.getArrayRef());
1750}
1751
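/// Descriptive note (added): return true if use traversal may mark \p Usr as
/// visited once, i.e., it is a PHI node or not an instruction at all.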
1752bool canMarkAsVisited(const User *Usr) {
1753 return isa<PHINode>(Usr) || !isa<Instruction>(Usr);
1754}
1755
1756bool Attributor::checkForAllUses(
1757 function_ref<bool(const Use &, bool &)> Pred,
1758 const AbstractAttribute &QueryingAA, const Value &V,
1759 bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
1760 bool IgnoreDroppableUses,
1761 function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {
1762
1763 // Check virtual uses first.
1764 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&V))
1765 if (!CB(*this, &QueryingAA))
1766 return false;
1767
1768 // Check the trivial case first as it catches void values.
1769 if (V.use_empty())
1770 return true;
1771
1772 const IRPosition &IRP = QueryingAA.getIRPosition();
1773 SmallVector<const Use *, 16> Worklist;
1774 SmallPtrSet<const Use *, 16> Visited;
1775
1776 auto AddUsers = [&](const Value &V, const Use *OldUse) {
1777 for (const Use &UU : V.uses()) {
1778 if (OldUse && EquivalentUseCB && !EquivalentUseCB(*OldUse, UU)) {
1779 LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
1780 "rejected by the equivalence call back: "
1781 << *UU << "!\n");
1782 return false;
1783 }
1784
1785 Worklist.push_back(&UU);
1786 }
1787 return true;
1788 };
1789
1790 AddUsers(V, /* OldUse */ nullptr);
1791
1792 LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
1793 << " initial uses to check\n");
1794
1795 const Function *ScopeFn = IRP.getAnchorScope();
1796 const auto *LivenessAA =
1797 ScopeFn ? getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
1798 DepClassTy::NONE)
1799 : nullptr;
1800
1801 while (!Worklist.empty()) {
1802 const Use *U = Worklist.pop_back_val();
1803 if (canMarkAsVisited(U->getUser()) && !Visited.insert(U).second)
1804 continue;
1805 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1806 if (auto *Fn = dyn_cast<Function>(U->getUser()))
1807 dbgs() << "[Attributor] Check use: " << **U << " in " << Fn->getName()
1808 << "\n";
1809 else
1810 dbgs() << "[Attributor] Check use: " << **U << " in " << *U->getUser()
1811 << "\n";
1812 });
1813 bool UsedAssumedInformation = false;
1814 if (isAssumedDead(*U, &QueryingAA, LivenessAA, UsedAssumedInformation,
1815 CheckBBLivenessOnly, LivenessDepClass)) {
1816 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1817 dbgs() << "[Attributor] Dead use, skip!\n");
1818 continue;
1819 }
1820 if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
1821 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1822 dbgs() << "[Attributor] Droppable user, skip!\n");
1823 continue;
1824 }
1825
1826 if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
1827 if (&SI->getOperandUse(0) == U) {
1828 if (!Visited.insert(U).second)
1829 continue;
1830 SmallSetVector<Value *, 4> PotentialCopies;
1831 if (AA::getPotentialCopiesOfStoredValue(
1832 *this, *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
1833 /* OnlyExact */ true)) {
1834 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1835 dbgs()
1836 << "[Attributor] Value is stored, continue with "
1837 << PotentialCopies.size()
1838 << " potential copies instead!\n");
1839 for (Value *PotentialCopy : PotentialCopies)
1840 if (!AddUsers(*PotentialCopy, U))
1841 return false;
1842 continue;
1843 }
1844 }
1845 }
1846
1847 bool Follow = false;
1848 if (!Pred(*U, Follow))
1849 return false;
1850 if (!Follow)
1851 continue;
1852
1853 User &Usr = *U->getUser();
1854 AddUsers(Usr, /* OldUse */ nullptr);
1855
1856 auto *RI = dyn_cast<ReturnInst>(&Usr);
1857 if (!RI)
1858 continue;
1859
1860 Function &F = *RI->getFunction();
1861 auto CallSitePred = [&](AbstractCallSite ACS) {
1862 return AddUsers(*ACS.getInstruction(), U);
1863 };
1864 if (!checkForAllCallSites(CallSitePred, F, /* RequireAllCallSites */ true,
1865 &QueryingAA, UsedAssumedInformation)) {
1866 LLVM_DEBUG(dbgs() << "[Attributor] Could not follow return instruction "
1867 "to all call sites: "
1868 << *RI << "\n");
1869 return false;
1870 }
1871 }
1872
1873 return true;
1874}
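// Usage sketch (illustrative only, not part of this file): a use visitor
// returns false to abort the traversal and sets Follow to true to also visit
// the transitive uses of the current user, e.g.
//   bool AllUsesOK = A.checkForAllUses(
//       [](const Use &U, bool &Follow) {
//         if (isa<LoadInst>(U.getUser()))
//           return true;                       // loads are harmless here
//         Follow = isa<GetElementPtrInst>(U.getUser());
//         return Follow || isa<ICmpInst>(U.getUser());
//       },
//       *this, getAssociatedValue());
// Stores are handled specially above: the traversal may continue with the
// potential copies of a stored value instead of the store itself.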
1875
1876bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1877 const AbstractAttribute &QueryingAA,
1878 bool RequireAllCallSites,
1879 bool &UsedAssumedInformation) {
1880 // We can try to determine information from
1881 // the call sites. However, this is only possible if all call sites are
1882 // known, hence the function has internal linkage.
1883 const IRPosition &IRP = QueryingAA.getIRPosition();
1884 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1885 if (!AssociatedFunction) {
1886 LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
1887 << "\n");
1888 return false;
1889 }
1890
1891 return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
1892 &QueryingAA, UsedAssumedInformation);
1893}
1894
1895bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1896 const Function &Fn,
1897 bool RequireAllCallSites,
1898 const AbstractAttribute *QueryingAA,
1899 bool &UsedAssumedInformation,
1900 bool CheckPotentiallyDead) {
1901 if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
1902 LLVM_DEBUG(
1903 dbgs()
1904 << "[Attributor] Function " << Fn.getName()
1905 << " has no internal linkage, hence not all call sites are known\n");
1906 return false;
1907 }
1908 // Check virtual uses first.
1909 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&Fn))
1910 if (!CB(*this, QueryingAA))
1911 return false;
1912
1913 SmallVector<const Use *, 8> Uses(make_pointer_range(Fn.uses()));
1914 for (unsigned u = 0; u < Uses.size(); ++u) {
1915 const Use &U = *Uses[u];
1916 LLVM_DEBUG({
1917 if (auto *Fn = dyn_cast<Function>(U))
1918 dbgs() << "[Attributor] Check use: " << Fn->getName() << " in "
1919 << *U.getUser() << "\n";
1920 else
1921 dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
1922 << "\n";
1923 });
1924 if (!CheckPotentiallyDead &&
1925 isAssumedDead(U, QueryingAA, nullptr, UsedAssumedInformation,
1926 /* CheckBBLivenessOnly */ true)) {
1927 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1928 dbgs() << "[Attributor] Dead use, skip!\n");
1929 continue;
1930 }
1931 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
1932 if (CE->isCast() && CE->getType()->isPointerTy()) {
1933 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1934 dbgs() << "[Attributor] Use, is constant cast expression, add "
1935 << CE->getNumUses() << " uses of that expression instead!\n";
1936 });
1937 for (const Use &CEU : CE->uses())
1938 Uses.push_back(&CEU);
1939 continue;
1940 }
1941 }
1942
1943 AbstractCallSite ACS(&U);
1944 if (!ACS) {
1945 LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
1946 << " has non call site use " << *U.get() << " in "
1947 << *U.getUser() << "\n");
1948 // BlockAddress users are allowed.
1949 if (isa<BlockAddress>(U.getUser()))
1950 continue;
1951 return false;
1952 }
1953
1954 const Use *EffectiveUse =
1955 ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
1956 if (!ACS.isCallee(EffectiveUse)) {
1957 if (!RequireAllCallSites) {
1958 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1959 << " is not a call of " << Fn.getName()
1960 << ", skip use\n");
1961 continue;
1962 }
1963 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1964 << " is an invalid use of " << Fn.getName() << "\n");
1965 return false;
1966 }
1967
1968 // Make sure the arguments that can be matched between the call site and the
1969 // callee agree on their type. It is unlikely they do not and it doesn't
1970 // make sense for all attributes to know/care about this.
1971 assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
1972 unsigned MinArgsParams =
1973 std::min(size_t(ACS.getNumArgOperands()), Fn.arg_size());
1974 for (unsigned u = 0; u < MinArgsParams; ++u) {
1975 Value *CSArgOp = ACS.getCallArgOperand(u);
1976 if (CSArgOp && Fn.getArg(u)->getType() != CSArgOp->getType()) {
1977 LLVM_DEBUG(
1978 dbgs() << "[Attributor] Call site / callee argument type mismatch ["
1979 << u << "@" << Fn.getName() << ": "
1980 << *Fn.getArg(u)->getType() << " vs. "
1981 << *ACS.getCallArgOperand(u)->getType() << "\n");
1982 return false;
1983 }
1984 }
1985
1986 if (Pred(ACS))
1987 continue;
1988
1989 LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
1990 << *ACS.getInstruction() << "\n");
1991 return false;
1992 }
1993
1994 return true;
1995}
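// Usage sketch (illustrative only, not part of this file): callers typically
// pass a predicate over AbstractCallSite and require all call sites when the
// deduced fact must hold interprocedurally, e.g.
//   bool UsedAssumedInformation = false;
//   bool AllKnown = A.checkForAllCallSites(
//       [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, Fn,
//       /* RequireAllCallSites */ true, /* QueryingAA */ nullptr,
//       UsedAssumedInformation);
// With RequireAllCallSites the function must have local linkage, otherwise
// unknown external call sites make the query fail conservatively.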
1996
1997bool Attributor::shouldPropagateCallBaseContext(const IRPosition &IRP) {
1998 // TODO: Maintain a cache of Values that are
1999 // on the pathway from an Argument to an Instruction that would affect the
2000 // liveness/return state etc.
2001 return EnableCallSiteSpecific;
2002}
2003
2004bool Attributor::checkForAllReturnedValues(function_ref<bool(Value &)> Pred,
2005 const AbstractAttribute &QueryingAA,
2006 AA::ValueScope S,
2007 bool RecurseForSelectAndPHI) {
2008
2009 const IRPosition &IRP = QueryingAA.getIRPosition();
2010 const Function *AssociatedFunction = IRP.getAssociatedFunction();
2011 if (!AssociatedFunction)
2012 return false;
2013
2014 bool UsedAssumedInformation = false;
2015 SmallVector<AA::ValueAndContext> Values;
2016 if (!getAssumedSimplifiedValues(
2017 IRPosition::returned(*AssociatedFunction), &QueryingAA, Values, S,
2018 UsedAssumedInformation, RecurseForSelectAndPHI))
2019 return false;
2020
2021 return llvm::all_of(Values, [&](const AA::ValueAndContext &VAC) {
2022 return Pred(*VAC.getValue());
2023 });
2024}
2025
2026static bool checkForAllInstructionsImpl(
2027 Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap,
2028 function_ref<bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA,
2029 const AAIsDead *LivenessAA, ArrayRef<unsigned> Opcodes,
2030 bool &UsedAssumedInformation, bool CheckBBLivenessOnly = false,
2031 bool CheckPotentiallyDead = false) {
2032 for (unsigned Opcode : Opcodes) {
2033 // Check if we have instructions with this opcode at all first.
2034 auto *Insts = OpcodeInstMap.lookup(Opcode);
2035 if (!Insts)
2036 continue;
2037
2038 for (Instruction *I : *Insts) {
2039 // Skip dead instructions.
2040 if (A && !CheckPotentiallyDead &&
2041 A->isAssumedDead(IRPosition::inst(*I), QueryingAA, LivenessAA,
2042 UsedAssumedInformation, CheckBBLivenessOnly)) {
2043 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
2044 dbgs() << "[Attributor] Instruction " << *I
2045 << " is potentially dead, skip!\n";);
2046 continue;
2047 }
2048
2049 if (!Pred(*I))
2050 return false;
2051 }
2052 }
2053 return true;
2054}
2055
2056bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
2057 const Function *Fn,
2058 const AbstractAttribute *QueryingAA,
2059 ArrayRef<unsigned> Opcodes,
2060 bool &UsedAssumedInformation,
2061 bool CheckBBLivenessOnly,
2062 bool CheckPotentiallyDead) {
2063 // Since we need to provide instructions we have to have an exact definition.
2064 if (!Fn || Fn->isDeclaration())
2065 return false;
2066
2067 const IRPosition &QueryIRP = IRPosition::function(*Fn);
2068 const auto *LivenessAA =
2069 CheckPotentiallyDead && QueryingAA
2070 ? (getAAFor<AAIsDead>(*QueryingAA, QueryIRP, DepClassTy::NONE))
2071 : nullptr;
2072
2073 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2074 if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, QueryingAA,
2075 LivenessAA, Opcodes, UsedAssumedInformation,
2076 CheckBBLivenessOnly, CheckPotentiallyDead))
2077 return false;
2078
2079 return true;
2080}
2081
2082bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
2083 const AbstractAttribute &QueryingAA,
2084 ArrayRef<unsigned> Opcodes,
2085 bool &UsedAssumedInformation,
2086 bool CheckBBLivenessOnly,
2087 bool CheckPotentiallyDead) {
2088 const IRPosition &IRP = QueryingAA.getIRPosition();
2089 const Function *AssociatedFunction = IRP.getAssociatedFunction();
2090 return checkForAllInstructions(Pred, AssociatedFunction, &QueryingAA, Opcodes,
2091 UsedAssumedInformation, CheckBBLivenessOnly,
2092 CheckPotentiallyDead);
2093}
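// Usage sketch (illustrative only, not part of this file): many AAs scan a
// small set of opcodes rather than all instructions, e.g.
//   bool UsedAssumedInformation = false;
//   A.checkForAllInstructions(
//       [](Instruction &I) { return !cast<CallBase>(I).isMustTailCall(); },
//       *this, {Instruction::Call}, UsedAssumedInformation);
// Dead instructions are skipped unless CheckPotentiallyDead is set.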
2094
2095bool Attributor::checkForAllReadWriteInstructions(
2096 function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA,
2097 bool &UsedAssumedInformation) {
2098 TimeTraceScope TS("checkForAllReadWriteInstructions");
2099
2100 const Function *AssociatedFunction =
2101 QueryingAA.getIRPosition().getAssociatedFunction();
2102 if (!AssociatedFunction)
2103 return false;
2104
2105 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
2106 const auto *LivenessAA =
2107 getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE);
2108
2109 for (Instruction *I :
2110 InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
2111 // Skip dead instructions.
2112 if (isAssumedDead(IRPosition::inst(*I), &QueryingAA, LivenessAA,
2113 UsedAssumedInformation))
2114 continue;
2115
2116 if (!Pred(*I))
2117 return false;
2118 }
2119
2120 return true;
2121}
2122
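// The fixpoint driver below repeatedly updates the abstract attributes on a
// worklist until nothing changes or the configured iteration limit is hit.
// Invalid states are propagated eagerly along required dependence edges, and
// attributes that are still not at a fixpoint when the limit is reached are
// forced into a pessimistic state before manifestation.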
2123void Attributor::runTillFixpoint() {
2124 TimeTraceScope TimeScope("Attributor::runTillFixpoint");
2125 LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
2126 << DG.SyntheticRoot.Deps.size()
2127 << " abstract attributes.\n");
2128
2129 // Now that all abstract attributes are collected and initialized we start
2130 // the abstract analysis.
2131
2132 unsigned IterationCounter = 1;
2133 unsigned MaxIterations =
2134 Configuration.MaxFixpointIterations.value_or(SetFixpointIterations);
2135
2137 SetVector<AbstractAttribute *> Worklist, InvalidAAs;
2138 Worklist.insert(DG.SyntheticRoot.begin(), DG.SyntheticRoot.end());
2139
2140 do {
2141 // Remember the size to determine new attributes.
2142 size_t NumAAs = DG.SyntheticRoot.Deps.size();
2143 LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
2144 << ", Worklist size: " << Worklist.size() << "\n");
2145
2146 // For invalid AAs we can fix dependent AAs that have a required dependence,
2147 // thereby folding long dependence chains in a single step without the need
2148 // to run updates.
2149 for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
2150 AbstractAttribute *InvalidAA = InvalidAAs[u];
2151
2152 // Check the dependences to fast track invalidation.
2153 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
2154 dbgs() << "[Attributor] InvalidAA: " << *InvalidAA
2155 << " has " << InvalidAA->Deps.size()
2156 << " required & optional dependences\n");
2157 for (auto &DepIt : InvalidAA->Deps) {
2158 AbstractAttribute *DepAA = cast<AbstractAttribute>(DepIt.getPointer());
2159 if (DepIt.getInt() == unsigned(DepClassTy::OPTIONAL)) {
2160 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
2161 dbgs() << " - recompute: " << *DepAA);
2162 Worklist.insert(DepAA);
2163 continue;
2164 }
2165 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, dbgs()
2166 << " - invalidate: " << *DepAA);
2167 DepAA->getState().indicatePessimisticFixpoint();
2168 assert(DepAA->getState().isAtFixpoint() && "Expected fixpoint state!");
2169 if (!DepAA->getState().isValidState())
2170 InvalidAAs.insert(DepAA);
2171 else
2172 ChangedAAs.push_back(DepAA);
2173 }
2174 InvalidAA->Deps.clear();
2175 }
2176
2177 // Add all abstract attributes that are potentially dependent on one that
2178 // changed to the work list.
2179 for (AbstractAttribute *ChangedAA : ChangedAAs) {
2180 for (auto &DepIt : ChangedAA->Deps)
2181 Worklist.insert(cast<AbstractAttribute>(DepIt.getPointer()));
2182 ChangedAA->Deps.clear();
2183 }
2184
2185 LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
2186 << ", Worklist+Dependent size: " << Worklist.size()
2187 << "\n");
2188
2189 // Reset the changed and invalid set.
2190 ChangedAAs.clear();
2191 InvalidAAs.clear();
2192
2193 // Update all abstract attribute in the work list and record the ones that
2194 // changed.
2195 for (AbstractAttribute *AA : Worklist) {
2196 const auto &AAState = AA->getState();
2197 if (!AAState.isAtFixpoint())
2198 if (updateAA(*AA) == ChangeStatus::CHANGED)
2199 ChangedAAs.push_back(AA);
2200
2201 // Use the InvalidAAs vector to propagate invalid states fast transitively
2202 // without requiring updates.
2203 if (!AAState.isValidState())
2204 InvalidAAs.insert(AA);
2205 }
2206
2207 // Add attributes to the changed set if they have been created in the last
2208 // iteration.
2209 ChangedAAs.append(DG.SyntheticRoot.begin() + NumAAs,
2210 DG.SyntheticRoot.end());
2211
2212 // Reset the work list and repopulate with the changed abstract attributes.
2213 // Note that dependent ones are added above.
2214 Worklist.clear();
2215 Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
2216 Worklist.insert(QueryAAsAwaitingUpdate.begin(),
2217 QueryAAsAwaitingUpdate.end());
2218 QueryAAsAwaitingUpdate.clear();
2219
2220 } while (!Worklist.empty() && (IterationCounter++ < MaxIterations));
2221
2222 if (IterationCounter > MaxIterations && !Functions.empty()) {
2223 auto Remark = [&](OptimizationRemarkMissed ORM) {
2224 return ORM << "Attributor did not reach a fixpoint after "
2225 << ore::NV("Iterations", MaxIterations) << " iterations.";
2226 };
2227 Function *F = Functions.front();
2228 emitRemark<OptimizationRemarkMissed>(F, "FixedPoint", Remark);
2229 }
2230
2231 LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
2232 << IterationCounter << "/" << MaxIterations
2233 << " iterations\n");
2234
2235 // Reset abstract attributes not settled in a sound fixpoint by now. This
2236 // happens when we stopped the fixpoint iteration early. Note that only the
2237 // ones marked as "changed" *and* the ones transitively depending on them
2238 // need to be reverted to a pessimistic state. Others might not be in a
2239 // fixpoint state but we can use the optimistic results for them anyway.
2240 SmallPtrSet<AbstractAttribute *, 32> Visited;
2241 for (unsigned u = 0; u < ChangedAAs.size(); u++) {
2242 AbstractAttribute *ChangedAA = ChangedAAs[u];
2243 if (!Visited.insert(ChangedAA).second)
2244 continue;
2245
2246 AbstractState &State = ChangedAA->getState();
2247 if (!State.isAtFixpoint()) {
2248 State.indicatePessimisticFixpoint();
2249
2250 NumAttributesTimedOut++;
2251 }
2252
2253 for (auto &DepIt : ChangedAA->Deps)
2254 ChangedAAs.push_back(cast<AbstractAttribute>(DepIt.getPointer()));
2255 ChangedAA->Deps.clear();
2256 }
2257
2258 LLVM_DEBUG({
2259 if (!Visited.empty())
2260 dbgs() << "\n[Attributor] Finalized " << Visited.size()
2261 << " abstract attributes.\n";
2262 });
2263}
2264
2265void Attributor::registerForUpdate(AbstractAttribute &AA) {
2266 assert(AA.isQueryAA() &&
2267 "Non-query AAs should not be required to register for updates!");
2268 QueryAAsAwaitingUpdate.insert(&AA);
2269}
2270
2271ChangeStatus Attributor::manifestAttributes() {
2272 TimeTraceScope TimeScope("Attributor::manifestAttributes");
2273 size_t NumFinalAAs = DG.SyntheticRoot.Deps.size();
2274
2275 unsigned NumManifested = 0;
2276 unsigned NumAtFixpoint = 0;
2277 ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
2278 for (auto &DepAA : DG.SyntheticRoot.Deps) {
2279 AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
2280 AbstractState &State = AA->getState();
2281
2282 // If there is not already a fixpoint reached, we can now take the
2283 // optimistic state. This is correct because we enforced a pessimistic one
2284 // on abstract attributes that were transitively dependent on a changed one
2285 // already above.
2286 if (!State.isAtFixpoint())
2287 State.indicateOptimisticFixpoint();
2288
2289 // We must not manifest Attributes that use CallBase info.
2290 if (AA->hasCallBaseContext())
2291 continue;
2292 // If the state is invalid, we do not try to manifest it.
2293 if (!State.isValidState())
2294 continue;
2295
2296 if (AA->getCtxI() && !isRunOn(*AA->getAnchorScope()))
2297 continue;
2298
2299 // Skip dead code.
2300 bool UsedAssumedInformation = false;
2301 if (isAssumedDead(*AA, nullptr, UsedAssumedInformation,
2302 /* CheckBBLivenessOnly */ true))
2303 continue;
2304 // Respect the manifest debug counter, which allows skipping the
2305 // manifestation of individual AAs.
2306 if (!DebugCounter::shouldExecute(ManifestDBGCounter))
2307 continue;
2308 // Manifest the state and record if we changed the IR.
2309 ChangeStatus LocalChange = AA->manifest(*this);
2310 if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
2311 AA->trackStatistics();
2312 LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
2313 << "\n");
2314
2315 ManifestChange = ManifestChange | LocalChange;
2316
2317 NumAtFixpoint++;
2318 NumManifested += (LocalChange == ChangeStatus::CHANGED);
2319 }
2320
2321 (void)NumManifested;
2322 (void)NumAtFixpoint;
2323 LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
2324 << " arguments while " << NumAtFixpoint
2325 << " were in a valid fixpoint state\n");
2326
2327 NumAttributesManifested += NumManifested;
2328 NumAttributesValidFixpoint += NumAtFixpoint;
2329
2330 (void)NumFinalAAs;
2331 if (NumFinalAAs != DG.SyntheticRoot.Deps.size()) {
2332 auto DepIt = DG.SyntheticRoot.Deps.begin();
2333 for (unsigned u = 0; u < NumFinalAAs; ++u)
2334 ++DepIt;
2335 for (unsigned u = NumFinalAAs; u < DG.SyntheticRoot.Deps.size();
2336 ++u, ++DepIt) {
2337 errs() << "Unexpected abstract attribute: "
2338 << cast<AbstractAttribute>(DepIt->getPointer()) << " :: "
2339 << cast<AbstractAttribute>(DepIt->getPointer())
2340 ->getIRPosition()
2341 .getAssociatedValue()
2342 << "\n";
2343 }
2344 llvm_unreachable("Expected the final number of abstract attributes to "
2345 "remain unchanged!");
2346 }
2347
2348 for (auto &It : AttrsMap) {
2349 AttributeList &AL = It.getSecond();
2350 const IRPosition &IRP =
2351 isa<Function>(It.getFirst())
2352 ? IRPosition::function(*cast<Function>(It.getFirst()))
2353 : IRPosition::callsite_function(*cast<CallBase>(It.getFirst()));
2354 IRP.setAttrList(AL);
2355 }
2356
2357 return ManifestChange;
2358}
2359
2360void Attributor::identifyDeadInternalFunctions() {
2361 // Early exit if we don't intend to delete functions.
2362 if (!Configuration.DeleteFns)
2363 return;
2364
2365 // To avoid triggering an assertion in the lazy call graph we will not delete
2366 // any internal library functions. We should modify the assertion though and
2367 // allow internals to be deleted.
2368 const auto *TLI =
2369 isModulePass()
2370 ? nullptr
2371 : getInfoCache().getTargetLibraryInfoForFunction(*Functions.front());
2372 LibFunc LF;
2373
2374 // Identify dead internal functions and delete them. This happens outside
2375 // the other fixpoint analysis as we might treat potentially dead functions
2376 // as live to lower the number of iterations. If they happen to be dead, the
2377 // below fixpoint loop will identify and eliminate them.
2378
2379 SmallVector<Function *, 8> InternalFns;
2380 for (Function *F : Functions)
2381 if (F->hasLocalLinkage() && (isModulePass() || !TLI->getLibFunc(*F, LF)))
2382 InternalFns.push_back(F);
2383
2384 SmallPtrSet<Function *, 8> LiveInternalFns;
2385 bool FoundLiveInternal = true;
2386 while (FoundLiveInternal) {
2387 FoundLiveInternal = false;
2388 for (Function *&F : InternalFns) {
2389 if (!F)
2390 continue;
2391
2392 bool UsedAssumedInformation = false;
2393 if (checkForAllCallSites(
2394 [&](AbstractCallSite ACS) {
2395 Function *Callee = ACS.getInstruction()->getFunction();
2396 return ToBeDeletedFunctions.count(Callee) ||
2397 (Functions.count(Callee) && Callee->hasLocalLinkage() &&
2398 !LiveInternalFns.count(Callee));
2399 },
2400 *F, true, nullptr, UsedAssumedInformation)) {
2401 continue;
2402 }
2403
2404 LiveInternalFns.insert(F);
2405 F = nullptr;
2406 FoundLiveInternal = true;
2407 }
2408 }
2409
2410 for (Function *F : InternalFns)
2411 if (F)
2412 ToBeDeletedFunctions.insert(F);
2413}
2414
2415ChangeStatus Attributor::cleanupIR() {
2416 TimeTraceScope TimeScope("Attributor::cleanupIR");
2417 // Delete stuff at the end to avoid invalid references and to keep a nice order.
2418 LLVM_DEBUG(dbgs() << "\n[Attributor] Delete/replace at least "
2419 << ToBeDeletedFunctions.size() << " functions and "
2420 << ToBeDeletedBlocks.size() << " blocks and "
2421 << ToBeDeletedInsts.size() << " instructions and "
2422 << ToBeChangedValues.size() << " values and "
2423 << ToBeChangedUses.size() << " uses. To insert "
2424 << ToBeChangedToUnreachableInsts.size()
2425 << " unreachables.\n"
2426 << "Preserve manifest added " << ManifestAddedBlocks.size()
2427 << " blocks\n");
2428
2429 SmallVector<WeakTrackingVH, 32> DeadInsts;
2430 SmallVector<Instruction *, 32> TerminatorsToFold;
2431
2432 auto ReplaceUse = [&](Use *U, Value *NewV) {
2433 Value *OldV = U->get();
2434
2435 // If we plan to replace NewV we need to update it at this point.
2436 do {
2437 const auto &Entry = ToBeChangedValues.lookup(NewV);
2438 if (!get<0>(Entry))
2439 break;
2440 NewV = get<0>(Entry);
2441 } while (true);
2442
2443 Instruction *I = dyn_cast<Instruction>(U->getUser());
2444 assert((!I || isRunOn(*I->getFunction())) &&
2445 "Cannot replace an instruction outside the current SCC!");
2446
2447 // Do not replace uses in returns if the value is a must-tail call we will
2448 // not delete.
2449 if (auto *RI = dyn_cast_or_null<ReturnInst>(I)) {
2450 if (auto *CI = dyn_cast<CallInst>(OldV->stripPointerCasts()))
2451 if (CI->isMustTailCall() && !ToBeDeletedInsts.count(CI))
2452 return;
2453 // If we rewrite a return and the new value is not an argument, strip the
2454 // `returned` attribute as it is wrong now.
2455 if (!isa<Argument>(NewV))
2456 for (auto &Arg : RI->getFunction()->args())
2457 Arg.removeAttr(Attribute::Returned);
2458 }
2459
2460 LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
2461 << " instead of " << *OldV << "\n");
2462 U->set(NewV);
2463
2464 if (Instruction *I = dyn_cast<Instruction>(OldV)) {
2465 CGModifiedFunctions.insert(I->getFunction());
2466 if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
2467 isInstructionTriviallyDead(I))
2468 DeadInsts.push_back(I);
2469 }
2470 if (isa<UndefValue>(NewV) && isa<CallBase>(U->getUser())) {
2471 auto *CB = cast<CallBase>(U->getUser());
2472 if (CB->isArgOperand(U)) {
2473 unsigned Idx = CB->getArgOperandNo(U);
2474 CB->removeParamAttr(Idx, Attribute::NoUndef);
2475 auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
2476 if (Callee && Callee->arg_size() > Idx)
2477 Callee->removeParamAttr(Idx, Attribute::NoUndef);
2478 }
2479 }
2480 if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
2481 Instruction *UserI = cast<Instruction>(U->getUser());
2482 if (isa<UndefValue>(NewV)) {
2483 ToBeChangedToUnreachableInsts.insert(UserI);
2484 } else {
2485 TerminatorsToFold.push_back(UserI);
2486 }
2487 }
2488 };
2489
2490 for (auto &It : ToBeChangedUses) {
2491 Use *U = It.first;
2492 Value *NewV = It.second;
2493 ReplaceUse(U, NewV);
2494 }
2495
2496 SmallVector<Use *, 4> Uses;
2497 for (auto &It : ToBeChangedValues) {
2498 Value *OldV = It.first;
2499 auto [NewV, Done] = It.second;
2500 Uses.clear();
2501 for (auto &U : OldV->uses())
2502 if (Done || !U.getUser()->isDroppable())
2503 Uses.push_back(&U);
2504 for (Use *U : Uses) {
2505 if (auto *I = dyn_cast<Instruction>(U->getUser()))
2506 if (!isRunOn(*I->getFunction()))
2507 continue;
2508 ReplaceUse(U, NewV);
2509 }
2510 }
2511
2512 for (const auto &V : InvokeWithDeadSuccessor)
2513 if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
2514 assert(isRunOn(*II->getFunction()) &&
2515 "Cannot replace an invoke outside the current SCC!");
2516 bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
2517 bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
2518 bool Invoke2CallAllowed =
2519 !AAIsDead::mayCatchAsynchronousExceptions(*II->getFunction());
2520 assert((UnwindBBIsDead || NormalBBIsDead) &&
2521 "Invoke does not have dead successors!");
2522 BasicBlock *BB = II->getParent();
2523 BasicBlock *NormalDestBB = II->getNormalDest();
2524 if (UnwindBBIsDead) {
2525 Instruction *NormalNextIP = &NormalDestBB->front();
2526 if (Invoke2CallAllowed) {
2527 changeToCall(II);
2528 NormalNextIP = BB->getTerminator();
2529 }
2530 if (NormalBBIsDead)
2531 ToBeChangedToUnreachableInsts.insert(NormalNextIP);
2532 } else {
2533 assert(NormalBBIsDead && "Broken invariant!");
2534 if (!NormalDestBB->getUniquePredecessor())
2535 NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2536 ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
2537 }
2538 }
2539 for (Instruction *I : TerminatorsToFold) {
2540 assert(isRunOn(*I->getFunction()) &&
2541 "Cannot replace a terminator outside the current SCC!");
2542 CGModifiedFunctions.insert(I->getFunction());
2543 ConstantFoldTerminator(I->getParent());
2544 }
2545 for (const auto &V : ToBeChangedToUnreachableInsts)
2546 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2547 LLVM_DEBUG(dbgs() << "[Attributor] Change to unreachable: " << *I
2548 << "\n");
2549 assert(isRunOn(*I->getFunction()) &&
2550 "Cannot replace an instruction outside the current SCC!");
2551 CGModifiedFunctions.insert(I->getFunction());
2552 changeToUnreachable(I);
2553 }
2554
2555 for (const auto &V : ToBeDeletedInsts) {
2556 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2557 assert((!isa<CallBase>(I) || isa<IntrinsicInst>(I) ||
2558 isRunOn(*I->getFunction())) &&
2559 "Cannot delete an instruction outside the current SCC!");
2560 I->dropDroppableUses();
2561 CGModifiedFunctions.insert(I->getFunction());
2562 if (!I->getType()->isVoidTy())
2563 I->replaceAllUsesWith(UndefValue::get(I->getType()));
2564 if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
2565 DeadInsts.push_back(I);
2566 else
2567 I->eraseFromParent();
2568 }
2569 }
2570
2571 llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) { return !I; });
2572
2573 LLVM_DEBUG({
2574 dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n";
2575 for (auto &I : DeadInsts)
2576 if (I)
2577 dbgs() << " - " << *I << "\n";
2578 });
2579
2580 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
2581
2582 if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
2583 SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
2584 ToBeDeletedBBs.reserve(NumDeadBlocks);
2585 for (BasicBlock *BB : ToBeDeletedBlocks) {
2586 assert(isRunOn(*BB->getParent()) &&
2587 "Cannot delete a block outside the current SCC!");
2588 CGModifiedFunctions.insert(BB->getParent());
2589 // Do not delete BBs added during manifests of AAs.
2590 if (ManifestAddedBlocks.contains(BB))
2591 continue;
2592 ToBeDeletedBBs.push_back(BB);
2593 }
2594 // Actually we do not delete the blocks but squash them into a single
2595 // unreachable. Untangling branches that jump here is something we need
2596 // to do in a more generic way.
2597 detachDeadBlocks(ToBeDeletedBBs, nullptr);
2598 }
2599
2600 identifyDeadInternalFunctions();
2601
2602 // Rewrite the functions as requested during manifest.
2603 ChangeStatus ManifestChange = rewriteFunctionSignatures(CGModifiedFunctions);
2604
2605 for (Function *Fn : CGModifiedFunctions)
2606 if (!ToBeDeletedFunctions.count(Fn) && Functions.count(Fn))
2607 Configuration.CGUpdater.reanalyzeFunction(*Fn);
2608
2609 for (Function *Fn : ToBeDeletedFunctions) {
2610 if (!Functions.count(Fn))
2611 continue;
2612 Configuration.CGUpdater.removeFunction(*Fn);
2613 }
2614
2615 if (!ToBeChangedUses.empty())
2616 ManifestChange = ChangeStatus::CHANGED;
2617
2618 if (!ToBeChangedToUnreachableInsts.empty())
2619 ManifestChange = ChangeStatus::CHANGED;
2620
2621 if (!ToBeDeletedFunctions.empty())
2622 ManifestChange = ChangeStatus::CHANGED;
2623
2624 if (!ToBeDeletedBlocks.empty())
2625 ManifestChange = ChangeStatus::CHANGED;
2626
2627 if (!ToBeDeletedInsts.empty())
2628 ManifestChange = ChangeStatus::CHANGED;
2629
2630 if (!InvokeWithDeadSuccessor.empty())
2631 ManifestChange = ChangeStatus::CHANGED;
2632
2633 if (!DeadInsts.empty())
2634 ManifestChange = ChangeStatus::CHANGED;
2635
2636 NumFnDeleted += ToBeDeletedFunctions.size();
2637
2638 LLVM_DEBUG(dbgs() << "[Attributor] Deleted " << ToBeDeletedFunctions.size()
2639 << " functions after manifest.\n");
2640
2641#ifdef EXPENSIVE_CHECKS
2642 for (Function *F : Functions) {
2643 if (ToBeDeletedFunctions.count(F))
2644 continue;
2645 assert(!verifyFunction(*F, &errs()) && "Module verification failed!");
2646 }
2647#endif
2648
2649 return ManifestChange;
2650}
2651
2652ChangeStatus Attributor::run() {
2653 TimeTraceScope TimeScope("Attributor::run");
2654 AttributorCallGraph ACallGraph(*this);
2655
2656 if (PrintCallGraph)
2657 ACallGraph.populateAll();
2658
2659 Phase = AttributorPhase::UPDATE;
2660 runTillFixpoint();
2661
2662 // dump graphs on demand
2663 if (DumpDepGraph)
2664 DG.dumpGraph();
2665
2666 if (ViewDepGraph)
2667 DG.viewGraph();
2668
2669 if (PrintDependencies)
2670 DG.print();
2671
2672 Phase = AttributorPhase::MANIFEST;
2673 ChangeStatus ManifestChange = manifestAttributes();
2674
2675 Phase = AttributorPhase::CLEANUP;
2676 ChangeStatus CleanupChange = cleanupIR();
2677
2678 if (PrintCallGraph)
2679 ACallGraph.print();
2680
2681 return ManifestChange | CleanupChange;
2682}
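// Typical driver flow (sketch, with assumptions about the calling pass; the
// exact AttributorConfig and InformationCache setup is owned by the caller):
//   Attributor A(Functions, InfoCache, AC);
//   for (Function *F : Functions)
//     A.identifyDefaultAbstractAttributes(*F);
//   ChangeStatus Changed = A.run();
// run() performs the update, manifest, and cleanup phases in that order.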
2683
2684ChangeStatus Attributor::updateAA(AbstractAttribute &AA) {
2685 TimeTraceScope TimeScope("updateAA", [&]() {
2686 return AA.getName() + std::to_string(AA.getIRPosition().getPositionKind());
2687 });
2688 assert(Phase == AttributorPhase::UPDATE &&
2689 "We can update AA only in the update stage!");
2690
2691 // Use a new dependence vector for this update.
2692 DependenceVector DV;
2693 DependenceStack.push_back(&DV);
2694
2695 auto &AAState = AA.getState();
2696 ChangeStatus CS = ChangeStatus::UNCHANGED;
2697 bool UsedAssumedInformation = false;
2698 if (!isAssumedDead(AA, nullptr, UsedAssumedInformation,
2699 /* CheckBBLivenessOnly */ true))
2700 CS = AA.update(*this);
2701
2702 if (!AA.isQueryAA() && DV.empty() && !AA.getState().isAtFixpoint()) {
2703 // If the AA did not rely on outside information but changed, we run it
2704 // again to see if it found a fixpoint. Most AAs do but we don't require
2705 // them to. Hence, it might take the AA multiple iterations to get to a
2706 // fixpoint even if it does not rely on outside information, which is fine.
2707 ChangeStatus RerunCS = ChangeStatus::UNCHANGED;
2708 if (CS == ChangeStatus::CHANGED)
2709 RerunCS = AA.update(*this);
2710
2711 // If the attribute did not change during the run or rerun, and it still did
2712 // not query any non-fix information, the state will not change and we can
2713 // indicate that right at this point.
2714 if (RerunCS == ChangeStatus::UNCHANGED && !AA.isQueryAA() && DV.empty())
2715 AAState.indicateOptimisticFixpoint();
2716 }
2717
2718 if (!AAState.isAtFixpoint())
2719 rememberDependences();
2720
2721 // Verify the stack was used properly, that is we pop the dependence vector we
2722 // put there earlier.
2723 DependenceVector *PoppedDV = DependenceStack.pop_back_val();
2724 (void)PoppedDV;
2725 assert(PoppedDV == &DV && "Inconsistent usage of the dependence stack!");
2726
2727 return CS;
2728}
2729
2730void Attributor::createShallowWrapper(Function &F) {
2731 assert(!F.isDeclaration() && "Cannot create a wrapper around a declaration!");
2732
2733 Module &M = *F.getParent();
2734 LLVMContext &Ctx = M.getContext();
2735 FunctionType *FnTy = F.getFunctionType();
2736
2737 Function *Wrapper =
2738 Function::Create(FnTy, F.getLinkage(), F.getAddressSpace(), F.getName());
2739 F.setName(""); // set the inside function anonymous
2740 M.getFunctionList().insert(F.getIterator(), Wrapper);
2741 // Flag whether the function is using new-debug-info or not.
2742 Wrapper->IsNewDbgInfoFormat = M.IsNewDbgInfoFormat;
2743
2744 F.setLinkage(GlobalValue::InternalLinkage);
2745
2746 F.replaceAllUsesWith(Wrapper);
2747 assert(F.use_empty() && "Uses remained after wrapper was created!");
2748
2749 // Move the COMDAT section to the wrapper.
2750 // TODO: Check if we need to keep it for F as well.
2751 Wrapper->setComdat(F.getComdat());
2752 F.setComdat(nullptr);
2753
2754 // Copy all metadata and attributes but keep them on F as well.
2755 SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2756 F.getAllMetadata(MDs);
2757 for (auto MDIt : MDs)
2758 Wrapper->addMetadata(MDIt.first, *MDIt.second);
2759 Wrapper->setAttributes(F.getAttributes());
2760
2761 // Create the call in the wrapper.
2762 BasicBlock *EntryBB = BasicBlock::Create(Ctx, "entry", Wrapper);
2763
2764 SmallVector<Value *, 8> Args;
2765 Argument *FArgIt = F.arg_begin();
2766 for (Argument &Arg : Wrapper->args()) {
2767 Args.push_back(&Arg);
2768 Arg.setName((FArgIt++)->getName());
2769 }
2770
2771 CallInst *CI = CallInst::Create(&F, Args, "", EntryBB);
2772 CI->setTailCall(true);
2773 CI->addFnAttr(Attribute::NoInline);
2774 ReturnInst::Create(Ctx, CI->getType()->isVoidTy() ? nullptr : CI, EntryBB);
2775
2776 NumFnShallowWrappersCreated++;
2777}
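// After this transformation the wrapper carries the original name, linkage,
// attributes and metadata, while the wrapped function becomes internal and
// anonymous. Conceptually the wrapper is just (sketch):
//   define <ret> @foo(<args>) {
//   entry:
//     %r = tail call <ret> @<internal copy>(<args>)   ; call marked noinline
//     ret <ret> %r
//   }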
2778
2779bool Attributor::isInternalizable(Function &F) {
2780 if (F.isDeclaration() || F.hasLocalLinkage() ||
2781 GlobalValue::isInterposableLinkage(F.getLinkage()))
2782 return false;
2783 return true;
2784}
2785
2786Function *Attributor::internalizeFunction(Function &F, bool Force) {
2787 if (!AllowDeepWrapper && !Force)
2788 return nullptr;
2789 if (!isInternalizable(F))
2790 return nullptr;
2791
2792 SmallPtrSet<Function *, 2> FnSet = {&F};
2793 DenseMap<Function *, Function *> InternalizedFns;
2794 internalizeFunctions(FnSet, InternalizedFns);
2795
2796 return InternalizedFns[&F];
2797}
2798
2799bool Attributor::internalizeFunctions(SmallPtrSetImpl<Function *> &FnSet,
2800 DenseMap<Function *, Function *> &FnMap) {
2801 for (Function *F : FnSet)
2802 if (!Attributor::isInternalizable(*F))
2803 return false;
2804
2805 FnMap.clear();
2806 // Generate the internalized version of each function.
2807 for (Function *F : FnSet) {
2808 Module &M = *F->getParent();
2809 FunctionType *FnTy = F->getFunctionType();
2810
2811 // Create a copy of the current function
2812 Function *Copied =
2813 Function::Create(FnTy, F->getLinkage(), F->getAddressSpace(),
2814 F->getName() + ".internalized");
2815 ValueToValueMapTy VMap;
2816 auto *NewFArgIt = Copied->arg_begin();
2817 for (auto &Arg : F->args()) {
2818 auto ArgName = Arg.getName();
2819 NewFArgIt->setName(ArgName);
2820 VMap[&Arg] = &(*NewFArgIt++);
2821 }
2822 SmallVector<ReturnInst *, 8> Returns;
2823 // Flag whether the function is using new-debug-info or not.
2824 Copied->IsNewDbgInfoFormat = F->IsNewDbgInfoFormat;
2825
2826 // Copy the body of the original function to the new one
2827 CloneFunctionInto(Copied, F, VMap,
2828 CloneFunctionChangeType::LocalChangesOnly, Returns);
2829
2830 // Set the linkage and visibility late as CloneFunctionInto has some
2831 // implicit requirements.
2832 Copied->setVisibility(GlobalValue::DefaultVisibility);
2833 Copied->setLinkage(GlobalValue::PrivateLinkage);
2834
2835 // Copy metadata
2836 SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2837 F->getAllMetadata(MDs);
2838 for (auto MDIt : MDs)
2839 if (!Copied->hasMetadata())
2840 Copied->addMetadata(MDIt.first, *MDIt.second);
2841
2842 M.getFunctionList().insert(F->getIterator(), Copied);
2843 Copied->setDSOLocal(true);
2844 FnMap[F] = Copied;
2845 }
2846
2847 // Replace all uses of the old function with the new internalized function
2848 // unless the caller is a function that was just internalized.
2849 for (Function *F : FnSet) {
2850 auto &InternalizedFn = FnMap[F];
2851 auto IsNotInternalized = [&](Use &U) -> bool {
2852 if (auto *CB = dyn_cast<CallBase>(U.getUser()))
2853 return !FnMap.lookup(CB->getCaller());
2854 return false;
2855 };
2856 F->replaceUsesWithIf(InternalizedFn, IsNotInternalized);
2857 }
2858
2859 return true;
2860}
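// Usage sketch (illustrative only): callers such as the module pass driver
// may internalize a function and then analyze the ".internalized" copy, e.g.
//   if (Function *Internal = Attributor::internalizeFunction(*F))
//     Functions.insert(Internal);
// This is gated by the AllowDeepWrapper option (or the Force parameter).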
2861
2862bool Attributor::isValidFunctionSignatureRewrite(
2863 Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
2864
2865 if (!Configuration.RewriteSignatures)
2866 return false;
2867
2868 Function *Fn = Arg.getParent();
2869 auto CallSiteCanBeChanged = [Fn](AbstractCallSite ACS) {
2870 // Forbid the call site to cast the function return type. If we need to
2871 // rewrite these functions we need to re-create a cast for the new call site
2872 // (if the old had uses).
2873 if (!ACS.getCalledFunction() ||
2874 ACS.getInstruction()->getType() !=
2875 ACS.getCalledFunction()->getReturnType())
2876 return false;
2877 if (cast<CallBase>(ACS.getInstruction())->getCalledOperand()->getType() !=
2878 Fn->getType())
2879 return false;
2880 if (ACS.getNumArgOperands() != Fn->arg_size())
2881 return false;
2882 // Forbid must-tail calls for now.
2883 return !ACS.isCallbackCall() && !ACS.getInstruction()->isMustTailCall();
2884 };
2885
2886 // Avoid var-arg functions for now.
2887 if (Fn->isVarArg()) {
2888 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
2889 return false;
2890 }
2891
2892 // Avoid functions with complicated argument passing semantics.
2893 AttributeList FnAttributeList = Fn->getAttributes();
2894 if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
2895 FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
2896 FnAttributeList.hasAttrSomewhere(Attribute::InAlloca) ||
2897 FnAttributeList.hasAttrSomewhere(Attribute::Preallocated)) {
2898 LLVM_DEBUG(
2899 dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
2900 return false;
2901 }
2902
2903 // Avoid callbacks for now.
2904 bool UsedAssumedInformation = false;
2905 if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
2906 UsedAssumedInformation,
2907 /* CheckPotentiallyDead */ true)) {
2908 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
2909 return false;
2910 }
2911
2912 auto InstPred = [](Instruction &I) {
2913 if (auto *CI = dyn_cast<CallInst>(&I))
2914 return !CI->isMustTailCall();
2915 return true;
2916 };
2917
2918 // Forbid must-tail calls for now.
2919 // TODO:
2920 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2921 if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
2922 nullptr, {Instruction::Call},
2923 UsedAssumedInformation)) {
2924 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
2925 return false;
2926 }
2927
2928 return true;
2929}
2930
2931bool Attributor::registerFunctionSignatureRewrite(
2932 Argument &Arg, ArrayRef<Type *> ReplacementTypes,
2933 ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
2934 ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
2935 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2936 << Arg.getParent()->getName() << " with "
2937 << ReplacementTypes.size() << " replacements\n");
2938 assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
2939 "Cannot register an invalid rewrite");
2940
2941 Function *Fn = Arg.getParent();
2942 SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2943 ArgumentReplacementMap[Fn];
2944 if (ARIs.empty())
2945 ARIs.resize(Fn->arg_size());
2946
2947 // If we already have a replacement with at most as many new arguments,
2948 // ignore this request.
2949 std::unique_ptr<ArgumentReplacementInfo> &ARI = ARIs[Arg.getArgNo()];
2950 if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
2951 LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
2952 return false;
2953 }
2954
2955 // If we have a replacement already but we like the new one better, delete
2956 // the old.
2957 ARI.reset();
2958
2959 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2960 << Arg.getParent()->getName() << " with "
2961 << ReplacementTypes.size() << " replacements\n");
2962
2963 // Remember the replacement.
2964 ARI.reset(new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
2965 std::move(CalleeRepairCB),
2966 std::move(ACSRepairCB)));
2967
2968 return true;
2969}
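// Registration sketch (illustrative only, callback bodies omitted): an AA
// that wants to, e.g., expand one argument into two values would call
//   SmallVector<Type *, 2> NewTypes = {I32Ty, I32Ty};
//   A.registerFunctionSignatureRewrite(
//       Arg, NewTypes,
//       /* CalleeRepairCB */ [](const ArgumentReplacementInfo &, Function &,
//                               Function::arg_iterator) { /* rewire body */ },
//       /* ACSRepairCB */ [](const ArgumentReplacementInfo &, AbstractCallSite,
//                            SmallVectorImpl<Value *> &) { /* pass operands */ });
// The actual rewrite happens later in rewriteFunctionSignatures().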
2970
2971bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) {
2972 bool Result = true;
2973#ifndef NDEBUG
2974 if (SeedAllowList.size() != 0)
2975 Result = llvm::is_contained(SeedAllowList, AA.getName());
2976 Function *Fn = AA.getAnchorScope();
2977 if (FunctionSeedAllowList.size() != 0 && Fn)
2978 Result &= llvm::is_contained(FunctionSeedAllowList, Fn->getName());
2979#endif
2980 return Result;
2981}
2982
2983ChangeStatus Attributor::rewriteFunctionSignatures(
2984 SmallSetVector<Function *, 8> &ModifiedFns) {
2985 ChangeStatus Changed = ChangeStatus::UNCHANGED;
2986
2987 for (auto &It : ArgumentReplacementMap) {
2988 Function *OldFn = It.getFirst();
2989
2990 // Deleted functions do not require rewrites.
2991 if (!Functions.count(OldFn) || ToBeDeletedFunctions.count(OldFn))
2992 continue;
2993
2994 const SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2995 It.getSecond();
2996 assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");
2997
2998 SmallVector<Type *, 16> NewArgumentTypes;
2999 SmallVector<AttributeSet, 16> NewArgumentAttributes;
3000
3001 // Collect replacement argument types and copy over existing attributes.
3002 AttributeList OldFnAttributeList = OldFn->getAttributes();
3003 for (Argument &Arg : OldFn->args()) {
3004 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
3005 ARIs[Arg.getArgNo()]) {
3006 NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
3007 ARI->ReplacementTypes.end());
3008 NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
3009 AttributeSet());
3010 } else {
3011 NewArgumentTypes.push_back(Arg.getType());
3012 NewArgumentAttributes.push_back(
3013 OldFnAttributeList.getParamAttrs(Arg.getArgNo()));
3014 }
3015 }
3016
3017 uint64_t LargestVectorWidth = 0;
3018 for (auto *I : NewArgumentTypes)
3019 if (auto *VT = dyn_cast<llvm::VectorType>(I))
3020 LargestVectorWidth =
3021 std::max(LargestVectorWidth,
3022 VT->getPrimitiveSizeInBits().getKnownMinValue());
3023
3024 FunctionType *OldFnTy = OldFn->getFunctionType();
3025 Type *RetTy = OldFnTy->getReturnType();
3026
3027 // Construct the new function type using the new arguments types.
3028 FunctionType *NewFnTy =
3029 FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());
3030
3031 LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
3032 << "' from " << *OldFn->getFunctionType() << " to "
3033 << *NewFnTy << "\n");
3034
3035 // Create the new function body and insert it into the module.
3036 Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
3037 OldFn->getAddressSpace(), "");
3038 Functions.insert(NewFn);
3039 OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
3040 NewFn->takeName(OldFn);
3041 NewFn->copyAttributesFrom(OldFn);
3042 // Flag whether the function is using new-debug-info or not.
3043 NewFn->IsNewDbgInfoFormat = OldFn->IsNewDbgInfoFormat;
3044
3045 // Patch the pointer to LLVM function in debug info descriptor.
3046 NewFn->setSubprogram(OldFn->getSubprogram());
3047 OldFn->setSubprogram(nullptr);
3048
3049 // Recompute the parameter attributes list based on the new arguments for
3050 // the function.
3051 LLVMContext &Ctx = OldFn->getContext();
3052 NewFn->setAttributes(AttributeList::get(
3053 Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(),
3054 NewArgumentAttributes));
3055 AttributeFuncs::updateMinLegalVectorWidthAttr(*NewFn, LargestVectorWidth);
3056
3057 // Remove argmem from the memory effects if we have no more pointer
3058 // arguments, or they are readnone.
3059 MemoryEffects ME = NewFn->getMemoryEffects();
3060 int ArgNo = -1;
3061 if (ME.doesAccessArgPointees() && all_of(NewArgumentTypes, [&](Type *T) {
3062 ++ArgNo;
3063 return !T->isPtrOrPtrVectorTy() ||
3064 NewFn->hasParamAttribute(ArgNo, Attribute::ReadNone);
3065 })) {
3066 NewFn->setMemoryEffects(ME - MemoryEffects::argMemOnly());
3067 }
3068
3069 // Since we have now created the new function, splice the body of the old
3070 // function right into the new function, leaving the old rotting hulk of the
3071 // function empty.
3072 NewFn->splice(NewFn->begin(), OldFn);
3073
3074 // Fixup block addresses to reference new function.
3075 SmallVector<BlockAddress *, 8u> BlockAddresses;
3076 for (User *U : OldFn->users())
3077 if (auto *BA = dyn_cast<BlockAddress>(U))
3078 BlockAddresses.push_back(BA);
3079 for (auto *BA : BlockAddresses)
3080 BA->replaceAllUsesWith(BlockAddress::get(NewFn, BA->getBasicBlock()));
3081
3082 // Set of all "call-like" instructions that invoke the old function mapped
3083 // to their new replacements.
3084 SmallVector<std::pair<CallBase *, CallBase *>, 8> CallSitePairs;
3085
3086 // Callback to create a new "call-like" instruction for a given one.
3087 auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
3088 CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
3089 const AttributeList &OldCallAttributeList = OldCB->getAttributes();
3090
3091 // Collect the new argument operands for the replacement call site.
3092 SmallVector<Value *, 16> NewArgOperands;
3093 SmallVector<AttributeSet, 16> NewArgOperandAttributes;
3094 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
3095 unsigned NewFirstArgNum = NewArgOperands.size();
3096 (void)NewFirstArgNum; // only used inside assert.
3097 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
3098 ARIs[OldArgNum]) {
3099 if (ARI->ACSRepairCB)
3100 ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
3101 assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
3102 NewArgOperands.size() &&
3103 "ACS repair callback did not provide as many operand as new "
3104 "types were registered!");
3105 // TODO: Expose the attribute set to the ACS repair callback
3106 NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
3107 AttributeSet());
3108 } else {
3109 NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
3110 NewArgOperandAttributes.push_back(
3111 OldCallAttributeList.getParamAttrs(OldArgNum));
3112 }
3113 }
3114
3115 assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
3116 "Mismatch # argument operands vs. # argument operand attributes!");
3117 assert(NewArgOperands.size() == NewFn->arg_size() &&
3118 "Mismatch # argument operands vs. # function arguments!");
3119
3120 SmallVector<OperandBundleDef, 4> OperandBundleDefs;
3121 OldCB->getOperandBundlesAsDefs(OperandBundleDefs);
3122
3123 // Create a new call or invoke instruction to replace the old one.
3124 CallBase *NewCB;
3125 if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
3126 NewCB = InvokeInst::Create(NewFn, II->getNormalDest(),
3127 II->getUnwindDest(), NewArgOperands,
3128 OperandBundleDefs, "", OldCB->getIterator());
3129 } else {
3130 auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
3131 "", OldCB->getIterator());
3132 NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
3133 NewCB = NewCI;
3134 }
3135
3136 // Copy over various properties and the new attributes.
3137 NewCB->copyMetadata(*OldCB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
3138 NewCB->setCallingConv(OldCB->getCallingConv());
3139 NewCB->takeName(OldCB);
3140 NewCB->setAttributes(AttributeList::get(
3141 Ctx, OldCallAttributeList.getFnAttrs(),
3142 OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));
3143
3144 AttributeFuncs::updateMinLegalVectorWidthAttr(*NewCB->getCaller(),
3145 LargestVectorWidth);
3146
3147 CallSitePairs.push_back({OldCB, NewCB});
3148 return true;
3149 };
3150
3151 // Use the CallSiteReplacementCreator to create replacement call sites.
3152 bool UsedAssumedInformation = false;
3153 bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
3154 true, nullptr, UsedAssumedInformation,
3155 /* CheckPotentiallyDead */ true);
3156 (void)Success;
3157 assert(Success && "Assumed call site replacement to succeed!");
3158
3159 // Rewire the arguments.
3160 Argument *OldFnArgIt = OldFn->arg_begin();
3161 Argument *NewFnArgIt = NewFn->arg_begin();
3162 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
3163 ++OldArgNum, ++OldFnArgIt) {
3164 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
3165 ARIs[OldArgNum]) {
3166 if (ARI->CalleeRepairCB)
3167 ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
3168 if (ARI->ReplacementTypes.empty())
3169 OldFnArgIt->replaceAllUsesWith(
3170 PoisonValue::get(OldFnArgIt->getType()));
3171 NewFnArgIt += ARI->ReplacementTypes.size();
3172 } else {
3173 NewFnArgIt->takeName(&*OldFnArgIt);
3174 OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
3175 ++NewFnArgIt;
3176 }
3177 }
3178
3179 // Eliminate the instructions *after* we visited all of them.
3180 for (auto &CallSitePair : CallSitePairs) {
3181 CallBase &OldCB = *CallSitePair.first;
3182 CallBase &NewCB = *CallSitePair.second;
3183 assert(OldCB.getType() == NewCB.getType() &&
3184 "Cannot handle call sites with different types!");
3185 ModifiedFns.insert(OldCB.getFunction());
3186 OldCB.replaceAllUsesWith(&NewCB);
3187 OldCB.eraseFromParent();
3188 }
3189
3190 // Replace the function in the call graph (if any).
3191 Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);
3192
3193 // If the old function was modified and needed to be reanalyzed, the new one
3194 // does now.
3195 if (ModifiedFns.remove(OldFn))
3196 ModifiedFns.insert(NewFn);
3197
3198 Changed = ChangeStatus::CHANGED;
3199 }
3200
3201 return Changed;
3202}
3203
3204void InformationCache::initializeInformationCache(const Function &CF,
3205 FunctionInfo &FI) {
3206 // As we do not modify the function here we can remove the const
3207 // without breaking implicit assumptions. At the end of the day, we could
3208 // initialize the cache eagerly which would look the same to the users.
3209 Function &F = const_cast<Function &>(CF);
3210
3211 // Walk all instructions to find interesting instructions that might be
3212 // queried by abstract attributes during their initialization or update.
3213 // This has to happen before we create attributes.
3214
3215 DenseMap<const Value *, std::optional<short>> AssumeUsesMap;
3216
3217 // Add \p V to the assume uses map which tracks the number of uses outside of
3218 // "visited" assumes. If no outside uses are left the value is added to the
3219 // assume only use vector.
3220 auto AddToAssumeUsesMap = [&](const Value &V) -> void {
3221 SmallVector<const Instruction *> Worklist;
3222 if (auto *I = dyn_cast<Instruction>(&V))
3223 Worklist.push_back(I);
3224 while (!Worklist.empty()) {
3225 const Instruction *I = Worklist.pop_back_val();
3226 std::optional<short> &NumUses = AssumeUsesMap[I];
3227 if (!NumUses)
3228 NumUses = I->getNumUses();
3229 NumUses = *NumUses - /* this assume */ 1;
3230 if (*NumUses != 0)
3231 continue;
3232 AssumeOnlyValues.insert(I);
3233 for (const Value *Op : I->operands())
3234 if (auto *OpI = dyn_cast<Instruction>(Op))
3235 Worklist.push_back(OpI);
3236 }
3237 };
3238
3239 for (Instruction &I : instructions(&F)) {
3240 bool IsInterestingOpcode = false;
3241
3242 // To allow easy access to all instructions in a function with a given
3243 // opcode we store them in the InfoCache. As not all opcodes are interesting
3244 // to concrete attributes we only cache the ones that are as identified in
3245 // the following switch.
3246 // Note: There are no concrete attributes now so this is initially empty.
3247 switch (I.getOpcode()) {
3248 default:
3249 assert(!isa<CallBase>(&I) &&
3250 "New call base instruction type needs to be known in the "
3251 "Attributor.");
3252 break;
3253 case Instruction::Call:
3254 // Calls are interesting on their own, additionally:
3255 // For `llvm.assume` calls we also fill the KnowledgeMap as we find them.
3256 // For `must-tail` calls we remember the caller and callee.
3257 if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
3258 AssumeOnlyValues.insert(Assume);
3259 fillMapFromAssume(*Assume, KnowledgeMap);
3260 AddToAssumeUsesMap(*Assume->getArgOperand(0));
3261 } else if (cast<CallInst>(I).isMustTailCall()) {
3262 FI.ContainsMustTailCall = true;
3263 if (auto *Callee = dyn_cast_if_present<Function>(
3264 cast<CallInst>(I).getCalledOperand()))
3265 getFunctionInfo(*Callee).CalledViaMustTail = true;
3266 }
3267 [[fallthrough]];
3268 case Instruction::CallBr:
3269 case Instruction::Invoke:
3270 case Instruction::CleanupRet:
3271 case Instruction::CatchSwitch:
3272 case Instruction::AtomicRMW:
3273 case Instruction::AtomicCmpXchg:
3274 case Instruction::Br:
3275 case Instruction::Resume:
3276 case Instruction::Ret:
3277 case Instruction::Load:
3278 // The alignment of a pointer is interesting for loads.
3279 case Instruction::Store:
3280 // The alignment of a pointer is interesting for stores.
3281 case Instruction::Alloca:
3282 case Instruction::AddrSpaceCast:
3283 IsInterestingOpcode = true;
3284 }
3285 if (IsInterestingOpcode) {
3286 auto *&Insts = FI.OpcodeInstMap[I.getOpcode()];
3287 if (!Insts)
3288 Insts = new (Allocator) InstructionVectorTy();
3289 Insts->push_back(&I);
3290 }
3291 if (I.mayReadOrWriteMemory())
3292 FI.RWInsts.push_back(&I);
3293 }
3294
3295 if (F.hasFnAttribute(Attribute::AlwaysInline) &&
3296 isInlineViable(F).isSuccess())
3297 InlineableFunctions.insert(&F);
3298}
3299
3300InformationCache::FunctionInfo::~FunctionInfo() {
3301 // The instruction vectors are allocated using a BumpPtrAllocator, we need to
3302 // manually destroy them.
3303 for (auto &It : OpcodeInstMap)
3304 It.getSecond()->~InstructionVectorTy();
3305}
3306
3307ArrayRef<Function *>
3308InformationCache::getIndirectlyCallableFunctions(Attributor &A) const {
3309 assert(A.isClosedWorldModule() && "Cannot see all indirect callees!");
3310 return IndirectlyCallableFunctions;
3311}
3312
3313void Attributor::recordDependence(const AbstractAttribute &FromAA,
3314 const AbstractAttribute &ToAA,
3315 DepClassTy DepClass) {
3316 if (DepClass == DepClassTy::NONE)
3317 return;
3318 // If we are outside of an update, thus before the actual fixpoint iteration
3319 // started (= when we create AAs), we do not track dependences because we will
3320 // put all AAs into the initial worklist anyway.
3321 if (DependenceStack.empty())
3322 return;
3323 if (FromAA.getState().isAtFixpoint())
3324 return;
3325 DependenceStack.back()->push_back({&FromAA, &ToAA, DepClass});
3326}
3327
3328void Attributor::rememberDependences() {
3329 assert(!DependenceStack.empty() && "No dependences to remember!");
3330
3331 for (DepInfo &DI : *DependenceStack.back()) {
3332 assert((DI.DepClass == DepClassTy::REQUIRED ||
3333 DI.DepClass == DepClassTy::OPTIONAL) &&
3334 "Expected required or optional dependence (1 bit)!");
3335 auto &DepAAs = const_cast<AbstractAttribute &>(*DI.FromAA).Deps;
3336 DepAAs.insert(AbstractAttribute::DepTy(
3337 const_cast<AbstractAttribute *>(DI.ToAA), unsigned(DI.DepClass)));
3338 }
3339}
3340
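// Helper for identifyDefaultAbstractAttributes below: seed an AAType for IRP
// unless the IR attribute AK is already present, the AA kind is filtered out
// by the configured allow-list, or the attribute is already known or assumed
// to hold.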
3341template <Attribute::AttrKind AK, typename AAType>
3342void Attributor::checkAndQueryIRAttr(const IRPosition &IRP,
3343 AttributeSet Attrs) {
3344 bool IsKnown;
3345 if (!Attrs.hasAttribute(AK))
3346 if (!Configuration.Allowed || Configuration.Allowed->count(&AAType::ID))
3347 if (!AA::hasAssumedIRAttr<AK>(*this, nullptr, IRP, DepClassTy::NONE,
3348 IsKnown))
3349 getOrCreateAAFor<AAType>(IRP);
3350}
3351
3352void Attributor::identifyDefaultAbstractAttributes(Function &F) {
3353 if (!VisitedFunctions.insert(&F).second)
3354 return;
3355 if (F.isDeclaration())
3356 return;
3357
3358 // In non-module runs we need to look at the call sites of a function to
3359 // determine if it is part of a must-tail call edge. This will influence what
3360 // attributes we can derive.
3361 InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F);
3362 if (!isModulePass() && !FI.CalledViaMustTail) {
3363 for (const Use &U : F.uses())
3364 if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
3365 if (CB->isCallee(&U) && CB->isMustTailCall())
3366 FI.CalledViaMustTail = true;
3367 }
3368
3369 IRPosition FPos = IRPosition::function(F);
3370 bool IsIPOAmendable = isFunctionIPOAmendable(F);
3371 auto Attrs = F.getAttributes();
3372 auto FnAttrs = Attrs.getFnAttrs();
3373
3374 // Check for dead BasicBlocks in every function.
3375 // We need dead instruction detection because we do not want to deal with
3376 // broken IR in which SSA rules do not apply.
3377 getOrCreateAAFor<AAIsDead>(FPos);
3378
3379 // Every function might contain instructions that cause "undefined
3380 // behavior".
3381 getOrCreateAAFor<AAUndefinedBehavior>(FPos);
3382
3383 // Every function might be applicable for Heap-To-Stack conversion.
3384 if (EnableHeapToStack)
3385 getOrCreateAAFor<AAHeapToStack>(FPos);
3386
3387 // Every function might be "must-progress".
3388 checkAndQueryIRAttr<Attribute::MustProgress, AAMustProgress>(FPos, FnAttrs);
3389
3390 // Every function might be "no-free".
3391 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(FPos, FnAttrs);
3392
3393 // Every function might be "will-return".
3394 checkAndQueryIRAttr<Attribute::WillReturn, AAWillReturn>(FPos, FnAttrs);
3395
3396 // Every function might be marked "nosync"
3397 checkAndQueryIRAttr<Attribute::NoSync, AANoSync>(FPos, FnAttrs);
3398
3399 // Everything that is visible from the outside (=function, argument, return
3400 // positions), cannot be changed if the function is not IPO amendable. We can
3401 // however analyse the code inside.
3402 if (IsIPOAmendable) {
3403
3404 // Every function can be nounwind.
3405 checkAndQueryIRAttr<Attribute::NoUnwind, AANoUnwind>(FPos, FnAttrs);
3406
3407 // Every function might be "no-return".
3408 checkAndQueryIRAttr<Attribute::NoReturn, AANoReturn>(FPos, FnAttrs);
3409
3410 // Every function might be "no-recurse".
3411 checkAndQueryIRAttr<Attribute::NoRecurse, AANoRecurse>(FPos, FnAttrs);
3412
3413 // Every function can be "non-convergent".
3414 if (Attrs.hasFnAttr(Attribute::Convergent))
3415 getOrCreateAAFor<AANonConvergent>(FPos);
3416
3417 // Every function might be "readnone/readonly/writeonly/...".
3418 getOrCreateAAFor<AAMemoryBehavior>(FPos);
3419
3420 // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
3421 getOrCreateAAFor<AAMemoryLocation>(FPos);
3422
3423 // Every function can track active assumptions.
3424 getOrCreateAAFor<AAAssumptionInfo>(FPos);
3425
3426 // If we're not using a dynamic mode for float, there's nothing worthwhile
3427 // to infer. This misses the edge case denormal-fp-math="dynamic" and
3428 // denormal-fp-math-f32=something, but that likely has no real world use.
3429 DenormalMode Mode = F.getDenormalMode(APFloat::IEEEsingle());
3430 if (Mode.Input == DenormalMode::Dynamic ||
3431 Mode.Output == DenormalMode::Dynamic)
3432 getOrCreateAAFor<AADenormalFPMath>(FPos);
3433
3434 // Return attributes are only appropriate if the return type is non void.
3435 Type *ReturnType = F.getReturnType();
3436 if (!ReturnType->isVoidTy()) {
3437    IRPosition RetPos = IRPosition::returned(F);
3438    AttributeSet RetAttrs = Attrs.getRetAttrs();
3439
3440 // Every returned value might be dead.
3441 getOrCreateAAFor<AAIsDead>(RetPos);
3442
3443 // Every function might be simplified.
3444 bool UsedAssumedInformation = false;
3445 getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation,
3446                         AA::Intraprocedural);
3447
3448 // Every returned value might be marked noundef.
3449 checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(RetPos, RetAttrs);
3450
3451 if (ReturnType->isPointerTy()) {
3452
3453 // Every function with pointer return type might be marked align.
3454 getOrCreateAAFor<AAAlign>(RetPos);
3455
3456 // Every function with pointer return type might be marked nonnull.
3457 checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(RetPos, RetAttrs);
3458
3459 // Every function with pointer return type might be marked noalias.
3460 checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(RetPos, RetAttrs);
3461
3462 // Every function with pointer return type might be marked
3463 // dereferenceable.
3464 getOrCreateAAFor<AADereferenceable>(RetPos);
3465 } else if (AttributeFuncs::isNoFPClassCompatibleType(ReturnType)) {
3466 getOrCreateAAFor<AANoFPClass>(RetPos);
3467 }
3468 }
3469 }
3470
3471 for (Argument &Arg : F.args()) {
3472 IRPosition ArgPos = IRPosition::argument(Arg);
3473 auto ArgNo = Arg.getArgNo();
3474 AttributeSet ArgAttrs = Attrs.getParamAttrs(ArgNo);
3475
3476 if (!IsIPOAmendable) {
3477 if (Arg.getType()->isPointerTy())
3478 // Every argument with pointer type might be marked nofree.
3479 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
3480 continue;
3481 }
3482
3483 // Every argument might be simplified. We have to go through the
3484 // Attributor interface though as outside AAs can register custom
3485 // simplification callbacks.
3486 bool UsedAssumedInformation = false;
3487 getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation,
3488                         AA::Intraprocedural);
3489
3490 // Every argument might be dead.
3491 getOrCreateAAFor<AAIsDead>(ArgPos);
3492
3493 // Every argument might be marked noundef.
3494 checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(ArgPos, ArgAttrs);
3495
3496 if (Arg.getType()->isPointerTy()) {
3497 // Every argument with pointer type might be marked nonnull.
3498 checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(ArgPos, ArgAttrs);
3499
3500 // Every argument with pointer type might be marked noalias.
3501 checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(ArgPos, ArgAttrs);
3502
3503 // Every argument with pointer type might be marked dereferenceable.
3504 getOrCreateAAFor<AADereferenceable>(ArgPos);
3505
3506 // Every argument with pointer type might be marked align.
3507 getOrCreateAAFor<AAAlign>(ArgPos);
3508
3509 // Every argument with pointer type might be marked nocapture.
3510 checkAndQueryIRAttr<Attribute::NoCapture, AANoCapture>(ArgPos, ArgAttrs);
3511
3512 // Every argument with pointer type might be marked
3513 // "readnone/readonly/writeonly/..."
3514 getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
3515
3516 // Every argument with pointer type might be marked nofree.
3517 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
3518
3519 // Every argument with pointer type might be privatizable (or
3520 // promotable)
3521 getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
3522 } else if (AttributeFuncs::isNoFPClassCompatibleType(Arg.getType())) {
3523 getOrCreateAAFor<AANoFPClass>(ArgPos);
3524 }
3525 }
3526
3527 auto CallSitePred = [&](Instruction &I) -> bool {
3528 auto &CB = cast<CallBase>(I);
3529 IRPosition CBInstPos = IRPosition::inst(CB);
3530    IRPosition CBFnPos = IRPosition::callsite_function(CB);
3531
3532 // Call sites might be dead if they do not have side effects and no live
3533 // users. The return value might be dead if there are no live users.
3534 getOrCreateAAFor<AAIsDead>(CBInstPos);
3535
3536 Function *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
3537 // TODO: Even if the callee is not known now we might be able to simplify
3538 // the call/callee.
3539 if (!Callee) {
3540 getOrCreateAAFor<AAIndirectCallInfo>(CBFnPos);
3541 return true;
3542 }
3543
3544 // Every call site can track active assumptions.
3545 getOrCreateAAFor<AAAssumptionInfo>(CBFnPos);
3546
3547 // Skip declarations except if annotations on their call sites were
3548 // explicitly requested.
3549 if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
3550 !Callee->hasMetadata(LLVMContext::MD_callback))
3551 return true;
3552
3553 if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) {
3554      IRPosition CBRetPos = IRPosition::callsite_returned(CB);
3555      bool UsedAssumedInformation = false;
3556 getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation,
3557                           AA::Intraprocedural);
3558
3559 if (AttributeFuncs::isNoFPClassCompatibleType(Callee->getReturnType()))
3560 getOrCreateAAFor<AANoFPClass>(CBInstPos);
3561 }
3562
3563 const AttributeList &CBAttrs = CBFnPos.getAttrList();
3564 for (int I = 0, E = CB.arg_size(); I < E; ++I) {
3565
3566      IRPosition CBArgPos = IRPosition::callsite_argument(CB, I);
3567      AttributeSet CBArgAttrs = CBAttrs.getParamAttrs(I);
3568
3569 // Every call site argument might be dead.
3570 getOrCreateAAFor<AAIsDead>(CBArgPos);
3571
3572 // Call site argument might be simplified. We have to go through the
3573 // Attributor interface though as outside AAs can register custom
3574 // simplification callbacks.
3575 bool UsedAssumedInformation = false;
3576 getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation,
3577                           AA::Intraprocedural);
3578
3579 // Every call site argument might be marked "noundef".
3580 checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(CBArgPos, CBArgAttrs);
3581
3582 Type *ArgTy = CB.getArgOperand(I)->getType();
3583
3584 if (!ArgTy->isPointerTy()) {
3585        if (AttributeFuncs::isNoFPClassCompatibleType(ArgTy))
3586          getOrCreateAAFor<AANoFPClass>(CBArgPos);
3587
3588 continue;
3589 }
3590
3591 // Call site argument attribute "non-null".
3592 checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(CBArgPos, CBArgAttrs);
3593
3594 // Call site argument attribute "nocapture".
3595 checkAndQueryIRAttr<Attribute::NoCapture, AANoCapture>(CBArgPos,
3596 CBArgAttrs);
3597
3598 // Call site argument attribute "no-alias".
3599 checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(CBArgPos, CBArgAttrs);
3600
3601 // Call site argument attribute "dereferenceable".
3602 getOrCreateAAFor<AADereferenceable>(CBArgPos);
3603
3604 // Call site argument attribute "align".
3605 getOrCreateAAFor<AAAlign>(CBArgPos);
3606
3607 // Call site argument attribute
3608 // "readnone/readonly/writeonly/..."
3609 if (!CBAttrs.hasParamAttr(I, Attribute::ReadNone))
3610 getOrCreateAAFor<AAMemoryBehavior>(CBArgPos);
3611
3612 // Call site argument attribute "nofree".
3613 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(CBArgPos, CBArgAttrs);
3614 }
3615 return true;
3616 };
3617
3618 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
3619 [[maybe_unused]] bool Success;
3620 bool UsedAssumedInformation = false;
3621  Success = checkForAllInstructionsImpl(
3622      nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
3623 {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
3624 (unsigned)Instruction::Call},
3625 UsedAssumedInformation);
3626 assert(Success && "Expected the check call to be successful!");
3627
3628 auto LoadStorePred = [&](Instruction &I) -> bool {
3629 if (auto *LI = dyn_cast<LoadInst>(&I)) {
3630 getOrCreateAAFor<AAAlign>(IRPosition::value(*LI->getPointerOperand()));
3631 if (SimplifyAllLoads)
3632        getAssumedSimplified(IRPosition::value(I), nullptr,
3633                             UsedAssumedInformation, AA::Intraprocedural);
3634 getOrCreateAAFor<AAAddressSpace>(
3635 IRPosition::value(*LI->getPointerOperand()));
3636 } else {
3637 auto &SI = cast<StoreInst>(I);
3638 getOrCreateAAFor<AAIsDead>(IRPosition::inst(I));
3639 getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
3640 UsedAssumedInformation, AA::Intraprocedural);
3641 getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
3642 getOrCreateAAFor<AAAddressSpace>(
3643 IRPosition::value(*SI.getPointerOperand()));
3644 }
3645 return true;
3646 };
3647  Success = checkForAllInstructionsImpl(
3648      nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
3649 {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
3650 UsedAssumedInformation);
3651 assert(Success && "Expected the check call to be successful!");
3652
3653 // AllocaInstPredicate
3654 auto AAAllocationInfoPred = [&](Instruction &I) -> bool {
3655 getOrCreateAAFor<AAAllocationInfo>(IRPosition::value(I));
3656 return true;
3657 };
3658
3659  Success = checkForAllInstructionsImpl(
3660      nullptr, OpcodeInstMap, AAAllocationInfoPred, nullptr, nullptr,
3661 {(unsigned)Instruction::Alloca}, UsedAssumedInformation);
3662 assert(Success && "Expected the check call to be successful!");
3663}
3664
3665bool Attributor::isClosedWorldModule() const {
3666  if (CloseWorldAssumption.getNumOccurrences())
3667 return CloseWorldAssumption;
3668 return isModulePass() && Configuration.IsClosedWorldModule;
3669}
3670
3671/// Helpers to ease debugging through output streams and print calls.
3672///
3673///{
3674raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
3675  return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
3676}
3677
3678raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
3679  switch (AP) {
3680  case IRPosition::IRP_INVALID:
3681    return OS << "inv";
3682  case IRPosition::IRP_FLOAT:
3683    return OS << "flt";
3684  case IRPosition::IRP_RETURNED:
3685    return OS << "fn_ret";
3686  case IRPosition::IRP_CALL_SITE_RETURNED:
3687    return OS << "cs_ret";
3688  case IRPosition::IRP_FUNCTION:
3689    return OS << "fn";
3690  case IRPosition::IRP_CALL_SITE:
3691    return OS << "cs";
3692  case IRPosition::IRP_ARGUMENT:
3693    return OS << "arg";
3694  case IRPosition::IRP_CALL_SITE_ARGUMENT:
3695    return OS << "cs_arg";
3696  }
3697 llvm_unreachable("Unknown attribute position!");
3698}
3699
3700raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
3701  const Value &AV = Pos.getAssociatedValue();
3702 OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
3703 << Pos.getAnchorValue().getName() << "@" << Pos.getCallSiteArgNo() << "]";
3704
3705 if (Pos.hasCallBaseContext())
3706 OS << "[cb_context:" << *Pos.getCallBaseContext() << "]";
3707 return OS << "}";
3708}
3709
3710raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
3711  OS << "range-state(" << S.getBitWidth() << ")<";
3712 S.getKnown().print(OS);
3713 OS << " / ";
3714 S.getAssumed().print(OS);
3715 OS << ">";
3716
3717 return OS << static_cast<const AbstractState &>(S);
3718}
3719
3720raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
3721  return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
3722}
3723
3724raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
3725  AA.print(OS);
3726 return OS;
3727}
3728
3729raw_ostream &llvm::operator<<(raw_ostream &OS,
3730                              const PotentialConstantIntValuesState &S) {
3731  OS << "set-state(< {";
3732 if (!S.isValidState())
3733 OS << "full-set";
3734 else {
3735 for (const auto &It : S.getAssumedSet())
3736 OS << It << ", ";
3737 if (S.undefIsContained())
3738 OS << "undef ";
3739 }
3740 OS << "} >)";
3741
3742 return OS;
3743}
3744
3745raw_ostream &llvm::operator<<(raw_ostream &OS,
3746                              const PotentialLLVMValuesState &S) {
3747 OS << "set-state(< {";
3748 if (!S.isValidState())
3749 OS << "full-set";
3750 else {
3751 for (const auto &It : S.getAssumedSet()) {
3752 if (auto *F = dyn_cast<Function>(It.first.getValue()))
3753 OS << "@" << F->getName() << "[" << int(It.second) << "], ";
3754 else
3755 OS << *It.first.getValue() << "[" << int(It.second) << "], ";
3756 }
3757 if (S.undefIsContained())
3758 OS << "undef ";
3759 }
3760 OS << "} >)";
3761
3762 return OS;
3763}
3764
3765void AbstractAttribute::print(Attributor *A, raw_ostream &OS) const {
3766  OS << "[";
3767 OS << getName();
3768 OS << "] for CtxI ";
3769
3770 if (auto *I = getCtxI()) {
3771 OS << "'";
3772 I->print(OS);
3773 OS << "'";
3774 } else
3775 OS << "<<null inst>>";
3776
3777 OS << " at position " << getIRPosition() << " with state " << getAsStr(A)
3778 << '\n';
3779}
3780
3781void AbstractAttribute::printWithDeps(raw_ostream &OS) const {
3782  print(OS);
3783
3784 for (const auto &DepAA : Deps) {
3785 auto *AA = DepAA.getPointer();
3786 OS << " updates ";
3787 AA->print(OS);
3788 }
3789
3790 OS << '\n';
3791}
3792
3793raw_ostream &llvm::operator<<(raw_ostream &OS,
3794                              const AAPointerInfo::Access &Acc) {
3795 OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst();
3796 if (Acc.getLocalInst() != Acc.getRemoteInst())
3797 OS << " via " << *Acc.getLocalInst();
3798 if (Acc.getContent()) {
3799 if (*Acc.getContent())
3800 OS << " [" << **Acc.getContent() << "]";
3801 else
3802 OS << " [ <unknown> ]";
3803 }
3804 return OS;
3805}
3806///}
3807
3808/// ----------------------------------------------------------------------------
3809/// Pass (Manager) Boilerplate
3810/// ----------------------------------------------------------------------------
3811
3812static bool runAttributorOnFunctions(InformationCache &InfoCache,
3813                                     SetVector<Function *> &Functions,
3814 AnalysisGetter &AG,
3815 CallGraphUpdater &CGUpdater,
3816 bool DeleteFns, bool IsModulePass) {
3817 if (Functions.empty())
3818 return false;
3819
3820 LLVM_DEBUG({
3821 dbgs() << "[Attributor] Run on module with " << Functions.size()
3822 << " functions:\n";
3823 for (Function *Fn : Functions)
3824 dbgs() << " - " << Fn->getName() << "\n";
3825 });
3826
3827 // Create an Attributor and initially empty information cache that is filled
3828 // while we identify default attribute opportunities.
3829 AttributorConfig AC(CGUpdater);
3830 AC.IsModulePass = IsModulePass;
3831 AC.DeleteFns = DeleteFns;
3832
3833 /// Tracking callback for specialization of indirect calls.
3834  DenseMap<CallBase *, std::unique_ptr<SmallPtrSet<Function *, 8>>>
3835      IndirectCalleeTrackingMap;
3836 if (MaxSpecializationPerCB.getNumOccurrences()) {
3837 AC.IndirectCalleeSpecializationCallback =
3838 [&](Attributor &, const AbstractAttribute &AA, CallBase &CB,
3839 Function &Callee) {
3840 if (MaxSpecializationPerCB == 0)
3841 return false;
3842 auto &Set = IndirectCalleeTrackingMap[&CB];
3843 if (!Set)
3844 Set = std::make_unique<SmallPtrSet<Function *, 8>>();
3845 if (Set->size() >= MaxSpecializationPerCB)
3846 return Set->contains(&Callee);
3847 Set->insert(&Callee);
3848 return true;
3849 };
3850 }
3851
3852 Attributor A(Functions, InfoCache, AC);
3853
3854 // Create shallow wrappers for all functions that are not IPO amendable
3855  if (AllowShallowWrappers)
3856    for (Function *F : Functions)
3857      if (!A.isFunctionIPOAmendable(*F))
3858        Attributor::createShallowWrapper(*F);
3859
3860 // Internalize non-exact functions
3861 // TODO: for now we eagerly internalize functions without calculating the
3862 // cost, we need a cost interface to determine whether internalizing
3863 // a function is "beneficial"
3864 if (AllowDeepWrapper) {
3865 unsigned FunSize = Functions.size();
3866 for (unsigned u = 0; u < FunSize; u++) {
3867 Function *F = Functions[u];
3868 if (!F->isDeclaration() && !F->isDefinitionExact() && F->getNumUses() &&
3869 !GlobalValue::isInterposableLinkage(F->getLinkage())) {
3870        Function *NewF = Attributor::internalizeFunction(*F);
3871        assert(NewF && "Could not internalize function.");
3872 Functions.insert(NewF);
3873
3874 // Update call graph
3875 CGUpdater.replaceFunctionWith(*F, *NewF);
3876 for (const Use &U : NewF->uses())
3877 if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
3878 auto *CallerF = CB->getCaller();
3879 CGUpdater.reanalyzeFunction(*CallerF);
3880 }
3881 }
3882 }
3883 }
3884
3885 for (Function *F : Functions) {
3886 if (F->hasExactDefinition())
3887 NumFnWithExactDefinition++;
3888 else
3889 NumFnWithoutExactDefinition++;
3890
3891 // We look at internal functions only on-demand but if any use is not a
3892 // direct call or outside the current set of analyzed functions, we have
3893 // to do it eagerly.
3894 if (F->hasLocalLinkage()) {
3895 if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
3896 const auto *CB = dyn_cast<CallBase>(U.getUser());
3897 return CB && CB->isCallee(&U) &&
3898 Functions.count(const_cast<Function *>(CB->getCaller()));
3899 }))
3900 continue;
3901 }
3902
3903 // Populate the Attributor with abstract attribute opportunities in the
3904 // function and the information cache with IR information.
3905 A.identifyDefaultAbstractAttributes(*F);
3906 }
3907
3908 ChangeStatus Changed = A.run();
3909
3910 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
3911 << " functions, result: " << Changed << ".\n");
3912 return Changed == ChangeStatus::CHANGED;
3913}
3914
3915static bool runAttributorLightOnFunctions(InformationCache &InfoCache,
3916                                          SetVector<Function *> &Functions,
3917 AnalysisGetter &AG,
3918 CallGraphUpdater &CGUpdater,
3919                                          FunctionAnalysisManager &FAM,
3920                                          bool IsModulePass) {
3921 if (Functions.empty())
3922 return false;
3923
3924 LLVM_DEBUG({
3925 dbgs() << "[AttributorLight] Run on module with " << Functions.size()
3926 << " functions:\n";
3927 for (Function *Fn : Functions)
3928 dbgs() << " - " << Fn->getName() << "\n";
3929 });
3930
3931 // Create an Attributor and initially empty information cache that is filled
3932 // while we identify default attribute opportunities.
3933 AttributorConfig AC(CGUpdater);
3934 AC.IsModulePass = IsModulePass;
3935 AC.DeleteFns = false;
3936 DenseSet<const char *> Allowed(
3942 AC.Allowed = &Allowed;
3943 AC.UseLiveness = false;
3944
3945 Attributor A(Functions, InfoCache, AC);
3946
3947 for (Function *F : Functions) {
3948 if (F->hasExactDefinition())
3949 NumFnWithExactDefinition++;
3950 else
3951 NumFnWithoutExactDefinition++;
3952
3953 // We look at internal functions only on-demand but if any use is not a
3954 // direct call or outside the current set of analyzed functions, we have
3955 // to do it eagerly.
3956 if (AC.UseLiveness && F->hasLocalLinkage()) {
3957 if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
3958 const auto *CB = dyn_cast<CallBase>(U.getUser());
3959 return CB && CB->isCallee(&U) &&
3960 Functions.count(const_cast<Function *>(CB->getCaller()));
3961 }))
3962 continue;
3963 }
3964
3965 // Populate the Attributor with abstract attribute opportunities in the
3966 // function and the information cache with IR information.
3967 A.identifyDefaultAbstractAttributes(*F);
3968 }
3969
3970 ChangeStatus Changed = A.run();
3971
3972 if (Changed == ChangeStatus::CHANGED) {
3973 // Invalidate analyses for modified functions so that we don't have to
3974 // invalidate all analyses for all functions in this SCC.
3975 PreservedAnalyses FuncPA;
3976 // We haven't changed the CFG for modified functions.
3977 FuncPA.preserveSet<CFGAnalyses>();
3978 for (Function *Changed : A.getModifiedFunctions()) {
3979 FAM.invalidate(*Changed, FuncPA);
3980 // Also invalidate any direct callers of changed functions since analyses
3981 // may care about attributes of direct callees. For example, MemorySSA
3982 // cares about whether or not a call's callee modifies memory and queries
3983 // that through function attributes.
3984 for (auto *U : Changed->users()) {
3985 if (auto *Call = dyn_cast<CallBase>(U)) {
3986 if (Call->getCalledFunction() == Changed)
3987 FAM.invalidate(*Call->getFunction(), FuncPA);
3988 }
3989 }
3990 }
3991 }
3992 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
3993 << " functions, result: " << Changed << ".\n");
3994 return Changed == ChangeStatus::CHANGED;
3995}
3996
3997void AADepGraph::viewGraph() { llvm::ViewGraph(this, "Dependency Graph"); }
3998
3999void AADepGraph::dumpGraph() {
4000  static std::atomic<int> CallTimes;
4001 std::string Prefix;
4002
4003 if (!DepGraphDotFileNamePrefix.empty())
4004    Prefix = DepGraphDotFileNamePrefix;
4005  else
4006 Prefix = "dep_graph";
4007 std::string Filename =
4008 Prefix + "_" + std::to_string(CallTimes.load()) + ".dot";
4009
4010 outs() << "Dependency graph dump to " << Filename << ".\n";
4011
4012 std::error_code EC;
4013
4014 raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
4015 if (!EC)
4016 llvm::WriteGraph(File, this);
4017
4018 CallTimes++;
4019}
4020
4021void AADepGraph::print() {
4022  for (auto DepAA : SyntheticRoot.Deps)
4023 cast<AbstractAttribute>(DepAA.getPointer())->printWithDeps(outs());
4024}
4025
4026PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
4027  FunctionAnalysisManager &FAM =
4028      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
4029  AnalysisGetter AG(FAM);
4030
4031 SetVector<Function *> Functions;
4032 for (Function &F : M)
4033 Functions.insert(&F);
4034
4035 CallGraphUpdater CGUpdater;
4036  BumpPtrAllocator Allocator;
4037  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
4038 if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
4039 /* DeleteFns */ true, /* IsModulePass */ true)) {
4040 // FIXME: Think about passes we will preserve and add them here.
4041 return PreservedAnalyses::none();
4042 }
4043 return PreservedAnalyses::all();
4044}
4045
4046PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
4047                                           CGSCCAnalysisManager &AM,
4048                                           LazyCallGraph &CG,
4049 CGSCCUpdateResult &UR) {
4050  FunctionAnalysisManager &FAM =
4051      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
4052 AnalysisGetter AG(FAM);
4053
4054 SetVector<Function *> Functions;
4055 for (LazyCallGraph::Node &N : C)
4056 Functions.insert(&N.getFunction());
4057
4058 if (Functions.empty())
4059 return PreservedAnalyses::all();
4060
4061 Module &M = *Functions.back()->getParent();
4062 CallGraphUpdater CGUpdater;
4063 CGUpdater.initialize(CG, C, AM, UR);
4064  BumpPtrAllocator Allocator;
4065  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
4066 if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
4067 /* DeleteFns */ false,
4068 /* IsModulePass */ false)) {
4069 // FIXME: Think about passes we will preserve and add them here.
4070    PreservedAnalyses PA;
4071    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
4072    return PA;
4073 }
4074 return PreservedAnalyses::all();
4075}
4076
4077PreservedAnalyses AttributorLightPass::run(Module &M,
4078                                           ModuleAnalysisManager &AM) {
4079  FunctionAnalysisManager &FAM =
4080      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
4081  AnalysisGetter AG(FAM, /* CachedOnly */ true);
4082
4083 SetVector<Function *> Functions;
4084 for (Function &F : M)
4085 Functions.insert(&F);
4086
4087 CallGraphUpdater CGUpdater;
4088  BumpPtrAllocator Allocator;
4089  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
4090 if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
4091 /* IsModulePass */ true)) {
4092    PreservedAnalyses PA;
4093    // We have not added or removed functions.
4094    PA.preserve<FunctionAnalysisManagerModuleProxy>();
4095    // We already invalidated all relevant function analyses above.
4096    PA.preserveSet<AllAnalysesOn<Function>>();
4097    return PA;
4098 }
4099 return PreservedAnalyses::all();
4100}
4101
4102PreservedAnalyses AttributorLightCGSCCPass::run(LazyCallGraph::SCC &C,
4103                                                CGSCCAnalysisManager &AM,
4104                                                LazyCallGraph &CG,
4105 CGSCCUpdateResult &UR) {
4106  FunctionAnalysisManager &FAM =
4107      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
4108 AnalysisGetter AG(FAM);
4109
4110 SetVector<Function *> Functions;
4111 for (LazyCallGraph::Node &N : C)
4112 Functions.insert(&N.getFunction());
4113
4114 if (Functions.empty())
4115 return PreservedAnalyses::all();
4116
4117 Module &M = *Functions.back()->getParent();
4118 CallGraphUpdater CGUpdater;
4119 CGUpdater.initialize(CG, C, AM, UR);
4120  BumpPtrAllocator Allocator;
4121  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
4122 if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
4123 /* IsModulePass */ false)) {
4124    PreservedAnalyses PA;
4125    // We have not added or removed functions.
4126    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
4127    // We already invalidated all relevant function analyses above.
4128    PA.preserveSet<AllAnalysesOn<Function>>();
4129    return PA;
4130 }
4131 return PreservedAnalyses::all();
4132}
4133namespace llvm {
4134
4135template <> struct GraphTraits<AADepGraphNode *> {
4136  using NodeRef = AADepGraphNode *;
4137  using DepTy = PointerIntPair<AADepGraphNode *, 1>;
4138  using EdgeRef = PointerIntPair<AADepGraphNode *, 1>;
4139
4140 static NodeRef getEntryNode(AADepGraphNode *DGN) { return DGN; }
4141 static NodeRef DepGetVal(const DepTy &DT) { return DT.getPointer(); }
4142
4143  using ChildIteratorType =
4144      mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;
4145  using ChildEdgeIteratorType = AADepGraphNode::DepSetTy::iterator;
4146
4147 static ChildIteratorType child_begin(NodeRef N) { return N->child_begin(); }
4148
4149 static ChildIteratorType child_end(NodeRef N) { return N->child_end(); }
4150};
4151
4152template <>
4153struct GraphTraits<AADepGraph *> : public GraphTraits<AADepGraphNode *> {
4154  static NodeRef getEntryNode(AADepGraph *DG) { return DG->GetEntryNode(); }
4155
4156  using nodes_iterator =
4157      mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;
4158
4159 static nodes_iterator nodes_begin(AADepGraph *DG) { return DG->begin(); }
4160
4161 static nodes_iterator nodes_end(AADepGraph *DG) { return DG->end(); }
4162};
4163
4164template <> struct DOTGraphTraits<AADepGraph *> : public DefaultDOTGraphTraits {
4165  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
4166
4167 static std::string getNodeLabel(const AADepGraphNode *Node,
4168 const AADepGraph *DG) {
4169 std::string AAString;
4170 raw_string_ostream O(AAString);
4171 Node->print(O);
4172 return AAString;
4173 }
4174};
4175
4176} // end namespace llvm
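
The pass classes above are normally driven through the new pass manager. The following is a minimal, self-contained sketch of that usage; it is not part of Attributor.cpp, and the helper name runAttributorOnModule is invented purely for illustration.

// Minimal usage sketch -- NOT part of Attributor.cpp. Assumes a standard
// new-pass-manager setup; runAttributorOnModule is a hypothetical helper.
#include "llvm/IR/Module.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Transforms/IPO/Attributor.h"

using namespace llvm;

static void runAttributorOnModule(Module &M) {
  // Set up the analysis managers and wire the cross-IR-unit proxies so that
  // AttributorPass::run can obtain a FunctionAnalysisManager from the
  // ModuleAnalysisManager (see the pass boilerplate above).
  PassBuilder PB;
  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  // Schedule module-wide attribute deduction and run it over M.
  ModulePassManager MPM;
  MPM.addPass(AttributorPass());
  MPM.run(M, MAM);
}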
#define Success
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
This file contains the simple types necessary to represent the attributes associated with functions a...
static cl::opt< bool > AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden, cl::desc("Allow the Attributor to create shallow " "wrappers for non-exact definitions."), cl::init(false))
bool canMarkAsVisited(const User *Usr)
#define VERBOSE_DEBUG_TYPE
Definition: Attributor.cpp:67
static cl::opt< bool > EnableHeapToStack("enable-heap-to-stack-conversion", cl::init(true), cl::Hidden)
static cl::list< std::string > SeedAllowList("attributor-seed-allow-list", cl::Hidden, cl::desc("Comma separated list of attribute names that are " "allowed to be seeded."), cl::CommaSeparated)
static bool runAttributorOnFunctions(InformationCache &InfoCache, SetVector< Function * > &Functions, AnalysisGetter &AG, CallGraphUpdater &CGUpdater, bool DeleteFns, bool IsModulePass)
}
static bool isPotentiallyReachable(Attributor &A, const Instruction &FromI, const Instruction *ToI, const Function &ToFn, const AbstractAttribute &QueryingAA, const AA::InstExclusionSetTy *ExclusionSet, std::function< bool(const Function &F)> GoBackwardsCB)
Definition: Attributor.cpp:661
static bool getPotentialCopiesOfMemoryValue(Attributor &A, Ty &I, SmallSetVector< Value *, 4 > &PotentialCopies, SmallSetVector< Instruction *, 4 > *PotentialValueOrigins, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact)
Definition: Attributor.cpp:365
static bool runAttributorLightOnFunctions(InformationCache &InfoCache, SetVector< Function * > &Functions, AnalysisGetter &AG, CallGraphUpdater &CGUpdater, FunctionAnalysisManager &FAM, bool IsModulePass)
static cl::opt< unsigned, true > MaxInitializationChainLengthX("attributor-max-initialization-chain-length", cl::Hidden, cl::desc("Maximal number of chained initializations (to avoid stack overflows)"), cl::location(MaxInitializationChainLength), cl::init(1024))
static cl::opt< unsigned > MaxSpecializationPerCB("attributor-max-specializations-per-call-base", cl::Hidden, cl::desc("Maximal number of callees specialized for " "a call base"), cl::init(UINT32_MAX))
static cl::opt< bool > SimplifyAllLoads("attributor-simplify-all-loads", cl::Hidden, cl::desc("Try to simplify all loads."), cl::init(true))
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr, AttributeSet AttrSet, bool ForceReplace, AttrBuilder &AB)
Return true if the information provided by Attr was added to the attribute set AttrSet.
Definition: Attributor.cpp:956
static cl::opt< bool > ViewDepGraph("attributor-view-dep-graph", cl::Hidden, cl::desc("View the dependency graph."), cl::init(false))
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old)
Return true if New is equal or worse than Old.
Definition: Attributor.cpp:946
static cl::opt< bool > AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden, cl::desc("Allow the Attributor to use IP information " "derived from non-exact functions via cloning"), cl::init(false))
static cl::opt< bool > DumpDepGraph("attributor-dump-dep-graph", cl::Hidden, cl::desc("Dump the dependency graph to dot files."), cl::init(false))
static cl::opt< bool > PrintCallGraph("attributor-print-call-graph", cl::Hidden, cl::desc("Print Attributor's internal call graph"), cl::init(false))
static bool checkForAllInstructionsImpl(Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap, function_ref< bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA, const AAIsDead *LivenessAA, ArrayRef< unsigned > Opcodes, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, bool CheckPotentiallyDead=false)
static cl::opt< bool > PrintDependencies("attributor-print-dep", cl::Hidden, cl::desc("Print attribute dependencies"), cl::init(false))
static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool RequireReadNone, bool &IsKnown)
Definition: Attributor.cpp:609
static cl::opt< std::string > DepGraphDotFileNamePrefix("attributor-depgraph-dot-filename-prefix", cl::Hidden, cl::desc("The prefix used for the CallGraph dot file names."))
static cl::opt< bool > AnnotateDeclarationCallSites("attributor-annotate-decl-cs", cl::Hidden, cl::desc("Annotate call sites of function declarations."), cl::init(false))
static cl::opt< unsigned > SetFixpointIterations("attributor-max-iterations", cl::Hidden, cl::desc("Maximal number of fixpoint iterations."), cl::init(32))
static cl::list< std::string > FunctionSeedAllowList("attributor-function-seed-allow-list", cl::Hidden, cl::desc("Comma separated list of function names that are " "allowed to be seeded."), cl::CommaSeparated)
static cl::opt< bool > EnableCallSiteSpecific("attributor-enable-call-site-specific-deduction", cl::Hidden, cl::desc("Allow the Attributor to do call site specific analysis"), cl::init(false))
static cl::opt< bool > CloseWorldAssumption("attributor-assume-closed-world", cl::Hidden, cl::desc("Should a closed world be assumed, or not. Default if not set."))
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
Definition: DebugCounter.h:190
#define LLVM_DEBUG(X)
Definition: Debug.h:101
#define DEBUG_WITH_TYPE(TYPE, X)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition: Debug.h:64
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:236
Rewrite Partial Register Uses
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
This file defines the PointerIntPair class.
static StringRef getName(Value *V)
Basic Register Allocator
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isSimple(Instruction *I)
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the SmallPtrSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
Class for arbitrary precision integers.
Definition: APInt.h:78
AbstractCallSite.
CallBase * getInstruction() const
Return the underlying instruction.
bool isCallbackCall() const
Return true if this ACS represents a callback call.
const Use & getCalleeUseForCallback() const
Return the use of the callee value in the underlying instruction.
static void getCallbackUses(const CallBase &CB, SmallVectorImpl< const Use * > &CallbackUses)
Add operand uses of CB that represent callback uses into CallbackUses.
bool isCallee(Value::const_user_iterator UI) const
Return true if UI is the use that defines the callee of this ACS.
Value * getCallArgOperand(Argument &Arg) const
Return the operand of the underlying instruction associated with Arg.
int getCallArgOperandNo(Argument &Arg) const
Return the operand index of the underlying instruction associated with Arg.
unsigned getNumArgOperands() const
Return the number of parameters of the callee.
Function * getCalledFunction() const
Return the function being called if this is a direct call, otherwise return null (if it's an indirect...
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
Definition: Analysis.h:49
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
void invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Invalidate cached analyses for an IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:405
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
const Function * getParent() const
Definition: Argument.h:43
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
Definition: Argument.h:49
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
AttributeSet getFnAttrs() const
The function attributes are returned.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Return true if the attribute exists for the given argument.
Definition: Attributes.h:805
AttributeSet getParamAttrs(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
MemoryEffects getMemoryEffects() const
Definition: Attributes.cpp:988
bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
Definition: Attributes.cpp:909
Attribute getAttribute(Attribute::AttrKind Kind) const
Return the attribute object.
Definition: Attributes.cpp:917
bool isStringAttribute() const
Return true if the attribute is a string (target-dependent) attribute.
Definition: Attributes.cpp:346
bool isEnumAttribute() const
Return true if the attribute is an Attribute::AttrKind type.
Definition: Attributes.cpp:338
bool isIntAttribute() const
Return true if the attribute is an integer attribute.
Definition: Attributes.cpp:342
uint64_t getValueAsInt() const
Return the attribute's value as an integer.
Definition: Attributes.cpp:370
StringRef getKindAsString() const
Return the attribute's kind as a string.
Definition: Attributes.cpp:384
static Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
Definition: Attributes.cpp:94
Attribute::AttrKind getKindAsEnum() const
Return the attribute's kind as an enum (Attribute::AttrKind).
Definition: Attributes.cpp:362
MemoryEffects getMemoryEffects() const
Returns memory effects.
Definition: Attributes.cpp:483
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:391
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:86
@ None
No attributes have been set.
Definition: Attributes.h:88
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
const Instruction & front() const
Definition: BasicBlock.h:461
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:202
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
Definition: BasicBlock.cpp:465
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:209
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:229
static BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
Definition: Constants.cpp:1871
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:66
Represents analyses that only rely on functions' control flow.
Definition: Analysis.h:72
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1236
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1527
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1574
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1523
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
Value * getCalledOperand() const
Definition: InstrTypes.h:1458
void setAttributes(AttributeList A)
Set the parameter attributes for this call.
Definition: InstrTypes.h:1546
unsigned arg_size() const
Definition: InstrTypes.h:1408
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1542
Function * getCaller()
Helper to get the caller (the parent function).
Wrapper to unify "old style" CallGraph and "new style" LazyCallGraph.
void removeFunction(Function &Fn)
Remove Fn from the call graph.
void replaceFunctionWith(Function &OldFn, Function &NewFn)
Replace OldFn in the call graph (and SCC) with NewFn.
void reanalyzeFunction(Function &Fn)
After an CGSCC pass changes a function in ways that affect the call graph, this method can be called ...
void initialize(LazyCallGraph &LCG, LazyCallGraph::SCC &SCC, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR)
Initializers for usage outside of a CGSCC pass, inside a CGSCC pass in the old and new pass manager (...
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
void setTailCall(bool IsTc=true)
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:1084
static Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
Definition: Constants.cpp:2215
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2241
void print(raw_ostream &OS) const
Print out the bounds to a stream.
This is an important base class in LLVM.
Definition: Constant.h:42
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
static bool shouldExecute(unsigned CounterName)
Definition: DebugCounter.h:87
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:202
bool empty() const
Definition: DenseMap.h:98
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition: DenseMap.h:145
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
A proxy from a FunctionAnalysisManager to an SCC.
Class to represent function types.
Definition: DerivedTypes.h:103
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void setSubprogram(DISubprogram *SP)
Set the attached subprogram.
Definition: Metadata.cpp:1826
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition: Function.h:165
void splice(Function::iterator ToIt, Function *FromF)
Transfer all blocks from FromF to this function at ToIt.
Definition: Function.h:752
const BasicBlock & getEntryBlock() const
Definition: Function.h:800
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:207
iterator_range< arg_iterator > args()
Definition: Function.h:855
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1830
MemoryEffects getMemoryEffects() const
Definition: Function.cpp:855
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const
check if an attributes is in the list of attributes.
Definition: Function.cpp:731
bool IsNewDbgInfoFormat
Is this function using intrinsics to record the position of debugging information,...
Definition: Function.h:108
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:350
iterator begin()
Definition: Function.h:816
arg_iterator arg_begin()
Definition: Function.h:831
void setAttributes(AttributeList Attrs)
Set the attribute list for this Function.
Definition: Function.h:353
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:358
size_t arg_size() const
Definition: Function.h:864
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:212
void setMemoryEffects(MemoryEffects ME)
Definition: Function.cpp:858
Argument * getArg(unsigned i) const
Definition: Function.h:849
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition: Function.h:225
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:719
void copyAttributesFrom(const Function *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a Function) from the ...
Definition: Function.cpp:839
bool hasMetadata() const
Return true if this value has any metadata attached to it.
Definition: Value.h:589
void addMetadata(unsigned KindID, MDNode &MD)
Add a metadata attachment.
Definition: Metadata.cpp:1521
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:290
LinkageTypes getLinkage() const
Definition: GlobalValue.h:546
bool hasLocalLinkage() const
Definition: GlobalValue.h:528
void setLinkage(LinkageTypes LT)
Definition: GlobalValue.h:537
unsigned getAddressSpace() const
Definition: GlobalValue.h:205
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
void setDSOLocal(bool Local)
Definition: GlobalValue.h:303
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
@ DefaultVisibility
The GV is visible.
Definition: GlobalValue.h:67
void setVisibility(VisibilityTypes V)
Definition: GlobalValue.h:254
static bool isInterposableLinkage(LinkageTypes Linkage)
Whether the definition of this global may be replaced by something non-equivalent at link time.
Definition: GlobalValue.h:425
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition: GlobalValue.h:60
@ InternalLinkage
Rename collisions when linking (static functions).
Definition: GlobalValue.h:59
bool isSuccess() const
Definition: InlineCost.h:189
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
Definition: PassManager.h:563
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:70
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this',...
void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
A node in the call graph.
An SCC of the call graph.
A lazily constructed view of the call graph of a module.
An instruction for reading from memory.
Definition: Instructions.h:174
This is the common base class for memset/memcpy/memmove.
This class wraps the llvm.memcpy/memmove intrinsics.
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access argument memory.
Definition: ModRef.h:132
bool doesAccessArgPointees() const
Whether this function may access argument memory.
Definition: ModRef.h:206
static MemoryLocation getForSource(const MemTransferInst *MTI)
Return a location representing the source of a memory transfer.
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
static std::optional< MemoryLocation > getOrNone(const Instruction *Inst)
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
const FunctionListType & getFunctionList() const
Get the Module's list of functions (constant).
Definition: Module.h:613
Diagnostic information for missed-optimization remarks.
PointerIntPair - This class implements a pair of a pointer and small integer.
void * getOpaqueValue() const
PointerTy getPointer() const
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1852
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: Analysis.h:114
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
void preserveSet()
Mark an analysis set as preserved.
Definition: Analysis.h:146
void preserve()
Mark an analysis as preserved.
Definition: Analysis.h:131
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
A vector that has set insertion semantics.
Definition: SetVector.h:57
ArrayRef< value_type > getArrayRef() const
Definition: SetVector.h:84
bool remove(const value_type &X)
Remove an item from the set vector.
Definition: SetVector.h:188
size_type size() const
Determine the number of elements in the SetVector.
Definition: SetVector.h:98
const value_type & front() const
Return the first element of the SetVector.
Definition: SetVector.h:143
const value_type & back() const
Return the last element of the SetVector.
Definition: SetVector.h:149
typename vector_type::const_iterator iterator
Definition: SetVector.h:69
iterator end()
Get an iterator to the end of the SetVector.
Definition: SetVector.h:113
void clear()
Completely clear the SetVector.
Definition: SetVector.h:273
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition: SetVector.h:264
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:93
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:103
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:162
size_type size() const
Definition: SmallPtrSet.h:94
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:323
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:412
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:344
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:370
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void reserve(size_type N)
Definition: SmallVector.h:676
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:696
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:290
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
A visitor class for IR positions.
Definition: Attributor.h:1111
SubsumingPositionIterator(const IRPosition &IRP)
Provides information about what library functions are available for the current target.
The TimeTraceScope is a helper class to call the begin and end functions of the time trace profiler.
Definition: TimeProfiler.h:147
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:185
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1833
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
iterator_range< user_iterator > users()
Definition: Value.h:421
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:694
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
iterator_range< use_iterator > uses()
Definition: Value.h:376
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
Value handle that is nullable, but tries to track the Value.
Definition: ValueHandle.h:204
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition: DenseSet.h:97
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
iterator insert(iterator where, pointer New)
Definition: ilist.h:165
A raw_ostream that writes to a file descriptor.
Definition: raw_ostream.h:460
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:661
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isAssumedReadNone(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool &IsKnown)
Return true if IRP is readnone.
Definition: Attributor.cpp:654
bool isAssumedReadOnly(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool &IsKnown)
Return true if IRP is readonly.
Definition: Attributor.cpp:649
std::optional< Value * > combineOptionalValuesInAAValueLatice(const std::optional< Value * > &A, const std::optional< Value * > &B, Type *Ty)
Return the combination of A and B such that the result is a possible value of both.
Definition: Attributor.cpp:340
bool isValidAtPosition(const ValueAndContext &VAC, InformationCache &InfoCache)
Return true if the value of VAC is a valid at the position of VAC, that is a constant,...
Definition: Attributor.cpp:291
bool isAssumedThreadLocalObject(Attributor &A, Value &Obj, const AbstractAttribute &QueryingAA)
Return true if Obj is assumed to be a thread local object.
Definition: Attributor.cpp:836
bool isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA, const Value &V, bool ForAnalysisOnly=true)
Return true if V is dynamically unique, that is, there are no two "instances" of V at runtime with di...
Definition: Attributor.cpp:232
bool getPotentialCopiesOfStoredValue(Attributor &A, StoreInst &SI, SmallSetVector< Value *, 4 > &PotentialCopies, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact=false)
Collect all potential values of the one stored by SI into PotentialCopies.
Definition: Attributor.cpp:600
bool isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I, const AbstractAttribute &QueryingAA)
Return true if I is potentially affected by a barrier.
Definition: Attributor.cpp:890
bool isGPU(const Module &M)
Return true iff M target a GPU (and we can use GPU AS reasoning).
Definition: Attributor.cpp:201
Constant * getInitialValueForObj(Attributor &A, const AbstractAttribute &QueryingAA, Value &Obj, Type &Ty, const TargetLibraryInfo *TLI, const DataLayout &DL, RangeTy *RangePtr=nullptr)
Return the initial value of Obj with type Ty if that is a constant.
Definition: Attributor.cpp:243
ValueScope
Flags to distinguish intra-procedural queries from potentially inter-procedural queries.
Definition: Attributor.h:180
@ Intraprocedural
Definition: Attributor.h:181
@ Interprocedural
Definition: Attributor.h:182
bool isValidInScope(const Value &V, const Function *Scope)
Return true if V is a valid value in Scope, that is a constant or an instruction/argument of Scope.
Definition: Attributor.cpp:281
bool isPotentiallyReachable(Attributor &A, const Instruction &FromI, const Instruction &ToI, const AbstractAttribute &QueryingAA, const AA::InstExclusionSetTy *ExclusionSet=nullptr, std::function< bool(const Function &F)> GoBackwardsCB=nullptr)
Return true if ToI is potentially reachable from FromI without running into any instruction in Exclus...
Definition: Attributor.cpp:817
bool isNoSyncInst(Attributor &A, const Instruction &I, const AbstractAttribute &QueryingAA)
Return true if I is a nosync instruction.
Definition: Attributor.cpp:206
bool getPotentiallyLoadedValues(Attributor &A, LoadInst &LI, SmallSetVector< Value *, 4 > &PotentialValues, SmallSetVector< Instruction *, 4 > &PotentialValueOrigins, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact=false)
Collect all potential values LI could read into PotentialValues.
Definition: Attributor.cpp:590
Value * getWithType(Value &V, Type &Ty)
Try to convert V to type Ty without introducing new instructions.
Definition: Attributor.cpp:317
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isNoFPClassCompatibleType(Type *Ty)
Returns true if this is a type legal for the 'nofpclass' attribute.
void updateMinLegalVectorWidthAttr(Function &Fn, uint64_t Width)
Update min-legal-vector-width if it is in Attribute and less than Width.
@ Entry
Definition: COFF.h:811
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
@ CommaSeparated
Definition: CommandLine.h:163
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
DiagnosticInfoOptimizationBase::Argument NV
@ OF_TextWithCRLF
The file should be opened in text mode and use a carriage linefeed '\r '.
Definition: FileSystem.h:767
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition: Local.cpp:540
unsigned MaxInitializationChainLength
The value passed to the line option that defines the maximal initialization chain length.
Definition: Attributor.cpp:110
bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions=false, const TargetLibraryInfo *TLI=nullptr, DomTreeUpdater *DTU=nullptr)
If a terminator instruction is predicated on a constant value, convert it into an unconditional branch to the constant condition.
Definition: Local.cpp:130
APInt operator&(APInt a, const APInt &b)
Definition: APInt.h:2072
void detachDeadBlocks(ArrayRef< BasicBlock * > BBs, SmallVectorImpl< DominatorTree::UpdateType > *Updates, bool KeepOneInputPHIs=false)
Replace the contents of every block in BBs with a single unreachable instruction.
@ Done
Definition: Threading.h:61
bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
Definition: Verifier.cpp:7128
CallInst * changeToCall(InvokeInst *II, DomTreeUpdater *DTU=nullptr)
This function converts the specified invoke into a normal call.
Definition: Local.cpp:2903
raw_fd_ostream & outs()
This returns a reference to a raw_fd_ostream for standard output.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
raw_ostream & WriteGraph(raw_ostream &O, const GraphType &G, bool ShortNames=false, const Twine &Title="")
Definition: GraphWriter.h:359
InlineResult isInlineViable(Function &Callee)
Minimal filter to detect invalid constructs for inlining.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:400
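A hedged sketch of the usual cleanup idiom built from isInstructionTriviallyDead and the RecursivelyDeleteTriviallyDeadInstructions helper listed above; the worklist loop is illustrative, not the Attributor's own deletion logic:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Transforms/Utils/Local.h"

// Collect instructions whose results are unused, then delete them; the
// recursive helper also removes operands that become dead as a consequence.
static void removeTriviallyDeadInsts(llvm::Function &F) {
  llvm::SmallVector<llvm::Instruction *, 16> Worklist;
  for (llvm::Instruction &I : llvm::instructions(F))
    if (llvm::isInstructionTriviallyDead(&I))
      Worklist.push_back(&I);
  for (llvm::Instruction *I : Worklist)
    llvm::RecursivelyDeleteTriviallyDeadInstructions(I);
}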
Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty, const DataLayout &DL)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all poison), return the corresponding uniform value in the new type.
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
Definition: Function.cpp:2102
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool AreStatisticsEnabled()
Check if statistics are enabled.
Definition: Statistic.cpp:139
Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the code in the block dead.
Definition: Local.cpp:2837
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
BasicBlock * SplitBlockPredecessors(BasicBlock *BB, ArrayRef< BasicBlock * > Preds, const char *Suffix, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, bool PreserveLCSSA=false)
This method introduces at least one new basic block into the function and moves some of the predecessors of BB to be predecessors of the new block.
bool operator&=(SparseBitVector< ElementSize > *LHS, const SparseBitVector< ElementSize > &RHS)
void ViewGraph(const GraphType &G, const Twine &Name, bool ShortNames=false, const Twine &Title="", GraphProgram::Name Program=GraphProgram::DOT)
ViewGraph - Emit a dot graph, run 'dot', run gv on the postscript file, then cleanup.
Definition: GraphWriter.h:427
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Definition: APFixedPoint.h:293
void CloneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, CloneFunctionChangeType Changes, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr)
Clone OldFunc into NewFunc, transforming the old arguments into references to VMap values.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition: Casting.h:565
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:2051
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition: iterator.h:363
bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates or reallocates memory (either malloc, calloc, realloc, or strdup like).
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1879
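A small sketch combining the erase_if and is_contained wrappers listed above (the container, predicate, and sentinel value are purely illustrative):

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

// Drop all negative entries unless the sentinel value is already present.
static void dropNegativesUnless(llvm::SmallVector<int, 8> &Values, int Sentinel) {
  if (!llvm::is_contained(Values, Sentinel))
    llvm::erase_if(Values, [](int V) { return V < 0; });
}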
ChangeStatus
Definition: Attributor.h:484
void fillMapFromAssume(AssumeInst &Assume, RetainedKnowledgeMap &Result)
Insert into the map all the information contained in the operand bundles of the llvm.assume.
bool operator|=(SparseBitVector< ElementSize > &LHS, const SparseBitVector< ElementSize > *RHS)
Constant * ConstantFoldCastInstruction(unsigned opcode, Constant *V, Type *DestTy)
DepClassTy
Definition: Attributor.h:494
@ OPTIONAL
The target may be valid if the source is not.
@ NONE
Do not track a dependence between source and target.
@ REQUIRED
The target cannot be valid if the source is not.
APInt operator|(APInt a, const APInt &b)
Definition: APInt.h:2092
#define N
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:5521
DepSetTy Deps
Set of dependency graph nodes which should be updated if this one is updated.
Definition: Attributor.h:511
aaiterator begin()
Definition: Attributor.h:525
aaiterator end()
Definition: Attributor.h:526
The data structure for the dependency graph.
Definition: Attributor.h:545
iterator begin()
Definition: Attributor.h:560
AADepGraphNode SyntheticRoot
There is no root node for the dependency graph.
Definition: Attributor.h:557
void print()
Print dependency graph.
iterator end()
Definition: Attributor.h:561
void dumpGraph()
Dump graph to file.
AADepGraphNode * GetEntryNode()
Definition: Attributor.h:558
An abstract interface to track if a value leaves its defining function instance.
Definition: Attributor.h:4301
bool isAssumedUniqueForAnalysis() const
Return true if we assume that the underlying value is unique in its scope wrt.
Definition: Attributor.h:4315
An abstract Attribute for computing reachability between functions.
Definition: Attributor.h:5674
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:5709
An abstract interface to determine reachability of point A to B.
Definition: Attributor.h:3807
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3835
An abstract interface for liveness abstract attribute.
Definition: Attributor.h:3967
virtual bool isKnownDead() const =0
Returns true if the underlying value is known dead.
virtual bool isAssumedDead() const =0
The query functions are protected such that other attributes need to go through the Attributor interfaces.
virtual bool isRemovableStore() const
Return true if the underlying value is a store that is known to be removable.
Definition: Attributor.h:4011
static bool mayCatchAsynchronousExceptions(const Function &F)
Determine if F might catch asynchronous exceptions.
Definition: Attributor.h:4032
An abstract interface for memory access kind related attributes (readnone/readonly/writeonly).
Definition: Attributor.h:4621
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4687
An abstract interface for all memory location attributes (readnone/argmemonly/inaccessiblememonly/ina...
Definition: Attributor.h:4696
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4872
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3618
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4415
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:5432
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3931
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3700
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3962
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3576
static bool isNonRelaxedAtomic(const Instruction *I)
Helper function used to determine whether an instruction is non-relaxed atomic.
static bool isNoSyncIntrinsic(const Instruction *I)
Helper function specific for intrinsics which are potentially volatile.
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3493
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3669
An access description.
Definition: Attributor.h:5932
bool isWrittenValueUnknown() const
Return true if the value written cannot be determined at all.
Definition: Attributor.h:6038
std::optional< Value * > getContent() const
Return the written value which can be llvm::null if it is not yet determined.
Definition: Attributor.h:6057
bool isWriteOrAssumption() const
Return true if this is a write access.
Definition: Attributor.h:6008
bool isRead() const
Return true if this is a read access.
Definition: Attributor.h:6002
Value * getWrittenValue() const
Return the value written, if any.
Definition: Attributor.h:6049
Instruction * getLocalInst() const
Return the instruction that causes the access with respect to the local scope of the associated attribute.
Definition: Attributor.h:6029
Instruction * getRemoteInst() const
Return the actual instruction that causes the access.
Definition: Attributor.h:6032
bool isWrittenValueYetUndetermined() const
Return true if the value written is not known yet.
Definition: Attributor.h:6035
AccessKind getKind() const
Return the access kind.
Definition: Attributor.h:5999
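Given an AAPointerInfo::Access (typically handed to a callback during interference checks), the getters above compose roughly as follows; this is a sketch of a consumer, and the dbgs() printing is illustrative only:

#include "llvm/Support/Debug.h"
#include "llvm/Transforms/IPO/Attributor.h"

// Inspect one access description: report reads and, for writes, the written
// value if it has already been determined.
static void describeAccess(const llvm::AAPointerInfo::Access &Acc) {
  if (Acc.isRead())
    llvm::dbgs() << "read at " << *Acc.getRemoteInst() << "\n";
  if (Acc.isWriteOrAssumption() && !Acc.isWrittenValueYetUndetermined() &&
      !Acc.isWrittenValueUnknown())
    if (llvm::Value *V = Acc.getWrittenValue())
      llvm::dbgs() << "writes " << *V << "\n";
}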
An abstract interface for struct information.
Definition: Attributor.h:5746
static Value * getSingleValue(Attributor &A, const AbstractAttribute &AA, const IRPosition &IRP, SmallVectorImpl< AA::ValueAndContext > &Values)
Extract the single value in Values if any.
An abstract attribute for getting all assumption underlying objects.
Definition: Attributor.h:6186
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:6216
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3764
Helper to represent an access offset and size, with logic to deal with uncertainty and check for overlapping accesses.
Definition: Attributor.h:237
bool offsetOrSizeAreUnknown() const
Return true if offset or size are unknown.
Definition: Attributor.h:246
static const fltSemantics & IEEEsingle() LLVM_READNONE
Definition: APFloat.cpp:276
Base struct for all "concrete attribute" deductions.
Definition: Attributor.h:3275
ChangeStatus update(Attributor &A)
Hook for the Attributor to trigger an update of the internal state.
virtual ChangeStatus manifest(Attributor &A)
Hook for the Attributor to trigger the manifestation of the information represented by the abstract a...
Definition: Attributor.h:3390
virtual void printWithDeps(raw_ostream &OS) const
void print(raw_ostream &OS) const
Helper functions, for debug purposes only.
Definition: Attributor.h:3359
virtual StateType & getState()=0
Return the internal abstract state for inspection.
virtual const std::string getName() const =0
This function should return the name of the AbstractAttribute.
virtual ~AbstractAttribute()=default
Virtual destructor.
virtual const std::string getAsStr(Attributor *A) const =0
This function should return the "summarized" assumed state as string.
virtual bool isQueryAA() const
A query AA is always scheduled as long as we do updates because it does lazy computation that cannot ...
Definition: Attributor.h:3347
virtual ChangeStatus updateImpl(Attributor &A)=0
The actual update/transfer function which has to be implemented by the derived classes.
virtual void trackStatistics() const =0
Hook to enable custom statistic tracking, called after manifest that resulted in a change if statistics are enabled.
const IRPosition & getIRPosition() const
Return an IR position, see struct IRPosition.
Definition: Attributor.h:3354
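A skeletal (and deliberately over-simplified) attribute that wires up the virtual hooks listed above. Real AAs in Attributor.h usually mix in their state via StateWrapper and provide createForPosition/classof plumbing, so treat this purely as a shape sketch of the interface:

#include "llvm/Transforms/IPO/Attributor.h"
using namespace llvm;

// Sketch: a do-nothing abstract attribute exercising the AbstractAttribute hooks.
struct AAExampleSketch : public AbstractAttribute {
  AAExampleSketch(const IRPosition &IRP) : AbstractAttribute(IRP) {}

  // The internal abstract state; a BooleanState is the simplest choice.
  BooleanState S;
  StateType &getState() override { return S; }
  const StateType &getState() const override { return S; }

  const std::string getName() const override { return "AAExampleSketch"; }
  const std::string getAsStr(Attributor *) const override {
    return S.isValidState() ? "example<valid>" : "example<invalid>";
  }

  // Unique-ID plumbing mirroring the 'static const char ID' members in this index.
  static const char ID;
  const char *getIdAddr() const override { return &ID; }

  // The transfer function: deduced information would be folded into S here.
  ChangeStatus updateImpl(Attributor &A) override {
    return ChangeStatus::UNCHANGED;
  }
  void trackStatistics() const override {}
};
const char AAExampleSketch::ID = 0;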
An interface to query the internal state of an abstract attribute.
Definition: Attributor.h:2595
virtual ChangeStatus indicatePessimisticFixpoint()=0
Indicate that the abstract state should converge to the pessimistic state.
virtual bool isAtFixpoint() const =0
Return if this abstract state is fixed, thus does not need to be updated if information changes as it cannot change itself.
virtual bool isValidState() const =0
Return if this abstract state is in a valid state.
virtual ChangeStatus indicateOptimisticFixpoint()=0
Indicate that the abstract state should converge to the optimistic state.
Wrapper for FunctionAnalysisManager.
Definition: Attributor.h:1122
PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &CG, CGSCCUpdateResult &UR)
void populateAll() const
Force populate the entire call graph.
Definition: Attributor.h:5538
Configuration for the Attributor.
Definition: Attributor.h:1414
bool UseLiveness
Flag to determine if we should skip all liveness checks early on.
Definition: Attributor.h:1438
std::optional< unsigned > MaxFixpointIterations
Maximum number of iterations to run until fixpoint.
Definition: Attributor.h:1461
DenseSet< const char * > * Allowed
If not null, a set limiting the attribute opportunities.
Definition: Attributor.h:1458
bool RewriteSignatures
Flag to determine if we rewrite function signatures.
Definition: Attributor.h:1431
bool DeleteFns
Flag to determine if we can delete functions or keep dead ones around.
Definition: Attributor.h:1428
bool IsClosedWorldModule
Flag to indicate if the entire world is contained in this module, that is, no outside functions exist...
Definition: Attributor.h:1442
CallGraphUpdater & CGUpdater
Helper to update an underlying call graph and to delete functions.
Definition: Attributor.h:1455
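Putting the configuration fields listed above together; the CallGraphUpdater is assumed to be owned by the calling pass and the chosen values are only an example:

#include "llvm/Transforms/IPO/Attributor.h"
using namespace llvm;

// Sketch: assemble an AttributorConfig for a module-wide run.
static AttributorConfig makeConfig(CallGraphUpdater &CGUpdater) {
  AttributorConfig AC(CGUpdater);
  AC.DeleteFns = false;           // keep (possibly dead) functions around
  AC.RewriteSignatures = false;   // do not rewrite function signatures
  AC.UseLiveness = true;          // allow liveness-based reasoning
  AC.MaxFixpointIterations = 32;  // bound the fixpoint iteration
  AC.IsClosedWorldModule = false; // outside callers may exist
  return AC;
}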
PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &CG, CGSCCUpdateResult &UR)
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Helper struct used in the communication between an abstract attribute (AA) that wants to change the signature of a function and the Attributor which applies the changes.
Definition: Attributor.h:2203
std::function< void(const ArgumentReplacementInfo &, AbstractCallSite, SmallVectorImpl< Value * > &)> ACSRepairCBTy
Abstract call site (ACS) repair callback type.
Definition: Attributor.h:2226
std::function< void(const ArgumentReplacementInfo &, Function &, Function::arg_iterator)> CalleeRepairCBTy
Callee repair callback type.
Definition: Attributor.h:2212
The fixpoint analysis framework that orchestrates the attribute deduction.
Definition: Attributor.h:1508
bool registerFunctionSignatureRewrite(Argument &Arg, ArrayRef< Type * > ReplacementTypes, ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB, ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB)
Register a rewrite for a function signature.
bool checkForAllCallees(function_ref< bool(ArrayRef< const Function * > Callees)> Pred, const AbstractAttribute &QueryingAA, const CallBase &CB)
Check Pred on all potential Callees of CB.
bool isModulePass() const
Return true if this is a module pass, false otherwise.
Definition: Attributor.h:1716
bool isValidFunctionSignatureRewrite(Argument &Arg, ArrayRef< Type * > ReplacementTypes)
Check if we can rewrite a function signature.
static bool isInternalizable(Function &F)
Returns true if the function F can be internalized.
ChangeStatus removeAttrs(const IRPosition &IRP, ArrayRef< Attribute::AttrKind > AttrKinds)
Remove all AttrKinds attached to IRP.
bool isRunOn(Function &Fn) const
Return true if we derive attributes for Fn.
Definition: Attributor.h:1733
bool isAssumedDead(const AbstractAttribute &AA, const AAIsDead *LivenessAA, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, DepClassTy DepClass=DepClassTy::OPTIONAL)
Return true if AA (or its context instruction) is assumed dead.
bool checkForAllInstructions(function_ref< bool(Instruction &)> Pred, const Function *Fn, const AbstractAttribute *QueryingAA, ArrayRef< unsigned > Opcodes, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, bool CheckPotentiallyDead=false)
Check Pred on all instructions in Fn with an opcode present in Opcodes.
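As an illustration of the callback pattern behind checkForAllInstructions: a hedged sketch that asks whether every call or invoke in a scope is a direct call. The helper name and the predicate body are made up:

#include "llvm/IR/InstrTypes.h"
#include "llvm/Transforms/IPO/Attributor.h"
using namespace llvm;

// Sketch: returns true if all calls/invokes in Scope are direct calls, taking
// assumed (not yet fixed) information such as liveness into account.
static bool allCallsAreDirect(Attributor &A, const AbstractAttribute &QueryingAA,
                              const Function *Scope,
                              bool &UsedAssumedInformation) {
  auto CheckInst = [](Instruction &I) {
    return !cast<CallBase>(I).isIndirectCall();
  };
  return A.checkForAllInstructions(CheckInst, Scope, &QueryingAA,
                                   {(unsigned)Instruction::Call,
                                    (unsigned)Instruction::Invoke},
                                   UsedAssumedInformation);
}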
void recordDependence(const AbstractAttribute &FromAA, const AbstractAttribute &ToAA, DepClassTy DepClass)
Explicitly record a dependence from FromAA to ToAA, that is if FromAA changes ToAA should be updated as well.
static void createShallowWrapper(Function &F)
Create a shallow wrapper for F such that F has internal linkage afterwards.
std::optional< Value * > getAssumedSimplified(const IRPosition &IRP, const AbstractAttribute &AA, bool &UsedAssumedInformation, AA::ValueScope S)
If V is assumed simplified, return it, if it is unclear yet, return std::nullopt, otherwise return nullptr.
Definition: Attributor.h:1966
static Function * internalizeFunction(Function &F, bool Force=false)
Make another copy of the function F such that the copied version has internal linkage afterwards and ...
bool isFunctionIPOAmendable(const Function &F)
Determine whether the function F is IPO amendable.
Definition: Attributor.h:1812
bool checkForAllReadWriteInstructions(function_ref< bool(Instruction &)> Pred, AbstractAttribute &QueryingAA, bool &UsedAssumedInformation)
Check Pred on all Read/Write instructions.
bool checkForAllReturnedValues(function_ref< bool(Value &)> Pred, const AbstractAttribute &QueryingAA, AA::ValueScope S=AA::ValueScope::Intraprocedural, bool RecurseForSelectAndPHI=true)
Check Pred on all values potentially returned by the function associated with QueryingAA.
bool isClosedWorldModule() const
Return true if the module contains the whole world, thus, no outside functions exist.
std::optional< Constant * > getAssumedConstant(const IRPosition &IRP, const AbstractAttribute &AA, bool &UsedAssumedInformation)
If IRP is assumed to be a constant, return it, if it is unclear yet, return std::nullopt, otherwise return nullptr.
Attributor(SetVector< Function * > &Functions, InformationCache &InfoCache, AttributorConfig Configuration)
Constructor.
void getAttrs(const IRPosition &IRP, ArrayRef< Attribute::AttrKind > AKs, SmallVectorImpl< Attribute > &Attrs, bool IgnoreSubsumingPositions=false)
Return the attributes of any kind in AKs existing in the IR at a position that will affect this one.
InformationCache & getInfoCache()
Return the internal information cache.
Definition: Attributor.h:1713
std::optional< Value * > translateArgumentToCallSiteContent(std::optional< Value * > V, CallBase &CB, const AbstractAttribute &AA, bool &UsedAssumedInformation)
Translate V from the callee context into the call site context.
bool checkForAllUses(function_ref< bool(const Use &, bool &)> Pred, const AbstractAttribute &QueryingAA, const Value &V, bool CheckBBLivenessOnly=false, DepClassTy LivenessDepClass=DepClassTy::OPTIONAL, bool IgnoreDroppableUses=true, function_ref< bool(const Use &OldU, const Use &NewU)> EquivalentUseCB=nullptr)
Check Pred on all (transitive) uses of V.
ChangeStatus manifestAttrs(const IRPosition &IRP, ArrayRef< Attribute > DeducedAttrs, bool ForceReplace=false)
Attach DeducedAttrs to IRP, if ForceReplace is set we do this even if the same attribute kind was already present.
bool hasAttr(const IRPosition &IRP, ArrayRef< Attribute::AttrKind > AKs, bool IgnoreSubsumingPositions=false, Attribute::AttrKind ImpliedAttributeKind=Attribute::None)
Return true if any kind in AKs existing in the IR at a position that will affect this one.
void registerForUpdate(AbstractAttribute &AA)
Allows a query AA to request an update if a new query was received.
void identifyDefaultAbstractAttributes(Function &F)
Determine opportunities to derive 'default' attributes in F and create abstract attribute objects for them.
bool getAssumedSimplifiedValues(const IRPosition &IRP, const AbstractAttribute *AA, SmallVectorImpl< AA::ValueAndContext > &Values, AA::ValueScope S, bool &UsedAssumedInformation, bool RecurseForSelectAndPHI=true)
Try to simplify IRP in the scope S.
std::function< bool(Attributor &, const AbstractAttribute *)> VirtualUseCallbackTy
Definition: Attributor.h:2054
ChangeStatus run()
Run the analyses until a fixpoint is reached or enforced (timeout).
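A condensed sketch of the driver sequence implied by the entries above: seed the default abstract attributes for every function, then iterate to a fixpoint. The surrounding pass that owns Functions, InfoCache, and the configuration is assumed:

#include "llvm/ADT/SetVector.h"
#include "llvm/Transforms/IPO/Attributor.h"
using namespace llvm;

// Sketch: minimal Attributor driver built only from the API listed here.
static ChangeStatus runAttributorSketch(SetVector<Function *> &Functions,
                                        InformationCache &InfoCache,
                                        AttributorConfig AC) {
  Attributor A(Functions, InfoCache, AC);
  for (Function *F : Functions)
    A.identifyDefaultAbstractAttributes(*F);
  // Run updates until a fixpoint is reached or the iteration bound is hit.
  return A.run();
}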
static bool internalizeFunctions(SmallPtrSetImpl< Function * > &FnSet, DenseMap< Function *, Function * > &FnMap)
Make copies of each function in the set FnSet such that the copied version has internal linkage after...
bool checkForAllCallSites(function_ref< bool(AbstractCallSite)> Pred, const AbstractAttribute &QueryingAA, bool RequireAllCallSites, bool &UsedAssumedInformation)
Check Pred on all function call sites.
bool getAttrsFromAssumes(const IRPosition &IRP, Attribute::AttrKind AK, SmallVectorImpl< Attribute > &Attrs)
Return the attributes of kind AK existing in the IR as operand bundles of an llvm.assume.
bool isKnown(base_t BitsEncoding=BestState) const
Return true if the bits set in BitsEncoding are "known bits".
Definition: Attributor.h:2743
Support structure for SCC passes to communicate updates to the call graph back to the CGSCC pass manager.
static std::string getNodeLabel(const AADepGraphNode *Node, const AADepGraph *DG)
DOTGraphTraits - Template class that can be specialized to customize how graphs are converted to 'dot' graphs.
DefaultDOTGraphTraits - This class provides the default implementations of all of the DOTGraphTraits ...
Represent subnormal handling kind for floating point instruction inputs and outputs.
@ Dynamic
Denormals have unknown treatment.
An information struct used to provide DenseMap with the various necessary components for a given value type T.
Definition: DenseMapInfo.h:52
static NodeRef DepGetVal(const DepTy &DT)
static ChildIteratorType child_end(NodeRef N)
static NodeRef getEntryNode(AADepGraphNode *DGN)
static ChildIteratorType child_begin(NodeRef N)
AADepGraphNode::DepSetTy::iterator ChildEdgeIteratorType
static NodeRef getEntryNode(AADepGraph *DG)
static nodes_iterator nodes_begin(AADepGraph *DG)
static nodes_iterator nodes_end(AADepGraph *DG)
Helper to describe and deal with positions in the LLVM-IR.
Definition: Attributor.h:581
Function * getAssociatedFunction() const
Return the associated function, if any.
Definition: Attributor.h:712
void setAttrList(const AttributeList &AttrList) const
Update the attributes associated with this function or call site scope.
Definition: Attributor.h:848
unsigned getAttrIdx() const
Return the index in the attribute list for this position.
Definition: Attributor.h:813
bool hasCallBaseContext() const
Check if the position has any call base context.
Definition: Attributor.h:930
static const IRPosition callsite_returned(const CallBase &CB)
Create a position describing the returned value of CB.
Definition: Attributor.h:649
static const IRPosition returned(const Function &F, const CallBaseContext *CBContext=nullptr)
Create a position describing the returned value of F.
Definition: Attributor.h:631
Argument * getAssociatedArgument() const
Return the associated argument, if any.
Definition: Attributor.cpp:996
static const IRPosition value(const Value &V, const CallBaseContext *CBContext=nullptr)
Create a position describing the value of V.
Definition: Attributor.h:605
AttributeList getAttrList() const
Return the attributes associated with this function or call site scope.
Definition: Attributor.h:841
static const IRPosition inst(const Instruction &I, const CallBaseContext *CBContext=nullptr)
Create a position describing the instruction I.
Definition: Attributor.h:617
static const IRPosition callsite_argument(const CallBase &CB, unsigned ArgNo)
Create a position describing the argument of CB at position ArgNo.
Definition: Attributor.h:654
static const IRPosition TombstoneKey
Definition: Attributor.h:936
Kind
The positions we distinguish in the IR.
Definition: Attributor.h:587
@ IRP_ARGUMENT
An attribute for a function argument.
Definition: Attributor.h:595
@ IRP_RETURNED
An attribute for the function return value.
Definition: Attributor.h:591
@ IRP_CALL_SITE
An attribute for a call site (function scope).
Definition: Attributor.h:594
@ IRP_CALL_SITE_RETURNED
An attribute for a call site return value.
Definition: Attributor.h:592
@ IRP_FUNCTION
An attribute for a function (scope).
Definition: Attributor.h:593
@ IRP_FLOAT
A position that is not associated with a spot suitable for attributes.
Definition: Attributor.h:589
@ IRP_CALL_SITE_ARGUMENT
An attribute for a call site argument.
Definition: Attributor.h:596
@ IRP_INVALID
An invalid position.
Definition: Attributor.h:588
Instruction * getCtxI() const
Return the context instruction, if any.
Definition: Attributor.h:765
static const IRPosition argument(const Argument &Arg, const CallBaseContext *CBContext=nullptr)
Create a position describing the argument Arg.
Definition: Attributor.h:638
static const IRPosition EmptyKey
Special DenseMap key values.
Definition: Attributor.h:935
static const IRPosition function(const Function &F, const CallBaseContext *CBContext=nullptr)
Create a position describing the function scope of F.
Definition: Attributor.h:624
const CallBaseContext * getCallBaseContext() const
Get the call base context from the position.
Definition: Attributor.h:927
Value & getAssociatedValue() const
Return the value this abstract attribute is associated with.
Definition: Attributor.h:779
Value & getAnchorValue() const
Return the value this abstract attribute is anchored with.
Definition: Attributor.h:698
Value * getAttrListAnchor() const
Return the value attributes are attached to.
Definition: Attributor.h:834
int getCallSiteArgNo() const
Return the call site argument number of the associated value if it is an argument or call site argument, otherwise a negative value.
Definition: Attributor.h:808
Kind getPositionKind() const
Return the associated position kind.
Definition: Attributor.h:877
static const IRPosition callsite_function(const CallBase &CB)
Create a position describing the function scope of CB.
Definition: Attributor.h:644
Function * getAnchorScope() const
Return the Function surrounding the anchor value.
Definition: Attributor.h:753
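The factory functions above are how positions are typically spelled when querying the framework; a small sketch that builds positions for a call site and checks the arguments for an arbitrarily chosen attribute via the hasAttr interface listed earlier:

#include "llvm/IR/InstrTypes.h"
#include "llvm/Transforms/IPO/Attributor.h"
using namespace llvm;

// Sketch: describe the callee scope, the returned value, and each argument of
// a call site as IRPositions, then query existing attributes on the arguments.
static void queryCallSitePositions(Attributor &A, CallBase &CB) {
  IRPosition CalleePos = IRPosition::callsite_function(CB);
  IRPosition RetPos = IRPosition::callsite_returned(CB);
  for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo != E; ++ArgNo) {
    IRPosition ArgPos = IRPosition::callsite_argument(CB, ArgNo);
    bool HasNoUndef = A.hasAttr(ArgPos, {Attribute::NoUndef});
    (void)HasNoUndef;
  }
  (void)CalleePos;
  (void)RetPos;
}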
Data structure to hold cached (LLVM-IR) information.
Definition: Attributor.h:1198
bool stackIsAccessibleByOtherThreads()
Return true if the stack (llvm::AllocaInst) can be accessed by other threads.
Definition: Attributor.h:1327
MustBeExecutedContextExplorer * getMustBeExecutedContextExplorer()
Return MustBeExecutedContextExplorer.
Definition: Attributor.h:1276
const ArrayRef< Function * > getIndirectlyCallableFunctions(Attributor &A) const
Return all functions that might be called indirectly, only valid for closed world modules (see isClosedWorldModule).
TargetLibraryInfo * getTargetLibraryInfoForFunction(const Function &F)
Return TargetLibraryInfo for function F.
Definition: Attributor.h:1281
OpcodeInstMapTy & getOpcodeInstMapForFunction(const Function &F)
Return the map that relates "interesting" opcodes with all instructions with that opcode in F.
Definition: Attributor.h:1266
const RetainedKnowledgeMap & getKnowledgeMap() const
Return the map containing all the knowledge we have from llvm.assumes.
Definition: Attributor.h:1311
SmallVector< Instruction *, 8 > InstructionVectorTy
A vector type to hold instructions.
Definition: Attributor.h:1259
InstructionVectorTy & getReadOrWriteInstsForFunction(const Function &F)
Return the instructions in F that may read or write memory.
Definition: Attributor.h:1271
AP::Result * getAnalysisResultForFunction(const Function &F, bool CachedOnly=false)
Return the analysis result from a pass AP for function F.
Definition: Attributor.h:1302
State for an integer range.
Definition: Attributor.h:2921
ConstantRange getKnown() const
Return the known state encoding.
Definition: Attributor.h:2977
ConstantRange getAssumed() const
Return the assumed state encoding.
Definition: Attributor.h:2980
uint32_t getBitWidth() const
Return associated values' bit width.
Definition: Attributor.h:2954
A "must be executed context" for a given program point PP is the set of instructions, potentially before and after PP, that are executed always when PP is reached.
Definition: MustExecute.h:386
iterator & end()
Return a universal end iterator.
Definition: MustExecute.h:434
bool findInContextOf(const Instruction *I, const Instruction *PP)
Helper to look for I in the context of PP.
Definition: MustExecute.h:470
iterator & begin(const Instruction *PP)
Return an iterator to explore the context around PP.
Definition: MustExecute.h:420
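A hedged sketch of how the explorer above is typically consulted: ask whether an instruction is always executed once the entry of its function is reached (the helper name is made up):

#include "llvm/Analysis/MustExecute.h"
#include "llvm/IR/Function.h"
using namespace llvm;

// Sketch: is I part of the must-be-executed context around the entry point?
static bool alwaysExecutedOnEntry(MustBeExecutedContextExplorer &Explorer,
                                  const Instruction &I) {
  const Instruction *EntryI = &I.getFunction()->getEntryBlock().front();
  return Explorer.findInContextOf(&I, EntryI);
}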
A class for a set state.
Definition: Attributor.h:4951
bool undefIsContained() const
Returns whether this state contains an undef value or not.
Definition: Attributor.h:4987
bool isValidState() const override
See AbstractState::isValidState(...)
Definition: Attributor.h:4960
const SetTy & getAssumedSet() const
Return this set.
Definition: Attributor.h:4981