LLVM 23.0.0git
Attributor.cpp
Go to the documentation of this file.
1//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements an interprocedural pass that deduces and/or propagates
10// attributes. This is done in an abstract interpretation style fixpoint
11// iteration. See the Attributor.h file comment and the class descriptions in
12// that file for more information.
13//
14//===----------------------------------------------------------------------===//
15
17
18#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/Statistic.h"
29#include "llvm/IR/Attributes.h"
30#include "llvm/IR/Constant.h"
32#include "llvm/IR/Constants.h"
33#include "llvm/IR/DataLayout.h"
34#include "llvm/IR/GlobalValue.h"
36#include "llvm/IR/Instruction.h"
39#include "llvm/IR/LLVMContext.h"
40#include "llvm/IR/ValueHandle.h"
43#include "llvm/Support/Debug.h"
47#include "llvm/Support/ModRef.h"
52#include <cstdint>
53#include <memory>
54
55#ifdef EXPENSIVE_CHECKS
56#include "llvm/IR/Verifier.h"
57#endif
58
59#include <cassert>
60#include <optional>
61#include <string>
62
using namespace llvm;

// Debug-output categories for LLVM_DEBUG; the "-verbose" variant gates the
// noisier traces.
#define DEBUG_TYPE "attributor"
#define VERBOSE_DEBUG_TYPE DEBUG_TYPE "-verbose"

// Debug counter gating which attribute manifestations are written to the IR
// (useful for bisecting miscompiles).
DEBUG_COUNTER(ManifestDBGCounter, "attributor-manifest",
              "Determine what attributes are manifested in the IR");

// Pass-wide statistics, reported with -stats.
STATISTIC(NumFnDeleted, "Number of function deleted");
STATISTIC(NumFnWithExactDefinition,
          "Number of functions with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of functions without exact definitions");
STATISTIC(NumFnShallowWrappersCreated, "Number of shallow wrappers created");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");
83
// Command-line options controlling the Attributor.
// NOTE(review): several declaration head lines (e.g. `static cl::opt<...>`)
// are elided in this excerpt; the visible fragments are kept byte-for-byte.

// TODO: Determine a good default value.
//
// In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
// (when run with the first 5 abstract attributes). The results also indicate
// that we never reach 32 iterations but always find a fixpoint sooner.
//
// This will become more evolved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
    SetFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));

// Cap on callee specializations per call base; effectively unlimited.
    MaxSpecializationPerCB("attributor-max-specializations-per-call-base",
                           cl::desc("Maximal number of callees specialized for "
                                    "a call base"),
                           cl::init(UINT32_MAX));

// Bound on recursive attribute initialization (guards against stack overflow).
    "attributor-max-initialization-chain-length", cl::Hidden,
    cl::desc(
        "Maximal number of chained initializations (to avoid stack overflows)"),

// Whether deduced attributes are also attached to call sites of declarations.
    "attributor-annotate-decl-cs", cl::Hidden,
    cl::desc("Annotate call sites of function declarations."), cl::init(false));

static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
                                       cl::init(true), cl::Hidden);

static cl::opt<bool>
    AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden,
                         cl::desc("Allow the Attributor to create shallow "
                                  "wrappers for non-exact definitions."),
                         cl::init(false));

static cl::opt<bool>
    AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden,
                     cl::desc("Allow the Attributor to use IP information "
                              "derived from non-exact functions via cloning"),
                     cl::init(false));

// These options can only used for debug builds.
#ifndef NDEBUG
    SeedAllowList("attributor-seed-allow-list", cl::Hidden,
                  cl::desc("Comma separated list of attribute names that are "
                           "allowed to be seeded."),

    "attributor-function-seed-allow-list", cl::Hidden,
    cl::desc("Comma separated list of function names that are "
             "allowed to be seeded."),
#endif

static cl::opt<bool>
    DumpDepGraph("attributor-dump-dep-graph", cl::Hidden,
                 cl::desc("Dump the dependency graph to dot files."),
                 cl::init(false));

// Prefix for the dumped dependency-graph dot files.
    "attributor-depgraph-dot-filename-prefix", cl::Hidden,
    cl::desc("The prefix used for the CallGraph dot file names."));

static cl::opt<bool> ViewDepGraph("attributor-view-dep-graph", cl::Hidden,
                                  cl::desc("View the dependency graph."),
                                  cl::init(false));

static cl::opt<bool> PrintDependencies("attributor-print-dep", cl::Hidden,
                                       cl::desc("Print attribute dependencies"),
                                       cl::init(false));

// Enables call-site-specific (context-sensitive) deduction.
    "attributor-enable-call-site-specific-deduction", cl::Hidden,
    cl::desc("Allow the Attributor to do call site specific analysis"),
    cl::init(false));

static cl::opt<bool>
    PrintCallGraph("attributor-print-call-graph", cl::Hidden,
                   cl::desc("Print Attributor's internal call graph"),
                   cl::init(false));

static cl::opt<bool> SimplifyAllLoads("attributor-simplify-all-loads",
                                      cl::desc("Try to simplify all loads."),
                                      cl::init(true));

// Tri-state: unset means "use the default closed-world assumption".
    "attributor-assume-closed-world", cl::Hidden,
    cl::desc("Should a closed world be assumed, or not. Default if not set."));
/// Logic operators for the change status enum class.
///
///{
// NOTE(review): the two operator signatures are elided in this excerpt; the
// visible bodies combine two ChangeStatus values in place and return the
// updated left operand.
  L = L | R;
  return L;
}
  L = L & R;
  return L;
}
///}
199
namespace {
/// NVPTX/AMDGPU address space values (shared between both targets).
/// Note: value 2 is intentionally absent here; only the address spaces the
/// predicates below query are enumerated.
enum class NVPTXAMDGPUAddressSpace : unsigned {
  Generic = 0,
  Global = 1,
  Shared = 3,
  Constant = 4,
  Local = 5,
};

/// SPIRV address space values (StorageClass). The same logical names map to
/// different numeric values than on NVPTX/AMDGPU, hence the separate enum.
enum class SPIRVAddressSpace : unsigned {
  Local = 0,    // Function (private/local)
  Global = 1,   // CrossWorkgroup (global)
  Constant = 2, // UniformConstant (constant)
  Shared = 3,   // Workgroup (shared)
  Generic = 4,  // Generic
};
} // namespace
219
220bool AA::isGPU(const Module &M) {
221 Triple T(M.getTargetTriple());
222 return T.isGPU();
223}
224
225bool AA::isGPUGenericAddressSpace(const Module &M, unsigned AS) {
226 assert(AA::isGPU(M) && "Only callable on GPU targets");
227 Triple T(M.getTargetTriple());
228
229 if (T.isSPIRV())
230 return AS == static_cast<unsigned>(SPIRVAddressSpace::Generic);
231
232 return AS == static_cast<unsigned>(NVPTXAMDGPUAddressSpace::Generic);
233}
234
235bool AA::isGPUGlobalAddressSpace(const Module &M, unsigned AS) {
236 assert(AA::isGPU(M) && "Only callable on GPU targets");
237 Triple T(M.getTargetTriple());
238
239 if (T.isSPIRV())
240 return AS == static_cast<unsigned>(SPIRVAddressSpace::Global);
241
242 return AS == static_cast<unsigned>(NVPTXAMDGPUAddressSpace::Global);
243}
244
245bool AA::isGPUSharedAddressSpace(const Module &M, unsigned AS) {
246 assert(AA::isGPU(M) && "Only callable on GPU targets");
247 Triple T(M.getTargetTriple());
248
249 if (T.isSPIRV())
250 return AS == static_cast<unsigned>(SPIRVAddressSpace::Shared);
251
252 return AS == static_cast<unsigned>(NVPTXAMDGPUAddressSpace::Shared);
253}
254
255bool AA::isGPUConstantAddressSpace(const Module &M, unsigned AS) {
256 assert(AA::isGPU(M) && "Only callable on GPU targets");
257 Triple T(M.getTargetTriple());
258
259 if (T.isSPIRV())
260 return AS == static_cast<unsigned>(SPIRVAddressSpace::Constant);
261
262 return AS == static_cast<unsigned>(NVPTXAMDGPUAddressSpace::Constant);
263}
264
265bool AA::isGPULocalAddressSpace(const Module &M, unsigned AS) {
266 assert(AA::isGPU(M) && "Only callable on GPU targets");
267 Triple T(M.getTargetTriple());
268
269 if (T.isSPIRV())
270 return AS == static_cast<unsigned>(SPIRVAddressSpace::Local);
271
272 return AS == static_cast<unsigned>(NVPTXAMDGPUAddressSpace::Local);
273}
274
// Returns true if the instruction can be assumed not to synchronize with
// other threads (nosync-like): call bases with a known/deduced `nosync`,
// non-convergent readnone calls, instructions that do not touch memory, and
// non-volatile, non-(non-relaxed-atomic) memory accesses.
// NOTE(review): the opening signature line and two interior lines of this
// definition are elided in this excerpt; code kept byte-for-byte.
                          const AbstractAttribute &QueryingAA) {
  // We are looking for volatile instructions or non-relaxed atomics.
  if (const auto *CB = dyn_cast<CallBase>(&I)) {
    // An explicit `nosync` on the callee settles it immediately.
    if (CB->hasFnAttr(Attribute::NoSync))
      return true;

    // Non-convergent and readnone imply nosync.
    if (!CB->isConvergent() && !CB->mayReadOrWriteMemory())
      return true;

      return true;

    // Otherwise query the deduced nosync attribute at the call site.
    bool IsKnownNoSync;
        A, &QueryingAA, IRPosition::callsite_function(*CB),
        DepClassTy::OPTIONAL, IsKnownNoSync);
  }

  // Non-call instructions that do not access memory cannot synchronize.
  if (!I.mayReadOrWriteMemory())
    return true;

  return !I.isVolatile() && !AANoSync::isNonRelaxedAtomic(&I);
}
300
// Returns true if \p V can be treated as a single runtime instance for
// analysis purposes; conservatively false when the result would be used for
// manifestation (!ForAnalysisOnly). Delegates to AAInstanceInfo otherwise.
// NOTE(review): the signature line and the getAAFor argument line are elided
// in this excerpt; code kept byte-for-byte.
                             const Value &V, bool ForAnalysisOnly) {
  // TODO: See the AAInstanceInfo class comment.
  if (!ForAnalysisOnly)
    return false;
  auto *InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
  return InstanceInfoAA && InstanceInfoAA->isAssumedUniqueForAnalysis();
}
310
// Determine the constant initial value of memory object \p Obj when read as
// type \p Ty, optionally restricted to byte range \p RangePtr. Handles
// allocation functions (via their initial value), global variables with a
// simplification callback, and globals with known initializers; returns
// nullptr when the initial value cannot be determined.
// NOTE(review): the line carrying the function name/first parameters is
// elided in this excerpt; code kept byte-for-byte.
Constant *
                             Value &Obj, Type &Ty, const TargetLibraryInfo *TLI,
                             const DataLayout &DL, AA::RangeTy *RangePtr) {
  // Allocation functions (e.g. calloc-like) may have a known initial value.
  if (Constant *Init = getInitialValueOfAllocation(&Obj, TLI, &Ty))
    return Init;
  auto *GV = dyn_cast<GlobalVariable>(&Obj);
  if (!GV)
    return nullptr;

  bool UsedAssumedInformation = false;
  Constant *Initializer = nullptr;
  if (A.hasGlobalVariableSimplificationCallback(*GV)) {
    // A registered callback may provide an assumed initializer.
    auto AssumedGV = A.getAssumedInitializerFromCallBack(
        *GV, &QueryingAA, UsedAssumedInformation);
    Initializer = *AssumedGV;
    if (!Initializer)
      return nullptr;
  } else {
    if (!GV->hasLocalLinkage()) {
      // Externally visible global that's either non-constant,
      // or a constant with an uncertain initializer.
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return nullptr;
    }

    // Globals with local linkage are always initialized.
    assert(!GV->hasLocalLinkage() || GV->hasInitializer());

    if (!Initializer)
      Initializer = GV->getInitializer();
  }

  if (RangePtr && !RangePtr->offsetOrSizeAreUnknown()) {
    // A known offset/size: fold the load at that exact position, but only if
    // the requested size matches the store size of \p Ty.
    int64_t StorageSize = DL.getTypeStoreSize(&Ty);
    if (StorageSize != RangePtr->Size)
      return nullptr;
    APInt Offset = APInt(64, RangePtr->Offset);
    return ConstantFoldLoadFromConst(Initializer, &Ty, Offset, DL);
  }

  // Unknown range: only uniform initializers (e.g. zero/undef splat) fold.
  return ConstantFoldLoadFromUniformValue(Initializer, &Ty, DL);
}
354
355bool AA::isValidInScope(const Value &V, const Function *Scope) {
356 if (isa<Constant>(V))
357 return true;
358 if (auto *I = dyn_cast<Instruction>(&V))
359 return I->getFunction() == Scope;
360 if (auto *A = dyn_cast<Argument>(&V))
361 return A->getParent() == Scope;
362 return false;
363}
364
366 InformationCache &InfoCache) {
367 if (isa<Constant>(VAC.getValue()) || VAC.getValue() == VAC.getCtxI())
368 return true;
369 const Function *Scope = nullptr;
370 const Instruction *CtxI = VAC.getCtxI();
371 if (CtxI)
372 Scope = CtxI->getFunction();
373 if (auto *A = dyn_cast<Argument>(VAC.getValue()))
374 return A->getParent() == Scope;
375 if (auto *I = dyn_cast<Instruction>(VAC.getValue())) {
376 if (I->getFunction() == Scope) {
377 if (const DominatorTree *DT =
379 *Scope))
380 return DT->dominates(I, CtxI);
381 // Local dominance check mostly for the old PM passes.
382 if (CtxI && I->getParent() == CtxI->getParent())
383 return llvm::any_of(
384 make_range(I->getIterator(), I->getParent()->end()),
385 [&](const Instruction &AfterI) { return &AfterI == CtxI; });
386 }
387 }
388 return false;
389}
390
// Convert value \p V to type \p Ty when a lossless/constant conversion
// exists (identity, poison/undef re-typing, null, pointer casts, and
// narrowing integer/FP truncation of constants); returns nullptr otherwise.
// NOTE(review): the signature line of this definition is elided in this
// excerpt; code kept byte-for-byte.
  if (V.getType() == &Ty)
    return &V;
  if (isa<PoisonValue>(V))
    return PoisonValue::get(&Ty);
  if (isa<UndefValue>(V))
    return UndefValue::get(&Ty);
  if (auto *C = dyn_cast<Constant>(&V)) {
    if (C->isNullValue() && !Ty.isPtrOrPtrVectorTy())
      return Constant::getNullValue(&Ty);
    if (C->getType()->isPointerTy() && Ty.isPointerTy())
      return ConstantExpr::getPointerCast(C, &Ty);
    // Only truncations (same-or-smaller destination) are attempted here.
    if (C->getType()->getPrimitiveSizeInBits() >= Ty.getPrimitiveSizeInBits()) {
      if (C->getType()->isIntegerTy() && Ty.isIntegerTy())
        return ConstantExpr::getTrunc(C, &Ty, /* OnlyIfReduced */ true);
      if (C->getType()->isFloatingPointTy() && Ty.isFloatingPointTy())
        return ConstantFoldCastInstruction(Instruction::FPTrunc, C, &Ty);
    }
  }
  return nullptr;
}
412
// Meet operation for the simplified-value lattice: std::nullopt means "not
// yet known", nullptr means "unknown/bottom". Combines two lattice values,
// converting through \p Ty where needed.
std::optional<Value *>
AA::combineOptionalValuesInAAValueLatice(const std::optional<Value *> &A,
                                         const std::optional<Value *> &B,
                                         Type *Ty) {
  if (A == B)
    return A;
  if (!B)
    return A;
  if (*B == nullptr)
    return nullptr;
  if (!A)
    return Ty ? getWithType(**B, *Ty) : nullptr;
  if (*A == nullptr)
    return nullptr;
  if (!Ty)
    Ty = (*A)->getType();
  // NOTE(review): a guard line (presumably checking isa<UndefValue>(*A)) is
  // elided in this excerpt before the following return — confirm against
  // upstream before relying on control flow here.
    return getWithType(**B, *Ty);
  if (isa<UndefValue>(*B))
    return A;
  if (*A && *B && *A == getWithType(**B, *Ty))
    return A;
  return nullptr;
}
437
// Collect all values that a load may observe (IsLoad) or all instructions
// that may read a stored value (!IsLoad) for memory instruction \p I, by
// walking the underlying objects of its pointer operand and querying
// AAPointerInfo for interfering accesses. Results are only committed to
// \p PotentialCopies / \p PotentialValueOrigins on overall success.
// NOTE(review): several lines of this definition (the signature line, local
// container declarations such as NewCopies/NewCopyOrigins/PIs/Range, and a
// few LLVM_DEBUG( openers / condition lines) are elided in this excerpt;
// code kept byte-for-byte.
template <bool IsLoad, typename Ty>
    Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
    SmallSetVector<Instruction *, 4> *PotentialValueOrigins,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
                    << " (only exact: " << OnlyExact << ")\n";);

  Value &Ptr = *I.getPointerOperand();
  // Containers to remember the pointer infos and new copies while we are not
  // sure that we can find all of them. If we abort we want to avoid spurious
  // dependences and potential copies in the provided container.

  const auto *TLI =
      A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());

  // Predicate run on every underlying object of the accessed pointer.
  auto Pred = [&](Value &Obj) {
    LLVM_DEBUG(dbgs() << "Visit underlying object " << Obj << "\n");
    if (isa<UndefValue>(&Obj))
      return true;
    if (isa<ConstantPointerNull>(&Obj)) {
      // A null pointer access can be undefined but any offset from null may
      // be OK. We do not try to optimize the latter.
      if (!NullPointerIsDefined(I.getFunction(),
                                Ptr.getType()->getPointerAddressSpace()) &&
          A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation,
                                 AA::Interprocedural) == &Obj)
        return true;
          dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
      return false;
    }
    // TODO: Use assumed noalias return.
    if (!isa<AllocaInst>(&Obj) && !isa<GlobalVariable>(&Obj) &&
        !(IsLoad ? isAllocationFn(&Obj, TLI) : isNoAliasCall(&Obj))) {
      LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << Obj
                        << "\n";);
      return false;
    }
    if (auto *GV = dyn_cast<GlobalVariable>(&Obj))
      if (!GV->hasLocalLinkage() &&
          !(GV->isConstant() && GV->hasInitializer())) {
        LLVM_DEBUG(dbgs() << "Underlying object is global with external "
                             "linkage, not supported yet: "
                          << Obj << "\n";);
        return false;
      }

    // Track whether all seen contents are null (and whether null is required
    // because a non-exact access wrote null).
    bool NullOnly = true;
    bool NullRequired = false;
    auto CheckForNullOnlyAndUndef = [&](std::optional<Value *> V,
                                        bool IsExact) {
      if (!V || *V == nullptr)
        NullOnly = false;
      else if (isa<UndefValue>(*V))
        /* No op */;
      else if (isa<Constant>(*V) && cast<Constant>(*V)->isNullValue())
        NullRequired = !IsExact;
      else
        NullOnly = false;
    };

    // Re-type a written value to the load's result type; nullptr on failure.
    auto AdjustWrittenValueType = [&](const AAPointerInfo::Access &Acc,
                                      Value &V) {
      Value *AdjV = AA::getWithType(V, *I.getType());
      if (!AdjV) {
        LLVM_DEBUG(dbgs() << "Underlying object written but stored value "
                             "cannot be converted to read type: "
                          << *Acc.getRemoteInst() << " : " << *I.getType()
                          << "\n";);
      }
      return AdjV;
    };

    // Cheap pre-filter: accesses whose value is already collected.
    auto SkipCB = [&](const AAPointerInfo::Access &Acc) {
      if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
        return true;
      if (IsLoad) {
          return true;
        if (PotentialValueOrigins && !isa<AssumeInst>(Acc.getRemoteInst()))
          return false;
        if (!Acc.isWrittenValueUnknown())
          if (Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue()))
            if (NewCopies.count(V)) {
              NewCopyOrigins.insert(Acc.getRemoteInst());
              return true;
            }
        if (auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst()))
          if (Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand()))
            if (NewCopies.count(V)) {
              NewCopyOrigins.insert(Acc.getRemoteInst());
              return true;
            }
      }
      return false;
    };

    // Main per-access check; collects copies or aborts the walk.
    auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
      if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
        return true;
      if (IsLoad && Acc.isWrittenValueYetUndetermined())
        return true;
      CheckForNullOnlyAndUndef(Acc.getContent(), IsExact);
      if (OnlyExact && !IsExact && !NullOnly &&
        LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
                          << ", abort!\n");
        return false;
      }
      if (NullRequired && !NullOnly) {
        LLVM_DEBUG(dbgs() << "Required all `null` accesses due to non exact "
                             "one, however found non-null one: "
                          << *Acc.getRemoteInst() << ", abort!\n");
        return false;
      }
      if (IsLoad) {
        assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
        if (!Acc.isWrittenValueUnknown()) {
          Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue());
          if (!V)
            return false;
          NewCopies.insert(V);
          if (PotentialValueOrigins)
            NewCopyOrigins.insert(Acc.getRemoteInst());
          return true;
        }
        auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
        if (!SI) {
          LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
                               "instruction not supported yet: "
                            << *Acc.getRemoteInst() << "\n";);
          return false;
        }
        Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand());
        if (!V)
          return false;
        NewCopies.insert(V);
        if (PotentialValueOrigins)
          NewCopyOrigins.insert(SI);
      } else {
        assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
        auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
        if (!LI && OnlyExact) {
          LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
                               "instruction not supported yet: "
                            << *Acc.getRemoteInst() << "\n";);
          return false;
        }
        NewCopies.insert(Acc.getRemoteInst());
      }
      return true;
    };

    // If the value has been written to we don't need the initial value of the
    // object.
    bool HasBeenWrittenTo = false;

    auto *PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(Obj),
    if (!PI || !PI->forallInterferingAccesses(
                   A, QueryingAA, I,
                   /* FindInterferingWrites */ IsLoad,
                   /* FindInterferingReads */ !IsLoad, CheckAccess,
                   HasBeenWrittenTo, Range, SkipCB)) {
        dbgs()
        << "Failed to verify all interfering accesses for underlying object: "
        << Obj << "\n");
      return false;
    }

    // A load with no interfering write also observes the object's initial
    // value — fold it in if it can be determined.
    if (IsLoad && !HasBeenWrittenTo && !Range.isUnassigned()) {
      const DataLayout &DL = A.getDataLayout();
      Value *InitialValue = AA::getInitialValueForObj(
          A, QueryingAA, Obj, *I.getType(), TLI, DL, &Range);
      if (!InitialValue) {
        LLVM_DEBUG(dbgs() << "Could not determine required initial value of "
                             "underlying object, abort!\n");
        return false;
      }
      CheckForNullOnlyAndUndef(InitialValue, /* IsExact */ true);
      if (NullRequired && !NullOnly) {
        LLVM_DEBUG(dbgs() << "Non exact access but initial value that is not "
                             "null or undef, abort!\n");
        return false;
      }

      NewCopies.insert(InitialValue);
      if (PotentialValueOrigins)
        NewCopyOrigins.insert(nullptr);
    }

    PIs.push_back(PI);

    return true;
  };

  const auto *AAUO = A.getAAFor<AAUnderlyingObjects>(
      QueryingAA, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
  if (!AAUO || !AAUO->forallUnderlyingObjects(Pred)) {
        dbgs() << "Underlying objects stored into could not be determined\n";);
    return false;
  }

  // Only if we were successful collection all potential copies we record
  // dependences (on non-fix AAPointerInfo AAs). We also only then modify the
  // given PotentialCopies container.
  for (const auto *PI : PIs) {
    if (!PI->getState().isAtFixpoint())
      UsedAssumedInformation = true;
    A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
  }
  PotentialCopies.insert_range(NewCopies);
  if (PotentialValueOrigins)
    PotentialValueOrigins->insert_range(NewCopyOrigins);

  return true;
}
663
// Thin wrapper: collect the values load \p LI may observe by instantiating
// the shared helper with IsLoad = true.
// NOTE(review): the line carrying the function name is elided in this
// excerpt; code kept byte-for-byte.
    Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
    SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
      A, LI, PotentialValues, &PotentialValueOrigins, QueryingAA,
      UsedAssumedInformation, OnlyExact);
}
673
// Thin wrapper: collect the instructions that may read the value stored by
// \p SI by instantiating the shared helper with IsLoad = false (no origin
// tracking for stores).
// NOTE(review): the lines carrying the function name and first parameters
// are elided in this excerpt; code kept byte-for-byte.
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
      A, SI, PotentialCopies, nullptr, QueryingAA, UsedAssumedInformation,
      OnlyExact);
}
682
// Shared implementation for AA::isAssumedReadOnly/isAssumedReadNone: first
// tries IR attributes (and, for readnone, AAMemoryLocation), then falls back
// to AAMemoryBehavior. Sets \p IsKnown when the result is not merely assumed
// and records optional dependences for assumed results.
// NOTE(review): the signature line and the hasAssumedIRAttr<...> call heads
// are elided in this excerpt; code kept byte-for-byte.
                            const AbstractAttribute &QueryingAA,
                            bool RequireReadNone, bool &IsKnown) {
  if (RequireReadNone) {
        A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
        /* IgnoreSubsumingPositions */ true))
      return true;
        A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
        /* IgnoreSubsumingPositions */ true))
      return true;

    const auto *MemLocAA =
        A.getAAFor<AAMemoryLocation>(QueryingAA, IRP, DepClassTy::NONE);
    if (MemLocAA && MemLocAA->isAssumedReadNone()) {
      IsKnown = MemLocAA->isKnownReadNone();
      // Assumed-only facts must be re-checked if the AA changes later.
      if (!IsKnown)
        A.recordDependence(*MemLocAA, QueryingAA, DepClassTy::OPTIONAL);
      return true;
    }
  }

  const auto *MemBehaviorAA =
      A.getAAFor<AAMemoryBehavior>(QueryingAA, IRP, DepClassTy::NONE);
  if (MemBehaviorAA &&
      (MemBehaviorAA->isAssumedReadNone() ||
       (!RequireReadNone && MemBehaviorAA->isAssumedReadOnly()))) {
    IsKnown = RequireReadNone ? MemBehaviorAA->isKnownReadNone()
                              : MemBehaviorAA->isKnownReadOnly();
    if (!IsKnown)
      A.recordDependence(*MemBehaviorAA, QueryingAA, DepClassTy::OPTIONAL);
    return true;
  }

  return false;
}
722
// Public wrapper: readonly-or-better query (RequireReadNone = false).
// NOTE(review): the line carrying the function name is elided in this
// excerpt; code kept byte-for-byte.
                      const AbstractAttribute &QueryingAA, bool &IsKnown) {
  return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
                                     /* RequireReadNone */ false, IsKnown);
}
// Public wrapper: strict readnone query (RequireReadNone = true).
// NOTE(review): the line carrying the function name is elided in this
// excerpt; code kept byte-for-byte.
                      const AbstractAttribute &QueryingAA, bool &IsKnown) {
  return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
                                     /* RequireReadNone */ true, IsKnown);
}
733
// Worklist-based inter-procedural reachability: can control flow starting at
// \p FromI reach \p ToI (or any point of \p ToFn when ToI is null)? Combines
// intra-procedural reachability (AAIntraFnReachability), function-level
// reachability (AAInterFnReachability), and — when \p GoBackwardsCB allows —
// stepping back through call sites of the current function. Returns true on
// "potentially reachable" (the conservative answer), false only when
// reachability can be ruled out.
// NOTE(review): a few lines of this definition (the signature line, an
// LLVM_DEBUG({ opener, and the Worklist/Visited declarations) are elided in
// this excerpt; code kept byte-for-byte.
static bool
                       const Instruction *ToI, const Function &ToFn,
                       const AbstractAttribute &QueryingAA,
                       const AA::InstExclusionSetTy *ExclusionSet,
                       std::function<bool(const Function &F)> GoBackwardsCB) {
    dbgs() << "[AA] isPotentiallyReachable @" << ToFn.getName() << " from "
           << FromI << " [GBCB: " << bool(GoBackwardsCB) << "][#ExS: "
           << (ExclusionSet ? std::to_string(ExclusionSet->size()) : "none")
           << "]\n";
    if (ExclusionSet)
      for (auto *ES : *ExclusionSet)
        dbgs() << *ES << "\n";
  });

  // We know kernels (generally) cannot be called from within the module. Thus,
  // for reachability we would need to step back from a kernel which would allow
  // us to reach anything anyway. Even if a kernel is invoked from another
  // kernel, values like allocas and shared memory are not accessible. We
  // implicitly check for this situation to avoid costly lookups.
  if (GoBackwardsCB && &ToFn != FromI.getFunction() &&
      !GoBackwardsCB(*FromI.getFunction()) && A.getInfoCache().isKernel(ToFn) &&
      A.getInfoCache().isKernel(*FromI.getFunction())) {
    LLVM_DEBUG(dbgs() << "[AA] assume kernel cannot be reached from within the "
                         "module; success\n";);
    return false;
  }

  // If we can go arbitrarily backwards we will eventually reach an entry point
  // that can reach ToI. Only if a set of blocks through which we cannot go is
  // provided, or once we track internal functions not accessible from the
  // outside, it makes sense to perform backwards analysis in the absence of a
  // GoBackwardsCB.
  if (!GoBackwardsCB && !ExclusionSet) {
    LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
                      << " is not checked backwards and does not have an "
                         "exclusion set, abort\n");
    return true;
  }

  Worklist.push_back(&FromI);

  while (!Worklist.empty()) {
    const Instruction *CurFromI = Worklist.pop_back_val();
    if (!Visited.insert(CurFromI).second)
      continue;

    const Function *FromFn = CurFromI->getFunction();
    if (FromFn == &ToFn) {
      if (!ToI)
        return true;
      // Same function: a plain intra-procedural query suffices.
      LLVM_DEBUG(dbgs() << "[AA] check " << *ToI << " from " << *CurFromI
                        << " intraprocedurally\n");
      const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
          QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
      bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
                                           A, *CurFromI, *ToI, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " "
                        << (Result ? "can potentially " : "cannot ") << "reach "
                        << *ToI << " [Intra]\n");
      if (Result)
        return true;
    }

    bool Result = true;
    if (!ToFn.isDeclaration() && ToI) {
      // Can ToFn's entry even reach ToI? If not, no call into ToFn matters.
      const auto *ToReachabilityAA = A.getAAFor<AAIntraFnReachability>(
          QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
      const Instruction &EntryI = ToFn.getEntryBlock().front();
      Result = !ToReachabilityAA || ToReachabilityAA->isAssumedReachable(
                                        A, EntryI, *ToI, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] Entry " << EntryI << " of @" << ToFn.getName()
                        << " " << (Result ? "can potentially " : "cannot ")
                        << "reach @" << *ToI << " [ToFn]\n");
    }

    if (Result) {
      // The entry of the ToFn can reach the instruction ToI. If the current
      // instruction is already known to reach the ToFn.
      const auto *FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
          QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
      Result = !FnReachabilityAA || FnReachabilityAA->instructionCanReach(
                                        A, *CurFromI, ToFn, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
                        << " " << (Result ? "can potentially " : "cannot ")
                        << "reach @" << ToFn.getName() << " [FromFn]\n");
      if (Result)
        return true;
    }

    // TODO: Check assumed nounwind.
    const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
        QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
    auto ReturnInstCB = [&](Instruction &Ret) {
      bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
                                           A, *CurFromI, Ret, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA][Ret] " << *CurFromI << " "
                        << (Result ? "can potentially " : "cannot ") << "reach "
                        << Ret << " [Intra]\n");
      return !Result;
    };

    // Check if we can reach returns.
    bool UsedAssumedInformation = false;
    if (A.checkForAllInstructions(ReturnInstCB, FromFn, &QueryingAA,
                                  {Instruction::Ret}, UsedAssumedInformation)) {
      LLVM_DEBUG(dbgs() << "[AA] No return is reachable, done\n");
      continue;
    }

    if (!GoBackwardsCB) {
      LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
                        << " is not checked backwards, abort\n");
      return true;
    }

    // If we do not go backwards from the FromFn we are done here and so far we
    // could not find a way to reach ToFn/ToI.
    if (!GoBackwardsCB(*FromFn))
      continue;

    LLVM_DEBUG(dbgs() << "Stepping backwards to the call sites of @"
                      << FromFn->getName() << "\n");

    auto CheckCallSite = [&](AbstractCallSite ACS) {
      CallBase *CB = ACS.getInstruction();
      if (!CB)
        return false;

      // Invokes resume control flow in a landing pad we do not model here.
      if (isa<InvokeInst>(CB))
        return false;

      // Continue the search after the call returns.
      Instruction *Inst = CB->getNextNode();
      Worklist.push_back(Inst);
      return true;
    };

    Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
                                     /* RequireAllCallSites */ true,
                                     &QueryingAA, UsedAssumedInformation);
    if (Result) {
      LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
                        << " in @" << FromFn->getName()
                        << " failed, give up\n");
      return true;
    }

    LLVM_DEBUG(dbgs() << "[AA] stepped back to call sites from " << *CurFromI
                      << " in @" << FromFn->getName()
                      << " worklist size is: " << Worklist.size() << "\n");
  }
  return false;
}
890
// Public overload: instruction-to-instruction reachability; forwards to the
// static worklist implementation with an explicit target instruction.
// NOTE(review): the line carrying the function name is elided in this
// excerpt; code kept byte-for-byte.
    Attributor &A, const Instruction &FromI, const Instruction &ToI,
    const AbstractAttribute &QueryingAA,
    const AA::InstExclusionSetTy *ExclusionSet,
    std::function<bool(const Function &F)> GoBackwardsCB) {
  const Function *ToFn = ToI.getFunction();
  return ::isPotentiallyReachable(A, FromI, &ToI, *ToFn, QueryingAA,
                                  ExclusionSet, GoBackwardsCB);
}
900
// Public overload: instruction-to-function reachability; the null ToI asks
// whether any point of \p ToFn can be reached.
// NOTE(review): the line carrying the function name is elided in this
// excerpt; code kept byte-for-byte.
    Attributor &A, const Instruction &FromI, const Function &ToFn,
    const AbstractAttribute &QueryingAA,
    const AA::InstExclusionSetTy *ExclusionSet,
    std::function<bool(const Function &F)> GoBackwardsCB) {
  return ::isPotentiallyReachable(A, FromI, /* ToI */ nullptr, ToFn, QueryingAA,
                                  ExclusionSet, GoBackwardsCB);
}
909
// Returns true if object \p Obj can be assumed visible only to the current
// thread: undef, non-escaping (nocapture) allocas, constant or thread-local
// globals, and — on GPU targets — objects in the local or constant address
// space.
// NOTE(review): the signature line, an LLVM_DEBUG( opener, and the second
// GPU-address-space condition head are elided in this excerpt; code kept
// byte-for-byte.
                                    const AbstractAttribute &QueryingAA) {
  if (isa<UndefValue>(Obj))
    return true;
  if (isa<AllocaInst>(Obj)) {
    InformationCache &InfoCache = A.getInfoCache();
    if (!InfoCache.stackIsAccessibleByOtherThreads()) {
      dbgs() << "[AA] Object '" << Obj
             << "' is thread local; stack objects are thread local.\n");
      return true;
    }
    // Stack may escape to other threads: require the alloca to be nocapture.
    bool IsKnownNoCapture;
    bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::Captures>(
        A, &QueryingAA, IRPosition::value(Obj), DepClassTy::OPTIONAL,
        IsKnownNoCapture);
    LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is "
                      << (IsAssumedNoCapture ? "" : "not") << " thread local; "
                      << (IsAssumedNoCapture ? "non-" : "")
                      << "captured stack object.\n");
    return IsAssumedNoCapture;
  }
  if (auto *GV = dyn_cast<GlobalVariable>(&Obj)) {
    if (GV->isConstant()) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; constant global\n");
      return true;
    }
    if (GV->isThreadLocal()) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; thread local global\n");
      return true;
    }
  }

  if (A.getInfoCache().IsTargetGPU()) {
    if (AA::isGPULocalAddressSpace(A.getInfoCache().getModule(),
                                   Obj.getType()->getPointerAddressSpace())) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; GPU local memory\n");
      return true;
    }
        A.getInfoCache().getModule(),
        Obj.getType()->getPointerAddressSpace())) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; GPU constant memory\n");
      return true;
    }
  }

  LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is not thread local\n");
  return false;
}
964
// Instruction overload: gather the memory locations instruction \p I may
// touch and delegate to the pointer-set overload. Pure (no side effects, no
// reads) instructions are never barrier-affected.
// NOTE(review): the signature line, the Ptrs container declaration, an
// LLVM_DEBUG( opener, and the MemTransferInst branch head are elided in this
// excerpt; code kept byte-for-byte.
                                    const AbstractAttribute &QueryingAA) {
  if (!I.mayHaveSideEffects() && !I.mayReadFromMemory())
    return false;


  auto AddLocationPtr = [&](std::optional<MemoryLocation> Loc) {
    // An unknown location must conservatively be treated as barrier-affected.
    if (!Loc || !Loc->Ptr) {
      dbgs() << "[AA] Access to unknown location; -> requires barriers\n");
      return false;
    }
    Ptrs.insert(Loc->Ptr);
    return true;
  };

  if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&I)) {
    if (!AddLocationPtr(MemoryLocation::getForDest(MI)))
      return true;
    if (!AddLocationPtr(MemoryLocation::getForSource(MTI)))
      return true;
  } else if (!AddLocationPtr(MemoryLocation::getOrNone(&I)))
    return true;

  return isPotentiallyAffectedByBarrier(A, Ptrs.getArrayRef(), QueryingAA, &I);
}
993
// Pointer-set overload: an access is barrier-affected unless every
// underlying object of every pointer is assumed thread-local. Null pointers
// and undeterminable underlying objects answer conservatively (true).
// NOTE(review): the lines carrying the function name and first parameters
// are elided in this excerpt; code kept byte-for-byte.
                                    const AbstractAttribute &QueryingAA,
                                    const Instruction *CtxI) {
  for (const Value *Ptr : Ptrs) {
    if (!Ptr) {
      LLVM_DEBUG(dbgs() << "[AA] nullptr; -> requires barriers\n");
      return true;
    }

    auto Pred = [&](Value &Obj) {
      if (AA::isAssumedThreadLocalObject(A, Obj, QueryingAA))
        return true;
      LLVM_DEBUG(dbgs() << "[AA] Access to '" << Obj << "' via '" << *Ptr
                        << "'; -> requires barrier\n");
      return false;
    };

    const auto *UnderlyingObjsAA = A.getAAFor<AAUnderlyingObjects>(
        QueryingAA, IRPosition::value(*Ptr), DepClassTy::OPTIONAL);
    if (!UnderlyingObjsAA || !UnderlyingObjsAA->forallUnderlyingObjects(Pred))
      return true;
  }
  return false;
}
1019
1020/// Return true if \p New is equal or worse than \p Old.
1021static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
1022 if (!Old.isIntAttribute())
1023 return true;
1024
1025 return Old.getValueAsInt() >= New.getValueAsInt();
1026}
1027
1028/// Return true if the information provided by \p Attr was added to the
1029/// attribute set \p AttrSet. This is only the case if it was not already
1030/// present in \p AttrSet.
1031static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
1032 AttributeSet AttrSet, bool ForceReplace,
1033 AttrBuilder &AB) {
1034
1035 if (Attr.isEnumAttribute()) {
1036 Attribute::AttrKind Kind = Attr.getKindAsEnum();
1037 if (AttrSet.hasAttribute(Kind))
1038 return false;
1039 AB.addAttribute(Kind);
1040 return true;
1041 }
1042 if (Attr.isStringAttribute()) {
1043 StringRef Kind = Attr.getKindAsString();
1044 if (AttrSet.hasAttribute(Kind)) {
1045 if (!ForceReplace)
1046 return false;
1047 }
1048 AB.addAttribute(Kind, Attr.getValueAsString());
1049 return true;
1050 }
1051 if (Attr.isIntAttribute()) {
1052 Attribute::AttrKind Kind = Attr.getKindAsEnum();
1053 if (!ForceReplace && Kind == Attribute::Memory) {
1054 MemoryEffects ME = Attr.getMemoryEffects() & AttrSet.getMemoryEffects();
1055 if (ME == AttrSet.getMemoryEffects())
1056 return false;
1057 AB.addMemoryAttr(ME);
1058 return true;
1059 }
1060 if (AttrSet.hasAttribute(Kind)) {
1061 if (!ForceReplace && isEqualOrWorse(Attr, AttrSet.getAttribute(Kind)))
1062 return false;
1063 }
1064 AB.addAttribute(Attr);
1065 return true;
1066 }
1067 if (Attr.isConstantRangeAttribute()) {
1068 Attribute::AttrKind Kind = Attr.getKindAsEnum();
1069 if (!ForceReplace && AttrSet.hasAttribute(Kind))
1070 return false;
1071 AB.addAttribute(Attr);
1072 return true;
1073 }
1074
1075 llvm_unreachable("Expected enum or string attribute!");
1076}
1077
// NOTE(review): the opening lines of IRPosition::getAssociatedArgument()
// (signature and the IRP_ARGUMENT fast-path check) were lost in extraction;
// confirm against upstream. The visible body maps a call-site-argument
// position to the unique callback callee argument if one exists, otherwise
// to the direct callee argument.
1080    return cast<Argument>(&getAnchorValue());
1081
1082  // Not an Argument and no argument number means this is not a call site
1083  // argument, thus we cannot find a callback argument to return.
1084  int ArgNo = getCallSiteArgNo();
1085  if (ArgNo < 0)
1086    return nullptr;
1087
1088  // Use abstract call sites to make the connection between the call site
1089  // values and the ones in callbacks. If a callback was found that makes use
1090  // of the underlying call site operand, we want the corresponding callback
1091  // callee argument and not the direct callee argument.
1092  std::optional<Argument *> CBCandidateArg;
1093  SmallVector<const Use *, 4> CallbackUses;
1094  const auto &CB = cast<CallBase>(getAnchorValue());
1095  AbstractCallSite::getCallbackUses(CB, CallbackUses);
1096  for (const Use *U : CallbackUses) {
1097    AbstractCallSite ACS(U);
1098    assert(ACS && ACS.isCallbackCall());
1099    if (!ACS.getCalledFunction())
1100      continue;
1101
1102    for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {
1103
1104      // Test if the underlying call site operand is argument number u of the
1105      // callback callee.
1106      if (ACS.getCallArgOperandNo(u) != ArgNo)
1107        continue;
1108
1109      assert(ACS.getCalledFunction()->arg_size() > u &&
1110             "ACS mapped into var-args arguments!");
      // A second candidate means the mapping is ambiguous; give up on
      // callbacks (engaged-but-null optional encodes "ambiguous").
1111      if (CBCandidateArg) {
1112        CBCandidateArg = nullptr;
1113        break;
1114      }
1115      CBCandidateArg = ACS.getCalledFunction()->getArg(u);
1116    }
1117  }
1118
1119  // If we found a unique callback candidate argument, return it.
1120  if (CBCandidateArg && *CBCandidateArg)
1121    return *CBCandidateArg;
1122
1123  // If no callbacks were found, or none used the underlying call site operand
1124  // exclusively, use the direct callee argument if available.
1125  auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
1126  if (Callee && Callee->arg_size() > unsigned(ArgNo))
1127    return Callee->getArg(ArgNo);
1128
1129  return nullptr;
1130}
1131
// AbstractAttribute::update: run one fixpoint-iteration step via updateImpl.
// NOTE(review): the signature and the declaration/initialization of
// HasChanged (orig. lines 1132-1133) were lost in extraction.
1134  if (getState().isAtFixpoint())
1135    return HasChanged;
1136
1137  LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
1138
1139  HasChanged = updateImpl(A);
1140
1141  LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
1142                    << "\n");
1143
1144  return HasChanged;
1145}
1146
// Attributor constructor. NOTE(review): the first signature line
// (`Attributor::Attributor(SetVector<Function *> &Functions,` presumably)
// was lost in extraction; confirm against upstream.
1148                       InformationCache &InfoCache,
1149                       AttributorConfig Configuration)
1150    : Allocator(InfoCache.Allocator), Functions(Functions),
1151      InfoCache(InfoCache), Configuration(Configuration) {
  // In a closed-world module we can enumerate all functions whose address
  // escapes; those are the only possible indirect-call targets.
1152  if (!isClosedWorldModule())
1153    return;
1154  for (Function *Fn : Functions)
1155    if (Fn->hasAddressTaken(/*PutOffender=*/nullptr,
1156                            /*IgnoreCallbackUses=*/false,
1157                            /*IgnoreAssumeLikeCalls=*/true,
1158                            /*IgnoreLLVMUsed=*/true,
1159                            /*IgnoreARCAttachedCall=*/false,
1160                            /*IgnoreCastedDirectCall=*/true))
1161      InfoCache.IndirectlyCallableFunctions.push_back(Fn);
1162}
1163
// Attributor::getAttrsFromAssumes: derive attributes for \p IRP from
// llvm.assume knowledge reachable from the context instruction. Returns true
// if any attribute was appended to \p Attrs. NOTE(review): the signature and
// the assertion/Explorer setup lines were lost in extraction.
1168         "Did expect a valid position!");
1171  if (!Explorer)
1172    return false;
1173
1174  Value &AssociatedValue = IRP.getAssociatedValue();
1175
1176  const Assume2KnowledgeMap &A2K =
1177      getInfoCache().getKnowledgeMap().lookup({&AssociatedValue, AK});
1178
1179  // Check if we found any potential assume use, if not we don't need to create
1180  // explorer iterators.
1181  if (A2K.empty())
1182    return false;
1183
1184  LLVMContext &Ctx = AssociatedValue.getContext();
1185  unsigned AttrsSize = Attrs.size();
1186  auto EIt = Explorer->begin(IRP.getCtxI()),
1187       EEnd = Explorer->end(IRP.getCtxI());
  // Only keep knowledge whose assume is must-be-executed in this context.
1188  for (const auto &It : A2K)
1189    if (Explorer->findInContextOf(It.first, EIt, EEnd))
1190      Attrs.push_back(Attribute::get(Ctx, AK, It.second.Max));
1191  return AttrsSize != Attrs.size();
1192}
1193
// Shared worker for hasAttr/getAttrs/removeAttrs/manifestAttrs: runs the
// callback \p CB over every attribute descriptor against the cached (or
// current) attribute list of \p IRP and records the rewritten list in
// AttrsMap. NOTE(review): several lines were lost in extraction — the
// `ChangeStatus` return type line, the switch case labels rejecting
// positions without an attribute list, and the early `return
// ChangeStatus::UNCHANGED;`. Confirm against upstream.
1194template <typename DescTy>
1196Attributor::updateAttrMap(const IRPosition &IRP, ArrayRef<DescTy> AttrDescs,
1197                          function_ref<bool(const DescTy &, AttributeSet,
1198                                            AttributeMask &, AttrBuilder &)>
1199                              CB) {
1200  if (AttrDescs.empty())
1202  switch (IRP.getPositionKind()) {
1206  default:
1207    break;
1208  };
1209
  // Prefer the pending (not yet manifested) list from AttrsMap over the one
  // currently attached to the IR.
1210  AttributeList AL;
1211  Value *AttrListAnchor = IRP.getAttrListAnchor();
1212  auto It = AttrsMap.find(AttrListAnchor);
1213  if (It == AttrsMap.end())
1214    AL = IRP.getAttrList();
1215  else
1216    AL = It->getSecond();
1217
1218  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
1219  auto AttrIdx = IRP.getAttrIdx();
1220  AttributeSet AS = AL.getAttributes(AttrIdx);
1221  AttributeMask AM;
1222  AttrBuilder AB(Ctx);
1223
1225  for (const DescTy &AttrDesc : AttrDescs)
1226    if (CB(AttrDesc, AS, AM, AB))
1227      HasChanged = ChangeStatus::CHANGED;
1228
1229  if (HasChanged == ChangeStatus::UNCHANGED)
1231
  // Apply removals before additions so the callback can replace attributes.
1232  AL = AL.removeAttributesAtIndex(Ctx, AttrIdx, AM);
1233  AL = AL.addAttributesAtIndex(Ctx, AttrIdx, AB);
1234  AttrsMap[AttrListAnchor] = AL;
1235  return ChangeStatus::CHANGED;
1236}
1237
// Attributor::hasAttr: check whether any of \p AttrKinds is present at \p IRP
// or a subsuming position, also consulting assume-derived knowledge.
// NOTE(review): the signature opener (orig. lines 1238-1239) was lost in
// extraction; confirm against upstream.
1240                       bool IgnoreSubsumingPositions,
1241                       Attribute::AttrKind ImpliedAttributeKind) {
1242  bool Implied = false;
1243  bool HasAttr = false;
1244  auto HasAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
1245                       AttributeMask &, AttrBuilder &) {
1246    if (AttrSet.hasAttribute(Kind)) {
      // Found via a different kind than the one we would manifest => implied.
1247      Implied |= Kind != ImpliedAttributeKind;
1248      HasAttr = true;
1249    }
1250    return false;
1251  };
1252  for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
1253    updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, HasAttrCB);
1254    if (HasAttr)
1255      break;
1256    // The first position returned by the SubsumingPositionIterator is
1257    // always the position itself. If we ignore subsuming positions we
1258    // are done after the first iteration.
1259    if (IgnoreSubsumingPositions)
1260      break;
1261    Implied = true;
1262  }
1263  if (!HasAttr) {
1264    Implied = true;
1266    for (Attribute::AttrKind AK : AttrKinds)
1267      if (getAttrsFromAssumes(IRP, AK, Attrs)) {
1268        HasAttr = true;
1269        break;
1270      }
1271  }
1272
1273  // Check if we should manifest the implied attribute kind at the IRP.
1274  if (ImpliedAttributeKind != Attribute::None && HasAttr && Implied)
1276                                ImpliedAttributeKind)});
1277  return HasAttr;
1278}
1279
// Attributor::getAttrs: collect all attributes of the given kinds at \p IRP
// (and subsuming positions unless suppressed), plus assume-derived ones.
// NOTE(review): signature opener lost in extraction.
1283                        bool IgnoreSubsumingPositions) {
1284  auto CollectAttrCB = [&](const Attribute::AttrKind &Kind,
1285                           AttributeSet AttrSet, AttributeMask &,
1286                           AttrBuilder &) {
1287    if (AttrSet.hasAttribute(Kind))
1288      Attrs.push_back(AttrSet.getAttribute(Kind));
1289    return false;
1290  };
1291  for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
1292    updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, CollectAttrCB);
1293    // The first position returned by the SubsumingPositionIterator is
1294    // always the position itself. If we ignore subsuming positions we
1295    // are done after the first iteration.
1296    if (IgnoreSubsumingPositions)
1297      break;
1298  }
1299  for (Attribute::AttrKind AK : AttrKinds)
1300    getAttrsFromAssumes(IRP, AK, Attrs);
1301}
1302
// Attributor::removeAttrs (enum-kind overload): queue removal of the given
// attribute kinds at \p IRP. NOTE(review): signature opener lost in
// extraction.
1305  auto RemoveAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
1306                          AttributeMask &AM, AttrBuilder &) {
1307    if (!AttrSet.hasAttribute(Kind))
1308      return false;
1309    AM.addAttribute(Kind);
1310    return true;
1311  };
1312  return updateAttrMap<Attribute::AttrKind>(IRP, AttrKinds, RemoveAttrCB);
1313}
1314
// Attributor::removeAttrs (string overload): same as above for string
// attributes. NOTE(review): first signature line lost in extraction.
1316                                  ArrayRef<StringRef> Attrs) {
1317  auto RemoveAttrCB = [&](StringRef Attr, AttributeSet AttrSet,
1318                          AttributeMask &AM, AttrBuilder &) -> bool {
1319    if (!AttrSet.hasAttribute(Attr))
1320      return false;
1321    AM.addAttribute(Attr);
1322    return true;
1323  };
1324
1325  return updateAttrMap<StringRef>(IRP, Attrs, RemoveAttrCB);
1326}
1327
// Attributor::manifestAttrs: queue addition of \p Attrs at \p IRP, honoring
// \p ForceReplace via addIfNotExistent. NOTE(review): first signature line
// lost in extraction.
1329                                    ArrayRef<Attribute> Attrs,
1330                                    bool ForceReplace) {
1331  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
1332  auto AddAttrCB = [&](const Attribute &Attr, AttributeSet AttrSet,
1333                       AttributeMask &, AttrBuilder &AB) {
1334    return addIfNotExistent(Ctx, Attr, AttrSet, ForceReplace, AB);
1335  };
1336  return updateAttrMap<Attribute>(IRP, Attrs, AddAttrCB);
1337}
1338
// NOTE(review): this region is heavily garbled by extraction — the static
// IRPosition sentinel definitions and the SubsumingPositionIterator
// constructor signature, as well as the switch `case IRP_*:` labels, were
// dropped. The visible logic enumerates, for a given position, all positions
// whose attributes subsume it (callee function/returned/argument positions,
// `returned`-attributed call site arguments, etc.). Confirm structure against
// upstream before editing.
1340const IRPosition
1342
1344  IRPositions.emplace_back(IRP);
1345
1346  // Helper to determine if operand bundles on a call site are benign or
1347  // potentially problematic. We handle only llvm.assume for now.
1348  auto CanIgnoreOperandBundles = [](const CallBase &CB) {
1349    return (isa<IntrinsicInst>(CB) &&
1350            cast<IntrinsicInst>(CB).getIntrinsicID() == Intrinsic ::assume);
1351  };
1352
1353  const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
1354  switch (IRP.getPositionKind()) {
1358    return;
1361    IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
1362    return;
1364    assert(CB && "Expected call site!");
1365    // TODO: We need to look at the operand bundles similar to the redirection
1366    // in CallBase.
1367    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB))
1368      if (auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand()))
1369        IRPositions.emplace_back(IRPosition::function(*Callee));
1370    return;
1372    assert(CB && "Expected call site!");
1373    // TODO: We need to look at the operand bundles similar to the redirection
1374    // in CallBase.
1375    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1376      if (auto *Callee =
1377              dyn_cast_if_present<Function>(CB->getCalledOperand())) {
1378        IRPositions.emplace_back(IRPosition::returned(*Callee));
1379        IRPositions.emplace_back(IRPosition::function(*Callee));
        // Arguments marked `returned` forward their value to the call site
        // return, so their positions subsume the call-site-returned position.
1380        for (const Argument &Arg : Callee->args())
1381          if (Arg.hasReturnedAttr()) {
1382            IRPositions.emplace_back(
1383                IRPosition::callsite_argument(*CB, Arg.getArgNo()));
1384            IRPositions.emplace_back(
1385                IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
1386            IRPositions.emplace_back(IRPosition::argument(Arg));
1387          }
1388      }
1389    }
1390    IRPositions.emplace_back(IRPosition::callsite_function(*CB));
1391    return;
1393    assert(CB && "Expected call site!");
1394    // TODO: We need to look at the operand bundles similar to the redirection
1395    // in CallBase.
1396    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1397      auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
1398      if (Callee) {
1399        if (Argument *Arg = IRP.getAssociatedArgument())
1400          IRPositions.emplace_back(IRPosition::argument(*Arg));
1401        IRPositions.emplace_back(IRPosition::function(*Callee));
1402      }
1403    }
1404    IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
1405    return;
1406  }
1407  }
1408}
1409
// Sanity-check the internal encoding of an IRPosition; compiled only under
// EXPENSIVE_CHECKS. NOTE(review): a few case labels and assertion lines
// (e.g. for IRP_CALL_SITE_RETURNED and IRP_CALL_SITE_ARGUMENT) were lost in
// extraction; confirm against upstream.
1410void IRPosition::verify() {
1411#ifdef EXPENSIVE_CHECKS
1412  switch (getPositionKind()) {
1413  case IRP_INVALID:
1414    assert((CBContext == nullptr) &&
1415           "Invalid position must not have CallBaseContext!");
1416    assert(!Enc.getOpaqueValue() &&
1417           "Expected a nullptr for an invalid position!");
1418    return;
1419  case IRP_FLOAT:
1421           "Expected specialized kind for argument values!");
1422    return;
1423  case IRP_RETURNED:
1424    assert(isa<Function>(getAsValuePtr()) &&
1425           "Expected function for a 'returned' position!");
1426    assert(getAsValuePtr() == &getAssociatedValue() &&
1427           "Associated value mismatch!");
1428    return;
1430    assert((CBContext == nullptr) &&
1431           "'call site returned' position must not have CallBaseContext!");
1432    assert((isa<CallBase>(getAsValuePtr())) &&
1433           "Expected call base for 'call site returned' position!");
1434    assert(getAsValuePtr() == &getAssociatedValue() &&
1435           "Associated value mismatch!");
1436    return;
1437  case IRP_CALL_SITE:
1438    assert((CBContext == nullptr) &&
1439           "'call site function' position must not have CallBaseContext!");
1440    assert((isa<CallBase>(getAsValuePtr())) &&
1441           "Expected call base for 'call site function' position!");
1442    assert(getAsValuePtr() == &getAssociatedValue() &&
1443           "Associated value mismatch!");
1444    return;
1445  case IRP_FUNCTION:
1446    assert(isa<Function>(getAsValuePtr()) &&
1447           "Expected function for a 'function' position!");
1448    assert(getAsValuePtr() == &getAssociatedValue() &&
1449           "Associated value mismatch!");
1450    return;
1451  case IRP_ARGUMENT:
1452    assert(isa<Argument>(getAsValuePtr()) &&
1453           "Expected argument for a 'argument' position!");
1454    assert(getAsValuePtr() == &getAssociatedValue() &&
1455           "Associated value mismatch!");
1456    return;
1458    assert((CBContext == nullptr) &&
1459           "'call site argument' position must not have CallBaseContext!");
    // Call-site-argument positions are encoded as a Use, not a Value.
1460    Use *U = getAsUsePtr();
1461    (void)U; // Silence unused variable warning.
1462    assert(U && "Expected use for a 'call site argument' position!");
1463    assert(isa<CallBase>(U->getUser()) &&
1464           "Expected call base user for a 'call site argument' position!");
1465    assert(cast<CallBase>(U->getUser())->isArgOperand(U) &&
1466           "Expected call base argument operand for a 'call site argument' "
1467           "position");
1468    assert(cast<CallBase>(U->getUser())->getArgOperandNo(U) ==
1469               unsigned(getCallSiteArgNo()) &&
1470           "Argument number mismatch!");
1471    assert(U->get() == &getAssociatedValue() && "Associated value mismatch!");
1472    return;
1473  }
1474  }
1475#endif
1476}
1477
// Attributor::getAssumedConstant: simplify \p IRP to a Constant if possible.
// Returns std::nullopt ("not yet known"), a Constant, or nullptr ("not
// constant"). NOTE(review): the line with the function name (orig. 1479)
// was lost in extraction.
1478std::optional<Constant *>
1480                              const AbstractAttribute &AA,
1481                              bool &UsedAssumedInformation) {
1482  // First check all callbacks provided by outside AAs. If any of them returns
1483  // a non-null value that is different from the associated value, or
1484  // std::nullopt, we assume it's simplified.
1485  for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
1486    std::optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
1487    if (!SimplifiedV)
1488      return std::nullopt;
1489    if (isa_and_nonnull<Constant>(*SimplifiedV))
1490      return cast<Constant>(*SimplifiedV);
1491    return nullptr;
1492  }
1493  if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
1494    return C;
1496  if (getAssumedSimplifiedValues(IRP, &AA, Values,
1498                                 UsedAssumedInformation)) {
    // No potential values means the position is dead / undefined.
1499    if (Values.empty())
1500      return std::nullopt;
1501    if (auto *C = dyn_cast_or_null<Constant>(
1502            AAPotentialValues::getSingleValue(*this, AA, IRP, Values)))
1503      return C;
1504  }
1505  return nullptr;
1506}
1507
// Attributor::getAssumedSimplified: simplify \p IRP to a single Value.
// NOTE(review): the signature opener (orig. 1508) was lost in extraction.
1510    const IRPosition &IRP, const AbstractAttribute *AA,
1511    bool &UsedAssumedInformation, AA::ValueScope S) {
1512  // First check all callbacks provided by outside AAs. If any of them returns
1513  // a non-null value that is different from the associated value, or
1514  // std::nullopt, we assume it's simplified.
1515  for (auto &CB : SimplificationCallbacks.lookup(IRP))
1516    return CB(IRP, AA, UsedAssumedInformation);
1517
1518  if (!getAssumedSimplifiedValues(IRP, AA, Values, S, UsedAssumedInformation))
1519    return &IRP.getAssociatedValue();
1520  if (Values.empty())
1521    return std::nullopt;
1522  if (AA)
1523    if (Value *V = AAPotentialValues::getSingleValue(*this, *AA, IRP, Values))
1524      return V;
1527    return nullptr;
1528  return &IRP.getAssociatedValue();
1529}
1530
// Attributor::getAssumedSimplifiedValues: collect all assumed values for
// \p InitialIRP, optionally recursing through select/phi values via a
// worklist. Returns false if simplification failed. NOTE(review): the
// signature opener and the Worklist/Seen declarations were lost in
// extraction.
1532    const IRPosition &InitialIRP, const AbstractAttribute *AA,
1534    bool &UsedAssumedInformation, bool RecurseForSelectAndPHI) {
1537  Worklist.push_back(InitialIRP);
1538  while (!Worklist.empty()) {
1539    const IRPosition &IRP = Worklist.pop_back_val();
1540
1541    // First check all callbacks provided by outside AAs. If any of them returns
1542    // a non-null value that is different from the associated value, or
1543    // std::nullopt, we assume it's simplified.
1544    int NV = Values.size();
1545    const auto &SimplificationCBs = SimplificationCallbacks.lookup(IRP);
1546    for (const auto &CB : SimplificationCBs) {
1547      std::optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
1548      if (!CBResult.has_value())
1549        continue;
1550      Value *V = *CBResult;
1551      if (!V)
1552        return false;
1555        Values.push_back(AA::ValueAndContext{*V, nullptr});
1556      else
1557        return false;
1558    }
1559    if (SimplificationCBs.empty()) {
1560      // If no high-level/outside simplification occurred, use
1561      // AAPotentialValues.
1562      const auto *PotentialValuesAA =
1564      if (PotentialValuesAA &&
1565          PotentialValuesAA->getAssumedSimplifiedValues(*this, Values, S)) {
1566        UsedAssumedInformation |= !PotentialValuesAA->isAtFixpoint();
1567      } else if (IRP.getPositionKind() != IRPosition::IRP_RETURNED) {
1568        Values.push_back({IRP.getAssociatedValue(), IRP.getCtxI()});
1569      } else {
1570        // TODO: We could visit all returns and add the operands.
1571        return false;
1572      }
1573    }
1574
1575    if (!RecurseForSelectAndPHI)
1576      break;
1577
    // Replace newly-found select/phi values with their own simplified values
    // by re-queueing them; swap-with-last keeps the scan O(n).
1578    for (int I = NV, E = Values.size(); I < E; ++I) {
1579      Value *V = Values[I].getValue();
1580      if (!isa<PHINode>(V) && !isa<SelectInst>(V))
1581        continue;
1582      if (!Seen.insert(V).second)
1583        continue;
1584      // Move the last element to this slot.
1585      Values[I] = Values[E - 1];
1586      // Eliminate the last slot, adjust the indices.
1587      Values.pop_back();
1588      --E;
1589      --I;
1590      // Add a new value (select or phi) to the worklist.
1591      Worklist.push_back(IRPosition::value(*V));
1592    }
1593  }
1594  return true;
1595}
1596
// Attributor::translateArgumentToCallSiteContent: map a callee argument back
// to the simplified value at the call site, if legal. NOTE(review): the
// signature opener (orig. 1597) was lost in extraction.
1598    std::optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
1599    bool &UsedAssumedInformation) {
1600  if (!V)
1601    return V;
1602  if (*V == nullptr || isa<Constant>(*V))
1603    return V;
1604  if (auto *Arg = dyn_cast<Argument>(*V))
    // Only translate when the argument belongs to the called function and is
    // passed by value (no in-memory pointee semantics).
1605    if (CB.getCalledOperand() == Arg->getParent() &&
1606        CB.arg_size() > Arg->getArgNo())
1607      if (!Arg->hasPointeeInMemoryValueAttr())
1608        return getAssumedSimplified(
1609            IRPosition::callsite_argument(CB, Arg->getArgNo()), AA,
1610            UsedAssumedInformation, AA::Intraprocedural);
1611  return nullptr;
1612}
1613
// Attributor destructor. NOTE(review): the `Attributor::~Attributor() {`
// line (orig. 1614) was lost in extraction.
1615  // The abstract attributes are allocated via the BumpPtrAllocator Allocator,
1616  // thus we cannot delete them. We can, and want to, destruct them though.
1617  for (auto &It : AAMap) {
1618    AbstractAttribute *AA = It.getSecond();
1619    AA->~AbstractAttribute();
1620  }
1621}
1622
// Attributor::isAssumedDead overload set. All overloads short-circuit to
// "not dead" when liveness tracking is disabled in the configuration, and
// record an AA dependence when assumed (not yet known) liveness information
// is used. NOTE(review): every overload's first signature line was lost in
// extraction; confirm names/parameter order against upstream.
//
// Overload 1: liveness of the IR position an abstract attribute is attached
// to (only meaningful for functions in the current module set).
1624                             const AAIsDead *FnLivenessAA,
1625                             bool &UsedAssumedInformation,
1626                             bool CheckBBLivenessOnly, DepClassTy DepClass) {
1627  if (!Configuration.UseLiveness)
1628    return false;
1629  const IRPosition &IRP = AA.getIRPosition();
1630  if (!Functions.count(IRP.getAnchorScope()))
1631    return false;
1632  return isAssumedDead(IRP, &AA, FnLivenessAA, UsedAssumedInformation,
1633                       CheckBBLivenessOnly, DepClass);
1634}
1635
// Overload 2: liveness of a Use — dispatches on the kind of user (call-site
// argument, return, phi edge, removable store) before falling back to the
// user instruction's liveness.
1637                             const AbstractAttribute *QueryingAA,
1638                             const AAIsDead *FnLivenessAA,
1639                             bool &UsedAssumedInformation,
1640                             bool CheckBBLivenessOnly, DepClassTy DepClass) {
1641  if (!Configuration.UseLiveness)
1642    return false;
1643  Instruction *UserI = dyn_cast<Instruction>(U.getUser());
1644  if (!UserI)
1645    return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
1646                         UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1647
1648  if (auto *CB = dyn_cast<CallBase>(UserI)) {
1649    // For call site argument uses we can check if the argument is
1650    // unused/dead.
1651    if (CB->isArgOperand(&U)) {
1652      const IRPosition &CSArgPos =
1653          IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
1654      return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
1655                           UsedAssumedInformation, CheckBBLivenessOnly,
1656                           DepClass);
1657    }
1658  } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
1659    const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
1660    return isAssumedDead(RetPos, QueryingAA, FnLivenessAA,
1661                         UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1662  } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
    // A phi use is dead if the incoming edge (its block terminator) is dead.
1663    BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
1664    return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
1665                         UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1666  } else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
1667    if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
1668      const IRPosition IRP = IRPosition::inst(*SI);
1669      const AAIsDead *IsDeadAA =
1671      if (IsDeadAA && IsDeadAA->isRemovableStore()) {
1672        if (QueryingAA)
1673          recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1674        if (!IsDeadAA->isKnown(AAIsDead::IS_REMOVABLE))
1675          UsedAssumedInformation = true;
1676        return true;
1677      }
1678    }
1679  }
1680
1681  return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
1682                       UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1683}
1684
// Overload 3: liveness of an Instruction — consults function-level liveness
// first, then the instruction-specific AAIsDead, avoiding recursive
// reasoning (never trust the querying AA about itself).
1686                             const AbstractAttribute *QueryingAA,
1687                             const AAIsDead *FnLivenessAA,
1688                             bool &UsedAssumedInformation,
1689                             bool CheckBBLivenessOnly, DepClassTy DepClass,
1690                             bool CheckForDeadStore) {
1691  if (!Configuration.UseLiveness)
1692    return false;
1693  const IRPosition::CallBaseContext *CBCtx =
1694      QueryingAA ? QueryingAA->getCallBaseContext() : nullptr;
1695
  // Blocks added by manifest are not tracked by liveness AAs.
1696  if (ManifestAddedBlocks.contains(I.getParent()))
1697    return false;
1698
1699  const Function &F = *I.getFunction();
1700  if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1701    FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRPosition::function(F, CBCtx),
1702                                              QueryingAA, DepClassTy::NONE);
1703
1704  // Don't use recursive reasoning.
1705  if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1706    return false;
1707
1708  // If we have a context instruction and a liveness AA we use it.
1709  if (CheckBBLivenessOnly ? FnLivenessAA->isAssumedDead(I.getParent())
1710                          : FnLivenessAA->isAssumedDead(&I)) {
1711    if (QueryingAA)
1712      recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1713    if (!FnLivenessAA->isKnownDead(&I))
1714      UsedAssumedInformation = true;
1715    return true;
1716  }
1717
1718  if (CheckBBLivenessOnly)
1719    return false;
1720
1721  const IRPosition IRP = IRPosition::inst(I, CBCtx);
1722  const AAIsDead *IsDeadAA =
1724
1725  // Don't use recursive reasoning.
1726  if (!IsDeadAA || QueryingAA == IsDeadAA)
1727    return false;
1728
1729  if (IsDeadAA->isAssumedDead()) {
1730    if (QueryingAA)
1731      recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1732    if (!IsDeadAA->isKnownDead())
1733      UsedAssumedInformation = true;
1734    return true;
1735  }
1736
1737  if (CheckForDeadStore && isa<StoreInst>(I) && IsDeadAA->isRemovableStore()) {
1738    if (QueryingAA)
1739      recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1740    if (!IsDeadAA->isKnownDead())
1741      UsedAssumedInformation = true;
1742    return true;
1743  }
1744
1745  return false;
1746}
1747
// Overload 4: liveness of an IRPosition — checks the context instruction's
// block liveness first, then a position-specific AAIsDead.
1749                             const AbstractAttribute *QueryingAA,
1750                             const AAIsDead *FnLivenessAA,
1751                             bool &UsedAssumedInformation,
1752                             bool CheckBBLivenessOnly, DepClassTy DepClass) {
1753  if (!Configuration.UseLiveness)
1754    return false;
1755  // Don't check liveness for constants, e.g. functions, used as (floating)
1756  // values since the context instruction and such is here meaningless.
1759    return false;
1760  }
1761
1762  Instruction *CtxI = IRP.getCtxI();
1763  if (CtxI &&
1764      isAssumedDead(*CtxI, QueryingAA, FnLivenessAA, UsedAssumedInformation,
1765                    /* CheckBBLivenessOnly */ true,
1766                    CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
1767    return true;
1768
1769  if (CheckBBLivenessOnly)
1770    return false;
1771
1772  // If we haven't succeeded we query the specific liveness info for the IRP.
1773  const AAIsDead *IsDeadAA;
1775    IsDeadAA = getOrCreateAAFor<AAIsDead>(
1777        QueryingAA, DepClassTy::NONE);
1778  else
1779    IsDeadAA = getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1780
1781  // Don't use recursive reasoning.
1782  if (!IsDeadAA || QueryingAA == IsDeadAA)
1783    return false;
1784
1785  if (IsDeadAA->isAssumedDead()) {
1786    if (QueryingAA)
1787      recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1788    if (!IsDeadAA->isKnownDead())
1789      UsedAssumedInformation = true;
1790    return true;
1791  }
1792
1793  return false;
1794}
1795
// Overload 5: liveness of a BasicBlock via the function-level AAIsDead.
1797                             const AbstractAttribute *QueryingAA,
1798                             const AAIsDead *FnLivenessAA,
1799                             DepClassTy DepClass) {
1800  if (!Configuration.UseLiveness)
1801    return false;
1802  const Function &F = *BB.getParent();
1803  if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1805                                              QueryingAA, DepClassTy::NONE);
1806
1807  // Don't use recursive reasoning.
1808  if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1809    return false;
1810
1811  if (FnLivenessAA->isAssumedDead(&BB)) {
1812    if (QueryingAA)
1813      recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1814    return true;
1815  }
1816
1817  return false;
1818}
1819
// Attributor::checkForAllCallees: apply \p Pred to all potential callees of
// \p CB. Direct calls are handled immediately; indirect calls use the
// optimistic call-edge set from AACallEdges. NOTE(review): the signature
// opener and the getAAFor argument line (orig. 1827) were lost in
// extraction.
1822    const AbstractAttribute &QueryingAA, const CallBase &CB) {
1823  if (const Function *Callee = dyn_cast<Function>(CB.getCalledOperand()))
1824    return Pred(Callee);
1825
1826  const auto *CallEdgesAA = getAAFor<AACallEdges>(
  // Unknown callees make the callee set unbounded; fail conservatively.
1828  if (!CallEdgesAA || CallEdgesAA->hasUnknownCallee())
1829    return false;
1830
1831  const auto &Callees = CallEdgesAA->getOptimisticEdges();
1832  return Pred(Callees.getArrayRef());
1833}
1834
1835bool canMarkAsVisited(const User *Usr) {
1836 return isa<PHINode>(Usr) || !isa<Instruction>(Usr);
1837}
1838
// Attributor::checkForAllUses: worklist traversal over all (transitive) uses
// of \p V, honoring virtual-use callbacks, liveness, droppable uses, and
// following values through stores into their potential loaded copies.
// NOTE(review): the signature opener and the Visited/Worklist declarations
// (orig. lines 1839, 1859-1860) were lost in extraction.
1840    function_ref<bool(const Use &, bool &)> Pred,
1841    const AbstractAttribute &QueryingAA, const Value &V,
1842    bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
1843    bool IgnoreDroppableUses,
1844    function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {
1845
1846  // Check virtual uses first.
1847  for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&V))
1848    if (!CB(*this, &QueryingAA))
1849      return false;
1850
1851  if (isa<ConstantData>(V))
1852    return false;
1853
1854  // Check the trivial case first as it catches void values.
1855  if (V.use_empty())
1856    return true;
1857
1858  const IRPosition &IRP = QueryingAA.getIRPosition();
1861
1862  auto AddUsers = [&](const Value &V, const Use *OldUse) {
1863    for (const Use &UU : V.uses()) {
1864      if (OldUse && EquivalentUseCB && !EquivalentUseCB(*OldUse, UU)) {
1865        LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
1866                             "rejected by the equivalence call back: "
1867                          << *UU << "!\n");
1868        return false;
1869      }
1870
1871      Worklist.push_back(&UU);
1872    }
1873    return true;
1874  };
1875
1876  AddUsers(V, /* OldUse */ nullptr);
1877
1878  LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
1879                    << " initial uses to check\n");
1880
1881  const Function *ScopeFn = IRP.getAnchorScope();
1882  const auto *LivenessAA =
1883      ScopeFn ? getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
1885              : nullptr;
1886
1887  while (!Worklist.empty()) {
1888    const Use *U = Worklist.pop_back_val();
1889    if (canMarkAsVisited(U->getUser()) && !Visited.insert(U).second)
1890      continue;
1892      if (auto *Fn = dyn_cast<Function>(U->getUser()))
1893        dbgs() << "[Attributor] Check use: " << **U << " in " << Fn->getName()
1894               << "\n";
1895      else
1896        dbgs() << "[Attributor] Check use: " << **U << " in " << *U->getUser()
1897               << "\n";
1898    });
1899    bool UsedAssumedInformation = false;
1900    if (isAssumedDead(*U, &QueryingAA, LivenessAA, UsedAssumedInformation,
1901                      CheckBBLivenessOnly, LivenessDepClass)) {
1903                      dbgs() << "[Attributor] Dead use, skip!\n");
1904      continue;
1905    }
1906    if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
1908                      dbgs() << "[Attributor] Droppable user, skip!\n");
1909      continue;
1910    }
1911
    // A stored value may be observable through loads of the same memory; if
    // we can enumerate all potential copies exactly, follow those instead.
1912    if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
1913      if (&SI->getOperandUse(0) == U) {
1914        if (!Visited.insert(U).second)
1915          continue;
1916        SmallSetVector<Value *, 4> PotentialCopies;
1918                *this, *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
1919                /* OnlyExact */ true)) {
1921                          dbgs()
1922                              << "[Attributor] Value is stored, continue with "
1923                              << PotentialCopies.size()
1924                              << " potential copies instead!\n");
1925          for (Value *PotentialCopy : PotentialCopies)
1926            if (!AddUsers(*PotentialCopy, U))
1927              return false;
1928          continue;
1929        }
1930      }
1931    }
1932
1933    bool Follow = false;
1934    if (!Pred(*U, Follow))
1935      return false;
1936    if (!Follow)
1937      continue;
1938
1939    User &Usr = *U->getUser();
1940    AddUsers(Usr, /* OldUse */ nullptr);
1941  }
1942
1943  return true;
1944}
1945
// Attributor::checkForAllCallSites (position overload): resolve the
// associated function and forward. NOTE(review): the signature opener was
// lost in extraction.
1947                                      const AbstractAttribute &QueryingAA,
1948                                      bool RequireAllCallSites,
1949                                      bool &UsedAssumedInformation) {
1950  // We can try to determine information from
1951  // the call sites. However, this is only possible all call sites are known,
1952  // hence the function has internal linkage.
1953  const IRPosition &IRP = QueryingAA.getIRPosition();
1954  const Function *AssociatedFunction = IRP.getAssociatedFunction();
1955  if (!AssociatedFunction) {
1956    LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
1957                      << "\n");
1958    return false;
1959  }
1960
1961  return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
1962                              &QueryingAA, UsedAssumedInformation);
1963}
1964
// Function overload: walk all uses of \p Fn, looking through constant cast
// expressions, skipping dead uses, and applying \p Pred to each abstract
// call site. Fails if \p RequireAllCallSites and a non-call (or escaping)
// use exists. NOTE(review): the signature opener and the Uses vector
// declaration (orig. 1983) were lost in extraction.
1966                                      const Function &Fn,
1967                                      bool RequireAllCallSites,
1968                                      const AbstractAttribute *QueryingAA,
1969                                      bool &UsedAssumedInformation,
1970                                      bool CheckPotentiallyDead) {
1971  if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
1972    LLVM_DEBUG(
1973        dbgs()
1974        << "[Attributor] Function " << Fn.getName()
1975        << " has no internal linkage, hence not all call sites are known\n");
1976    return false;
1977  }
1978  // Check virtual uses first.
1979  for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&Fn))
1980    if (!CB(*this, QueryingAA))
1981      return false;
1982
  // Indexed loop because Uses grows while we iterate (cast expressions).
1984  for (unsigned u = 0; u < Uses.size(); ++u) {
1985    const Use &U = *Uses[u];
1987      if (auto *Fn = dyn_cast<Function>(U))
1988        dbgs() << "[Attributor] Check use: " << Fn->getName() << " in "
1989               << *U.getUser() << "\n";
1990      else
1991        dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
1992               << "\n";
1993    });
1994    if (!CheckPotentiallyDead &&
1995        isAssumedDead(U, QueryingAA, nullptr, UsedAssumedInformation,
1996                      /* CheckBBLivenessOnly */ true)) {
1998                      dbgs() << "[Attributor] Dead use, skip!\n");
1999      continue;
2000    }
2001    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
2002      if (CE->isCast() && CE->getType()->isPointerTy()) {
2004          dbgs() << "[Attributor] Use, is constant cast expression, add "
2005                 << CE->getNumUses() << " uses of that expression instead!\n";
2006        });
2007        for (const Use &CEU : CE->uses())
2008          Uses.push_back(&CEU);
2009        continue;
2010      }
2011    }
2012
2013    AbstractCallSite ACS(&U);
2014    if (!ACS) {
2015      LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
2016                        << " has non call site use " << *U.get() << " in "
2017                        << *U.getUser() << "\n");
2018      return false;
2019    }
2020
2021    const Use *EffectiveUse =
2022        ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
2023    if (!ACS.isCallee(EffectiveUse)) {
2024      if (!RequireAllCallSites) {
2025        LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
2026                          << " is not a call of " << Fn.getName()
2027                          << ", skip use\n");
2028        continue;
2029      }
2030      LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
2031                        << " is an invalid use of " << Fn.getName() << "\n");
2032      return false;
2033    }
2034
2035    // Make sure the arguments that can be matched between the call site and the
2036    // callee argee on their type. It is unlikely they do not and it doesn't
2037    // make sense for all attributes to know/care about this.
2038    assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
2039    unsigned MinArgsParams =
2040        std::min(size_t(ACS.getNumArgOperands()), Fn.arg_size());
2041    for (unsigned u = 0; u < MinArgsParams; ++u) {
2042      Value *CSArgOp = ACS.getCallArgOperand(u);
2043      if (CSArgOp && Fn.getArg(u)->getType() != CSArgOp->getType()) {
2044        LLVM_DEBUG(
2045            dbgs() << "[Attributor] Call site / callee argument type mismatch ["
2046                   << u << "@" << Fn.getName() << ": "
2047                   << *Fn.getArg(u)->getType() << " vs. "
2048                   << *ACS.getCallArgOperand(u)->getType() << "\n");
2049        return false;
2050      }
2051    }
2052
2053    if (Pred(ACS))
2054      continue;
2055
2056    LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
2057                      << *ACS.getInstruction() << "\n");
2058    return false;
2059  }
2060
2061  return true;
2062}
2063
2064bool Attributor::shouldPropagateCallBaseContext(const IRPosition &IRP) {
2065 // TODO: Maintain a cache of Values that are
2066 // on the pathway from a Argument to a Instruction that would effect the
2067 // liveness/return state etc.
2069}
2070
2072 const AbstractAttribute &QueryingAA,
2074 bool RecurseForSelectAndPHI) {
2075
2076 const IRPosition &IRP = QueryingAA.getIRPosition();
2077 const Function *AssociatedFunction = IRP.getAssociatedFunction();
2078 if (!AssociatedFunction)
2079 return false;
2080
2081 bool UsedAssumedInformation = false;
2084 IRPosition::returned(*AssociatedFunction), &QueryingAA, Values, S,
2085 UsedAssumedInformation, RecurseForSelectAndPHI))
2086 return false;
2087
2088 return llvm::all_of(Values, [&](const AA::ValueAndContext &VAC) {
2089 return Pred(*VAC.getValue());
2090 });
2091}
2092
2095 function_ref<bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA,
2096 const AAIsDead *LivenessAA, ArrayRef<unsigned> Opcodes,
2097 bool &UsedAssumedInformation, bool CheckBBLivenessOnly = false,
2098 bool CheckPotentiallyDead = false) {
2099 for (unsigned Opcode : Opcodes) {
2100 // Check if we have instructions with this opcode at all first.
2101 auto *Insts = OpcodeInstMap.lookup(Opcode);
2102 if (!Insts)
2103 continue;
2104
2105 for (Instruction *I : *Insts) {
2106 // Skip dead instructions.
2107 if (A && !CheckPotentiallyDead &&
2108 A->isAssumedDead(IRPosition::inst(*I), QueryingAA, LivenessAA,
2109 UsedAssumedInformation, CheckBBLivenessOnly)) {
2111 dbgs() << "[Attributor] Instruction " << *I
2112 << " is potentially dead, skip!\n";);
2113 continue;
2114 }
2115
2116 if (!Pred(*I))
2117 return false;
2118 }
2119 }
2120 return true;
2121}
2122
2124 const Function *Fn,
2125 const AbstractAttribute *QueryingAA,
2126 ArrayRef<unsigned> Opcodes,
2127 bool &UsedAssumedInformation,
2128 bool CheckBBLivenessOnly,
2129 bool CheckPotentiallyDead) {
2130 // Since we need to provide instructions we have to have an exact definition.
2131 if (!Fn || Fn->isDeclaration())
2132 return false;
2133
2134 const IRPosition &QueryIRP = IRPosition::function(*Fn);
2135 const auto *LivenessAA =
2136 CheckPotentiallyDead && QueryingAA
2137 ? (getAAFor<AAIsDead>(*QueryingAA, QueryIRP, DepClassTy::NONE))
2138 : nullptr;
2139
2140 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2141 if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, QueryingAA,
2142 LivenessAA, Opcodes, UsedAssumedInformation,
2143 CheckBBLivenessOnly, CheckPotentiallyDead))
2144 return false;
2145
2146 return true;
2147}
2148
2150 const AbstractAttribute &QueryingAA,
2151 ArrayRef<unsigned> Opcodes,
2152 bool &UsedAssumedInformation,
2153 bool CheckBBLivenessOnly,
2154 bool CheckPotentiallyDead) {
2155 const IRPosition &IRP = QueryingAA.getIRPosition();
2156 const Function *AssociatedFunction = IRP.getAssociatedFunction();
2157 return checkForAllInstructions(Pred, AssociatedFunction, &QueryingAA, Opcodes,
2158 UsedAssumedInformation, CheckBBLivenessOnly,
2159 CheckPotentiallyDead);
2160}
2161
2163 function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA,
2164 bool &UsedAssumedInformation) {
2165 TimeTraceScope TS("checkForAllReadWriteInstructions");
2166
2167 const Function *AssociatedFunction =
2168 QueryingAA.getIRPosition().getAssociatedFunction();
2169 if (!AssociatedFunction)
2170 return false;
2171
2172 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
2173 const auto *LivenessAA =
2174 getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE);
2175
2176 for (Instruction *I :
2177 InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
2178 // Skip dead instructions.
2179 if (isAssumedDead(IRPosition::inst(*I), &QueryingAA, LivenessAA,
2180 UsedAssumedInformation))
2181 continue;
2182
2183 if (!Pred(*I))
2184 return false;
2185 }
2186
2187 return true;
2188}
2189
2190void Attributor::runTillFixpoint() {
2191 TimeTraceScope TimeScope("Attributor::runTillFixpoint");
2192 LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
2193 << DG.SyntheticRoot.Deps.size()
2194 << " abstract attributes.\n");
2195
2196 // Now that all abstract attributes are collected and initialized we start
2197 // the abstract analysis.
2198
2199 unsigned IterationCounter = 1;
2200 unsigned MaxIterations =
2201 Configuration.MaxFixpointIterations.value_or(SetFixpointIterations);
2202
2204 SetVector<AbstractAttribute *> Worklist, InvalidAAs;
2205 Worklist.insert_range(DG.SyntheticRoot);
2206
2207 do {
2208 // Remember the size to determine new attributes.
2209 size_t NumAAs = DG.SyntheticRoot.Deps.size();
2210 LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
2211 << ", Worklist size: " << Worklist.size() << "\n");
2212
2213 // For invalid AAs we can fix dependent AAs that have a required dependence,
2214 // thereby folding long dependence chains in a single step without the need
2215 // to run updates.
2216 for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
2217 AbstractAttribute *InvalidAA = InvalidAAs[u];
2218
2219 // Check the dependences to fast track invalidation.
2221 dbgs() << "[Attributor] InvalidAA: " << *InvalidAA
2222 << " has " << InvalidAA->Deps.size()
2223 << " required & optional dependences\n");
2224 for (auto &DepIt : InvalidAA->Deps) {
2225 AbstractAttribute *DepAA = cast<AbstractAttribute>(DepIt.getPointer());
2226 if (DepIt.getInt() == unsigned(DepClassTy::OPTIONAL)) {
2228 dbgs() << " - recompute: " << *DepAA);
2229 Worklist.insert(DepAA);
2230 continue;
2231 }
2233 << " - invalidate: " << *DepAA);
2235 assert(DepAA->getState().isAtFixpoint() && "Expected fixpoint state!");
2236 if (!DepAA->getState().isValidState())
2237 InvalidAAs.insert(DepAA);
2238 else
2239 ChangedAAs.push_back(DepAA);
2240 }
2241 InvalidAA->Deps.clear();
2242 }
2243
2244 // Add all abstract attributes that are potentially dependent on one that
2245 // changed to the work list.
2246 for (AbstractAttribute *ChangedAA : ChangedAAs) {
2247 for (auto &DepIt : ChangedAA->Deps)
2248 Worklist.insert(cast<AbstractAttribute>(DepIt.getPointer()));
2249 ChangedAA->Deps.clear();
2250 }
2251
2252 LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
2253 << ", Worklist+Dependent size: " << Worklist.size()
2254 << "\n");
2255
2256 // Reset the changed and invalid set.
2257 ChangedAAs.clear();
2258 InvalidAAs.clear();
2259
2260 // Update all abstract attribute in the work list and record the ones that
2261 // changed.
2262 for (AbstractAttribute *AA : Worklist) {
2263 const auto &AAState = AA->getState();
2264 if (!AAState.isAtFixpoint())
2265 if (updateAA(*AA) == ChangeStatus::CHANGED)
2266 ChangedAAs.push_back(AA);
2267
2268 // Use the InvalidAAs vector to propagate invalid states fast transitively
2269 // without requiring updates.
2270 if (!AAState.isValidState())
2271 InvalidAAs.insert(AA);
2272 }
2273
2274 // Add attributes to the changed set if they have been created in the last
2275 // iteration.
2276 ChangedAAs.append(DG.SyntheticRoot.begin() + NumAAs,
2277 DG.SyntheticRoot.end());
2278
2279 // Reset the work list and repopulate with the changed abstract attributes.
2280 // Note that dependent ones are added above.
2281 Worklist.clear();
2282 Worklist.insert_range(ChangedAAs);
2283 Worklist.insert_range(QueryAAsAwaitingUpdate);
2284 QueryAAsAwaitingUpdate.clear();
2285
2286 } while (!Worklist.empty() && (IterationCounter++ < MaxIterations));
2287
2288 if (IterationCounter > MaxIterations && !Functions.empty()) {
2289 auto Remark = [&](OptimizationRemarkMissed ORM) {
2290 return ORM << "Attributor did not reach a fixpoint after "
2291 << ore::NV("Iterations", MaxIterations) << " iterations.";
2292 };
2293 Function *F = Functions.front();
2295 }
2296
2297 LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
2298 << IterationCounter << "/" << MaxIterations
2299 << " iterations\n");
2300
2301 // Reset abstract arguments not settled in a sound fixpoint by now. This
2302 // happens when we stopped the fixpoint iteration early. Note that only the
2303 // ones marked as "changed" *and* the ones transitively depending on them
2304 // need to be reverted to a pessimistic state. Others might not be in a
2305 // fixpoint state but we can use the optimistic results for them anyway.
2306 SmallPtrSet<AbstractAttribute *, 32> Visited;
2307 for (unsigned u = 0; u < ChangedAAs.size(); u++) {
2308 AbstractAttribute *ChangedAA = ChangedAAs[u];
2309 if (!Visited.insert(ChangedAA).second)
2310 continue;
2311
2312 AbstractState &State = ChangedAA->getState();
2313 if (!State.isAtFixpoint()) {
2315
2316 NumAttributesTimedOut++;
2317 }
2318
2319 for (auto &DepIt : ChangedAA->Deps)
2320 ChangedAAs.push_back(cast<AbstractAttribute>(DepIt.getPointer()));
2321 ChangedAA->Deps.clear();
2322 }
2323
2324 LLVM_DEBUG({
2325 if (!Visited.empty())
2326 dbgs() << "\n[Attributor] Finalized " << Visited.size()
2327 << " abstract attributes.\n";
2328 });
2329}
2330
2332 assert(AA.isQueryAA() &&
2333 "Non-query AAs should not be required to register for updates!");
2334 QueryAAsAwaitingUpdate.insert(&AA);
2335}
2336
2337ChangeStatus Attributor::manifestAttributes() {
2338 TimeTraceScope TimeScope("Attributor::manifestAttributes");
2339 size_t NumFinalAAs = DG.SyntheticRoot.Deps.size();
2340
2341 unsigned NumManifested = 0;
2342 unsigned NumAtFixpoint = 0;
2343 ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
2344 for (auto &DepAA : DG.SyntheticRoot.Deps) {
2345 AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
2346 AbstractState &State = AA->getState();
2347
2348 // If there is not already a fixpoint reached, we can now take the
2349 // optimistic state. This is correct because we enforced a pessimistic one
2350 // on abstract attributes that were transitively dependent on a changed one
2351 // already above.
2352 if (!State.isAtFixpoint())
2353 State.indicateOptimisticFixpoint();
2354
2355 // We must not manifest Attributes that use Callbase info.
2356 if (AA->hasCallBaseContext())
2357 continue;
2358 // If the state is invalid, we do not try to manifest it.
2359 if (!State.isValidState())
2360 continue;
2361
2362 if (AA->getCtxI() && !isRunOn(*AA->getAnchorScope()))
2363 continue;
2364
2365 // Skip dead code.
2366 bool UsedAssumedInformation = false;
2367 if (isAssumedDead(*AA, nullptr, UsedAssumedInformation,
2368 /* CheckBBLivenessOnly */ true))
2369 continue;
2370 // Check if the manifest debug counter that allows skipping manifestation of
2371 // AAs
2372 if (!DebugCounter::shouldExecute(ManifestDBGCounter))
2373 continue;
2374 // Manifest the state and record if we changed the IR.
2375 ChangeStatus LocalChange = AA->manifest(*this);
2376 if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
2377 AA->trackStatistics();
2378 LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
2379 << "\n");
2380
2381 ManifestChange = ManifestChange | LocalChange;
2382
2383 NumAtFixpoint++;
2384 NumManifested += (LocalChange == ChangeStatus::CHANGED);
2385 }
2386
2387 (void)NumManifested;
2388 (void)NumAtFixpoint;
2389 LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
2390 << " arguments while " << NumAtFixpoint
2391 << " were in a valid fixpoint state\n");
2392
2393 NumAttributesManifested += NumManifested;
2394 NumAttributesValidFixpoint += NumAtFixpoint;
2395
2396 (void)NumFinalAAs;
2397 if (NumFinalAAs != DG.SyntheticRoot.Deps.size()) {
2398 auto DepIt = DG.SyntheticRoot.Deps.begin();
2399 for (unsigned u = 0; u < NumFinalAAs; ++u)
2400 ++DepIt;
2401 for (unsigned u = NumFinalAAs; u < DG.SyntheticRoot.Deps.size();
2402 ++u, ++DepIt) {
2403 errs() << "Unexpected abstract attribute: "
2404 << cast<AbstractAttribute>(DepIt->getPointer()) << " :: "
2405 << cast<AbstractAttribute>(DepIt->getPointer())
2406 ->getIRPosition()
2407 .getAssociatedValue()
2408 << "\n";
2409 }
2410 llvm_unreachable("Expected the final number of abstract attributes to "
2411 "remain unchanged!");
2412 }
2413
2414 for (auto &It : AttrsMap) {
2415 AttributeList &AL = It.getSecond();
2416 const IRPosition &IRP =
2417 isa<Function>(It.getFirst())
2418 ? IRPosition::function(*cast<Function>(It.getFirst()))
2419 : IRPosition::callsite_function(*cast<CallBase>(It.getFirst()));
2420 IRP.setAttrList(AL);
2421 }
2422
2423 return ManifestChange;
2424}
2425
2426void Attributor::identifyDeadInternalFunctions() {
2427 // Early exit if we don't intend to delete functions.
2428 if (!Configuration.DeleteFns)
2429 return;
2430
2431 // To avoid triggering an assertion in the lazy call graph we will not delete
2432 // any internal library functions. We should modify the assertion though and
2433 // allow internals to be deleted.
2434 const auto *TLI =
2435 isModulePass()
2436 ? nullptr
2437 : getInfoCache().getTargetLibraryInfoForFunction(*Functions.back());
2438 LibFunc LF;
2439
2440 // Identify dead internal functions and delete them. This happens outside
2441 // the other fixpoint analysis as we might treat potentially dead functions
2442 // as live to lower the number of iterations. If they happen to be dead, the
2443 // below fixpoint loop will identify and eliminate them.
2444
2445 SmallVector<Function *, 8> InternalFns;
2446 for (Function *F : Functions)
2447 if (F->hasLocalLinkage() && (isModulePass() || !TLI->getLibFunc(*F, LF)))
2448 InternalFns.push_back(F);
2449
2450 SmallPtrSet<Function *, 8> LiveInternalFns;
2451 bool FoundLiveInternal = true;
2452 while (FoundLiveInternal) {
2453 FoundLiveInternal = false;
2454 for (Function *&F : InternalFns) {
2455 if (!F)
2456 continue;
2457
2458 bool UsedAssumedInformation = false;
2460 [&](AbstractCallSite ACS) {
2462 return ToBeDeletedFunctions.count(Callee) ||
2463 (Functions.count(Callee) && Callee->hasLocalLinkage() &&
2464 !LiveInternalFns.count(Callee));
2465 },
2466 *F, true, nullptr, UsedAssumedInformation)) {
2467 continue;
2468 }
2469
2470 LiveInternalFns.insert(F);
2471 F = nullptr;
2472 FoundLiveInternal = true;
2473 }
2474 }
2475
2476 for (Function *F : InternalFns)
2477 if (F)
2478 ToBeDeletedFunctions.insert(F);
2479}
2480
2481ChangeStatus Attributor::cleanupIR() {
2482 TimeTraceScope TimeScope("Attributor::cleanupIR");
2483 // Delete stuff at the end to avoid invalid references and a nice order.
2484 LLVM_DEBUG(dbgs() << "\n[Attributor] Delete/replace at least "
2485 << ToBeDeletedFunctions.size() << " functions and "
2486 << ToBeDeletedBlocks.size() << " blocks and "
2487 << ToBeDeletedInsts.size() << " instructions and "
2488 << ToBeChangedValues.size() << " values and "
2489 << ToBeChangedUses.size() << " uses. To insert "
2490 << ToBeChangedToUnreachableInsts.size()
2491 << " unreachables.\n"
2492 << "Preserve manifest added " << ManifestAddedBlocks.size()
2493 << " blocks\n");
2494
2496 SmallVector<Instruction *, 32> TerminatorsToFold;
2497
2498 auto ReplaceUse = [&](Use *U, Value *NewV) {
2499 Value *OldV = U->get();
2500
2501 // If we plan to replace NewV we need to update it at this point.
2502 do {
2503 const auto &Entry = ToBeChangedValues.lookup(NewV);
2504 if (!get<0>(Entry))
2505 break;
2506 NewV = get<0>(Entry);
2507 } while (true);
2508
2509 Instruction *I = dyn_cast<Instruction>(U->getUser());
2510 assert((!I || isRunOn(*I->getFunction())) &&
2511 "Cannot replace an instruction outside the current SCC!");
2512
2513 // Do not replace uses in returns if the value is a must-tail call we will
2514 // not delete.
2515 if (auto *RI = dyn_cast_or_null<ReturnInst>(I)) {
2516 if (auto *CI = dyn_cast<CallInst>(OldV->stripPointerCasts()))
2517 if (CI->isMustTailCall() && !ToBeDeletedInsts.count(CI))
2518 return;
2519 // If we rewrite a return and the new value is not an argument, strip the
2520 // `returned` attribute as it is wrong now.
2521 if (!isa<Argument>(NewV))
2522 for (auto &Arg : RI->getFunction()->args())
2523 Arg.removeAttr(Attribute::Returned);
2524 }
2525
2526 LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
2527 << " instead of " << *OldV << "\n");
2528 U->set(NewV);
2529
2530 if (Instruction *I = dyn_cast<Instruction>(OldV)) {
2531 CGModifiedFunctions.insert(I->getFunction());
2532 if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
2534 DeadInsts.push_back(I);
2535 }
2536 if (isa<UndefValue>(NewV) && isa<CallBase>(U->getUser())) {
2537 auto *CB = cast<CallBase>(U->getUser());
2538 if (CB->isArgOperand(U)) {
2539 unsigned Idx = CB->getArgOperandNo(U);
2540 CB->removeParamAttr(Idx, Attribute::NoUndef);
2541 auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
2542 if (Callee && Callee->arg_size() > Idx)
2543 Callee->removeParamAttr(Idx, Attribute::NoUndef);
2544 }
2545 }
2546 if (isa<Constant>(NewV) && isa<CondBrInst>(U->getUser())) {
2547 Instruction *UserI = cast<Instruction>(U->getUser());
2548 if (isa<UndefValue>(NewV)) {
2549 ToBeChangedToUnreachableInsts.insert(UserI);
2550 } else {
2551 TerminatorsToFold.push_back(UserI);
2552 }
2553 }
2554 };
2555
2556 for (auto &It : ToBeChangedUses) {
2557 Use *U = It.first;
2558 Value *NewV = It.second;
2559 ReplaceUse(U, NewV);
2560 }
2561
2563 for (auto &It : ToBeChangedValues) {
2564 Value *OldV = It.first;
2565 auto [NewV, Done] = It.second;
2566 Uses.clear();
2567 for (auto &U : OldV->uses())
2568 if (Done || !U.getUser()->isDroppable())
2569 Uses.push_back(&U);
2570 for (Use *U : Uses) {
2571 if (auto *I = dyn_cast<Instruction>(U->getUser()))
2572 if (!isRunOn(*I->getFunction()))
2573 continue;
2574 ReplaceUse(U, NewV);
2575 }
2576 }
2577
2578 for (const auto &V : InvokeWithDeadSuccessor)
2579 if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
2580 assert(isRunOn(*II->getFunction()) &&
2581 "Cannot replace an invoke outside the current SCC!");
2582 bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
2583 bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
2584 bool Invoke2CallAllowed =
2586 assert((UnwindBBIsDead || NormalBBIsDead) &&
2587 "Invoke does not have dead successors!");
2588 BasicBlock *BB = II->getParent();
2589 BasicBlock *NormalDestBB = II->getNormalDest();
2590 if (UnwindBBIsDead) {
2591 Instruction *NormalNextIP = &NormalDestBB->front();
2592 if (Invoke2CallAllowed) {
2594 NormalNextIP = BB->getTerminator();
2595 }
2596 if (NormalBBIsDead)
2597 ToBeChangedToUnreachableInsts.insert(NormalNextIP);
2598 } else {
2599 assert(NormalBBIsDead && "Broken invariant!");
2600 if (!NormalDestBB->getUniquePredecessor())
2601 NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2602 ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
2603 }
2604 }
2605 for (Instruction *I : TerminatorsToFold) {
2606 assert(isRunOn(*I->getFunction()) &&
2607 "Cannot replace a terminator outside the current SCC!");
2608 CGModifiedFunctions.insert(I->getFunction());
2609 ConstantFoldTerminator(I->getParent());
2610 }
2611 for (const auto &V : ToBeChangedToUnreachableInsts)
2612 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2613 LLVM_DEBUG(dbgs() << "[Attributor] Change to unreachable: " << *I
2614 << "\n");
2615 assert(isRunOn(*I->getFunction()) &&
2616 "Cannot replace an instruction outside the current SCC!");
2617 CGModifiedFunctions.insert(I->getFunction());
2619 }
2620
2621 for (const auto &V : ToBeDeletedInsts) {
2622 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2624 isRunOn(*I->getFunction())) &&
2625 "Cannot delete an instruction outside the current SCC!");
2626 I->dropDroppableUses();
2627 CGModifiedFunctions.insert(I->getFunction());
2628 if (!I->getType()->isVoidTy())
2629 I->replaceAllUsesWith(UndefValue::get(I->getType()));
2631 DeadInsts.push_back(I);
2632 else
2633 I->eraseFromParent();
2634 }
2635 }
2636
2637 llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) { return !I; });
2638
2639 LLVM_DEBUG({
2640 dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n";
2641 for (auto &I : DeadInsts)
2642 if (I)
2643 dbgs() << " - " << *I << "\n";
2644 });
2645
2647
2648 if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
2649 SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
2650 ToBeDeletedBBs.reserve(NumDeadBlocks);
2651 for (BasicBlock *BB : ToBeDeletedBlocks) {
2652 assert(isRunOn(*BB->getParent()) &&
2653 "Cannot delete a block outside the current SCC!");
2654 CGModifiedFunctions.insert(BB->getParent());
2655 // Do not delete BBs added during manifests of AAs.
2656 if (ManifestAddedBlocks.contains(BB))
2657 continue;
2658 ToBeDeletedBBs.push_back(BB);
2659 }
2660 // Actually we do not delete the blocks but squash them into a single
2661 // unreachable but untangling branches that jump here is something we need
2662 // to do in a more generic way.
2663 detachDeadBlocks(ToBeDeletedBBs, nullptr);
2664 }
2665
2666 identifyDeadInternalFunctions();
2667
2668 // Rewrite the functions as requested during manifest.
2669 ChangeStatus ManifestChange = rewriteFunctionSignatures(CGModifiedFunctions);
2670
2671 for (Function *Fn : CGModifiedFunctions)
2672 if (!ToBeDeletedFunctions.count(Fn) && Functions.count(Fn))
2673 Configuration.CGUpdater.reanalyzeFunction(*Fn);
2674
2675 for (Function *Fn : ToBeDeletedFunctions) {
2676 if (!Functions.count(Fn))
2677 continue;
2678 Configuration.CGUpdater.removeFunction(*Fn);
2679 }
2680
2681 if (!ToBeChangedUses.empty())
2682 ManifestChange = ChangeStatus::CHANGED;
2683
2684 if (!ToBeChangedToUnreachableInsts.empty())
2685 ManifestChange = ChangeStatus::CHANGED;
2686
2687 if (!ToBeDeletedFunctions.empty())
2688 ManifestChange = ChangeStatus::CHANGED;
2689
2690 if (!ToBeDeletedBlocks.empty())
2691 ManifestChange = ChangeStatus::CHANGED;
2692
2693 if (!ToBeDeletedInsts.empty())
2694 ManifestChange = ChangeStatus::CHANGED;
2695
2696 if (!InvokeWithDeadSuccessor.empty())
2697 ManifestChange = ChangeStatus::CHANGED;
2698
2699 if (!DeadInsts.empty())
2700 ManifestChange = ChangeStatus::CHANGED;
2701
2702 NumFnDeleted += ToBeDeletedFunctions.size();
2703
2704 LLVM_DEBUG(dbgs() << "[Attributor] Deleted " << ToBeDeletedFunctions.size()
2705 << " functions after manifest.\n");
2706
2707#ifdef EXPENSIVE_CHECKS
2708 for (Function *F : Functions) {
2709 if (ToBeDeletedFunctions.count(F))
2710 continue;
2711 assert(!verifyFunction(*F, &errs()) && "Module verification failed!");
2712 }
2713#endif
2714
2715 return ManifestChange;
2716}
2717
2719 TimeTraceScope TimeScope("Attributor::run");
2720 AttributorCallGraph ACallGraph(*this);
2721
2722 if (PrintCallGraph)
2723 ACallGraph.populateAll();
2724
2725 Phase = AttributorPhase::UPDATE;
2726 runTillFixpoint();
2727
2728 // dump graphs on demand
2729 if (DumpDepGraph)
2730 DG.dumpGraph();
2731
2732 if (ViewDepGraph)
2733 DG.viewGraph();
2734
2736 DG.print();
2737
2738 Phase = AttributorPhase::MANIFEST;
2739 ChangeStatus ManifestChange = manifestAttributes();
2740
2741 Phase = AttributorPhase::CLEANUP;
2742 ChangeStatus CleanupChange = cleanupIR();
2743
2744 if (PrintCallGraph)
2745 ACallGraph.print();
2746
2747 return ManifestChange | CleanupChange;
2748}
2749
2750ChangeStatus Attributor::updateAA(AbstractAttribute &AA) {
2751 TimeTraceScope TimeScope("updateAA", [&]() {
2752 return AA.getName().str() +
2753 std::to_string(AA.getIRPosition().getPositionKind());
2754 });
2755 assert(Phase == AttributorPhase::UPDATE &&
2756 "We can update AA only in the update stage!");
2757
2758 // Use a new dependence vector for this update.
2759 DependenceVector DV;
2760 DependenceStack.push_back(&DV);
2761
2762 auto &AAState = AA.getState();
2764 bool UsedAssumedInformation = false;
2765 if (!isAssumedDead(AA, nullptr, UsedAssumedInformation,
2766 /* CheckBBLivenessOnly */ true))
2767 CS = AA.update(*this);
2768
2769 if (!AA.isQueryAA() && DV.empty() && !AA.getState().isAtFixpoint()) {
2770 // If the AA did not rely on outside information but changed, we run it
2771 // again to see if it found a fixpoint. Most AAs do but we don't require
2772 // them to. Hence, it might take the AA multiple iterations to get to a
2773 // fixpoint even if it does not rely on outside information, which is fine.
2775 if (CS == ChangeStatus::CHANGED)
2776 RerunCS = AA.update(*this);
2777
2778 // If the attribute did not change during the run or rerun, and it still did
2779 // not query any non-fix information, the state will not change and we can
2780 // indicate that right at this point.
2781 if (RerunCS == ChangeStatus::UNCHANGED && !AA.isQueryAA() && DV.empty())
2782 AAState.indicateOptimisticFixpoint();
2783 }
2784
2785 if (!AAState.isAtFixpoint())
2786 rememberDependences();
2787
2788 // Verify the stack was used properly, that is we pop the dependence vector we
2789 // put there earlier.
2790 DependenceVector *PoppedDV = DependenceStack.pop_back_val();
2791 (void)PoppedDV;
2792 assert(PoppedDV == &DV && "Inconsistent usage of the dependence stack!");
2793
2794 return CS;
2795}
2796
2798 assert(!F.isDeclaration() && "Cannot create a wrapper around a declaration!");
2799
2800 Module &M = *F.getParent();
2801 LLVMContext &Ctx = M.getContext();
2802 FunctionType *FnTy = F.getFunctionType();
2803
2804 Function *Wrapper =
2805 Function::Create(FnTy, F.getLinkage(), F.getAddressSpace(), F.getName());
2806 F.setName(""); // set the inside function anonymous
2807 M.getFunctionList().insert(F.getIterator(), Wrapper);
2808
2809 F.setLinkage(GlobalValue::InternalLinkage);
2810
2811 F.replaceAllUsesWith(Wrapper);
2812 assert(F.use_empty() && "Uses remained after wrapper was created!");
2813
2814 // Move the COMDAT section to the wrapper.
2815 // TODO: Check if we need to keep it for F as well.
2816 Wrapper->setComdat(F.getComdat());
2817 F.setComdat(nullptr);
2818
2819 // Copy all metadata and attributes but keep them on F as well.
2821 F.getAllMetadata(MDs);
2822 for (auto MDIt : MDs)
2823 Wrapper->addMetadata(MDIt.first, *MDIt.second);
2824 Wrapper->setAttributes(F.getAttributes());
2825
2826 // Create the call in the wrapper.
2827 BasicBlock *EntryBB = BasicBlock::Create(Ctx, "entry", Wrapper);
2828
2830 Argument *FArgIt = F.arg_begin();
2831 for (Argument &Arg : Wrapper->args()) {
2832 Args.push_back(&Arg);
2833 Arg.setName((FArgIt++)->getName());
2834 }
2835
2836 CallInst *CI = CallInst::Create(&F, Args, "", EntryBB);
2837 CI->setTailCall(true);
2838 CI->addFnAttr(Attribute::NoInline);
2839 ReturnInst::Create(Ctx, CI->getType()->isVoidTy() ? nullptr : CI, EntryBB);
2840
2841 NumFnShallowWrappersCreated++;
2842}
2843
2845 if (F.isDeclaration() || F.hasLocalLinkage() ||
2847 return false;
2848 return true;
2849}
2850
2852 if (!AllowDeepWrapper && !Force)
2853 return nullptr;
2854 if (!isInternalizable(F))
2855 return nullptr;
2856
2857 SmallPtrSet<Function *, 2> FnSet = {&F};
2858 DenseMap<Function *, Function *> InternalizedFns;
2859 internalizeFunctions(FnSet, InternalizedFns);
2860
2861 return InternalizedFns[&F];
2862}
2863
2866 for (Function *F : FnSet)
2868 return false;
2869
2870 FnMap.clear();
2871 // Generate the internalized version of each function.
2872 for (Function *F : FnSet) {
2873 Module &M = *F->getParent();
2874 FunctionType *FnTy = F->getFunctionType();
2875
2876 // Create a copy of the current function
2877 Function *Copied =
2878 Function::Create(FnTy, F->getLinkage(), F->getAddressSpace(),
2879 F->getName() + ".internalized");
2880 ValueToValueMapTy VMap;
2881 auto *NewFArgIt = Copied->arg_begin();
2882 for (auto &Arg : F->args()) {
2883 auto ArgName = Arg.getName();
2884 NewFArgIt->setName(ArgName);
2885 VMap[&Arg] = &(*NewFArgIt++);
2886 }
2888
2889 // Copy the body of the original function to the new one
2890 CloneFunctionInto(Copied, F, VMap,
2892
2893 // Set the linakage and visibility late as CloneFunctionInto has some
2894 // implicit requirements.
2897
2898 // Copy metadata
2900 F->getAllMetadata(MDs);
2901 for (auto MDIt : MDs)
2902 if (!Copied->hasMetadata())
2903 Copied->addMetadata(MDIt.first, *MDIt.second);
2904
2905 M.getFunctionList().insert(F->getIterator(), Copied);
2906 Copied->setDSOLocal(true);
2907 FnMap[F] = Copied;
2908 }
2909
2910 // Replace all uses of the old function with the new internalized function
2911 // unless the caller is a function that was just internalized.
2912 for (Function *F : FnSet) {
2913 auto &InternalizedFn = FnMap[F];
2914 auto IsNotInternalized = [&](Use &U) -> bool {
2915 if (auto *CB = dyn_cast<CallBase>(U.getUser()))
2916 return !FnMap.lookup(CB->getCaller());
2917 return false;
2918 };
2919 F->replaceUsesWithIf(InternalizedFn, IsNotInternalized);
2920 }
2921
2922 return true;
2923}
2924
2926 Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
2927
2928 if (!Configuration.RewriteSignatures)
2929 return false;
2930
2931 Function *Fn = Arg.getParent();
2932 auto CallSiteCanBeChanged = [Fn](AbstractCallSite ACS) {
2933 // Forbid the call site to cast the function return type. If we need to
2934 // rewrite these functions we need to re-create a cast for the new call site
2935 // (if the old had uses).
2936 if (!ACS.getCalledFunction() ||
2937 ACS.getInstruction()->getType() !=
2939 return false;
2940 if (cast<CallBase>(ACS.getInstruction())->getCalledOperand()->getType() !=
2941 Fn->getType())
2942 return false;
2943 if (ACS.getNumArgOperands() != Fn->arg_size())
2944 return false;
2945 // Forbid must-tail calls for now.
2946 return !ACS.isCallbackCall() && !ACS.getInstruction()->isMustTailCall();
2947 };
2948
2949 // Avoid var-arg functions for now.
2950 if (Fn->isVarArg()) {
2951 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
2952 return false;
2953 }
2954
2955 // Avoid functions with complicated argument passing semantics.
2956 AttributeList FnAttributeList = Fn->getAttributes();
2957 if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
2958 FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
2959 FnAttributeList.hasAttrSomewhere(Attribute::InAlloca) ||
2960 FnAttributeList.hasAttrSomewhere(Attribute::Preallocated)) {
2961 LLVM_DEBUG(
2962 dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
2963 return false;
2964 }
2965
2966 // Avoid callbacks for now.
2967 bool UsedAssumedInformation = false;
2968 if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
2969 UsedAssumedInformation,
2970 /* CheckPotentiallyDead */ true)) {
2971 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
2972 return false;
2973 }
2974
2975 auto InstPred = [](Instruction &I) {
2976 if (auto *CI = dyn_cast<CallInst>(&I))
2977 return !CI->isMustTailCall();
2978 return true;
2979 };
2980
2981 // Forbid must-tail calls for now.
2982 // TODO:
2983 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2984 if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
2985 nullptr, {Instruction::Call},
2986 UsedAssumedInformation)) {
2987 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
2988 return false;
2989 }
2990
2991 return true;
2992}
2993
// Registers a request to rewrite argument \p Arg into the given
// \p ReplacementTypes. Competing requests for the same argument are resolved
// in favor of the one with MORE replacement arguments. Returns true if the
// new rewrite was recorded, false if an existing rewrite is preferred.
//
// NOTE(review): the function header line (presumably
// `bool Attributor::registerFunctionSignatureRewrite(`) and the declaration
// of `ARIs` were lost in extraction — confirm against the upstream source.
2995 Argument &Arg, ArrayRef<Type *> ReplacementTypes,
2998 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2999 << Arg.getParent()->getName() << " with "
3000 << ReplacementTypes.size() << " replacements\n");
// Callers must pre-validate with isValidFunctionSignatureRewrite.
3001 assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
3002 "Cannot register an invalid rewrite");
3003
3004 Function *Fn = Arg.getParent();
3006 ArgumentReplacementMap[Fn];
// Lazily size the per-function replacement-info vector to one slot per
// argument.
3007 if (ARIs.empty())
3008 ARIs.resize(Fn->arg_size());
3009
3010 // If we have a replacement already with less than or equal new arguments,
3011 // ignore this request.
3012 std::unique_ptr<ArgumentReplacementInfo> &ARI = ARIs[Arg.getArgNo()];
3013 if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
3014 LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
3015 return false;
3016 }
3017
3018 // If we have a replacement already but we like the new one better, delete
3019 // the old.
3020 ARI.reset();
3021
3022 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
3023 << Arg.getParent()->getName() << " with "
3024 << ReplacementTypes.size() << " replacements\n");
3025
3026 // Remember the replacement.
3027 ARI.reset(new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
3028 std::move(CalleeRepairCB),
3029 std::move(ACSRepairCB)));
3030
3031 return true;
3032}
3033
// Returns true if the abstract attribute \p AA should be seeded into the
// initial worklist. In debug builds this honors the `SeedAllowList` /
// `FunctionSeedAllowList` debugging options; in release builds it always
// returns true.
3034bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) {
3035 bool Result = true;
3036#ifndef NDEBUG
3037 if (SeedAllowList.size() != 0)
3038 Result = llvm::is_contained(SeedAllowList, AA.getName());
3039 Function *Fn = AA.getAnchorScope();
3040 if (FunctionSeedAllowList.size() != 0 && Fn)
// NOTE(review): the statement guarded by this `if` (source line 3041,
// presumably `Result &= llvm::is_contained(FunctionSeedAllowList,
// Fn->getName());`) was lost in extraction — confirm upstream.
3042#endif
3043 return Result;
3044}
3045
// Performs all registered function signature rewrites: for each function in
// ArgumentReplacementMap, a new function with the replacement argument types
// is created, the body/attributes/debug info are transferred, every call site
// is recreated against the new signature, arguments are rewired (with repair
// callbacks for replaced ones), and the call graph is updated.
//
// \param ModifiedFns In/out set of modified functions; a rewritten function
//        is swapped from the old to the new entry.
// \returns the accumulated ChangeStatus.
//
// NOTE(review): extraction dropped several declaration lines (e.g. `Changed`
// near the top, `ARIs` at 3057, `CallSitePairs` at 3137, the argmem-clearing
// statement at 3127, and the `Changed = ...` update at 3251) — confirm
// against the upstream source before relying on this listing.
3046ChangeStatus Attributor::rewriteFunctionSignatures(
3047 SmallSetVector<Function *, 8> &ModifiedFns) {
3049
3050 for (auto &It : ArgumentReplacementMap) {
3051 Function *OldFn = It.getFirst();
3052
3053 // Deleted functions do not require rewrites.
3054 if (!Functions.count(OldFn) || ToBeDeletedFunctions.count(OldFn))
3055 continue;
3056
3058 It.getSecond();
3059 assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");
3060
3061 SmallVector<Type *, 16> NewArgumentTypes;
3062 SmallVector<AttributeSet, 16> NewArgumentAttributes;
3063
3064 // Collect replacement argument types and copy over existing attributes.
3065 AttributeList OldFnAttributeList = OldFn->getAttributes();
3066 for (Argument &Arg : OldFn->args()) {
3067 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
3068 ARIs[Arg.getArgNo()]) {
// A replaced argument expands into its (possibly empty) replacement types;
// replacement arguments start with no attributes.
3069 NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
3070 ARI->ReplacementTypes.end());
3071 NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
3072 AttributeSet());
3073 } else {
3074 NewArgumentTypes.push_back(Arg.getType());
3075 NewArgumentAttributes.push_back(
3076 OldFnAttributeList.getParamAttrs(Arg.getArgNo()));
3077 }
3078 }
3079
// Track the widest vector argument so min-legal-vector-width metadata can be
// kept consistent on the new function and its callers.
3080 uint64_t LargestVectorWidth = 0;
3081 for (auto *I : NewArgumentTypes)
3082 if (auto *VT = dyn_cast<llvm::VectorType>(I))
3083 LargestVectorWidth =
3084 std::max(LargestVectorWidth,
3085 VT->getPrimitiveSizeInBits().getKnownMinValue());
3086
3087 FunctionType *OldFnTy = OldFn->getFunctionType();
3088 Type *RetTy = OldFnTy->getReturnType();
3089
3090 // Construct the new function type using the new arguments types.
3091 FunctionType *NewFnTy =
3092 FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());
3093
3094 LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
3095 << "' from " << *OldFn->getFunctionType() << " to "
3096 << *NewFnTy << "\n");
3097
3098 // Create the new function body and insert it into the module.
3099 Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
3100 OldFn->getAddressSpace(), "");
3101 Functions.insert(NewFn);
3102 OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
3103 NewFn->takeName(OldFn);
3104 NewFn->copyAttributesFrom(OldFn);
3105
3106 // Patch the pointer to LLVM function in debug info descriptor.
3107 NewFn->setSubprogram(OldFn->getSubprogram());
3108 OldFn->setSubprogram(nullptr);
3109
3110 // Recompute the parameter attributes list based on the new arguments for
3111 // the function.
3112 LLVMContext &Ctx = OldFn->getContext();
3113 NewFn->setAttributes(AttributeList::get(
3114 Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(),
3115 NewArgumentAttributes));
3116 AttributeFuncs::updateMinLegalVectorWidthAttr(*NewFn, LargestVectorWidth);
3117
3118 // Remove argmem from the memory effects if we have no more pointer
3119 // arguments, or they are readnone.
3120 MemoryEffects ME = NewFn->getMemoryEffects();
3121 int ArgNo = -1;
3122 if (ME.doesAccessArgPointees() && all_of(NewArgumentTypes, [&](Type *T) {
3123 ++ArgNo;
3124 return !T->isPtrOrPtrVectorTy() ||
3125 NewFn->hasParamAttribute(ArgNo, Attribute::ReadNone);
3126 })) {
// NOTE(review): the body of this if (source line 3127, presumably
// clearing the argmem location from `ME` and re-setting it on NewFn) was
// lost in extraction.
3128 }
3129
3130 // Since we have now created the new function, splice the body of the old
3131 // function right into the new function, leaving the old rotting hulk of the
3132 // function empty.
3133 NewFn->splice(NewFn->begin(), OldFn);
3134
3135 // Set of all "call-like" instructions that invoke the old function mapped
3136 // to their new replacements.
3138
3139 // Callback to create a new "call-like" instruction for a given one.
3140 auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
3141 CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
3142 const AttributeList &OldCallAttributeList = OldCB->getAttributes();
3143
3144 // Collect the new argument operands for the replacement call site.
3145 SmallVector<Value *, 16> NewArgOperands;
3146 SmallVector<AttributeSet, 16> NewArgOperandAttributes;
3147 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
3148 unsigned NewFirstArgNum = NewArgOperands.size();
3149 (void)NewFirstArgNum; // only used inside assert.
3150 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
3151 ARIs[OldArgNum]) {
3152 if (ARI->ACSRepairCB)
3153 ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
3154 assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
3155 NewArgOperands.size() &&
3156 "ACS repair callback did not provide as many operand as new "
3157 "types were registered!");
3158 // TODO: Exose the attribute set to the ACS repair callback
3159 NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
3160 AttributeSet());
3161 } else {
3162 NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
3163 NewArgOperandAttributes.push_back(
3164 OldCallAttributeList.getParamAttrs(OldArgNum));
3165 }
3166 }
3167
3168 assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
3169 "Mismatch # argument operands vs. # argument operand attributes!");
3170 assert(NewArgOperands.size() == NewFn->arg_size() &&
3171 "Mismatch # argument operands vs. # function arguments!");
3172
3173 SmallVector<OperandBundleDef, 4> OperandBundleDefs;
3174 OldCB->getOperandBundlesAsDefs(OperandBundleDefs);
3175
3176 // Create a new call or invoke instruction to replace the old one.
3177 CallBase *NewCB;
3178 if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
3179 NewCB = InvokeInst::Create(NewFn, II->getNormalDest(),
3180 II->getUnwindDest(), NewArgOperands,
3181 OperandBundleDefs, "", OldCB->getIterator());
3182 } else {
3183 auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
3184 "", OldCB->getIterator());
3185 NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
3186 NewCB = NewCI;
3187 }
3188
3189 // Copy over various properties and the new attributes.
3190 NewCB->copyMetadata(*OldCB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
3191 NewCB->setCallingConv(OldCB->getCallingConv());
3192 NewCB->takeName(OldCB);
3193 NewCB->setAttributes(AttributeList::get(
3194 Ctx, OldCallAttributeList.getFnAttrs(),
3195 OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));
3196
3197 AttributeFuncs::updateMinLegalVectorWidthAttr(*NewCB->getCaller(),
3198 LargestVectorWidth);
3199
3200 CallSitePairs.push_back({OldCB, NewCB});
3201 return true;
3202 };
3203
3204 // Use the CallSiteReplacementCreator to create replacement call sites.
3205 bool UsedAssumedInformation = false;
3206 bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
3207 true, nullptr, UsedAssumedInformation,
3208 /* CheckPotentiallyDead */ true);
3209 (void)Success;
3210 assert(Success && "Assumed call site replacement to succeed!");
3211
3212 // Rewire the arguments.
3213 Argument *OldFnArgIt = OldFn->arg_begin();
3214 Argument *NewFnArgIt = NewFn->arg_begin();
3215 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
3216 ++OldArgNum, ++OldFnArgIt) {
3217 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
3218 ARIs[OldArgNum]) {
3219 if (ARI->CalleeRepairCB)
3220 ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
// An argument dropped without replacements still needs its old uses
// detached; poison is the stand-in value.
3221 if (ARI->ReplacementTypes.empty())
3222 OldFnArgIt->replaceAllUsesWith(
3223 PoisonValue::get(OldFnArgIt->getType()));
3224 NewFnArgIt += ARI->ReplacementTypes.size();
3225 } else {
3226 NewFnArgIt->takeName(&*OldFnArgIt);
3227 OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
3228 ++NewFnArgIt;
3229 }
3230 }
3231
3232 // Eliminate the instructions *after* we visited all of them.
3233 for (auto &CallSitePair : CallSitePairs) {
3234 CallBase &OldCB = *CallSitePair.first;
3235 CallBase &NewCB = *CallSitePair.second;
3236 assert(OldCB.getType() == NewCB.getType() &&
3237 "Cannot handle call sites with different types!");
3238 ModifiedFns.insert(OldCB.getFunction());
3239 OldCB.replaceAllUsesWith(&NewCB);
3240 OldCB.eraseFromParent();
3241 }
3242
3243 // Replace the function in the call graph (if any).
3244 Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);
3245
3246 // If the old function was modified and needed to be reanalyzed, the new one
3247 // does now.
3248 if (ModifiedFns.remove(OldFn))
3249 ModifiedFns.insert(NewFn);
3250
3252 }
3253
3254 return Changed;
3255}
3256
// Populates the per-function information cache for \p CF: records whether it
// is a kernel, indexes "interesting" instructions by opcode, collects
// memory-touching instructions, fills the knowledge map from llvm.assume
// calls, tracks assume-only values, flags must-tail callers/callees, and
// registers always-inline functions that are inline-viable.
3257void InformationCache::initializeInformationCache(const Function &CF,
3258 FunctionInfo &FI) {
3259 // As we do not modify the function here we can remove the const
3260 // withouth breaking implicit assumptions. At the end of the day, we could
3261 // initialize the cache eagerly which would look the same to the users.
3262 Function &F = const_cast<Function &>(CF);
3263
3264 FI.IsKernel = F.hasFnAttribute("kernel");
3265
3266 // Walk all instructions to find interesting instructions that might be
3267 // queried by abstract attributes during their initialization or update.
3268 // This has to happen before we create attributes.
3269
// Per-value remaining "outside" use counts while peeling off assume uses.
3270 DenseMap<const Value *, std::optional<short>> AssumeUsesMap;
3271
3272 // Add \p V to the assume uses map which track the number of uses outside of
3273 // "visited" assumes. If no outside uses are left the value is added to the
3274 // assume only use vector.
3275 auto AddToAssumeUsesMap = [&](const Value &V) -> void {
3276 SmallVector<const Instruction *> Worklist;
3277 if (auto *I = dyn_cast<Instruction>(&V))
3278 Worklist.push_back(I);
3279 while (!Worklist.empty()) {
3280 const Instruction *I = Worklist.pop_back_val();
// First visit initializes the counter with the total use count; each visit
// consumes one use attributed to an assume.
3281 std::optional<short> &NumUses = AssumeUsesMap[I];
3282 if (!NumUses)
3283 NumUses = I->getNumUses();
3284 NumUses = *NumUses - /* this assume */ 1;
3285 if (*NumUses != 0)
3286 continue;
// All uses were assume-feeding; the instruction (and transitively its
// operands) only exist for assumes.
3287 AssumeOnlyValues.insert(I);
3288 for (const Value *Op : I->operands())
3289 if (auto *OpI = dyn_cast<Instruction>(Op))
3290 Worklist.push_back(OpI);
3291 }
3292 };
3293
3294 for (Instruction &I : instructions(&F)) {
3295 bool IsInterestingOpcode = false;
3296
3297 // To allow easy access to all instructions in a function with a given
3298 // opcode we store them in the InfoCache. As not all opcodes are interesting
3299 // to concrete attributes we only cache the ones that are as identified in
3300 // the following switch.
3301 // Note: There are no concrete attributes now so this is initially empty.
3302 switch (I.getOpcode()) {
3303 default:
3304 assert(!isa<CallBase>(&I) &&
3305 "New call base instruction type needs to be known in the "
3306 "Attributor.");
3307 break;
3308 case Instruction::Call:
3309 // Calls are interesting on their own, additionally:
3310 // For `llvm.assume` calls we also fill the KnowledgeMap as we find them.
3311 // For `must-tail` calls we remember the caller and callee.
3312 if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
3313 AssumeOnlyValues.insert(Assume);
3314 fillMapFromAssume(*Assume, KnowledgeMap);
3315 AddToAssumeUsesMap(*Assume->getArgOperand(0));
3316 } else if (cast<CallInst>(I).isMustTailCall()) {
3317 FI.ContainsMustTailCall = true;
3318 if (auto *Callee = dyn_cast_if_present<Function>(
3319 cast<CallInst>(I).getCalledOperand()))
3320 getFunctionInfo(*Callee).CalledViaMustTail = true;
3321 }
3322 [[fallthrough]];
3323 case Instruction::CallBr:
3324 case Instruction::Invoke:
3325 case Instruction::CleanupRet:
3326 case Instruction::CatchSwitch:
3327 case Instruction::AtomicRMW:
3328 case Instruction::AtomicCmpXchg:
3329 case Instruction::UncondBr:
3330 case Instruction::CondBr:
3331 case Instruction::Resume:
3332 case Instruction::Ret:
3333 case Instruction::Load:
3334 // The alignment of a pointer is interesting for loads.
3335 case Instruction::Store:
3336 // The alignment of a pointer is interesting for stores.
3337 case Instruction::Alloca:
3338 case Instruction::AddrSpaceCast:
3339 IsInterestingOpcode = true;
3340 }
3341 if (IsInterestingOpcode) {
// Vectors live in the bump allocator; FunctionInfo's destructor runs the
// destructors manually.
3342 auto *&Insts = FI.OpcodeInstMap[I.getOpcode()];
3343 if (!Insts)
3344 Insts = new (Allocator) InstructionVectorTy();
3345 Insts->push_back(&I);
3346 }
3347 if (I.mayReadOrWriteMemory())
3348 FI.RWInsts.push_back(&I);
3349 }
3350
3351 if (F.hasFnAttribute(Attribute::AlwaysInline) &&
3352 isInlineViable(F).isSuccess())
3353 InlineableFunctions.insert(&F);
3354}
3355
3356InformationCache::FunctionInfo::~FunctionInfo() {
3357 // The instruction vectors are allocated using a BumpPtrAllocator, we need to
3358 // manually destroy them.
3359 for (auto &It : OpcodeInstMap)
3360 It.getSecond()->~InstructionVectorTy();
3361}
3362
// Returns the set of functions that may be reached by indirect calls; only
// meaningful (and only allowed, see the assert) under the closed-world
// assumption.
// NOTE(review): the function header line (presumably
// `InformationCache::getIndirectlyCallableFunctions(Attributor &A)`) was
// lost in extraction — confirm upstream.
3365 assert(A.isClosedWorldModule() && "Cannot see all indirect callees!");
3366 return IndirectlyCallableFunctions;
3367}
3368
3369std::optional<unsigned> InformationCache::getFlatAddressSpace() const {
3370 if (IsTargetGPU())
3371 return 0;
3372 return std::nullopt;
3373}
3374
// Records that \p FromAA depends on \p ToAA with the given dependence class,
// so that a change of ToAA re-enqueues FromAA during the fixpoint iteration.
// NOTE(review): the function header line (presumably
// `void Attributor::recordDependence(const AbstractAttribute &FromAA,`) was
// lost in extraction — confirm upstream.
3376 const AbstractAttribute &ToAA,
3377 DepClassTy DepClass) {
3378 if (DepClass == DepClassTy::NONE)
3379 return;
3380 // If we are outside of an update, thus before the actual fixpoint iteration
3381 // started (= when we create AAs), we do not track dependences because we will
3382 // put all AAs into the initial worklist anyway.
3383 if (DependenceStack.empty())
3384 return;
// A source already at a fixpoint can never change again, so the edge would
// be dead weight.
3385 if (FromAA.getState().isAtFixpoint())
3386 return;
3387 DependenceStack.back()->push_back({&FromAA, &ToAA, DepClass});
3388}
3389
3390void Attributor::rememberDependences() {
3391 assert(!DependenceStack.empty() && "No dependences to remember!");
3392
3393 for (DepInfo &DI : *DependenceStack.back()) {
3394 assert((DI.DepClass == DepClassTy::REQUIRED ||
3395 DI.DepClass == DepClassTy::OPTIONAL) &&
3396 "Expected required or optional dependence (1 bit)!");
3397 auto &DepAAs = const_cast<AbstractAttribute &>(*DI.FromAA).Deps;
3398 DepAAs.insert(AbstractAttribute::DepTy(
3399 const_cast<AbstractAttribute *>(DI.ToAA), unsigned(DI.DepClass)));
3400 }
3401}
3402
3403template <Attribute::AttrKind AK, typename AAType>
3404void Attributor::checkAndQueryIRAttr(const IRPosition &IRP, AttributeSet Attrs,
3405 bool SkipHasAttrCheck) {
3406 bool IsKnown;
3407 if (SkipHasAttrCheck || !Attrs.hasAttribute(AK))
3408 if (!Configuration.Allowed || Configuration.Allowed->count(&AAType::ID))
3409 if (!AA::hasAssumedIRAttr<AK>(*this, nullptr, IRP, DepClassTy::NONE,
3410 IsKnown))
3411 getOrCreateAAFor<AAType>(IRP);
3412}
3413
// Seeds the default set of abstract attributes for function \p F: liveness /
// UB / heap-to-stack analyses, function-level IR attributes (nofree,
// willreturn, nosync, ...), return- and argument-position attributes when the
// function is IPO amendable, and per-instruction seeds for call sites,
// loads/stores, and allocas.
// NOTE(review): extraction dropped the function header (presumably
// `void Attributor::identifyDefaultAbstractAttributes(Function &F) {`) and
// many identifier-bearing lines (the FPos/RetPos/CBFnPos/CBArgPos position
// declarations and numerous getOrCreateAAFor<...> calls) — confirm against
// the upstream source before relying on this listing.
3415 assert(!F.isDeclaration());
3416
// Each function is seeded at most once.
3417 if (!VisitedFunctions.insert(&F).second)
3418 return;
3419
3420 // In non-module runs we need to look at the call sites of a function to
3421 // determine if it is part of a must-tail call edge. This will influence what
3422 // attributes we can derive.
3423 InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F);
3424 if (!isModulePass() && !FI.CalledViaMustTail) {
3425 for (const Use &U : F.uses())
3426 if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
3427 if (CB->isCallee(&U) && CB->isMustTailCall())
3428 FI.CalledViaMustTail = true;
3429 }
3430
3432 bool IsIPOAmendable = isFunctionIPOAmendable(F);
3433 auto Attrs = F.getAttributes();
3434 auto FnAttrs = Attrs.getFnAttrs();
3435
3436 // Check for dead BasicBlocks in every function.
3437 // We need dead instruction detection because we do not want to deal with
3438 // broken IR in which SSA rules do not apply.
3440
3441 // Every function might contain instructions that cause "undefined
3442 // behavior".
3444
3445 // Every function might be applicable for Heap-To-Stack conversion.
3448
3449 // Every function might be "must-progress".
3450 checkAndQueryIRAttr<Attribute::MustProgress, AAMustProgress>(FPos, FnAttrs);
3451
3452 // Every function might be "no-free".
3453 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(FPos, FnAttrs);
3454
3455 // Every function might be "will-return".
3456 checkAndQueryIRAttr<Attribute::WillReturn, AAWillReturn>(FPos, FnAttrs);
3457
3458 // Every function might be marked "nosync"
3459 checkAndQueryIRAttr<Attribute::NoSync, AANoSync>(FPos, FnAttrs);
3460
3461 // Everything that is visible from the outside (=function, argument, return
3462 // positions), cannot be changed if the function is not IPO amendable. We can
3463 // however analyse the code inside.
3464 if (IsIPOAmendable) {
3465
3466 // Every function can be nounwind.
3467 checkAndQueryIRAttr<Attribute::NoUnwind, AANoUnwind>(FPos, FnAttrs);
3468
3469 // Every function might be "no-return".
3470 checkAndQueryIRAttr<Attribute::NoReturn, AANoReturn>(FPos, FnAttrs);
3471
3472 // Every function might be "no-recurse".
3473 checkAndQueryIRAttr<Attribute::NoRecurse, AANoRecurse>(FPos, FnAttrs);
3474
3475 // Every function can be "non-convergent".
3476 if (Attrs.hasFnAttr(Attribute::Convergent))
3478
3479 // Every function might be "readnone/readonly/writeonly/...".
3481
3482 // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
3484
3485 // Every function can track active assumptions.
3487
3488 // If we're not using a dynamic mode for float, there's nothing worthwhile
3489 // to infer. This misses the edge case denormal-fp-math="dynamic" and
3490 // denormal-fp-math-f32=something, but that likely has no real world use.
3491 DenormalMode Mode = F.getDenormalMode(APFloat::IEEEsingle());
3492 if (Mode.Input == DenormalMode::Dynamic ||
3493 Mode.Output == DenormalMode::Dynamic)
3495
3496 // Return attributes are only appropriate if the return type is non void.
3497 Type *ReturnType = F.getReturnType();
3498 if (!ReturnType->isVoidTy()) {
3500 AttributeSet RetAttrs = Attrs.getRetAttrs();
3501
3502 // Every returned value might be dead.
3504
3505 // Every function might be simplified.
3506 bool UsedAssumedInformation = false;
3507 getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation,
3509
3510 // Every returned value might be marked noundef.
3511 checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(RetPos, RetAttrs);
3512
3513 if (ReturnType->isPointerTy()) {
3514
3515 // Every function with pointer return type might be marked align.
3517
3518 // Every function with pointer return type might be marked nonnull.
3519 checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(RetPos, RetAttrs);
3520
3521 // Every function with pointer return type might be marked noalias.
3522 checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(RetPos, RetAttrs);
3523
3524 // Every function with pointer return type might be marked
3525 // dereferenceable.
3527 } else if (AttributeFuncs::isNoFPClassCompatibleType(ReturnType)) {
3529 }
3530 }
3531 }
3532
// Argument positions: outside-visible attributes only when IPO amendable.
3533 for (Argument &Arg : F.args()) {
3534 IRPosition ArgPos = IRPosition::argument(Arg);
3535 auto ArgNo = Arg.getArgNo();
3536 AttributeSet ArgAttrs = Attrs.getParamAttrs(ArgNo);
3537
3538 if (!IsIPOAmendable) {
3539 if (Arg.getType()->isPointerTy())
3540 // Every argument with pointer type might be marked nofree.
3541 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
3542 continue;
3543 }
3544
3545 // Every argument might be simplified. We have to go through the
3546 // Attributor interface though as outside AAs can register custom
3547 // simplification callbacks.
3548 bool UsedAssumedInformation = false;
3549 getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation,
3551
3552 // Every argument might be dead.
3554
3555 // Every argument might be marked noundef.
3556 checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(ArgPos, ArgAttrs);
3557
3558 if (Arg.getType()->isPointerTy()) {
3559 // Every argument with pointer type might be marked nonnull.
3560 checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(ArgPos, ArgAttrs);
3561
3562 // Every argument with pointer type might be marked noalias.
3563 checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(ArgPos, ArgAttrs);
3564
3565 // Every argument with pointer type might be marked dereferenceable.
3567
3568 // Every argument with pointer type might be marked align.
3570
3571 // Every argument with pointer type might be marked nocapture.
3572 checkAndQueryIRAttr<Attribute::Captures, AANoCapture>(
3573 ArgPos, ArgAttrs, /*SkipHasAttrCheck=*/true);
3574
3575 // Every argument with pointer type might be marked
3576 // "readnone/readonly/writeonly/..."
3578
3579 // Every argument with pointer type might be marked nofree.
3580 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
3581
3582 // Every argument with pointer type might be privatizable (or
3583 // promotable)
3585 } else if (AttributeFuncs::isNoFPClassCompatibleType(Arg.getType())) {
3587 }
3588 }
3589
// Per-call-site seeding, run over all Call/CallBr/Invoke instructions below.
3590 auto CallSitePred = [&](Instruction &I) -> bool {
3591 auto &CB = cast<CallBase>(I);
3592 IRPosition CBInstPos = IRPosition::inst(CB);
3594
3595 // Call sites might be dead if they do not have side effects and no live
3596 // users. The return value might be dead if there are no live users.
3597 getOrCreateAAFor<AAIsDead>(CBInstPos);
3598
3599 Function *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
3600 // TODO: Even if the callee is not known now we might be able to simplify
3601 // the call/callee.
3602 if (!Callee) {
3604 return true;
3605 }
3606
3607 // Every call site can track active assumptions.
3609
3610 // Skip declarations except if annotations on their call sites were
3611 // explicitly requested.
3612 if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
3613 !Callee->hasMetadata(LLVMContext::MD_callback))
3614 return true;
3615
3616 if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) {
3618 bool UsedAssumedInformation = false;
3619 getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation,
3621
3622 if (AttributeFuncs::isNoFPClassCompatibleType(Callee->getReturnType()))
3624 }
3625
3626 const AttributeList &CBAttrs = CBFnPos.getAttrList();
3627 for (int I = 0, E = CB.arg_size(); I < E; ++I) {
3628
3630 AttributeSet CBArgAttrs = CBAttrs.getParamAttrs(I);
3631
3632 // Every call site argument might be dead.
3634
3635 // Call site argument might be simplified. We have to go through the
3636 // Attributor interface though as outside AAs can register custom
3637 // simplification callbacks.
3638 bool UsedAssumedInformation = false;
3639 getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation,
3641
3642 // Every call site argument might be marked "noundef".
3643 checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(CBArgPos, CBArgAttrs);
3644
3645 Type *ArgTy = CB.getArgOperand(I)->getType();
3646
3647 if (!ArgTy->isPointerTy()) {
3648 if (AttributeFuncs::isNoFPClassCompatibleType(ArgTy))
3650
3651 continue;
3652 }
3653
3654 // Call site argument attribute "non-null".
3655 checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(CBArgPos, CBArgAttrs);
3656
3657 // Call site argument attribute "captures(none)".
3658 checkAndQueryIRAttr<Attribute::Captures, AANoCapture>(
3659 CBArgPos, CBArgAttrs, /*SkipHasAttrCheck=*/true);
3660
3661 // Call site argument attribute "no-alias".
3662 checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(CBArgPos, CBArgAttrs);
3663
3664 // Call site argument attribute "dereferenceable".
3666
3667 // Call site argument attribute "align".
3668 getOrCreateAAFor<AAAlign>(CBArgPos);
3669
3670 // Call site argument attribute
3671 // "readnone/readonly/writeonly/..."
3672 if (!CBAttrs.hasParamAttr(I, Attribute::ReadNone))
3674
3675 // Call site argument attribute "nofree".
3676 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(CBArgPos, CBArgAttrs);
3677 }
3678 return true;
3679 };
3680
3681 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
3682 [[maybe_unused]] bool Success;
3683 bool UsedAssumedInformation = false;
3685 nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
3686 {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
3687 (unsigned)Instruction::Call},
3688 UsedAssumedInformation);
3689 assert(Success && "Expected the check call to be successful!");
3690
// Loads get alignment / simplification / address-space seeds; stores get
// value simplification and pointer alignment seeds.
3691 auto LoadStorePred = [&](Instruction &I) -> bool {
3692 if (auto *LI = dyn_cast<LoadInst>(&I)) {
3693 getOrCreateAAFor<AAAlign>(IRPosition::value(*LI->getPointerOperand()));
3694 if (SimplifyAllLoads)
3696 UsedAssumedInformation, AA::Intraprocedural);
3698 IRPosition::value(*LI->getPointerOperand()));
3700 IRPosition::value(*LI->getPointerOperand()));
3701 } else {
3702 auto &SI = cast<StoreInst>(I);
3704 getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
3705 UsedAssumedInformation, AA::Intraprocedural);
3706 getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
3708 IRPosition::value(*SI.getPointerOperand()));
3709 }
3710 return true;
3711 };
3713 nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
3714 {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
3715 UsedAssumedInformation);
3716 assert(Success && "Expected the check call to be successful!");
3717
3718 // AllocaInstPredicate
3719 auto AAAllocationInfoPred = [&](Instruction &I) -> bool {
3721 return true;
3722 };
3723
3725 nullptr, OpcodeInstMap, AAAllocationInfoPred, nullptr, nullptr,
3726 {(unsigned)Instruction::Alloca}, UsedAssumedInformation);
3727 assert(Success && "Expected the check call to be successful!");
3728}
3729
// Returns whether we may assume a closed world, i.e. that all potential
// callees are visible. An explicitly set command-line flag overrides the
// configuration; otherwise the config only applies in module-pass mode.
// NOTE(review): the function header line (presumably
// `bool Attributor::isClosedWorldModule() const {`) was lost in extraction.
3731 if (CloseWorldAssumption.getNumOccurrences())
3732 return CloseWorldAssumption;
3733 return isModulePass() && Configuration.IsClosedWorldModule;
3734}
3735
3736/// Helpers to ease debugging through output streams and print calls.
3737///
3738///{
// Print a ChangeStatus as "changed"/"unchanged".
// NOTE(review): the operator<< header line (3739) was lost in extraction.
3740 return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
3741}
3742
// Print a short mnemonic for an IR position kind.
// NOTE(review): the operator header and the `case IRP_...:` label lines were
// lost in extraction; only the return statements remain.
3744 switch (AP) {
3746 return OS << "inv";
3748 return OS << "flt";
3750 return OS << "fn_ret";
3752 return OS << "cs_ret";
3754 return OS << "fn";
3756 return OS << "cs";
3758 return OS << "arg";
3760 return OS << "cs_arg";
3761 }
3762 llvm_unreachable("Unknown attribute position!");
3763}
3764
// Print an IRPosition as "{kind:value [anchor@argno]}", optionally followed
// by its call-base context.
// NOTE(review): the operator header line (3765) was lost in extraction.
3766 const Value &AV = Pos.getAssociatedValue();
3767 OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
3768 << Pos.getAnchorValue().getName() << "@" << Pos.getCallSiteArgNo() << "]";
3769
3770 if (Pos.hasCallBaseContext())
3771 OS << "[cb_context:" << *Pos.getCallBaseContext() << "]";
3772 return OS << "}";
3773}
3774
// Print an integer range state as "range-state(bits)<known / assumed>" plus
// the generic abstract-state suffix.
// NOTE(review): the operator header line was lost in extraction.
3776 OS << "range-state(" << S.getBitWidth() << ")<";
3777 S.getKnown().print(OS);
3778 OS << " / ";
3779 S.getAssumed().print(OS);
3780 OS << ">";
3781
3782 return OS << static_cast<const AbstractState &>(S);
3783}
3784
// Print an abstract state: "top" if invalid, "fix" if at a fixpoint,
// otherwise nothing.
// NOTE(review): the operator header line was lost in extraction.
3786 return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
3787}
3788
// Stream an abstract attribute by delegating to its print method.
// NOTE(review): the operator header line was lost in extraction.
3790 AA.print(OS);
3791 return OS;
3792}
3793
// Print a potential-constant-int-values state as a set, "full-set" when the
// state is invalid, plus "undef" if undef is contained.
// NOTE(review): the operator header lines were lost in extraction.
3796 OS << "set-state(< {";
3797 if (!S.isValidState())
3798 OS << "full-set";
3799 else {
3800 for (const auto &It : S.getAssumedSet())
3801 OS << It << ", ";
3802 if (S.undefIsContained())
3803 OS << "undef ";
3804 }
3805 OS << "} >)";
3806
3807 return OS;
3808}
3809
// Print a potential-LLVM-values state: functions as "@name[scope]", other
// values inline, "full-set" when invalid, plus "undef" if contained.
// NOTE(review): the first line of the operator header was lost in extraction.
3811 const PotentialLLVMValuesState &S) {
3812 OS << "set-state(< {";
3813 if (!S.isValidState())
3814 OS << "full-set";
3815 else {
3816 for (const auto &It : S.getAssumedSet()) {
3817 if (auto *F = dyn_cast<Function>(It.first.getValue()))
3818 OS << "@" << F->getName() << "[" << int(It.second) << "], ";
3819 else
3820 OS << *It.first.getValue() << "[" << int(It.second) << "], ";
3821 }
3822 if (S.undefIsContained())
3823 OS << "undef ";
3824 }
3825 OS << "} >)";
3826
3827 return OS;
3828}
3829
// Print this abstract attribute: its name, context instruction (or
// "<<null inst>>"), IR position, and current state string.
// NOTE(review): the method header line (presumably
// `void AbstractAttribute::print(Attributor *A, raw_ostream &OS) const {`)
// was lost in extraction.
3831 OS << "[";
3832 OS << getName();
3833 OS << "] for CtxI ";
3834
3835 if (auto *I = getCtxI()) {
3836 OS << "'";
3837 I->print(OS);
3838 OS << "'";
3839 } else
3840 OS << "<<null inst>>";
3841
3842 OS << " at position " << getIRPosition() << " with state " << getAsStr(A)
3843 << '\n';
3844}
3845
// Print this abstract attribute followed by every attribute that depends on
// it (one "updates" line per dependent AA).
// NOTE(review): the method header line was lost in extraction.
3847 print(OS);
3848
3849 for (const auto &DepAA : Deps) {
3850 auto *AA = DepAA.getPointer();
3851 OS << " updates ";
3852 AA->print(OS);
3853 }
3854
3855 OS << '\n';
3856}
3857
// Print a pointer-info access: kind, remote instruction, the local
// instruction if different, and the written/read content when known.
// NOTE(review): the first line of the operator header was lost in extraction.
3859 const AAPointerInfo::Access &Acc) {
3860 OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst();
3861 if (Acc.getLocalInst() != Acc.getRemoteInst())
3862 OS << " via " << *Acc.getLocalInst();
3863 if (Acc.getContent()) {
// getContent() is optional-of-optional: known-but-unspecified content prints
// as "<unknown>".
3864 if (*Acc.getContent())
3865 OS << " [" << **Acc.getContent() << "]";
3866 else
3867 OS << " [ <unknown> ]";
3868 }
3869 return OS;
3870}
3871///}
3872
3873/// ----------------------------------------------------------------------------
3874/// Pass (Manager) Boilerplate
3875/// ----------------------------------------------------------------------------
3876
// Driver that runs the Attributor over \p Functions: configures the
// AttributorConfig (module mode, function deletion, indirect-call
// specialization limit), creates shallow wrappers / internalized copies where
// needed, seeds default abstract attributes for eligible functions, and runs
// the fixpoint iteration.
// NOTE(review): the first header line (presumably
// `static bool runAttributorOnFunctions(InformationCache &InfoCache,`) and
// the final `return` statement (3980) were lost in extraction — confirm
// upstream.
3878 SetVector<Function *> &Functions,
3879 AnalysisGetter &AG,
3880 CallGraphUpdater &CGUpdater,
3881 bool DeleteFns, bool IsModulePass) {
3882 if (Functions.empty())
3883 return false;
3884
3885 LLVM_DEBUG({
3886 dbgs() << "[Attributor] Run on module with " << Functions.size()
3887 << " functions:\n";
3888 for (Function *Fn : Functions)
3889 dbgs() << " - " << Fn->getName() << "\n";
3890 });
3891
3892 // Create an Attributor and initially empty information cache that is filled
3893 // while we identify default attribute opportunities.
3894 AttributorConfig AC(CGUpdater);
3895 AC.IsModulePass = IsModulePass;
3896 AC.DeleteFns = DeleteFns;
3897
3898 /// Tracking callback for specialization of indirect calls.
3900 IndirectCalleeTrackingMap;
3901 if (MaxSpecializationPerCB.getNumOccurrences()) {
3902 AC.IndirectCalleeSpecializationCallback =
3903 [&](Attributor &, const AbstractAttribute &AA, CallBase &CB,
3904 Function &Callee, unsigned) {
3905 if (MaxSpecializationPerCB == 0)
3906 return false;
// Cap the number of distinct callees specialized per call base; once the
// cap is reached only already-tracked callees are allowed.
3907 auto &Set = IndirectCalleeTrackingMap[&CB];
3908 if (!Set)
3909 Set = std::make_unique<SmallPtrSet<Function *, 8>>();
3910 if (Set->size() >= MaxSpecializationPerCB)
3911 return Set->contains(&Callee);
3912 Set->insert(&Callee);
3913 return true;
3914 };
3915 }
3916
3917 Attributor A(Functions, InfoCache, AC);
3918
3919 // Create shallow wrappers for all functions that are not IPO amendable
3921 for (Function *F : Functions)
3922 if (!A.isFunctionIPOAmendable(*F))
3924
3925 // Internalize non-exact functions
3926 // TODO: for now we eagerly internalize functions without calculating the
3927 // cost, we need a cost interface to determine whether internalizing
3928 // a function is "beneficial"
3929 if (AllowDeepWrapper) {
3930 unsigned FunSize = Functions.size();
3931 for (unsigned u = 0; u < FunSize; u++) {
3932 Function *F = Functions[u];
3933 if (!F->isDeclaration() && !F->isDefinitionExact() && !F->use_empty() &&
3934 !GlobalValue::isInterposableLinkage(F->getLinkage())) {
3936 assert(NewF && "Could not internalize function.");
3937 Functions.insert(NewF);
3938
3939 // Update call graph
3940 CGUpdater.replaceFunctionWith(*F, *NewF);
3941 for (const Use &U : NewF->uses())
3942 if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
3943 auto *CallerF = CB->getCaller();
3944 CGUpdater.reanalyzeFunction(*CallerF);
3945 }
3946 }
3947 }
3948 }
3949
3950 for (Function *F : Functions) {
3951 if (F->isDeclaration())
3952 continue;
3953
3954 if (F->hasExactDefinition())
3955 NumFnWithExactDefinition++;
3956 else
3957 NumFnWithoutExactDefinition++;
3958
3959 // We look at internal functions only on-demand but if any use is not a
3960 // direct call or outside the current set of analyzed functions, we have
3961 // to do it eagerly.
3962 if (F->hasLocalLinkage()) {
3963 if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
3964 const auto *CB = dyn_cast<CallBase>(U.getUser());
3965 return CB && CB->isCallee(&U) &&
3966 Functions.count(const_cast<Function *>(CB->getCaller()));
3967 }))
3968 continue;
3969 }
3970
3971 // Populate the Attributor with abstract attribute opportunities in the
3972 // function and the information cache with IR information.
3973 A.identifyDefaultAbstractAttributes(*F);
3974 }
3975
3976 ChangeStatus Changed = A.run();
3977
3978 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
3979 << " functions, result: " << Changed << ".\n");
3981}
3982
3984 SetVector<Function *> &Functions,
3985 AnalysisGetter &AG,
3986 CallGraphUpdater &CGUpdater,
3988 bool IsModulePass) {
3989 if (Functions.empty())
3990 return false;
3991
3992 LLVM_DEBUG({
3993 dbgs() << "[AttributorLight] Run on module with " << Functions.size()
3994 << " functions:\n";
3995 for (Function *Fn : Functions)
3996 dbgs() << " - " << Fn->getName() << "\n";
3997 });
3998
3999 // Create an Attributor and initially empty information cache that is filled
4000 // while we identify default attribute opportunities.
4001 AttributorConfig AC(CGUpdater);
4002 AC.IsModulePass = IsModulePass;
4003 AC.DeleteFns = false;
4004 DenseSet<const char *> Allowed(
4011 AC.Allowed = &Allowed;
4012 AC.UseLiveness = false;
4013
4014 Attributor A(Functions, InfoCache, AC);
4015
4016 for (Function *F : Functions) {
4017 if (F->isDeclaration())
4018 continue;
4019
4020 if (F->hasExactDefinition())
4021 NumFnWithExactDefinition++;
4022 else
4023 NumFnWithoutExactDefinition++;
4024
4025 // We look at internal functions only on-demand but if any use is not a
4026 // direct call or outside the current set of analyzed functions, we have
4027 // to do it eagerly.
4028 if (AC.UseLiveness && F->hasLocalLinkage()) {
4029 if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
4030 const auto *CB = dyn_cast<CallBase>(U.getUser());
4031 return CB && CB->isCallee(&U) &&
4032 Functions.count(const_cast<Function *>(CB->getCaller()));
4033 }))
4034 continue;
4035 }
4036
4037 // Populate the Attributor with abstract attribute opportunities in the
4038 // function and the information cache with IR information.
4039 A.identifyDefaultAbstractAttributes(*F);
4040 }
4041
4042 ChangeStatus Changed = A.run();
4043
4045 // Invalidate analyses for modified functions so that we don't have to
4046 // invalidate all analyses for all functions in this SCC.
4047 PreservedAnalyses FuncPA;
4048 // We haven't changed the CFG for modified functions.
4049 FuncPA.preserveSet<CFGAnalyses>();
4050 for (Function *Changed : A.getModifiedFunctions()) {
4051 FAM.invalidate(*Changed, FuncPA);
4052 // Also invalidate any direct callers of changed functions since analyses
4053 // may care about attributes of direct callees. For example, MemorySSA
4054 // cares about whether or not a call's callee modifies memory and queries
4055 // that through function attributes.
4056 for (auto *U : Changed->users()) {
4057 if (auto *Call = dyn_cast<CallBase>(U)) {
4058 if (Call->getCalledFunction() == Changed)
4059 FAM.invalidate(*Call->getFunction(), FuncPA);
4060 }
4061 }
4062 }
4063 }
4064 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
4065 << " functions, result: " << Changed << ".\n");
4067}
4068
4069void AADepGraph::viewGraph() { llvm::ViewGraph(this, "Dependency Graph"); }
4070
4072 static std::atomic<int> CallTimes;
4073 std::string Prefix;
4074
4075 if (!DepGraphDotFileNamePrefix.empty())
4077 else
4078 Prefix = "dep_graph";
4079 std::string Filename =
4080 Prefix + "_" + std::to_string(CallTimes.load()) + ".dot";
4081
4082 outs() << "Dependency graph dump to " << Filename << ".\n";
4083
4084 std::error_code EC;
4085
4087 if (!EC)
4088 llvm::WriteGraph(File, this);
4089
4090 CallTimes++;
4091}
4092
4094 for (auto DepAA : SyntheticRoot.Deps)
4095 cast<AbstractAttribute>(DepAA.getPointer())->printWithDeps(outs());
4096}
4097
4101 AnalysisGetter AG(FAM);
4102
4103 SetVector<Function *> Functions;
4104 for (Function &F : M)
4105 Functions.insert(&F);
4106
4107 CallGraphUpdater CGUpdater;
4108 BumpPtrAllocator Allocator;
4109 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
4110 if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
4111 /* DeleteFns */ true, /* IsModulePass */ true)) {
4112 // FIXME: Think about passes we will preserve and add them here.
4113 return PreservedAnalyses::none();
4114 }
4115 return PreservedAnalyses::all();
4116}
4117
4120 LazyCallGraph &CG,
4121 CGSCCUpdateResult &UR) {
4123 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
4124 AnalysisGetter AG(FAM);
4125
4126 SetVector<Function *> Functions;
4127 for (LazyCallGraph::Node &N : C)
4128 Functions.insert(&N.getFunction());
4129
4130 if (Functions.empty())
4131 return PreservedAnalyses::all();
4132
4133 Module &M = *Functions.back()->getParent();
4134 CallGraphUpdater CGUpdater;
4135 CGUpdater.initialize(CG, C, AM, UR);
4136 BumpPtrAllocator Allocator;
4137 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
4138 if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
4139 /* DeleteFns */ false,
4140 /* IsModulePass */ false)) {
4141 // FIXME: Think about passes we will preserve and add them here.
4144 return PA;
4145 }
4146 return PreservedAnalyses::all();
4147}
4148
4153 AnalysisGetter AG(FAM, /* CachedOnly */ true);
4154
4155 SetVector<Function *> Functions;
4156 for (Function &F : M)
4157 Functions.insert(&F);
4158
4159 CallGraphUpdater CGUpdater;
4160 BumpPtrAllocator Allocator;
4161 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
4162 if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
4163 /* IsModulePass */ true)) {
4165 // We have not added or removed functions.
4167 // We already invalidated all relevant function analyses above.
4169 return PA;
4170 }
4171 return PreservedAnalyses::all();
4172}
4173
4176 LazyCallGraph &CG,
4177 CGSCCUpdateResult &UR) {
4179 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
4180 AnalysisGetter AG(FAM);
4181
4182 SetVector<Function *> Functions;
4183 for (LazyCallGraph::Node &N : C)
4184 Functions.insert(&N.getFunction());
4185
4186 if (Functions.empty())
4187 return PreservedAnalyses::all();
4188
4189 Module &M = *Functions.back()->getParent();
4190 CallGraphUpdater CGUpdater;
4191 CGUpdater.initialize(CG, C, AM, UR);
4192 BumpPtrAllocator Allocator;
4193 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
4194 if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
4195 /* IsModulePass */ false)) {
4197 // We have not added or removed functions.
4199 // We already invalidated all relevant function analyses above.
4201 return PA;
4202 }
4203 return PreservedAnalyses::all();
4204}
4205namespace llvm {
4206
4223
4224template <>
4226 static NodeRef getEntryNode(AADepGraph *DG) { return DG->GetEntryNode(); }
4227
4230
4231 static nodes_iterator nodes_begin(AADepGraph *DG) { return DG->begin(); }
4232
4233 static nodes_iterator nodes_end(AADepGraph *DG) { return DG->end(); }
4234};
4235
4236template <> struct DOTGraphTraits<AADepGraph *> : public DefaultDOTGraphTraits {
4238
4239 static std::string getNodeLabel(const AADepGraphNode *Node,
4240 const AADepGraph *DG) {
4241 std::string AAString;
4242 raw_string_ostream O(AAString);
4243 Node->print(O);
4244 return AAString;
4245 }
4246};
4247
4248} // end namespace llvm
aarch64 falkor hwpf fix Falkor HW Prefetch Fix Late Phase
static unsigned getIntrinsicID(const SDNode *N)
@ Generic
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
This file contains the simple types necessary to represent the attributes associated with functions a...
static cl::opt< bool > AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden, cl::desc("Allow the Attributor to create shallow " "wrappers for non-exact definitions."), cl::init(false))
bool canMarkAsVisited(const User *Usr)
#define VERBOSE_DEBUG_TYPE
static cl::opt< bool > EnableHeapToStack("enable-heap-to-stack-conversion", cl::init(true), cl::Hidden)
static cl::list< std::string > SeedAllowList("attributor-seed-allow-list", cl::Hidden, cl::desc("Comma separated list of attribute names that are " "allowed to be seeded."), cl::CommaSeparated)
static bool runAttributorOnFunctions(InformationCache &InfoCache, SetVector< Function * > &Functions, AnalysisGetter &AG, CallGraphUpdater &CGUpdater, bool DeleteFns, bool IsModulePass)
}
static bool getPotentialCopiesOfMemoryValue(Attributor &A, Ty &I, SmallSetVector< Value *, 4 > &PotentialCopies, SmallSetVector< Instruction *, 4 > *PotentialValueOrigins, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact)
static bool runAttributorLightOnFunctions(InformationCache &InfoCache, SetVector< Function * > &Functions, AnalysisGetter &AG, CallGraphUpdater &CGUpdater, FunctionAnalysisManager &FAM, bool IsModulePass)
static cl::opt< unsigned, true > MaxInitializationChainLengthX("attributor-max-initialization-chain-length", cl::Hidden, cl::desc("Maximal number of chained initializations (to avoid stack overflows)"), cl::location(MaxInitializationChainLength), cl::init(1024))
static cl::opt< unsigned > MaxSpecializationPerCB("attributor-max-specializations-per-call-base", cl::Hidden, cl::desc("Maximal number of callees specialized for " "a call base"), cl::init(UINT32_MAX))
static cl::opt< bool > SimplifyAllLoads("attributor-simplify-all-loads", cl::Hidden, cl::desc("Try to simplify all loads."), cl::init(true))
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr, AttributeSet AttrSet, bool ForceReplace, AttrBuilder &AB)
Return true if the information provided by Attr was added to the attribute set AttrSet.
static cl::opt< bool > ViewDepGraph("attributor-view-dep-graph", cl::Hidden, cl::desc("View the dependency graph."), cl::init(false))
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old)
Return true if New is equal or worse than Old.
static cl::opt< bool > AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden, cl::desc("Allow the Attributor to use IP information " "derived from non-exact functions via cloning"), cl::init(false))
static cl::opt< bool > DumpDepGraph("attributor-dump-dep-graph", cl::Hidden, cl::desc("Dump the dependency graph to dot files."), cl::init(false))
static cl::opt< bool > PrintCallGraph("attributor-print-call-graph", cl::Hidden, cl::desc("Print Attributor's internal call graph"), cl::init(false))
static bool checkForAllInstructionsImpl(Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap, function_ref< bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA, const AAIsDead *LivenessAA, ArrayRef< unsigned > Opcodes, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, bool CheckPotentiallyDead=false)
static cl::opt< bool > PrintDependencies("attributor-print-dep", cl::Hidden, cl::desc("Print attribute dependencies"), cl::init(false))
static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool RequireReadNone, bool &IsKnown)
static cl::opt< std::string > DepGraphDotFileNamePrefix("attributor-depgraph-dot-filename-prefix", cl::Hidden, cl::desc("The prefix used for the CallGraph dot file names."))
static cl::opt< bool > AnnotateDeclarationCallSites("attributor-annotate-decl-cs", cl::Hidden, cl::desc("Annotate call sites of function declarations."), cl::init(false))
static cl::opt< unsigned > SetFixpointIterations("attributor-max-iterations", cl::Hidden, cl::desc("Maximal number of fixpoint iterations."), cl::init(32))
static cl::list< std::string > FunctionSeedAllowList("attributor-function-seed-allow-list", cl::Hidden, cl::desc("Comma separated list of function names that are " "allowed to be seeded."), cl::CommaSeparated)
static cl::opt< bool > EnableCallSiteSpecific("attributor-enable-call-site-specific-deduction", cl::Hidden, cl::desc("Allow the Attributor to do call site specific analysis"), cl::init(false))
static cl::opt< bool > CloseWorldAssumption("attributor-assume-closed-world", cl::Hidden, cl::desc("Should a closed world be assumed, or not. Default if not set."))
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define T
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static constexpr StringLiteral Filename
FunctionAnalysisManager FAM
This file defines the PointerIntPair class.
static StringRef getName(Value *V)
Remove Loads Into Fake Uses
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallPtrSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition Debug.h:72
void print(OutputBuffer &OB) const
static const fltSemantics & IEEEsingle()
Definition APFloat.h:296
Class for arbitrary precision integers.
Definition APInt.h:78
CallBase * getInstruction() const
Return the underlying instruction.
bool isCallbackCall() const
Return true if this ACS represents a callback call.
const Use & getCalleeUseForCallback() const
Return the use of the callee value in the underlying instruction.
static LLVM_ABI void getCallbackUses(const CallBase &CB, SmallVectorImpl< const Use * > &CallbackUses)
Add operand uses of CB that represent callback uses into CallbackUses.
bool isCallee(Value::const_user_iterator UI) const
Return true if UI is the use that defines the callee of this ACS.
Value * getCallArgOperand(Argument &Arg) const
Return the operand of the underlying instruction associated with Arg.
int getCallArgOperandNo(Argument &Arg) const
Return the operand index of the underlying instruction associated with Arg.
unsigned getNumArgOperands() const
Return the number of parameters of the callee.
Function * getCalledFunction() const
Return the function being called if this is a direct call, otherwise return null (if it's an indirect...
This templated class represents "all analyses that operate over <aparticular IR unit>" (e....
Definition Analysis.h:50
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
const Function * getParent() const
Definition Argument.h:44
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
Definition Argument.h:50
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
This class stores enough information to efficiently remove some attributes from an existing AttrBuild...
This class holds the attributes for a particular argument, parameter, function, or return value.
Definition Attributes.h:407
LLVM_ABI MemoryEffects getMemoryEffects() const
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI Attribute getAttribute(Attribute::AttrKind Kind) const
Return the attribute object.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI bool isStringAttribute() const
Return true if the attribute is a string (target-dependent) attribute.
LLVM_ABI bool isEnumAttribute() const
Return true if the attribute is an Attribute::AttrKind type.
LLVM_ABI bool isIntAttribute() const
Return true if the attribute is an integer attribute.
LLVM_ABI uint64_t getValueAsInt() const
Return the attribute's value as an integer.
LLVM_ABI bool isConstantRangeAttribute() const
Return true if the attribute is a ConstantRange attribute.
LLVM_ABI StringRef getKindAsString() const
Return the attribute's kind as a string.
static LLVM_ABI Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
LLVM_ABI Attribute::AttrKind getKindAsEnum() const
Return the attribute's kind as an enum (Attribute::AttrKind).
LLVM_ABI MemoryEffects getMemoryEffects() const
Returns memory effects.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:124
@ None
No attributes have been set.
Definition Attributes.h:126
LLVM Basic Block Representation.
Definition BasicBlock.h:62
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
const Instruction & front() const
Definition BasicBlock.h:484
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
CallingConv::ID getCallingConv() const
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
Value * getCalledOperand() const
void setAttributes(AttributeList A)
Set the attributes for this call.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
Wrapper to unify "old style" CallGraph and "new style" LazyCallGraph.
LLVM_ABI void replaceFunctionWith(Function &OldFn, Function &NewFn)
Replace OldFn in the call graph (and SCC) with NewFn.
LLVM_ABI void reanalyzeFunction(Function &Fn)
After an CGSCC pass changes a function in ways that affect the call graph, this method can be called ...
void initialize(LazyCallGraph &LCG, LazyCallGraph::SCC &SCC, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR)
Initializers for usage outside of a CGSCC pass, inside a CGSCC pass in the old and new pass manager (...
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
void setTailCall(bool IsTc=true)
A constant value that is initialized with an expression using other constant values.
Definition Constants.h:1291
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
LLVM_ABI void print(raw_ostream &OS) const
Print out the bounds to a stream.
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
static bool shouldExecute(CounterInfo &Counter)
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
bool empty() const
Definition DenseMap.h:109
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
Analysis pass which computes a DominatorTree.
Definition Dominators.h:278
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
A proxy from a FunctionAnalysisManager to an SCC.
Class to represent function types.
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void setSubprogram(DISubprogram *SP)
Set the attached subprogram.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:168
void splice(Function::iterator ToIt, Function *FromF)
Transfer all blocks from FromF to this function at ToIt.
Definition Function.h:761
const BasicBlock & getEntryBlock() const
Definition Function.h:809
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
iterator_range< arg_iterator > args()
Definition Function.h:892
DISubprogram * getSubprogram() const
Get the attached subprogram.
MemoryEffects getMemoryEffects() const
Definition Function.cpp:859
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const
check if an attributes is in the list of attributes.
Definition Function.cpp:740
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition Function.h:354
iterator begin()
Definition Function.h:853
arg_iterator arg_begin()
Definition Function.h:868
void setAttributes(AttributeList Attrs)
Set the attribute list for this Function.
Definition Function.h:357
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:358
size_t arg_size() const
Definition Function.h:901
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:216
void setMemoryEffects(MemoryEffects ME)
Definition Function.cpp:862
Argument * getArg(unsigned i) const
Definition Function.h:886
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:229
void copyAttributesFrom(const Function *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a Function) from the ...
Definition Function.cpp:843
bool hasMetadata() const
Return true if this GlobalObject has any metadata attached to it.
LLVM_ABI void addMetadata(unsigned KindID, MDNode &MD)
Add a metadata attachment.
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:337
LinkageTypes getLinkage() const
bool hasLocalLinkage() const
void setLinkage(LinkageTypes LT)
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
void setDSOLocal(bool Local)
PointerType * getType() const
Global values are always pointers.
@ DefaultVisibility
The GV is visible.
Definition GlobalValue.h:68
void setVisibility(VisibilityTypes V)
static bool isInterposableLinkage(LinkageTypes Linkage)
Whether the definition of this global may be replaced by something non-equivalent at link time.
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
A node in the call graph.
An SCC of the call graph.
A lazily constructed view of the call graph of a module.
An instruction for reading from memory.
This is the common base class for memset/memcpy/memmove.
This class wraps the llvm.memcpy/memmove intrinsics.
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:143
bool doesAccessArgPointees() const
Whether this function may access argument memory.
Definition ModRef.h:260
static LLVM_ABI MemoryLocation getForSource(const MemTransferInst *MTI)
Return a location representing the source of a memory transfer.
static LLVM_ABI MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
static LLVM_ABI std::optional< MemoryLocation > getOrNone(const Instruction *Inst)
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
const FunctionListType & getFunctionList() const
Get the Module's list of functions (constant).
Definition Module.h:596
PointerIntPair - This class implements a pair of a pointer and small integer.
void * getOpaqueValue() const
PointerTy getPointer() const
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
A vector that has set insertion semantics.
Definition SetVector.h:57
ArrayRef< value_type > getArrayRef() const
Definition SetVector.h:91
bool remove(const value_type &X)
Remove an item from the set vector.
Definition SetVector.h:181
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
void insert_range(Range &&R)
Definition SetVector.h:176
size_type count(const_arg_type key) const
Count the number of elements of a given key in the SetVector.
Definition SetVector.h:262
typename vector_type::const_iterator iterator
Definition SetVector.h:72
void clear()
Completely clear the SetVector.
Definition SetVector.h:267
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition SetVector.h:106
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
size_type size() const
Definition SmallPtrSet.h:99
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
A visitor class for IR positions.
LLVM_ABI SubsumingPositionIterator(const IRPosition &IRP)
Provides information about what library functions are available for the current target.
The TimeTraceScope is a helper class to call the begin and end functions of the time trace profiler.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:549
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:709
iterator_range< use_iterator > uses()
Definition Value.h:380
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:399
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
iterator insert(iterator where, pointer New)
Definition ilist.h:165
A raw_ostream that writes to a file descriptor.
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
CallInst * Call
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
Definition Attributor.h:165
LLVM_ABI bool isAssumedReadNone(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool &IsKnown)
Return true if IRP is readnone.
LLVM_ABI bool isAssumedReadOnly(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool &IsKnown)
Return true if IRP is readonly.
LLVM_ABI std::optional< Value * > combineOptionalValuesInAAValueLatice(const std::optional< Value * > &A, const std::optional< Value * > &B, Type *Ty)
Return the combination of A and B such that the result is a possible value of both.
LLVM_ABI bool isValidAtPosition(const ValueAndContext &VAC, InformationCache &InfoCache)
Return true if the value of VAC is a valid at the position of VAC, that is a constant,...
LLVM_ABI bool isAssumedThreadLocalObject(Attributor &A, Value &Obj, const AbstractAttribute &QueryingAA)
Return true if Obj is assumed to be a thread local object.
LLVM_ABI bool isGPUConstantAddressSpace(const Module &M, unsigned AS)
Check if the given address space AS corresponds to a GPU constant address space for the target triple...
LLVM_ABI bool isGPUGenericAddressSpace(const Module &M, unsigned AS)
Check if the given address space AS corresponds to a GPU generic address space for the target triple ...
LLVM_ABI bool isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA, const Value &V, bool ForAnalysisOnly=true)
Return true if V is dynamically unique, that is, there are no two "instances" of V at runtime with di...
LLVM_ABI bool getPotentialCopiesOfStoredValue(Attributor &A, StoreInst &SI, SmallSetVector< Value *, 4 > &PotentialCopies, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact=false)
Collect all potential values of the one stored by SI into PotentialCopies.
LLVM_ABI bool isGPUSharedAddressSpace(const Module &M, unsigned AS)
Check if the given address space AS corresponds to a GPU shared address space for the target triple i...
LLVM_ABI bool isGPULocalAddressSpace(const Module &M, unsigned AS)
Check if the given address space AS corresponds to a GPU local/private address space for the target t...
LLVM_ABI bool isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I, const AbstractAttribute &QueryingAA)
Return true if I is potentially affected by a barrier.
SmallPtrSet< Instruction *, 4 > InstExclusionSetTy
Definition Attributor.h:166
LLVM_ABI bool isGPU(const Module &M)
Return true iff M target a GPU (and we can use GPU AS reasoning).
LLVM_ABI Constant * getInitialValueForObj(Attributor &A, const AbstractAttribute &QueryingAA, Value &Obj, Type &Ty, const TargetLibraryInfo *TLI, const DataLayout &DL, RangeTy *RangePtr=nullptr)
Return the initial value of Obj with type Ty if that is a constant.
ValueScope
Flags to distinguish intra-procedural queries from potentially inter-procedural queries.
Definition Attributor.h:194
@ Intraprocedural
Definition Attributor.h:195
@ Interprocedural
Definition Attributor.h:196
LLVM_ABI bool isValidInScope(const Value &V, const Function *Scope)
Return true if V is a valid value in Scope, that is a constant or an instruction/argument of Scope.
LLVM_ABI bool isGPUGlobalAddressSpace(const Module &M, unsigned AS)
Check if the given address space AS corresponds to a GPU global address space for the target triple i...
LLVM_ABI bool isPotentiallyReachable(Attributor &A, const Instruction &FromI, const Instruction &ToI, const AbstractAttribute &QueryingAA, const AA::InstExclusionSetTy *ExclusionSet=nullptr, std::function< bool(const Function &F)> GoBackwardsCB=nullptr)
Return true if ToI is potentially reachable from FromI without running into any instruction in Exclus...
LLVM_ABI bool isNoSyncInst(Attributor &A, const Instruction &I, const AbstractAttribute &QueryingAA)
Return true if I is a nosync instruction.
bool hasAssumedIRAttr(Attributor &A, const AbstractAttribute *QueryingAA, const IRPosition &IRP, DepClassTy DepClass, bool &IsKnown, bool IgnoreSubsumingPositions=false, const AAType **AAPtr=nullptr)
Helper to avoid creating an AA for IR Attributes that might already be set.
LLVM_ABI bool getPotentiallyLoadedValues(Attributor &A, LoadInst &LI, SmallSetVector< Value *, 4 > &PotentialValues, SmallSetVector< Instruction *, 4 > &PotentialValueOrigins, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact=false)
Collect all potential values LI could read into PotentialValues.
LLVM_ABI Value * getWithType(Value &V, Type &Ty)
Try to convert V to type Ty without introducing new instructions.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
@ Entry
Definition COFF.h:862
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
@ OF_TextWithCRLF
The file should be opened in text mode and use a carriage linefeed '\r '.
Definition FileSystem.h:786
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition Local.cpp:535
LLVM_ABI unsigned MaxInitializationChainLength
The value passed to the line option that defines the maximal initialization chain length.
LLVM_ABI bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions=false, const TargetLibraryInfo *TLI=nullptr, DomTreeUpdater *DTU=nullptr)
If a terminator instruction is predicated on a constant value, convert it into an unconditional branc...
Definition Local.cpp:134
APInt operator&(APInt a, const APInt &b)
Definition APInt.h:2152
LLVM_ABI void detachDeadBlocks(ArrayRef< BasicBlock * > BBs, SmallVectorImpl< DominatorTree::UpdateType > *Updates, bool KeepOneInputPHIs=false)
Replace contents of every block in BBs with single unreachable instruction.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
@ Done
Definition Threading.h:60
DenseMap< AssumeInst *, MinMax > Assume2KnowledgeMap
A mapping from intrinsics (=llvm.assume calls) to a value range (=knowledge) that is encoded in them.
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
LLVM_ABI CallInst * changeToCall(InvokeInst *II, DomTreeUpdater *DTU=nullptr)
This function converts the specified invoke into a normal call.
Definition Local.cpp:2594
LLVM_ABI raw_fd_ostream & outs()
This returns a reference to a raw_fd_ostream for standard output.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
Definition Casting.h:732
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
LLVM_ABI bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
Definition ModRef.h:356
raw_ostream & WriteGraph(raw_ostream &O, const GraphType &G, bool ShortNames=false, const Twine &Title="")
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
AnalysisManager< LazyCallGraph::SCC, LazyCallGraph & > CGSCCAnalysisManager
The CGSCC analysis manager.
LLVM_ABI InlineResult isInlineViable(Function &Callee)
Check if it is mechanically possible to inline the function Callee, based on the contents of the func...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1745
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:403
LLVM_ABI Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty, const DataLayout &DL)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all pois...
PotentialValuesState< std::pair< AA::ValueAndContext, AA::ValueScope > > PotentialLLVMValuesState
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI bool AreStatisticsEnabled()
Check if statistics are enabled.
LLVM_ABI Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
@ Success
The lock was released successfully.
LLVM_ABI unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition Local.cpp:2528
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
LLVM_ABI BasicBlock * SplitBlockPredecessors(BasicBlock *BB, ArrayRef< BasicBlock * > Preds, const char *Suffix, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, bool PreserveLCSSA=false)
This method introduces at least one new basic block into the function and moves some of the predecess...
PotentialValuesState< APInt > PotentialConstantIntValuesState
bool operator&=(SparseBitVector< ElementSize > *LHS, const SparseBitVector< ElementSize > &RHS)
LLVM_ABI bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr, const CycleInfo *CI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in Ex...
Definition CFG.cpp:335
DWARFExpression::Operation Op
void ViewGraph(const GraphType &G, const Twine &Name, bool ShortNames=false, const Twine &Title="", GraphProgram::Name Program=GraphProgram::DOT)
ViewGraph - Emit a dot graph, run 'dot', run gv on the postscript file, then cleanup.
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
ArrayRef(const T &OneElt) -> ArrayRef< T >
ValueMap< const Value *, WeakTrackingVH > ValueToValueMapTy
LLVM_ABI void CloneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, CloneFunctionChangeType Changes, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr)
Clone OldFunc into NewFunc, transforming the old arguments into references to VMap values.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition STLExtras.h:2191
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:368
LLVM_ABI bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates or reallocates memory (eith...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
ChangeStatus
{
Definition Attributor.h:508
LLVM_ABI void fillMapFromAssume(AssumeInst &Assume, RetainedKnowledgeMap &Result)
Insert into the map all the informations contained in the operand bundles of the llvm....
bool operator|=(SparseBitVector< ElementSize > &LHS, const SparseBitVector< ElementSize > *RHS)
LLVM_ABI Constant * ConstantFoldCastInstruction(unsigned opcode, Constant *V, Type *DestTy)
@ OPTIONAL
The target may be valid if the source is not.
Definition Attributor.h:520
@ NONE
Do not track a dependence between source and target.
Definition Attributor.h:521
@ REQUIRED
The target cannot be valid if the source is not.
Definition Attributor.h:519
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
APInt operator|(APInt a, const APInt &b)
Definition APInt.h:2172
BumpPtrAllocatorImpl<> BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
Definition Allocator.h:383
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
DepSetTy Deps
Set of dependency graph nodes which should be updated if this one is updated.
Definition Attributor.h:535
PointerIntPair< AADepGraphNode *, 1 > DepTy
Definition Attributor.h:529
The data structure for the dependency graph.
Definition Attributor.h:569
iterator begin()
Definition Attributor.h:584
LLVM_ABI void viewGraph()
AADepGraphNode SyntheticRoot
There is no root node for the dependency graph.
Definition Attributor.h:581
LLVM_ABI void print()
Print dependency graph.
iterator end()
Definition Attributor.h:585
LLVM_ABI void dumpGraph()
Dump graph to file.
AADepGraphNode * GetEntryNode()
Definition Attributor.h:582
An abstract interface to track if a value leaves it's defining function instance.
bool isAssumedUniqueForAnalysis() const
Return true if we assume that the underlying value is unique in its scope wrt.
An abstract Attribute for computing reachability between functions.
static LLVM_ABI const char ID
Unique ID (due to the unique address)
An abstract interface to determine reachability of point A to B.
static LLVM_ABI const char ID
Unique ID (due to the unique address)
An abstract interface for liveness abstract attribute.
virtual bool isKnownDead() const =0
Returns true if the underlying value is known dead.
virtual bool isAssumedDead() const =0
The query functions are protected such that other attributes need to go through the Attributor interf...
virtual bool isRemovableStore() const
Return true if the underlying value is a store that is known to be removable.
static bool mayCatchAsynchronousExceptions(const Function &F)
Determine if F might catch asynchronous exceptions.
An abstract interface for memory access kind related attributes (readnone/readonly/writeonly).
static LLVM_ABI const char ID
Unique ID (due to the unique address)
An abstract interface for all memory location attributes (readnone/argmemonly/inaccessiblememonly/ina...
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI bool isNonRelaxedAtomic(const Instruction *I)
Helper function used to determine whether an instruction is non-relaxed atomic.
static LLVM_ABI bool isNoSyncIntrinsic(const Instruction *I)
Helper function specific for intrinsics which are potentially volatile.
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
An access description.
bool isWrittenValueUnknown() const
Return true if the value written cannot be determined at all.
std::optional< Value * > getContent() const
Return the written value which can be llvm::null if it is not yet determined.
bool isWriteOrAssumption() const
Return true if this is a write access.
bool isRead() const
Return true if this is a read access.
Value * getWrittenValue() const
Return the value writen, if any.
Instruction * getLocalInst() const
Return the instruction that causes the access with respect to the local scope of the associated attri...
Instruction * getRemoteInst() const
Return the actual instruction that causes the access.
bool isWrittenValueYetUndetermined() const
Return true if the value written is not known yet.
AccessKind getKind() const
Return the access kind.
An abstract interface for struct information.
static LLVM_ABI Value * getSingleValue(Attributor &A, const AbstractAttribute &AA, const IRPosition &IRP, SmallVectorImpl< AA::ValueAndContext > &Values)
Extract the single value in Values if any.
An abstract attribute for getting all assumption underlying objects.
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
Helper to represent an access offset and size, with logic to deal with uncertainty and check for over...
Definition Attributor.h:253
bool offsetOrSizeAreUnknown() const
Return true if offset or size are unknown.
Definition Attributor.h:262
Value * getValue() const
Definition Attributor.h:206
const Instruction * getCtxI() const
Definition Attributor.h:207
Base struct for all "concrete attribute" deductions.
ChangeStatus update(Attributor &A)
Hook for the Attributor to trigger an update of the internal state.
friend struct Attributor
}
virtual void printWithDeps(raw_ostream &OS) const
void print(raw_ostream &OS) const
Helper functions, for debug purposes only.
virtual StateType & getState()=0
Return the internal abstract state for inspection.
virtual const std::string getAsStr(Attributor *A) const =0
This function should return the "summarized" assumed state as string.
virtual ChangeStatus updateImpl(Attributor &A)=0
The actual update/transfer function which has to be implemented by the derived classes.
const IRPosition & getIRPosition() const
Return an IR position, see struct IRPosition.
An interface to query the internal state of an abstract attribute.
virtual ChangeStatus indicatePessimisticFixpoint()=0
Indicate that the abstract state should converge to the pessimistic state.
virtual bool isAtFixpoint() const =0
Return if this abstract state is fixed, thus does not need to be updated if information changes as it...
virtual bool isValidState() const =0
Return if this abstract state is in a valid state.
Wrapper for FunctionAnalysisManager.
LLVM_ABI PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &CG, CGSCCUpdateResult &UR)
void populateAll() const
Force populate the entire call graph.
Configuration for the Attributor.
std::optional< unsigned > MaxFixpointIterations
Maximum number of iterations to run until fixpoint.
LLVM_ABI PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &CG, CGSCCUpdateResult &UR)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Helper struct used in the communication between an abstract attribute (AA) that wants to change the s...
std::function< void( const ArgumentReplacementInfo &, Function &, Function::arg_iterator)> CalleeRepairCBTy
Callee repair callback type.
std::function< void(const ArgumentReplacementInfo &, AbstractCallSite, SmallVectorImpl< Value * > &)> ACSRepairCBTy
Abstract call site (ACS) repair callback type.
The fixpoint analysis framework that orchestrates the attribute deduction.
LLVM_ABI bool registerFunctionSignatureRewrite(Argument &Arg, ArrayRef< Type * > ReplacementTypes, ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB, ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB)
Register a rewrite for a function signature.
LLVM_ABI ~Attributor()
LLVM_ABI bool checkForAllCallees(function_ref< bool(ArrayRef< const Function * > Callees)> Pred, const AbstractAttribute &QueryingAA, const CallBase &CB)
Check Pred on all potential Callees of CB.
bool isModulePass() const
Return true if this is a module pass, false otherwise.
LLVM_ABI bool isValidFunctionSignatureRewrite(Argument &Arg, ArrayRef< Type * > ReplacementTypes)
Check if we can rewrite a function signature.
static LLVM_ABI bool isInternalizable(Function &F)
Returns true if the function F can be internalized.
LLVM_ABI ChangeStatus removeAttrs(const IRPosition &IRP, ArrayRef< Attribute::AttrKind > AttrKinds)
Remove all AttrKinds attached to IRP.
void emitRemark(Instruction *I, StringRef RemarkName, RemarkCallBack &&RemarkCB) const
Emit a remark generically.
bool isRunOn(Function &Fn) const
Return true if we derive attributes for Fn.
LLVM_ABI bool isAssumedDead(const AbstractAttribute &AA, const AAIsDead *LivenessAA, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, DepClassTy DepClass=DepClassTy::OPTIONAL)
Return true if AA (or its context instruction) is assumed dead.
LLVM_ABI bool checkForAllInstructions(function_ref< bool(Instruction &)> Pred, const Function *Fn, const AbstractAttribute *QueryingAA, ArrayRef< unsigned > Opcodes, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, bool CheckPotentiallyDead=false)
Check Pred on all instructions in Fn with an opcode present in Opcodes.
LLVM_ABI void recordDependence(const AbstractAttribute &FromAA, const AbstractAttribute &ToAA, DepClassTy DepClass)
Explicitly record a dependence from FromAA to ToAA, that is if FromAA changes ToAA should be updated ...
static LLVM_ABI void createShallowWrapper(Function &F)
Create a shallow wrapper for F such that F has internal linkage afterwards.
const AAType * getAAFor(const AbstractAttribute &QueryingAA, const IRPosition &IRP, DepClassTy DepClass)
Lookup an abstract attribute of type AAType at position IRP.
std::optional< Value * > getAssumedSimplified(const IRPosition &IRP, const AbstractAttribute &AA, bool &UsedAssumedInformation, AA::ValueScope S)
If V is assumed simplified, return it, if it is unclear yet, return std::nullopt, otherwise return nu...
static LLVM_ABI Function * internalizeFunction(Function &F, bool Force=false)
Make another copy of the function F such that the copied version has internal linkage afterwards and ...
bool isFunctionIPOAmendable(const Function &F)
Determine whether the function F is IPO amendable.
const AAType * getOrCreateAAFor(IRPosition IRP, const AbstractAttribute *QueryingAA, DepClassTy DepClass, bool ForceUpdate=false, bool UpdateAfterInit=true)
The version of getAAFor that allows to omit a querying abstract attribute.
LLVM_ABI bool checkForAllReadWriteInstructions(function_ref< bool(Instruction &)> Pred, AbstractAttribute &QueryingAA, bool &UsedAssumedInformation)
Check Pred on all Read/Write instructions.
LLVM_ABI bool checkForAllReturnedValues(function_ref< bool(Value &)> Pred, const AbstractAttribute &QueryingAA, AA::ValueScope S=AA::ValueScope::Intraprocedural, bool RecurseForSelectAndPHI=true)
Check Pred on all values potentially returned by the function associated with QueryingAA.
LLVM_ABI bool isClosedWorldModule() const
Return true if the module contains the whole world, thus, no outside functions exist.
LLVM_ABI std::optional< Constant * > getAssumedConstant(const IRPosition &IRP, const AbstractAttribute &AA, bool &UsedAssumedInformation)
If IRP is assumed to be a constant, return it, if it is unclear yet, return std::nullopt,...
LLVM_ABI Attributor(SetVector< Function * > &Functions, InformationCache &InfoCache, AttributorConfig Configuration)
Constructor.
LLVM_ABI void getAttrs(const IRPosition &IRP, ArrayRef< Attribute::AttrKind > AKs, SmallVectorImpl< Attribute > &Attrs, bool IgnoreSubsumingPositions=false)
Return the attributes of any kind in AKs existing in the IR at a position that will affect this one.
InformationCache & getInfoCache()
Return the internal information cache.
LLVM_ABI std::optional< Value * > translateArgumentToCallSiteContent(std::optional< Value * > V, CallBase &CB, const AbstractAttribute &AA, bool &UsedAssumedInformation)
Translate V from the callee context into the call site context.
LLVM_ABI bool checkForAllUses(function_ref< bool(const Use &, bool &)> Pred, const AbstractAttribute &QueryingAA, const Value &V, bool CheckBBLivenessOnly=false, DepClassTy LivenessDepClass=DepClassTy::OPTIONAL, bool IgnoreDroppableUses=true, function_ref< bool(const Use &OldU, const Use &NewU)> EquivalentUseCB=nullptr)
Check Pred on all (transitive) uses of V.
LLVM_ABI ChangeStatus manifestAttrs(const IRPosition &IRP, ArrayRef< Attribute > DeducedAttrs, bool ForceReplace=false)
Attach DeducedAttrs to IRP, if ForceReplace is set we do this even if the same attribute kind was alr...
LLVM_ABI bool hasAttr(const IRPosition &IRP, ArrayRef< Attribute::AttrKind > AKs, bool IgnoreSubsumingPositions=false, Attribute::AttrKind ImpliedAttributeKind=Attribute::None)
Return true if any kind in AKs existing in the IR at a position that will affect this one.
LLVM_ABI void registerForUpdate(AbstractAttribute &AA)
Allows a query AA to request an update if a new query was received.
std::function< bool(Attributor &, const AbstractAttribute *)> VirtualUseCallbackTy
LLVM_ABI void identifyDefaultAbstractAttributes(Function &F)
Determine opportunities to derive 'default' attributes in F and create abstract attribute objects for...
LLVM_ABI bool getAssumedSimplifiedValues(const IRPosition &IRP, const AbstractAttribute *AA, SmallVectorImpl< AA::ValueAndContext > &Values, AA::ValueScope S, bool &UsedAssumedInformation, bool RecurseForSelectAndPHI=true)
Try to simplify IRP and in the scope S.
BumpPtrAllocator & Allocator
The allocator used to allocate memory, e.g. for AbstractAttributes.
LLVM_ABI ChangeStatus run()
Run the analyses until a fixpoint is reached or enforced (timeout).
static LLVM_ABI bool internalizeFunctions(SmallPtrSetImpl< Function * > &FnSet, DenseMap< Function *, Function * > &FnMap)
Make copies of each function in the set FnSet such that the copied version has internal linkage after...
LLVM_ABI bool checkForAllCallSites(function_ref< bool(AbstractCallSite)> Pred, const AbstractAttribute &QueryingAA, bool RequireAllCallSites, bool &UsedAssumedInformation)
Check Pred on all function call sites.
LLVM_ABI bool getAttrsFromAssumes(const IRPosition &IRP, Attribute::AttrKind AK, SmallVectorImpl< Attribute > &Attrs)
Return the attributes of kind AK existing in the IR as operand bundles of an llvm....
bool isKnown(base_t BitsEncoding=BestState) const
Return true if the bits set in BitsEncoding are "known bits".
Support structure for SCC passes to communicate updates the call graph back to the CGSCC pass manager...
static std::string getNodeLabel(const AADepGraphNode *Node, const AADepGraph *DG)
DefaultDOTGraphTraits(bool simple=false)
Represent subnormal handling kind for floating point instruction inputs and outputs.
@ Dynamic
Denormals have unknown treatment.
An information struct used to provide DenseMap with the various necessary components for a given valu...
static NodeRef DepGetVal(const DepTy &DT)
PointerIntPair< AADepGraphNode *, 1 > DepTy
static ChildIteratorType child_end(NodeRef N)
static NodeRef getEntryNode(AADepGraphNode *DGN)
mapped_iterator< AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)> ChildIteratorType
PointerIntPair< AADepGraphNode *, 1 > EdgeRef
static ChildIteratorType child_begin(NodeRef N)
AADepGraphNode::DepSetTy::iterator ChildEdgeIteratorType
static NodeRef getEntryNode(AADepGraph *DG)
mapped_iterator< AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)> nodes_iterator
static nodes_iterator nodes_begin(AADepGraph *DG)
static nodes_iterator nodes_end(AADepGraph *DG)
typename AADepGraph *::UnknownGraphTypeError NodeRef
Definition GraphTraits.h:95
Helper to describe and deal with positions in the LLVM-IR.
Definition Attributor.h:605
Function * getAssociatedFunction() const
Return the associated function, if any.
Definition Attributor.h:736
void setAttrList(const AttributeList &AttrList) const
Update the attributes associated with this function or call site scope.
Definition Attributor.h:872
unsigned getAttrIdx() const
Return the index in the attribute list for this position.
Definition Attributor.h:837
bool hasCallBaseContext() const
Check if the position has any call base context.
Definition Attributor.h:954
static const IRPosition callsite_returned(const CallBase &CB)
Create a position describing the returned value of CB.
Definition Attributor.h:673
static const IRPosition returned(const Function &F, const CallBaseContext *CBContext=nullptr)
Create a position describing the returned value of F.
Definition Attributor.h:655
LLVM_ABI Argument * getAssociatedArgument() const
Return the associated argument, if any.
static const IRPosition value(const Value &V, const CallBaseContext *CBContext=nullptr)
Create a position describing the value of V.
Definition Attributor.h:629
CallBase CallBaseContext
Definition Attributor.h:608
AttributeList getAttrList() const
Return the attributes associated with this function or call site scope.
Definition Attributor.h:865
static const IRPosition inst(const Instruction &I, const CallBaseContext *CBContext=nullptr)
Create a position describing the instruction I.
Definition Attributor.h:641
static const IRPosition callsite_argument(const CallBase &CB, unsigned ArgNo)
Create a position describing the argument of CB at position ArgNo.
Definition Attributor.h:678
static LLVM_ABI const IRPosition TombstoneKey
Definition Attributor.h:960
Kind
The positions we distinguish in the IR.
Definition Attributor.h:611
@ IRP_ARGUMENT
An attribute for a function argument.
Definition Attributor.h:619
@ IRP_RETURNED
An attribute for the function return value.
Definition Attributor.h:615
@ IRP_CALL_SITE
An attribute for a call site (function scope).
Definition Attributor.h:618
@ IRP_CALL_SITE_RETURNED
An attribute for a call site return value.
Definition Attributor.h:616
@ IRP_FUNCTION
An attribute for a function (scope).
Definition Attributor.h:617
@ IRP_FLOAT
A position that is not associated with a spot suitable for attributes.
Definition Attributor.h:613
@ IRP_CALL_SITE_ARGUMENT
An attribute for a call site argument.
Definition Attributor.h:620
@ IRP_INVALID
An invalid position.
Definition Attributor.h:612
Instruction * getCtxI() const
Return the context instruction, if any.
Definition Attributor.h:789
static const IRPosition argument(const Argument &Arg, const CallBaseContext *CBContext=nullptr)
Create a position describing the argument Arg.
Definition Attributor.h:662
static LLVM_ABI const IRPosition EmptyKey
Special DenseMap key values.
Definition Attributor.h:959
static const IRPosition function(const Function &F, const CallBaseContext *CBContext=nullptr)
Create a position describing the function scope of F.
Definition Attributor.h:648
const CallBaseContext * getCallBaseContext() const
Get the call base context from the position.
Definition Attributor.h:951
Value & getAssociatedValue() const
Return the value this abstract attribute is associated with.
Definition Attributor.h:803
Value & getAnchorValue() const
Return the value this abstract attribute is anchored with.
Definition Attributor.h:722
Value * getAttrListAnchor() const
Return the value attributes are attached to.
Definition Attributor.h:858
int getCallSiteArgNo() const
Return the call site argument number of the associated value if it is an argument or call site argume...
Definition Attributor.h:832
Kind getPositionKind() const
Return the associated position kind.
Definition Attributor.h:901
static const IRPosition callsite_function(const CallBase &CB)
Create a position describing the function scope of CB.
Definition Attributor.h:668
Function * getAnchorScope() const
Return the Function surrounding the anchor value.
Definition Attributor.h:777
Data structure to hold cached (LLVM-IR) information.
bool IsTargetGPU() const
Return true if the target is a GPU.
friend struct Attributor
Give the Attributor access to the members so Attributor::identifyDefaultAbstractAttributes(....
bool stackIsAccessibleByOtherThreads()
Return true if the stack (llvm::Alloca) can be accessed by other threads.
MustBeExecutedContextExplorer * getMustBeExecutedContextExplorer()
Return MustBeExecutedContextExplorer.
TargetLibraryInfo * getTargetLibraryInfoForFunction(const Function &F)
Return TargetLibraryInfo for function F.
LLVM_ABI std::optional< unsigned > getFlatAddressSpace() const
Return the flat address space if the associated target has one.
DenseMap< unsigned, InstructionVectorTy * > OpcodeInstMapTy
A map type from opcodes to instructions with this opcode.
const RetainedKnowledgeMap & getKnowledgeMap() const
Return the map containing all the knowledge we have from llvm.assumes.
LLVM_ABI ArrayRef< Function * > getIndirectlyCallableFunctions(Attributor &A) const
Return all functions that might be called indirectly, only valid for closed world modules (see isClos...
SmallVector< Instruction *, 8 > InstructionVectorTy
A vector type to hold instructions.
AP::Result * getAnalysisResultForFunction(const Function &F, bool CachedOnly=false)
Return the analysis result from a pass AP for function F.
State for an integer range.
ConstantRange getKnown() const
Return the known state encoding.
ConstantRange getAssumed() const
Return the assumed state encoding.
uint32_t getBitWidth() const
Return associated values' bit width.
A "must be executed context" for a given program point PP is the set of instructions,...
iterator & end()
Return a universal end iterator.
bool findInContextOf(const Instruction *I, const Instruction *PP)
Helper to look for I in the context of PP.
iterator & begin(const Instruction *PP)
Return an iterator to explore the context around PP.
bool undefIsContained() const
Returns whether this state contains an undef value or not.
bool isValidState() const override
See AbstractState::isValidState(...)
const SetTy & getAssumedSet() const
Return this set.