LLVM 20.0.0git
MemProfiler.cpp
1//===- MemProfiler.cpp - memory allocation and access profiler ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of MemProfiler. Memory accesses are instrumented
10// to increment the access count held in a shadow memory location, or
11// alternatively to call into the runtime. Memory intrinsic calls (memmove,
12// memcpy, memset) are changed to call the memory profiling runtime version
13// instead.
14//
15//===----------------------------------------------------------------------===//
16
17#include "llvm/Transforms/Instrumentation/MemProfiler.h"
18#include "llvm/ADT/SmallVector.h"
19#include "llvm/ADT/Statistic.h"
20#include "llvm/ADT/StringRef.h"
21#include "llvm/Analysis/MemoryBuiltins.h"
22#include "llvm/Analysis/MemoryProfileInfo.h"
23#include "llvm/Analysis/TargetLibraryInfo.h"
24#include "llvm/Analysis/ValueTracking.h"
25#include "llvm/IR/Constant.h"
26#include "llvm/IR/DataLayout.h"
27#include "llvm/IR/DiagnosticInfo.h"
28#include "llvm/IR/Function.h"
29#include "llvm/IR/GlobalValue.h"
30#include "llvm/IR/IRBuilder.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/IntrinsicInst.h"
33#include "llvm/IR/Module.h"
34#include "llvm/IR/Type.h"
35#include "llvm/IR/Value.h"
36#include "llvm/ProfileData/InstrProf.h"
37#include "llvm/ProfileData/InstrProfReader.h"
38#include "llvm/Support/BLAKE3.h"
39#include "llvm/Support/CommandLine.h"
40#include "llvm/Support/Debug.h"
41#include "llvm/Support/HashBuilder.h"
42#include "llvm/Support/VirtualFileSystem.h"
43#include "llvm/TargetParser/Triple.h"
44#include "llvm/Transforms/Utils/BasicBlockUtils.h"
45#include "llvm/Transforms/Utils/ModuleUtils.h"
46#include <map>
47#include <set>
48
49using namespace llvm;
50using namespace llvm::memprof;
51
52#define DEBUG_TYPE "memprof"
53
54namespace llvm {
55extern cl::opt<bool> PGOWarnMissing;
56extern cl::opt<bool> NoPGOWarnMismatch;
57extern cl::opt<bool> NoPGOWarnMismatchComdatWeak;
58} // namespace llvm
59
60constexpr int LLVM_MEM_PROFILER_VERSION = 1;
61
62// Size of memory mapped to a single shadow location.
63constexpr uint64_t DefaultMemGranularity = 64;
64
65// Size of memory mapped to a single histogram bucket.
66constexpr uint64_t HistogramGranularity = 8;
67
68// Scale from granularity down to shadow size.
69constexpr uint64_t DefaultShadowScale = 3;
70
71constexpr char MemProfModuleCtorName[] = "memprof.module_ctor";
72constexpr uint64_t MemProfCtorAndDtorPriority = 1;
73// On Emscripten, the system needs more than one priority for constructors.
74constexpr uint64_t MemProfEmscriptenCtorAndDtorPriority = 50;
75constexpr char MemProfInitName[] = "__memprof_init";
76constexpr char MemProfVersionCheckNamePrefix[] =
77 "__memprof_version_mismatch_check_v";
78
79constexpr char MemProfShadowMemoryDynamicAddress[] =
80 "__memprof_shadow_memory_dynamic_address";
81
82constexpr char MemProfFilenameVar[] = "__memprof_profile_filename";
83
84constexpr char MemProfHistogramFlagVar[] = "__memprof_histogram";
85
86// Command-line flags.
87
88static cl::opt<bool> ClInsertVersionCheck(
89 "memprof-guard-against-version-mismatch",
90 cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden,
91 cl::init(true));
92
93// This flag may need to be replaced with -f[no-]memprof-reads.
94static cl::opt<bool> ClInstrumentReads("memprof-instrument-reads",
95 cl::desc("instrument read instructions"),
96 cl::Hidden, cl::init(true));
97
98static cl::opt<bool>
99 ClInstrumentWrites("memprof-instrument-writes",
100 cl::desc("instrument write instructions"), cl::Hidden,
101 cl::init(true));
102
103static cl::opt<bool> ClInstrumentAtomics(
104 "memprof-instrument-atomics",
105 cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
106 cl::init(true));
107
108static cl::opt<bool> ClUseCalls(
109 "memprof-use-callbacks",
110 cl::desc("Use callbacks instead of inline instrumentation sequences."),
111 cl::Hidden, cl::init(false));
112
113static cl::opt<std::string>
114 ClMemoryAccessCallbackPrefix("memprof-memory-access-callback-prefix",
115 cl::desc("Prefix for memory access callbacks"),
116 cl::Hidden, cl::init("__memprof_"));
117
118// These flags allow changing the shadow mapping.
119// The shadow mapping looks like
120// Shadow = ((Mem & mask) >> scale) + offset
121
122static cl::opt<int> ClMappingScale("memprof-mapping-scale",
123 cl::desc("scale of memprof shadow mapping"),
124 cl::Hidden, cl::init(DefaultShadowScale));
125
126static cl::opt<int>
127 ClMappingGranularity("memprof-mapping-granularity",
128 cl::desc("granularity of memprof shadow mapping"),
129 cl::Hidden, cl::init(DefaultMemGranularity));
130
131static cl::opt<bool> ClStack("memprof-instrument-stack",
132 cl::desc("Instrument scalar stack variables"),
133 cl::Hidden, cl::init(false));
134
135// Debug flags.
136
137static cl::opt<int> ClDebug("memprof-debug", cl::desc("debug"), cl::Hidden,
138 cl::init(0));
139
140static cl::opt<std::string> ClDebugFunc("memprof-debug-func", cl::Hidden,
141 cl::desc("Debug func"));
142
143static cl::opt<int> ClDebugMin("memprof-debug-min", cl::desc("Debug min inst"),
144 cl::Hidden, cl::init(-1));
145
146static cl::opt<int> ClDebugMax("memprof-debug-max", cl::desc("Debug max inst"),
147 cl::Hidden, cl::init(-1));
148
149// By default disable matching of allocation profiles onto operator new that
150// already explicitly pass a hot/cold hint, since we don't currently
151// override these hints anyway.
152static cl::opt<bool> ClMemProfMatchHotColdNew(
153 "memprof-match-hot-cold-new",
154 cl::desc(
155 "Match allocation profiles onto existing hot/cold operator new calls"),
156 cl::Hidden, cl::init(false));
157
158static cl::opt<bool> ClHistogram("memprof-histogram",
159 cl::desc("Collect access count histograms"),
160 cl::Hidden, cl::init(false));
161
162static cl::opt<bool>
163 ClPrintMemProfMatchInfo("memprof-print-match-info",
164 cl::desc("Print matching stats for each allocation "
165 "context in this module's profiles"),
166 cl::Hidden, cl::init(false));
167
168extern cl::opt<bool> MemProfReportHintedSizes;
169
170// Instrumentation statistics
171STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
172STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
173STATISTIC(NumSkippedStackReads, "Number of non-instrumented stack reads");
174STATISTIC(NumSkippedStackWrites, "Number of non-instrumented stack writes");
175
176// Matching statistics
177STATISTIC(NumOfMemProfMissing, "Number of functions without memory profile.");
178STATISTIC(NumOfMemProfMismatch,
179 "Number of functions having mismatched memory profile hash.");
180STATISTIC(NumOfMemProfFunc, "Number of functions having valid memory profile.");
181STATISTIC(NumOfMemProfAllocContextProfiles,
182 "Number of alloc contexts in memory profile.");
183STATISTIC(NumOfMemProfCallSiteProfiles,
184 "Number of callsites in memory profile.");
185STATISTIC(NumOfMemProfMatchedAllocContexts,
186 "Number of matched memory profile alloc contexts.");
187STATISTIC(NumOfMemProfMatchedAllocs,
188 "Number of matched memory profile allocs.");
189STATISTIC(NumOfMemProfMatchedCallSites,
190 "Number of matched memory profile callsites.");
191
192namespace {
193
194/// This struct defines the shadow mapping using the rule:
195/// shadow = ((mem & mask) >> Scale) ADD DynamicShadowOffset.
196struct ShadowMapping {
197 ShadowMapping() {
198 Scale = ClMappingScale;
199 Granularity = ClMappingGranularity;
200 Mask = ~(Granularity - 1);
201 }
202
203 int Scale;
204 int Granularity;
205 uint64_t Mask; // Computed as ~(Granularity-1)
206};
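// For example, with the default 64-byte granularity and shadow scale of 3
// (defined above), Mask clears the low six address bits and every 64-byte
// granule of application memory maps to one 8-byte (64-bit) shadow counter;
// in histogram mode the 8-byte granularity maps each granule to a single
// 1-byte bucket instead (see memToShadow and instrumentAddress below).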
207
208static uint64_t getCtorAndDtorPriority(Triple &TargetTriple) {
209 return TargetTriple.isOSEmscripten() ? MemProfEmscriptenCtorAndDtorPriority
210 : MemProfCtorAndDtorPriority;
211}
212
213struct InterestingMemoryAccess {
214 Value *Addr = nullptr;
215 bool IsWrite;
216 Type *AccessTy;
217 Value *MaybeMask = nullptr;
218};
219
220/// Instrument the code in module to profile memory accesses.
221class MemProfiler {
222public:
223 MemProfiler(Module &M) {
224 C = &(M.getContext());
225 LongSize = M.getDataLayout().getPointerSizeInBits();
226 IntptrTy = Type::getIntNTy(*C, LongSize);
227 PtrTy = PointerType::getUnqual(*C);
228 }
229
230 /// If it is an interesting memory access, populate information
231 /// about the access and return a InterestingMemoryAccess struct.
232 /// Otherwise return std::nullopt.
233 std::optional<InterestingMemoryAccess>
234 isInterestingMemoryAccess(Instruction *I) const;
235
236 void instrumentMop(Instruction *I, const DataLayout &DL,
237 InterestingMemoryAccess &Access);
238 void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
239 Value *Addr, bool IsWrite);
240 void instrumentMaskedLoadOrStore(const DataLayout &DL, Value *Mask,
241 Instruction *I, Value *Addr, Type *AccessTy,
242 bool IsWrite);
243 void instrumentMemIntrinsic(MemIntrinsic *MI);
244 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
245 bool instrumentFunction(Function &F);
246 bool maybeInsertMemProfInitAtFunctionEntry(Function &F);
247 bool insertDynamicShadowAtFunctionEntry(Function &F);
248
249private:
250 void initializeCallbacks(Module &M);
251
252 LLVMContext *C;
253 int LongSize;
254 Type *IntptrTy;
255 PointerType *PtrTy;
256 ShadowMapping Mapping;
257
258 // This array is indexed by AccessIsWrite
259 FunctionCallee MemProfMemoryAccessCallback[2];
260
261 FunctionCallee MemProfMemmove, MemProfMemcpy, MemProfMemset;
262 Value *DynamicShadowOffset = nullptr;
263};
264
265class ModuleMemProfiler {
266public:
267 ModuleMemProfiler(Module &M) { TargetTriple = Triple(M.getTargetTriple()); }
268
269 bool instrumentModule(Module &);
270
271private:
272 Triple TargetTriple;
273 ShadowMapping Mapping;
274 Function *MemProfCtorFunction = nullptr;
275};
276
277} // end anonymous namespace
278
279MemProfilerPass::MemProfilerPass() = default;
280
281PreservedAnalyses MemProfilerPass::run(Function &F,
282 FunctionAnalysisManager &AM) {
283 assert((!ClHistogram || ClMappingGranularity == DefaultMemGranularity) &&
284 "Memprof with histogram only supports default mapping granularity");
285 Module &M = *F.getParent();
286 MemProfiler Profiler(M);
287 if (Profiler.instrumentFunction(F))
288 return PreservedAnalyses::none();
289 return PreservedAnalyses::all();
290}
291
292ModuleMemProfilerPass::ModuleMemProfilerPass() = default;
293
294PreservedAnalyses ModuleMemProfilerPass::run(Module &M,
295 ModuleAnalysisManager &AM) {
296
297 ModuleMemProfiler Profiler(M);
298 if (Profiler.instrumentModule(M))
299 return PreservedAnalyses::none();
300 return PreservedAnalyses::all();
301}
302
303Value *MemProfiler::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
304 // (Shadow & mask) >> scale
305 Shadow = IRB.CreateAnd(Shadow, Mapping.Mask);
306 Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
307 // (Shadow >> scale) | offset
308 assert(DynamicShadowOffset);
309 return IRB.CreateAdd(Shadow, DynamicShadowOffset);
310}
311
312// Instrument memset/memmove/memcpy
313void MemProfiler::instrumentMemIntrinsic(MemIntrinsic *MI) {
314 IRBuilder<> IRB(MI);
315 if (isa<MemTransferInst>(MI)) {
316 IRB.CreateCall(isa<MemMoveInst>(MI) ? MemProfMemmove : MemProfMemcpy,
317 {MI->getOperand(0), MI->getOperand(1),
318 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
319 } else if (isa<MemSetInst>(MI)) {
320 IRB.CreateCall(
321 MemProfMemset,
322 {MI->getOperand(0),
323 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
324 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
325 }
326 MI->eraseFromParent();
327}
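// Note: the original intrinsic is erased above, so the __memprof_mem* runtime
// replacements are expected to both record the access and perform the actual
// copy or set.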
328
329std::optional<InterestingMemoryAccess>
330MemProfiler::isInterestingMemoryAccess(Instruction *I) const {
331 // Do not instrument the load fetching the dynamic shadow address.
332 if (DynamicShadowOffset == I)
333 return std::nullopt;
334
335 InterestingMemoryAccess Access;
336
337 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
338 if (!ClInstrumentReads)
339 return std::nullopt;
340 Access.IsWrite = false;
341 Access.AccessTy = LI->getType();
342 Access.Addr = LI->getPointerOperand();
343 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
344 if (!ClInstrumentWrites)
345 return std::nullopt;
346 Access.IsWrite = true;
347 Access.AccessTy = SI->getValueOperand()->getType();
348 Access.Addr = SI->getPointerOperand();
349 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
350 if (!ClInstrumentAtomics)
351 return std::nullopt;
352 Access.IsWrite = true;
353 Access.AccessTy = RMW->getValOperand()->getType();
354 Access.Addr = RMW->getPointerOperand();
355 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
356 if (!ClInstrumentAtomics)
357 return std::nullopt;
358 Access.IsWrite = true;
359 Access.AccessTy = XCHG->getCompareOperand()->getType();
360 Access.Addr = XCHG->getPointerOperand();
361 } else if (auto *CI = dyn_cast<CallInst>(I)) {
362 auto *F = CI->getCalledFunction();
363 if (F && (F->getIntrinsicID() == Intrinsic::masked_load ||
364 F->getIntrinsicID() == Intrinsic::masked_store)) {
365 unsigned OpOffset = 0;
366 if (F->getIntrinsicID() == Intrinsic::masked_store) {
367 if (!ClInstrumentWrites)
368 return std::nullopt;
369 // Masked store has an initial operand for the value.
370 OpOffset = 1;
371 Access.AccessTy = CI->getArgOperand(0)->getType();
372 Access.IsWrite = true;
373 } else {
374 if (!ClInstrumentReads)
375 return std::nullopt;
376 Access.AccessTy = CI->getType();
377 Access.IsWrite = false;
378 }
379
380 auto *BasePtr = CI->getOperand(0 + OpOffset);
381 Access.MaybeMask = CI->getOperand(2 + OpOffset);
382 Access.Addr = BasePtr;
383 }
384 }
385
386 if (!Access.Addr)
387 return std::nullopt;
388
389 // Do not instrument accesses from different address spaces; we cannot deal
390 // with them.
391 Type *PtrTy = cast<PointerType>(Access.Addr->getType()->getScalarType());
392 if (PtrTy->getPointerAddressSpace() != 0)
393 return std::nullopt;
394
395 // Ignore swifterror addresses.
396 // swifterror memory addresses are mem2reg promoted by instruction
397 // selection. As such they cannot have regular uses like an instrumentation
398 // function and it makes no sense to track them as memory.
399 if (Access.Addr->isSwiftError())
400 return std::nullopt;
401
402 // Peel off GEPs and BitCasts.
403 auto *Addr = Access.Addr->stripInBoundsOffsets();
404
405 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
406 // Do not instrument PGO counter updates.
407 if (GV->hasSection()) {
408 StringRef SectionName = GV->getSection();
409 // Check if the global is in the PGO counters section.
410 auto OF = Triple(I->getModule()->getTargetTriple()).getObjectFormat();
411 if (SectionName.ends_with(
412 getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
413 return std::nullopt;
414 }
415
416 // Do not instrument accesses to LLVM internal variables.
417 if (GV->getName().starts_with("__llvm"))
418 return std::nullopt;
419 }
420
421 return Access;
422}
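// In short: loads, stores, atomic rmw/cmpxchg and masked vector loads/stores
// in address space 0 are considered interesting, while swifterror slots, PGO
// counter sections and __llvm* internal globals are skipped.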
423
424void MemProfiler::instrumentMaskedLoadOrStore(const DataLayout &DL, Value *Mask,
425 Instruction *I, Value *Addr,
426 Type *AccessTy, bool IsWrite) {
427 auto *VTy = cast<FixedVectorType>(AccessTy);
428 unsigned Num = VTy->getNumElements();
429 auto *Zero = ConstantInt::get(IntptrTy, 0);
430 for (unsigned Idx = 0; Idx < Num; ++Idx) {
431 Value *InstrumentedAddress = nullptr;
432 Instruction *InsertBefore = I;
433 if (auto *Vector = dyn_cast<ConstantVector>(Mask)) {
434 // dyn_cast as we might get UndefValue
435 if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) {
436 if (Masked->isZero())
437 // Mask is constant false, so no instrumentation needed.
438 continue;
439 // If we have a true or undef value, fall through to instrumentAddress.
440 // with InsertBefore == I
441 }
442 } else {
443 IRBuilder<> IRB(I);
444 Value *MaskElem = IRB.CreateExtractElement(Mask, Idx);
445 Instruction *ThenTerm = SplitBlockAndInsertIfThen(MaskElem, I, false);
446 InsertBefore = ThenTerm;
447 }
448
449 IRBuilder<> IRB(InsertBefore);
450 InstrumentedAddress =
451 IRB.CreateGEP(VTy, Addr, {Zero, ConstantInt::get(IntptrTy, Idx)});
452 instrumentAddress(I, InsertBefore, InstrumentedAddress, IsWrite);
453 }
454}
455
456void MemProfiler::instrumentMop(Instruction *I, const DataLayout &DL,
457 InterestingMemoryAccess &Access) {
458 // Skip instrumentation of stack accesses unless requested.
459 if (!ClStack && isa<AllocaInst>(getUnderlyingObject(Access.Addr))) {
460 if (Access.IsWrite)
461 ++NumSkippedStackWrites;
462 else
463 ++NumSkippedStackReads;
464 return;
465 }
466
467 if (Access.IsWrite)
468 NumInstrumentedWrites++;
469 else
470 NumInstrumentedReads++;
471
472 if (Access.MaybeMask) {
473 instrumentMaskedLoadOrStore(DL, Access.MaybeMask, I, Access.Addr,
474 Access.AccessTy, Access.IsWrite);
475 } else {
476 // Since the access counts will be accumulated across the entire allocation,
477 // we only update the shadow access count for the first location and thus
478 // don't need to worry about alignment and type size.
479 instrumentAddress(I, I, Access.Addr, Access.IsWrite);
480 }
481}
482
483void MemProfiler::instrumentAddress(Instruction *OrigIns,
484 Instruction *InsertBefore, Value *Addr,
485 bool IsWrite) {
486 IRBuilder<> IRB(InsertBefore);
487 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
488
489 if (ClUseCalls) {
490 IRB.CreateCall(MemProfMemoryAccessCallback[IsWrite], AddrLong);
491 return;
492 }
493
494 Type *ShadowTy = ClHistogram ? Type::getInt8Ty(*C) : Type::getInt64Ty(*C);
495 Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
496
497 Value *ShadowPtr = memToShadow(AddrLong, IRB);
498 Value *ShadowAddr = IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy);
499 Value *ShadowValue = IRB.CreateLoad(ShadowTy, ShadowAddr);
500 // If we are profiling with histograms, add overflow protection at 255.
501 if (ClHistogram) {
502 Value *MaxCount = ConstantInt::get(Type::getInt8Ty(*C), 255);
503 Value *Cmp = IRB.CreateICmpULT(ShadowValue, MaxCount);
504 Instruction *IncBlock =
505 SplitBlockAndInsertIfThen(Cmp, InsertBefore, /*Unreachable=*/false);
506 IRB.SetInsertPoint(IncBlock);
507 }
508 Value *Inc = ConstantInt::get(ShadowTy, 1);
509 ShadowValue = IRB.CreateAdd(ShadowValue, Inc);
510 IRB.CreateStore(ShadowValue, ShadowAddr);
511}
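// The inline sequence emitted above is a plain (non-atomic) load/add/store of
// the shadow counter; in histogram mode the compare-and-branch inserted before
// the increment keeps the 1-byte bucket from wrapping past 255.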
512
513// Create the variable for the profile file name.
514void createProfileFileNameVar(Module &M) {
515 const MDString *MemProfFilename =
516 dyn_cast_or_null<MDString>(M.getModuleFlag("MemProfProfileFilename"));
517 if (!MemProfFilename)
518 return;
519 assert(!MemProfFilename->getString().empty() &&
520 "Unexpected MemProfProfileFilename metadata with empty string");
521 Constant *ProfileNameConst = ConstantDataArray::getString(
522 M.getContext(), MemProfFilename->getString(), true);
523 GlobalVariable *ProfileNameVar = new GlobalVariable(
524 M, ProfileNameConst->getType(), /*isConstant=*/true,
525 GlobalValue::WeakAnyLinkage, ProfileNameConst, MemProfFilenameVar);
526 Triple TT(M.getTargetTriple());
527 if (TT.supportsCOMDAT()) {
528 ProfileNameVar->setLinkage(GlobalValue::ExternalLinkage);
529 ProfileNameVar->setComdat(M.getOrInsertComdat(MemProfFilenameVar));
530 }
531}
532
533// Set MemprofHistogramFlag as a Global variable in IR. This makes it accessible
534// to the runtime, changing shadow count behavior.
535void createMemprofHistogramFlagVar(Module &M) {
536 const StringRef VarName(MemProfHistogramFlagVar);
537 Type *IntTy1 = Type::getInt1Ty(M.getContext());
538 auto MemprofHistogramFlag = new GlobalVariable(
539 M, IntTy1, true, GlobalValue::WeakAnyLinkage,
540 Constant::getIntegerValue(IntTy1, APInt(1, ClHistogram)), VarName);
541 Triple TT(M.getTargetTriple());
542 if (TT.supportsCOMDAT()) {
543 MemprofHistogramFlag->setLinkage(GlobalValue::ExternalLinkage);
544 MemprofHistogramFlag->setComdat(M.getOrInsertComdat(VarName));
545 }
546 appendToCompilerUsed(M, MemprofHistogramFlag);
547}
548
549bool ModuleMemProfiler::instrumentModule(Module &M) {
550
551 // Create a module constructor.
552 std::string MemProfVersion = std::to_string(LLVM_MEM_PROFILER_VERSION);
553 std::string VersionCheckName =
554 ClInsertVersionCheck ? (MemProfVersionCheckNamePrefix + MemProfVersion)
555 : "";
556 std::tie(MemProfCtorFunction, std::ignore) =
557 createSanitizerCtorAndInitFunctions(M, MemProfModuleCtorName,
558 MemProfInitName, /*InitArgTypes=*/{},
559 /*InitArgs=*/{}, VersionCheckName);
560
561 const uint64_t Priority = getCtorAndDtorPriority(TargetTriple);
562 appendToGlobalCtors(M, MemProfCtorFunction, Priority);
563
564 createProfileFileNameVar(M);
565
566 createMemprofHistogramFlagVar(M);
567
568 return true;
569}
570
571void MemProfiler::initializeCallbacks(Module &M) {
572 IRBuilder<> IRB(*C);
573
574 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
575 const std::string TypeStr = AccessIsWrite ? "store" : "load";
576 const std::string HistPrefix = ClHistogram ? "hist_" : "";
577
578 SmallVector<Type *, 2> Args1{1, IntptrTy};
579 MemProfMemoryAccessCallback[AccessIsWrite] = M.getOrInsertFunction(
580 ClMemoryAccessCallbackPrefix + HistPrefix + TypeStr,
581 FunctionType::get(IRB.getVoidTy(), Args1, false));
582 }
583 MemProfMemmove = M.getOrInsertFunction(
584 ClMemoryAccessCallbackPrefix + "memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
585 MemProfMemcpy = M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "memcpy",
586 PtrTy, PtrTy, PtrTy, IntptrTy);
587 MemProfMemset =
588 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "memset", PtrTy,
589 PtrTy, IRB.getInt32Ty(), IntptrTy);
590}
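// With the default "__memprof_" prefix this declares __memprof_load and
// __memprof_store (or __memprof_hist_load/__memprof_hist_store when
// -memprof-histogram is enabled), plus __memprof_memmove, __memprof_memcpy and
// __memprof_memset used by instrumentMemIntrinsic above.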
591
592bool MemProfiler::maybeInsertMemProfInitAtFunctionEntry(Function &F) {
593 // For each NSObject descendant having a +load method, this method is invoked
594 // by the ObjC runtime before any of the static constructors is called.
595 // Therefore we need to instrument such methods with a call to __memprof_init
596 // at the beginning in order to initialize our runtime before any access to
597 // the shadow memory.
598 // We cannot just ignore these methods, because they may call other
599 // instrumented functions.
600 if (F.getName().contains(" load]")) {
601 FunctionCallee MemProfInitFunction =
602 declareSanitizerInitFunction(*F.getParent(), MemProfInitName, {});
603 IRBuilder<> IRB(&F.front(), F.front().begin());
604 IRB.CreateCall(MemProfInitFunction, {});
605 return true;
606 }
607 return false;
608}
609
610bool MemProfiler::insertDynamicShadowAtFunctionEntry(Function &F) {
611 IRBuilder<> IRB(&F.front().front());
612 Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
613 MemProfShadowMemoryDynamicAddress, IntptrTy);
614 if (F.getParent()->getPICLevel() == PICLevel::NotPIC)
615 cast<GlobalVariable>(GlobalDynamicAddress)->setDSOLocal(true);
616 DynamicShadowOffset = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
617 return true;
618}
619
620bool MemProfiler::instrumentFunction(Function &F) {
621 if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage)
622 return false;
623 if (ClDebugFunc == F.getName())
624 return false;
625 if (F.getName().starts_with("__memprof_"))
626 return false;
627
628 bool FunctionModified = false;
629
630 // If needed, insert __memprof_init.
631 // This function needs to be called even if the function body is not
632 // instrumented.
633 if (maybeInsertMemProfInitAtFunctionEntry(F))
634 FunctionModified = true;
635
636 LLVM_DEBUG(dbgs() << "MEMPROF instrumenting:\n" << F << "\n");
637
638 initializeCallbacks(*F.getParent());
639
640 SmallVector<Instruction *, 16> ToInstrument;
641
642 // Fill the set of memory operations to instrument.
643 for (auto &BB : F) {
644 for (auto &Inst : BB) {
645 if (isInterestingMemoryAccess(&Inst) || isa<MemIntrinsic>(Inst))
646 ToInstrument.push_back(&Inst);
647 }
648 }
649
650 if (ToInstrument.empty()) {
651 LLVM_DEBUG(dbgs() << "MEMPROF done instrumenting: " << FunctionModified
652 << " " << F << "\n");
653
654 return FunctionModified;
655 }
656
657 FunctionModified |= insertDynamicShadowAtFunctionEntry(F);
658
659 int NumInstrumented = 0;
660 for (auto *Inst : ToInstrument) {
661 if (ClDebugMin < 0 || ClDebugMax < 0 ||
662 (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
663 std::optional<InterestingMemoryAccess> Access =
664 isInterestingMemoryAccess(Inst);
665 if (Access)
666 instrumentMop(Inst, F.getDataLayout(), *Access);
667 else
668 instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
669 }
670 NumInstrumented++;
671 }
672
673 if (NumInstrumented > 0)
674 FunctionModified = true;
675
676 LLVM_DEBUG(dbgs() << "MEMPROF done instrumenting: " << FunctionModified << " "
677 << F << "\n");
678
679 return FunctionModified;
680}
681
682static void addCallsiteMetadata(Instruction &I,
683 std::vector<uint64_t> &InlinedCallStack,
684 LLVMContext &Ctx) {
685 I.setMetadata(LLVMContext::MD_callsite,
686 buildCallstackMetadata(InlinedCallStack, Ctx));
687}
688
689static uint64_t computeStackId(GlobalValue::GUID Function, uint32_t LineOffset,
690 uint32_t Column) {
691 llvm::HashBuilder<llvm::TruncatedBLAKE3<8>, llvm::endianness::little>
692 HashBuilder;
693 HashBuilder.add(Function, LineOffset, Column);
694 llvm::BLAKE3Result<8> Hash = HashBuilder.final();
695 uint64_t Id;
696 std::memcpy(&Id, Hash.data(), sizeof(Hash));
697 return Id;
698}
699
700static uint64_t computeStackId(const memprof::Frame &Frame) {
701 return computeStackId(Frame.Function, Frame.LineOffset, Frame.Column);
702}
703
704// Helper to generate a single hash id for a given callstack, used for emitting
705// matching statistics and useful for uniquing such statistics across modules.
706static uint64_t
707computeFullStackId(const std::vector<memprof::Frame> &CallStack) {
708 llvm::HashBuilder<llvm::TruncatedBLAKE3<8>, llvm::endianness::little>
709 HashBuilder;
710 for (auto &F : CallStack)
711 HashBuilder.add(F.Function, F.LineOffset, F.Column);
712 llvm::BLAKE3Result<8> Hash = HashBuilder.final();
713 uint64_t Id;
714 std::memcpy(&Id, Hash.data(), sizeof(Hash));
715 return Id;
716}
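// Both computeStackId and computeFullStackId hash (function GUID, line offset,
// column) tuples with truncated BLAKE3 and keep the first 64 bits, so the
// resulting ids are deterministic and comparable across modules.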
717
718static AllocationType addCallStack(CallStackTrie &AllocTrie,
719 const AllocationInfo *AllocInfo) {
720 SmallVector<uint64_t> StackIds;
721 for (const auto &StackFrame : AllocInfo->CallStack)
722 StackIds.push_back(computeStackId(StackFrame));
723 auto AllocType = getAllocType(AllocInfo->Info.getTotalLifetimeAccessDensity(),
724 AllocInfo->Info.getAllocCount(),
725 AllocInfo->Info.getTotalLifetime());
726 uint64_t TotalSize = 0;
727 if (MemProfReportHintedSizes) {
728 TotalSize = AllocInfo->Info.getTotalSize();
729 assert(TotalSize);
730 }
731 AllocTrie.addCallStack(AllocType, StackIds, TotalSize);
732 return AllocType;
733}
734
735// Helper to compare the InlinedCallStack computed from an instruction's debug
736// info to a list of Frames from profile data (either the allocation data or a
737// callsite). For callsites, the StartIndex to use in the Frame array may be
738// non-zero.
739static bool
740stackFrameIncludesInlinedCallStack(ArrayRef<Frame> ProfileCallStack,
741 ArrayRef<uint64_t> InlinedCallStack,
742 unsigned StartIndex = 0) {
743 auto StackFrame = ProfileCallStack.begin() + StartIndex;
744 auto InlCallStackIter = InlinedCallStack.begin();
745 for (; StackFrame != ProfileCallStack.end() &&
746 InlCallStackIter != InlinedCallStack.end();
747 ++StackFrame, ++InlCallStackIter) {
748 uint64_t StackId = computeStackId(*StackFrame);
749 if (StackId != *InlCallStackIter)
750 return false;
751 }
752 // Return true if we found and matched all stack ids from the call
753 // instruction.
754 return InlCallStackIter == InlinedCallStack.end();
755}
756
757static bool isAllocationWithHotColdVariant(Function *Callee,
758 const TargetLibraryInfo &TLI) {
759 if (!Callee)
760 return false;
761 LibFunc Func;
762 if (!TLI.getLibFunc(*Callee, Func))
763 return false;
764 switch (Func) {
765 case LibFunc_Znwm:
766 case LibFunc_ZnwmRKSt9nothrow_t:
767 case LibFunc_ZnwmSt11align_val_t:
768 case LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t:
769 case LibFunc_Znam:
770 case LibFunc_ZnamRKSt9nothrow_t:
771 case LibFunc_ZnamSt11align_val_t:
772 case LibFunc_ZnamSt11align_val_tRKSt9nothrow_t:
773 case LibFunc_size_returning_new:
774 case LibFunc_size_returning_new_aligned:
775 return true;
776 case LibFunc_Znwm12__hot_cold_t:
777 case LibFunc_ZnwmRKSt9nothrow_t12__hot_cold_t:
778 case LibFunc_ZnwmSt11align_val_t12__hot_cold_t:
779 case LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t12__hot_cold_t:
780 case LibFunc_Znam12__hot_cold_t:
781 case LibFunc_ZnamRKSt9nothrow_t12__hot_cold_t:
782 case LibFunc_ZnamSt11align_val_t12__hot_cold_t:
783 case LibFunc_ZnamSt11align_val_tRKSt9nothrow_t12__hot_cold_t:
784 case LibFunc_size_returning_new_hot_cold:
785 case LibFunc_size_returning_new_aligned_hot_cold:
786 return ClMemProfMatchHotColdNew;
787 default:
788 return false;
789 }
790}
791
792struct AllocMatchInfo {
793 uint64_t TotalSize = 0;
794 AllocationType AllocType = AllocationType::None;
795 bool Matched = false;
796};
797
798static void
799readMemprof(Module &M, Function &F, IndexedInstrProfReader *MemProfReader,
800 const TargetLibraryInfo &TLI,
801 std::map<uint64_t, AllocMatchInfo> &FullStackIdToAllocMatchInfo) {
802 auto &Ctx = M.getContext();
803 // Previously we used getIRPGOFuncName() here. If F is local linkage,
804 // getIRPGOFuncName() returns FuncName with prefix 'FileName;'. But
805 // llvm-profdata uses FuncName in dwarf to create GUID which doesn't
806 // contain FileName's prefix. As a result, a local linkage function couldn't
807 // find its MemProfRecord. So we use getName() now.
808 // 'unique-internal-linkage-names' can make MemProf work better for local
809 // linkage function.
810 auto FuncName = F.getName();
811 auto FuncGUID = Function::getGUID(FuncName);
812 std::optional<memprof::MemProfRecord> MemProfRec;
813 auto Err = MemProfReader->getMemProfRecord(FuncGUID).moveInto(MemProfRec);
814 if (Err) {
815 handleAllErrors(std::move(Err), [&](const InstrProfError &IPE) {
816 auto Err = IPE.get();
817 bool SkipWarning = false;
818 LLVM_DEBUG(dbgs() << "Error in reading profile for Func " << FuncName
819 << ": ");
820 if (Err == instrprof_error::unknown_function) {
821 NumOfMemProfMissing++;
822 SkipWarning = !PGOWarnMissing;
823 LLVM_DEBUG(dbgs() << "unknown function");
824 } else if (Err == instrprof_error::hash_mismatch) {
825 NumOfMemProfMismatch++;
826 SkipWarning =
827 NoPGOWarnMismatch ||
828 (NoPGOWarnMismatchComdatWeak &&
829 (F.hasComdat() ||
830 F.getLinkage() == GlobalValue::AvailableExternallyLinkage));
831 LLVM_DEBUG(dbgs() << "hash mismatch (skip=" << SkipWarning << ")");
832 }
833
834 if (SkipWarning)
835 return;
836
837 std::string Msg = (IPE.message() + Twine(" ") + F.getName().str() +
838 Twine(" Hash = ") + std::to_string(FuncGUID))
839 .str();
840
841 Ctx.diagnose(
842 DiagnosticInfoPGOProfile(M.getName().data(), Msg, DS_Warning));
843 });
844 return;
845 }
846
847 NumOfMemProfFunc++;
848
849 // Detect if there are non-zero column numbers in the profile. If not,
850 // treat all column numbers as 0 when matching (i.e. ignore any non-zero
851 // columns in the IR). The profiled binary might have been built with
852 // column numbers disabled, for example.
853 bool ProfileHasColumns = false;
854
855 // Build maps of the location hash to all profile data with that leaf location
856 // (allocation info and the callsites).
857 std::map<uint64_t, std::set<const AllocationInfo *>> LocHashToAllocInfo;
858 // For the callsites we need to record the index of the associated frame in
859 // the frame array (see comments below where the map entries are added).
860 std::map<uint64_t, std::set<std::pair<const std::vector<Frame> *, unsigned>>>
861 LocHashToCallSites;
862 for (auto &AI : MemProfRec->AllocSites) {
863 NumOfMemProfAllocContextProfiles++;
864 // Associate the allocation info with the leaf frame. The later matching
865 // code will match any inlined call sequences in the IR with a longer prefix
866 // of call stack frames.
867 uint64_t StackId = computeStackId(AI.CallStack[0]);
868 LocHashToAllocInfo[StackId].insert(&AI);
869 ProfileHasColumns |= AI.CallStack[0].Column;
870 }
871 for (auto &CS : MemProfRec->CallSites) {
872 NumOfMemProfCallSiteProfiles++;
873 // Need to record all frames from leaf up to and including this function,
874 // as any of these may or may not have been inlined at this point.
875 unsigned Idx = 0;
876 for (auto &StackFrame : CS) {
877 uint64_t StackId = computeStackId(StackFrame);
878 LocHashToCallSites[StackId].insert(std::make_pair(&CS, Idx++));
879 ProfileHasColumns |= StackFrame.Column;
880 // Once we find this function, we can stop recording.
881 if (StackFrame.Function == FuncGUID)
882 break;
883 }
884 assert(Idx <= CS.size() && CS[Idx - 1].Function == FuncGUID);
885 }
886
887 auto GetOffset = [](const DILocation *DIL) {
888 return (DIL->getLine() - DIL->getScope()->getSubprogram()->getLine()) &
889 0xffff;
890 };
891
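// The (callee GUID, line offset, column) triples computed from the debug
// locations below feed computeStackId, so locations reconstructed from the IR
// hash to the same ids as the profile's Frame entries and can be compared
// directly against the maps built above.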
892 // Now walk the instructions, looking up the associated profile data using
893 // debug locations.
894 for (auto &BB : F) {
895 for (auto &I : BB) {
896 if (I.isDebugOrPseudoInst())
897 continue;
898 // We are only interested in calls (allocation or interior call stack
899 // context calls).
900 auto *CI = dyn_cast<CallBase>(&I);
901 if (!CI)
902 continue;
903 auto *CalledFunction = CI->getCalledFunction();
904 if (CalledFunction && CalledFunction->isIntrinsic())
905 continue;
906 // List of call stack ids computed from the location hashes on debug
907 // locations (leaf to inlined at root).
908 std::vector<uint64_t> InlinedCallStack;
909 // Was the leaf location found in one of the profile maps?
910 bool LeafFound = false;
911 // If leaf was found in a map, iterators pointing to its location in both
912 // of the maps. It might exist in neither, one, or both (the latter case
913 // can happen because we don't currently have discriminators to
914 // distinguish the case when a single line/col maps to both an allocation
915 // and another callsite).
916 std::map<uint64_t, std::set<const AllocationInfo *>>::iterator
917 AllocInfoIter;
918 std::map<uint64_t, std::set<std::pair<const std::vector<Frame> *,
919 unsigned>>>::iterator CallSitesIter;
920 for (const DILocation *DIL = I.getDebugLoc(); DIL != nullptr;
921 DIL = DIL->getInlinedAt()) {
922 // Use C++ linkage name if possible. Need to compile with
923 // -fdebug-info-for-profiling to get linkage name.
924 StringRef Name = DIL->getScope()->getSubprogram()->getLinkageName();
925 if (Name.empty())
926 Name = DIL->getScope()->getSubprogram()->getName();
927 auto CalleeGUID = Function::getGUID(Name);
928 auto StackId = computeStackId(CalleeGUID, GetOffset(DIL),
929 ProfileHasColumns ? DIL->getColumn() : 0);
930 // Check if we have found the profile's leaf frame. If yes, collect
931 // the rest of the call's inlined context starting here. If not, see if
932 // we find a match further up the inlined context (in case the profile
933 // was missing debug frames at the leaf).
934 if (!LeafFound) {
935 AllocInfoIter = LocHashToAllocInfo.find(StackId);
936 CallSitesIter = LocHashToCallSites.find(StackId);
937 if (AllocInfoIter != LocHashToAllocInfo.end() ||
938 CallSitesIter != LocHashToCallSites.end())
939 LeafFound = true;
940 }
941 if (LeafFound)
942 InlinedCallStack.push_back(StackId);
943 }
944 // If leaf not in either of the maps, skip inst.
945 if (!LeafFound)
946 continue;
947
948 // First add !memprof metadata from allocation info, if we found the
949 // instruction's leaf location in that map, and if the rest of the
950 // instruction's locations match the prefix Frame locations on an
951 // allocation context with the same leaf.
952 if (AllocInfoIter != LocHashToAllocInfo.end()) {
953 // Only consider allocations which support hinting.
954 if (!isAllocationWithHotColdVariant(CI->getCalledFunction(), TLI))
955 continue;
956 // We may match this instruction's location list to multiple MIB
957 // contexts. Add them to a Trie specialized for trimming the contexts to
958 // the minimal needed to disambiguate contexts with unique behavior.
959 CallStackTrie AllocTrie;
960 for (auto *AllocInfo : AllocInfoIter->second) {
961 // Check the full inlined call stack against this one.
962 // If we found and thus matched all frames on the call, include
963 // this MIB.
964 if (stackFrameIncludesInlinedCallStack(AllocInfo->CallStack,
965 InlinedCallStack)) {
966 NumOfMemProfMatchedAllocContexts++;
967 auto AllocType = addCallStack(AllocTrie, AllocInfo);
968 // Record information about the allocation if match info printing
969 // was requested.
970 if (ClPrintMemProfMatchInfo) {
971 auto FullStackId = computeFullStackId(AllocInfo->CallStack);
972 FullStackIdToAllocMatchInfo[FullStackId] = {
973 AllocInfo->Info.getTotalSize(), AllocType, /*Matched=*/true};
974 }
975 }
976 }
977 // We might not have matched any to the full inlined call stack.
978 // But if we did, create and attach metadata, or a function attribute if
979 // all contexts have identical profiled behavior.
980 if (!AllocTrie.empty()) {
981 NumOfMemProfMatchedAllocs++;
982 // MemprofMDAttached will be false if a function attribute was
983 // attached.
984 bool MemprofMDAttached = AllocTrie.buildAndAttachMIBMetadata(CI);
985 assert(MemprofMDAttached == I.hasMetadata(LLVMContext::MD_memprof));
986 if (MemprofMDAttached) {
987 // Add callsite metadata for the instruction's location list so that
988 // it is simpler later on to identify which part of the MIB contexts
989 // are from this particular instruction (including during inlining,
990 // when the callsite metadata will be updated appropriately).
991 // FIXME: can this be changed to strip out the matching stack
992 // context ids from the MIB contexts and not add any callsite
993 // metadata here to save space?
994 addCallsiteMetadata(I, InlinedCallStack, Ctx);
995 }
996 }
997 continue;
998 }
999
1000 // Otherwise, add callsite metadata. If we reach here then we found the
1001 // instruction's leaf location in the callsites map and not the allocation
1002 // map.
1003 assert(CallSitesIter != LocHashToCallSites.end());
1004 for (auto CallStackIdx : CallSitesIter->second) {
1005 // If we found and thus matched all frames on the call, create and
1006 // attach call stack metadata.
1007 if (stackFrameIncludesInlinedCallStack(
1008 *CallStackIdx.first, InlinedCallStack, CallStackIdx.second)) {
1009 NumOfMemProfMatchedCallSites++;
1010 addCallsiteMetadata(I, InlinedCallStack, Ctx);
1011 // Only need to find one with a matching call stack and add a single
1012 // callsite metadata.
1013 break;
1014 }
1015 }
1016 }
1017 }
1018}
1019
1020MemProfUsePass::MemProfUsePass(std::string MemoryProfileFile,
1021 IntrusiveRefCntPtr<vfs::FileSystem> FS)
1022 : MemoryProfileFileName(MemoryProfileFile), FS(FS) {
1023 if (!FS)
1024 this->FS = vfs::getRealFileSystem();
1025}
1026
1027PreservedAnalyses MemProfUsePass::run(Module &M, ModuleAnalysisManager &AM) {
1028 LLVM_DEBUG(dbgs() << "Read in memory profile:");
1029 auto &Ctx = M.getContext();
1030 auto ReaderOrErr = IndexedInstrProfReader::create(MemoryProfileFileName, *FS);
1031 if (Error E = ReaderOrErr.takeError()) {
1032 handleAllErrors(std::move(E), [&](const ErrorInfoBase &EI) {
1033 Ctx.diagnose(
1034 DiagnosticInfoPGOProfile(MemoryProfileFileName.data(), EI.message()));
1035 });
1036 return PreservedAnalyses::all();
1037 }
1038
1039 std::unique_ptr<IndexedInstrProfReader> MemProfReader =
1040 std::move(ReaderOrErr.get());
1041 if (!MemProfReader) {
1042 Ctx.diagnose(DiagnosticInfoPGOProfile(
1043 MemoryProfileFileName.data(), StringRef("Cannot get MemProfReader")));
1044 return PreservedAnalyses::all();
1045 }
1046
1047 if (!MemProfReader->hasMemoryProfile()) {
1048 Ctx.diagnose(DiagnosticInfoPGOProfile(MemoryProfileFileName.data(),
1049 "Not a memory profile"));
1050 return PreservedAnalyses::all();
1051 }
1052
1053 auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
1054
1055 // Map from the stack hash of each allocation context in the function profiles
1056 // to the total profiled size (bytes), allocation type, and whether we matched
1057 // it to an allocation in the IR.
1058 std::map<uint64_t, AllocMatchInfo> FullStackIdToAllocMatchInfo;
1059
1060 for (auto &F : M) {
1061 if (F.isDeclaration())
1062 continue;
1063
1064 const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
1065 readMemprof(M, F, MemProfReader.get(), TLI, FullStackIdToAllocMatchInfo);
1066 }
1067
1068 if (ClPrintMemProfMatchInfo) {
1069 for (const auto &[Id, Info] : FullStackIdToAllocMatchInfo)
1070 errs() << "MemProf " << getAllocTypeAttributeString(Info.AllocType)
1071 << " context with id " << Id << " has total profiled size "
1072 << Info.TotalSize << (Info.Matched ? " is" : " not")
1073 << " matched\n";
1074 }
1075
1076 return PreservedAnalyses::none();
1077}