LLVM 20.0.0git
AMDGPUAsanInstrumentation.cpp
Go to the documentation of this file.
1//===AMDGPUAsanInstrumentation.cpp - ASAN related helper functions===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===-------------------------------------------------------------===//
8
10
11#define DEBUG_TYPE "amdgpu-asan-instrumentation"
12
13using namespace llvm;
14
15namespace llvm {
16namespace AMDGPU {
17
static uint64_t getRedzoneSizeForScale(int AsanScale) {
  // The redzone is one shadow granule (1 << AsanScale) but never smaller
  // than 32 bytes; scales 6 and 7 therefore yield 64 and 128 bytes.
  const uint64_t GranuleBytes = uint64_t(1) << AsanScale;
  return GranuleBytes < 32 ? 32 : GranuleBytes;
}
23
// Minimum redzone placed around an instrumented global: identical to the
// scale-derived redzone size (at least 32 bytes).
static uint64_t getMinRedzoneSizeForGlobal(int AsanScale) {
  return getRedzoneSizeForScale(AsanScale);
}
27
28uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes) {
29 constexpr uint64_t kMaxRZ = 1 << 18;
30 const uint64_t MinRZ = getMinRedzoneSizeForGlobal(AsanScale);
31
32 uint64_t RZ = 0;
33 if (SizeInBytes <= MinRZ / 2) {
34 // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is
35 // at least 32 bytes, optimize when SizeInBytes is less than or equal to
36 // half of MinRZ.
37 RZ = MinRZ - SizeInBytes;
38 } else {
39 // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
40 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
41
42 // Round up to multiple of MinRZ.
43 if (SizeInBytes % MinRZ)
44 RZ += MinRZ - (SizeInBytes % MinRZ);
45 }
46
47 assert((RZ + SizeInBytes) % MinRZ == 0);
48
49 return RZ;
50}
51
53 size_t Res = llvm::countr_zero(TypeSize / 8);
54 return Res;
55}
56
58 Value *Cond, bool Recover) {
59 Value *ReportCond = Cond;
60 if (!Recover) {
61 auto *Ballot =
62 IRB.CreateIntrinsic(Intrinsic::amdgcn_ballot, IRB.getInt64Ty(), {Cond});
63 ReportCond = IRB.CreateIsNotNull(Ballot);
64 }
65
66 auto *Trm = SplitBlockAndInsertIfThen(
67 ReportCond, &*IRB.GetInsertPoint(), false,
68 MDBuilder(M.getContext()).createUnlikelyBranchWeights());
69 Trm->getParent()->setName("asan.report");
70
71 if (Recover)
72 return Trm;
73
74 Trm = SplitBlockAndInsertIfThen(Cond, Trm, false);
75 IRB.SetInsertPoint(Trm);
76 return IRB.CreateIntrinsic(Intrinsic::amdgcn_unreachable, {}, {});
77}
78
// Build the ASan "slow path" comparison for an access that may only
// partially cover a shadow granule: compare the offset of the last accessed
// byte within its granule against the number of addressable bytes recorded
// in the (non-zero) shadow value. TypeStoreSize is in bits.
static Value *createSlowPathCmp(Module &M, IRBuilder<> &IRB, Type *IntptrTy,
                                Value *AddrLong, Value *ShadowValue,
                                uint32_t TypeStoreSize, int AsanScale) {
  uint64_t Granularity = static_cast<uint64_t>(1) << AsanScale;
  // Addr & (Granularity - 1)
  Value *LastAccessedByte =
      IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
  // (Addr & (Granularity - 1)) + size - 1
  if (TypeStoreSize / 8 > 1)
    LastAccessedByte = IRB.CreateAdd(
        LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
  // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
  LastAccessedByte =
      IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
  // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
  return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
}
96
98 Type *IntptrTy, Instruction *InsertBefore,
99 Value *Addr, bool IsWrite,
100 size_t AccessSizeIndex,
101 Value *SizeArgument, bool Recover) {
102 IRB.SetInsertPoint(InsertBefore);
103 CallInst *Call = nullptr;
105 SmallString<64> TypeStr{IsWrite ? "store" : "load"};
106 SmallString<64> EndingStr{Recover ? "_noabort" : ""};
107
108 SmallString<128> AsanErrorCallbackSizedString;
109 raw_svector_ostream AsanErrorCallbackSizedOS(AsanErrorCallbackSizedString);
110 AsanErrorCallbackSizedOS << kAsanReportErrorTemplate << TypeStr << "_n"
111 << EndingStr;
112
113 SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
114 AttributeList AL2;
115 FunctionCallee AsanErrorCallbackSized = M.getOrInsertFunction(
116 AsanErrorCallbackSizedOS.str(),
117 FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
118 SmallVector<Type *, 2> Args1{1, IntptrTy};
119 AttributeList AL1;
120
121 SmallString<128> AsanErrorCallbackString;
122 raw_svector_ostream AsanErrorCallbackOS(AsanErrorCallbackString);
123 AsanErrorCallbackOS << kAsanReportErrorTemplate << TypeStr
124 << (1ULL << AccessSizeIndex) << EndingStr;
125
126 FunctionCallee AsanErrorCallback = M.getOrInsertFunction(
127 AsanErrorCallbackOS.str(),
128 FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
129 if (SizeArgument) {
130 Call = IRB.CreateCall(AsanErrorCallbackSized, {Addr, SizeArgument});
131 } else {
132 Call = IRB.CreateCall(AsanErrorCallback, Addr);
133 }
134
135 Call->setCannotMerge();
136 return Call;
137}
138
// Translate an application address (already cast to IntptrTy) to its shadow
// address: Shadow = (Addr >> AsanScale) + AsanOffset.
static Value *memToShadow(Module &M, IRBuilder<> &IRB, Type *IntptrTy,
                          Value *Shadow, int AsanScale, uint32_t AsanOffset) {
  // Shadow >> scale
  Shadow = IRB.CreateLShr(Shadow, AsanScale);
  if (AsanOffset == 0)
    return Shadow;
  // (Shadow >> scale) + offset
  Value *ShadowBase = ConstantInt::get(IntptrTy, AsanOffset);
  return IRB.CreateAdd(Shadow, ShadowBase);
}
149
151 Instruction *OrigIns,
152 Instruction *InsertBefore, Value *Addr,
153 Align Alignment, uint32_t TypeStoreSize,
154 bool IsWrite, Value *SizeArgument,
155 bool UseCalls, bool Recover, int AsanScale,
156 int AsanOffset) {
157 Type *AddrTy = Addr->getType();
158 Type *IntptrTy = M.getDataLayout().getIntPtrType(
159 M.getContext(), AddrTy->getPointerAddressSpace());
160 IRB.SetInsertPoint(InsertBefore);
161 size_t AccessSizeIndex = TypeStoreSizeToSizeIndex(TypeStoreSize);
162 Type *ShadowTy = IntegerType::get(M.getContext(),
163 std::max(8U, TypeStoreSize >> AsanScale));
164 Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
165 Value *AddrLong = IRB.CreatePtrToInt(Addr, IntptrTy);
166 Value *ShadowPtr =
167 memToShadow(M, IRB, IntptrTy, AddrLong, AsanScale, AsanOffset);
168 const uint64_t ShadowAlign =
169 std::max<uint64_t>(Alignment.value() >> AsanScale, 1);
170 Value *ShadowValue = IRB.CreateAlignedLoad(
171 ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy), Align(ShadowAlign));
172 Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
173 auto *Cmp2 = createSlowPathCmp(M, IRB, IntptrTy, AddrLong, ShadowValue,
174 TypeStoreSize, AsanScale);
175 Cmp = IRB.CreateAnd(Cmp, Cmp2);
176 Instruction *CrashTerm = genAMDGPUReportBlock(M, IRB, Cmp, Recover);
177 Instruction *Crash =
178 generateCrashCode(M, IRB, IntptrTy, CrashTerm, AddrLong, IsWrite,
179 AccessSizeIndex, SizeArgument, Recover);
180 Crash->setDebugLoc(OrigIns->getDebugLoc());
181 return;
182}
183
185 Instruction *InsertBefore, Value *Addr, Align Alignment,
186 TypeSize TypeStoreSize, bool IsWrite,
187 Value *SizeArgument, bool UseCalls, bool Recover,
188 int AsanScale, int AsanOffset) {
189 if (!TypeStoreSize.isScalable()) {
190 unsigned Granularity = 1 << AsanScale;
191 const auto FixedSize = TypeStoreSize.getFixedValue();
192 switch (FixedSize) {
193 case 8:
194 case 16:
195 case 32:
196 case 64:
197 case 128:
198 if (Alignment.value() >= Granularity ||
199 Alignment.value() >= FixedSize / 8)
201 M, IRB, OrigIns, InsertBefore, Addr, Alignment, FixedSize, IsWrite,
202 SizeArgument, UseCalls, Recover, AsanScale, AsanOffset);
203 }
204 }
205 // Instrument unusual size or unusual alignment.
206 IRB.SetInsertPoint(InsertBefore);
207 Type *AddrTy = Addr->getType();
208 Type *IntptrTy = M.getDataLayout().getIntPtrType(AddrTy);
209 Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
210 Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
211 Value *AddrLong = IRB.CreatePtrToInt(Addr, IntptrTy);
212 Value *SizeMinusOne = IRB.CreateAdd(Size, ConstantInt::get(IntptrTy, -1));
213 Value *LastByte =
214 IRB.CreateIntToPtr(IRB.CreateAdd(AddrLong, SizeMinusOne), AddrTy);
215 instrumentAddressImpl(M, IRB, OrigIns, InsertBefore, Addr, {}, 8, IsWrite,
216 SizeArgument, UseCalls, Recover, AsanScale, AsanOffset);
217 instrumentAddressImpl(M, IRB, OrigIns, InsertBefore, LastByte, {}, 8, IsWrite,
218 SizeArgument, UseCalls, Recover, AsanScale, AsanOffset);
219}
220
222 Module &M, Instruction *I,
224 const DataLayout &DL = M.getDataLayout();
225 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
226 Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
227 LI->getType(), LI->getAlign());
228 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
229 Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
230 SI->getValueOperand()->getType(), SI->getAlign());
231 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
232 Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
233 RMW->getValOperand()->getType(), std::nullopt);
234 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
235 Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
236 XCHG->getCompareOperand()->getType(),
237 std::nullopt);
238 } else if (auto CI = dyn_cast<CallInst>(I)) {
239 switch (CI->getIntrinsicID()) {
240 case Intrinsic::masked_load:
241 case Intrinsic::masked_store:
242 case Intrinsic::masked_gather:
243 case Intrinsic::masked_scatter: {
244 bool IsWrite = CI->getType()->isVoidTy();
245 // Masked store has an initial operand for the value.
246 unsigned OpOffset = IsWrite ? 1 : 0;
247 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
248 MaybeAlign Alignment = Align(1);
249 // Otherwise no alignment guarantees. We probably got Undef.
250 if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
251 Alignment = Op->getMaybeAlignValue();
252 Value *Mask = CI->getOperand(2 + OpOffset);
253 Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
254 break;
255 }
256 case Intrinsic::masked_expandload:
257 case Intrinsic::masked_compressstore: {
258 bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
259 unsigned OpOffset = IsWrite ? 1 : 0;
260 auto BasePtr = CI->getOperand(OpOffset);
261 MaybeAlign Alignment = BasePtr->getPointerAlignment(DL);
262 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
263 IRBuilder<> IB(I);
264 Value *Mask = CI->getOperand(1 + OpOffset);
265 Type *IntptrTy = M.getDataLayout().getIntPtrType(
266 M.getContext(), BasePtr->getType()->getPointerAddressSpace());
267 // Use the popcount of Mask as the effective vector length.
268 Type *ExtTy = VectorType::get(IntptrTy, cast<VectorType>(Ty));
269 Value *ExtMask = IB.CreateZExt(Mask, ExtTy);
270 Value *EVL = IB.CreateAddReduce(ExtMask);
271 Value *TrueMask = ConstantInt::get(Mask->getType(), 1);
272 Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
273 EVL);
274 break;
275 }
276 case Intrinsic::vp_load:
277 case Intrinsic::vp_store:
278 case Intrinsic::experimental_vp_strided_load:
279 case Intrinsic::experimental_vp_strided_store: {
280 auto *VPI = cast<VPIntrinsic>(CI);
281 unsigned IID = CI->getIntrinsicID();
282 bool IsWrite = CI->getType()->isVoidTy();
283 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
284 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
285 MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(DL);
286 Value *Stride = nullptr;
287 if (IID == Intrinsic::experimental_vp_strided_store ||
288 IID == Intrinsic::experimental_vp_strided_load) {
289 Stride = VPI->getOperand(PtrOpNo + 1);
290 // Use the pointer alignment as the element alignment if the stride is a
291 // mutiple of the pointer alignment. Otherwise, the element alignment
292 // should be Align(1).
293 unsigned PointerAlign = Alignment.valueOrOne().value();
294 if (!isa<ConstantInt>(Stride) ||
295 cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
296 Alignment = Align(1);
297 }
298 Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
299 VPI->getMaskParam(), VPI->getVectorLengthParam(),
300 Stride);
301 break;
302 }
303 case Intrinsic::vp_gather:
304 case Intrinsic::vp_scatter: {
305 auto *VPI = cast<VPIntrinsic>(CI);
306 unsigned IID = CI->getIntrinsicID();
307 bool IsWrite = IID == Intrinsic::vp_scatter;
308 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
309 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
310 MaybeAlign Alignment = VPI->getPointerAlignment();
311 Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
312 VPI->getMaskParam(),
313 VPI->getVectorLengthParam());
314 break;
315 }
316 case Intrinsic::amdgcn_raw_buffer_load:
317 case Intrinsic::amdgcn_raw_ptr_buffer_load:
318 case Intrinsic::amdgcn_raw_buffer_load_format:
319 case Intrinsic::amdgcn_raw_ptr_buffer_load_format:
320 case Intrinsic::amdgcn_raw_tbuffer_load:
321 case Intrinsic::amdgcn_raw_ptr_tbuffer_load:
322 case Intrinsic::amdgcn_struct_buffer_load:
323 case Intrinsic::amdgcn_struct_ptr_buffer_load:
324 case Intrinsic::amdgcn_struct_buffer_load_format:
325 case Intrinsic::amdgcn_struct_ptr_buffer_load_format:
326 case Intrinsic::amdgcn_struct_tbuffer_load:
327 case Intrinsic::amdgcn_struct_ptr_tbuffer_load:
328 case Intrinsic::amdgcn_s_buffer_load:
329 case Intrinsic::amdgcn_global_load_tr_b64:
330 case Intrinsic::amdgcn_global_load_tr_b128: {
331 unsigned PtrOpNo = 0;
332 bool IsWrite = false;
333 Type *Ty = CI->getType();
334 Value *Ptr = CI->getArgOperand(PtrOpNo);
335 MaybeAlign Alignment = Ptr->getPointerAlignment(DL);
336 Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment);
337 break;
338 }
339 case Intrinsic::amdgcn_raw_tbuffer_store:
340 case Intrinsic::amdgcn_raw_ptr_tbuffer_store:
341 case Intrinsic::amdgcn_raw_buffer_store:
342 case Intrinsic::amdgcn_raw_ptr_buffer_store:
343 case Intrinsic::amdgcn_raw_buffer_store_format:
344 case Intrinsic::amdgcn_raw_ptr_buffer_store_format:
345 case Intrinsic::amdgcn_struct_buffer_store:
346 case Intrinsic::amdgcn_struct_ptr_buffer_store:
347 case Intrinsic::amdgcn_struct_buffer_store_format:
348 case Intrinsic::amdgcn_struct_ptr_buffer_store_format:
349 case Intrinsic::amdgcn_struct_tbuffer_store:
350 case Intrinsic::amdgcn_struct_ptr_tbuffer_store: {
351 unsigned PtrOpNo = 1;
352 bool IsWrite = true;
353 Value *Ptr = CI->getArgOperand(PtrOpNo);
354 Type *Ty = Ptr->getType();
355 MaybeAlign Alignment = Ptr->getPointerAlignment(DL);
356 Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment);
357 break;
358 }
359 default:
360 for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
361 if (Type *Ty = CI->getParamByRefType(ArgNo)) {
362 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
363 } else if (Type *Ty = CI->getParamByValType(ArgNo)) {
364 Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
365 }
366 }
367 }
368 }
369}
370} // end namespace AMDGPU
371} // end namespace llvm
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
const char kAsanReportErrorTemplate[]
uint64_t Addr
uint64_t Size
#define I(x, y, z)
Definition: MD5.cpp:58
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:495
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:696
This class represents a function call, abstracting a target machine's calling convention.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:168
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition: IRBuilder.h:1824
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Definition: IRBuilder.cpp:933
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2289
BasicBlock::iterator GetInsertPoint() const
Definition: IRBuilder.h:172
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2142
Value * CreateTypeSize(Type *DstType, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Definition: IRBuilder.cpp:105
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1454
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition: IRBuilder.h:528
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1492
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1344
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2137
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Definition: IRBuilder.h:2569
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2216
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:177
Type * getVoidTy()
Fetch the type representing void.
Definition: IRBuilder.h:561
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2432
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2686
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:466
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:463
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:266
An instruction for reading from memory.
Definition: Instructions.h:174
MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
Definition: MDBuilder.cpp:47
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:290
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Definition: Type.cpp:664
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
A raw_ostream that writes to an SmallVector or SmallString.
Definition: raw_ostream.h:691
StringRef str() const
Return a StringRef for the vector contents.
Definition: raw_ostream.h:720
static uint64_t getMinRedzoneSizeForGlobal(int AsanScale)
static Value * memToShadow(Module &M, IRBuilder<> &IRB, Type *IntptrTy, Value *Shadow, int AsanScale, uint32_t AsanOffset)
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that needs to be instrumented.
static uint64_t getRedzoneSizeForScale(int AsanScale)
static Instruction * generateCrashCode(Module &M, IRBuilder<> &IRB, Type *IntptrTy, Instruction *InsertBefore, Value *Addr, bool IsWrite, size_t AccessSizeIndex, Value *SizeArgument, bool Recover)
static Instruction * genAMDGPUReportBlock(Module &M, IRBuilder<> &IRB, Value *Cond, bool Recover)
static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize)
static Value * createSlowPathCmp(Module &M, IRBuilder<> &IRB, Type *IntptrTy, Value *AddrLong, Value *ShadowValue, uint32_t TypeStoreSize, int AsanScale)
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
Instrument the memory operand Addr.
static void instrumentAddressImpl(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, uint32_t TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)
Given SizeInBytes of the Value to be instrunmented, Returns the redzone size corresponding to it.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: bit.h:215
Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141