AMDGPUAsanInstrumentation.cpp
//===- AMDGPUAsanInstrumentation.cpp - ASAN related helper functions -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUAsanInstrumentation.h"

#define DEBUG_TYPE "amdgpu-asan-instrumentation"

using namespace llvm;

namespace llvm {
namespace AMDGPU {

static uint64_t getRedzoneSizeForScale(int AsanScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
  return std::max(32U, 1U << AsanScale);
}
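
// Illustrative: with the common shadow scale of 3 this returns
// max(32, 1 << 3) = 32; scale 6 yields 64 and scale 7 yields 128, as the
// comment above notes.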

static uint64_t getMinRedzoneSizeForGlobal(int AsanScale) {
  return getRedzoneSizeForScale(AsanScale);
}

uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes) {
  constexpr uint64_t kMaxRZ = 1 << 18;
  const uint64_t MinRZ = getMinRedzoneSizeForGlobal(AsanScale);

  uint64_t RZ = 0;
  if (SizeInBytes <= MinRZ / 2) {
    // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is
    // at least 32 bytes, optimize when SizeInBytes is less than or equal to
    // half of MinRZ.
    RZ = MinRZ - SizeInBytes;
  } else {
    // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
    RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);

    // Round up to multiple of MinRZ.
    if (SizeInBytes % MinRZ)
      RZ += MinRZ - (SizeInBytes % MinRZ);
  }

  assert((RZ + SizeInBytes) % MinRZ == 0);

  return RZ;
}
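
// Worked example, assuming AsanScale = 3 (so MinRZ = 32, kMaxRZ = 256 KiB):
//   SizeInBytes = 4    -> small-object path: RZ = 32 - 4 = 28, so object plus
//                         redzone fills exactly one 32-byte granule.
//   SizeInBytes = 100  -> clamp((100/32/4)*32, 32, kMaxRZ) = 32, then the
//                         round-up adds 32 - (100 % 32) = 28, giving RZ = 60.
//   SizeInBytes = 1 MiB -> clamps to kMaxRZ = 256 KiB (~1/4 of the object).
// In every case (RZ + SizeInBytes) is a multiple of MinRZ, as asserted.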

static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = llvm::countr_zero(TypeSize / 8);
  return Res;
}
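
// TypeSize is in bits, so this maps access widths to the ASan callback size
// index: 8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3, 128 -> 4 (log2 of the byte size).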

static Instruction *genAMDGPUReportBlock(Module &M, IRBuilder<> &IRB,
                                         Value *Cond, bool Recover) {
  Value *ReportCond = Cond;
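  // amdgcn_ballot collects the per-lane condition into a wave-wide mask, so
  // the report block below is entered by the whole wavefront whenever any
  // active lane detects a bad access.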
  if (!Recover) {
    auto *Ballot =
        IRB.CreateIntrinsic(Intrinsic::amdgcn_ballot, IRB.getInt64Ty(), {Cond});
    ReportCond = IRB.CreateIsNotNull(Ballot);
  }

  auto *Trm = SplitBlockAndInsertIfThen(
      ReportCond, &*IRB.GetInsertPoint(), false,
      MDBuilder(M.getContext()).createUnlikelyBranchWeights());
  Trm->getParent()->setName("asan.report");

  if (Recover)
    return Trm;

  Trm = SplitBlockAndInsertIfThen(Cond, Trm, false);
  IRB.SetInsertPoint(Trm);
  return IRB.CreateIntrinsic(Intrinsic::amdgcn_unreachable, {}, {});
}
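
// ASan shadow encoding refresher for the check below: a shadow byte of 0
// means the whole 2^AsanScale-byte granule is addressable; a value k in
// (0, 2^AsanScale) means only the first k bytes are. The slow path therefore
// flags an access whose last byte offset within the granule is >= k; poisoned
// granules carry negative shadow values, so the signed compare always fires.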

static Value *createSlowPathCmp(Module &M, IRBuilder<> &IRB, Type *IntptrTy,
                                Value *AddrLong, Value *ShadowValue,
                                uint32_t TypeStoreSize, int AsanScale) {
  uint64_t Granularity = static_cast<uint64_t>(1) << AsanScale;
  // Addr & (Granularity - 1)
  Value *LastAccessedByte =
      IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
  // (Addr & (Granularity - 1)) + size - 1
  if (TypeStoreSize / 8 > 1)
    LastAccessedByte = IRB.CreateAdd(
        LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
  // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
  LastAccessedByte =
      IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
  // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
  return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
}
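
// The report callbacks built below follow the standard ASan runtime naming:
// template + access kind + byte size (or "_n" for the sized variant), plus a
// "_noabort" suffix in recover mode, e.g. __asan_report_load4,
// __asan_report_store8, __asan_report_load_n_noabort.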

static Instruction *generateCrashCode(Module &M, IRBuilder<> &IRB,
                                      Type *IntptrTy, Instruction *InsertBefore,
                                      Value *Addr, bool IsWrite,
                                      size_t AccessSizeIndex,
                                      Value *SizeArgument, bool Recover) {
  IRB.SetInsertPoint(InsertBefore);
  CallInst *Call = nullptr;
  const char kAsanReportErrorTemplate[] = "__asan_report_";
  SmallString<64> TypeStr{IsWrite ? "store" : "load"};
  SmallString<64> EndingStr{Recover ? "_noabort" : ""};

  SmallString<128> AsanErrorCallbackSizedString;
  raw_svector_ostream AsanErrorCallbackSizedOS(AsanErrorCallbackSizedString);
  AsanErrorCallbackSizedOS << kAsanReportErrorTemplate << TypeStr << "_n"
                           << EndingStr;

  SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
  AttributeList AL2;
  FunctionCallee AsanErrorCallbackSized = M.getOrInsertFunction(
      AsanErrorCallbackSizedOS.str(),
      FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
  SmallVector<Type *, 2> Args1{1, IntptrTy};
  AttributeList AL1;

  SmallString<128> AsanErrorCallbackString;
  raw_svector_ostream AsanErrorCallbackOS(AsanErrorCallbackString);
  AsanErrorCallbackOS << kAsanReportErrorTemplate << TypeStr
                      << (1ULL << AccessSizeIndex) << EndingStr;

  FunctionCallee AsanErrorCallback = M.getOrInsertFunction(
      AsanErrorCallbackOS.str(),
      FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
  if (SizeArgument) {
    Call = IRB.CreateCall(AsanErrorCallbackSized, {Addr, SizeArgument});
  } else {
    Call = IRB.CreateCall(AsanErrorCallback, Addr);
  }

  Call->setCannotMerge();
  return Call;
}
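
// The shadow address computation below is the usual ASan mapping
//   Shadow = (Addr >> AsanScale) + AsanOffset
// e.g. with AsanScale = 3 and a zero offset, address 0x1000 maps to shadow
// byte 0x200.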

static Value *memToShadow(Module &M, IRBuilder<> &IRB, Type *IntptrTy,
                          Value *Shadow, int AsanScale, uint32_t AsanOffset) {
  // Shadow >> scale
  Shadow = IRB.CreateLShr(Shadow, AsanScale);
  if (AsanOffset == 0)
    return Shadow;
  // (Shadow >> scale) + offset
  Value *ShadowBase = ConstantInt::get(IntptrTy, AsanOffset);
  return IRB.CreateAdd(Shadow, ShadowBase);
}
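
// instrumentAddress emits the classic ASan fast/slow path inline: load the
// shadow byte for the address, combine the non-zero-shadow check with the
// partial-granule compare, and route failing lanes into a per-wave report
// block that calls the matching __asan_report_* callback.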

void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
                       Instruction *InsertBefore, Value *Addr,
                       MaybeAlign Alignment, uint32_t TypeStoreSize,
                       bool IsWrite, Value *SizeArgument, bool UseCalls,
                       bool Recover, int AsanScale, int AsanOffset) {
  Type *AddrTy = Addr->getType();
  Type *IntptrTy = M.getDataLayout().getIntPtrType(
      M.getContext(), AddrTy->getPointerAddressSpace());
  IRB.SetInsertPoint(InsertBefore);
  size_t AccessSizeIndex = TypeStoreSizeToSizeIndex(TypeStoreSize);
  Type *ShadowTy = IntegerType::get(
      M.getContext(), std::max(8U, TypeStoreSize >> AsanScale));
  Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
  Value *AddrLong = IRB.CreatePtrToInt(Addr, IntptrTy);
  Value *ShadowPtr =
      memToShadow(M, IRB, IntptrTy, AddrLong, AsanScale, AsanOffset);
  const uint64_t ShadowAlign =
      std::max<uint64_t>(Alignment.valueOrOne().value() >> AsanScale, 1);
  Value *ShadowValue = IRB.CreateAlignedLoad(
      ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy), Align(ShadowAlign));
  Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
  auto *Cmp2 = createSlowPathCmp(M, IRB, IntptrTy, AddrLong, ShadowValue,
                                 TypeStoreSize, AsanScale);
  Cmp = IRB.CreateAnd(Cmp, Cmp2);
  Instruction *CrashTerm = genAMDGPUReportBlock(M, IRB, Cmp, Recover);
  Instruction *Crash =
      generateCrashCode(M, IRB, IntptrTy, CrashTerm, AddrLong, IsWrite,
                        AccessSizeIndex, SizeArgument, Recover);
  Crash->setDebugLoc(OrigIns->getDebugLoc());
}
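
// getInterestingMemoryOperands mirrors the generic AddressSanitizer logic but
// also recognizes AMDGPU buffer intrinsics: for each memory operand worth
// instrumenting it records the operand index, read/write direction, accessed
// type, alignment, and (for masked/VP ops) mask, effective vector length, and
// stride.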

void getInterestingMemoryOperands(
    Module &M, Instruction *I,
    SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
  const DataLayout &DL = M.getDataLayout();
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
                             LI->getType(), LI->getAlign());
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
                             SI->getValueOperand()->getType(), SI->getAlign());
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
                             RMW->getValOperand()->getType(), std::nullopt);
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
                             XCHG->getCompareOperand()->getType(),
                             std::nullopt);
  } else if (auto *CI = dyn_cast<CallInst>(I)) {
    switch (CI->getIntrinsicID()) {
    case Intrinsic::masked_load:
    case Intrinsic::masked_store:
    case Intrinsic::masked_gather:
    case Intrinsic::masked_scatter: {
      bool IsWrite = CI->getType()->isVoidTy();
      // Masked store has an initial operand for the value.
      unsigned OpOffset = IsWrite ? 1 : 0;
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = Align(1);
      // Otherwise no alignment guarantees. We probably got Undef.
      if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
        Alignment = Op->getMaybeAlignValue();
      Value *Mask = CI->getOperand(2 + OpOffset);
      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
      break;
    }
    case Intrinsic::masked_expandload:
    case Intrinsic::masked_compressstore: {
      bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
      unsigned OpOffset = IsWrite ? 1 : 0;
      auto *BasePtr = CI->getOperand(OpOffset);
      MaybeAlign Alignment = BasePtr->getPointerAlignment(DL);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      IRBuilder<> IB(I);
      Value *Mask = CI->getOperand(1 + OpOffset);
      Type *IntptrTy = M.getDataLayout().getIntPtrType(
          M.getContext(), BasePtr->getType()->getPointerAddressSpace());
      // Use the popcount of Mask as the effective vector length.
      Type *ExtTy = VectorType::get(IntptrTy, cast<VectorType>(Ty));
      Value *ExtMask = IB.CreateZExt(Mask, ExtTy);
      Value *EVL = IB.CreateAddReduce(ExtMask);
      Value *TrueMask = ConstantInt::get(Mask->getType(), 1);
      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
                               EVL);
      break;
    }
    case Intrinsic::vp_load:
    case Intrinsic::vp_store:
    case Intrinsic::experimental_vp_strided_load:
    case Intrinsic::experimental_vp_strided_store: {
      auto *VPI = cast<VPIntrinsic>(CI);
      unsigned IID = CI->getIntrinsicID();
      bool IsWrite = CI->getType()->isVoidTy();
      unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(DL);
      Value *Stride = nullptr;
      if (IID == Intrinsic::experimental_vp_strided_store ||
          IID == Intrinsic::experimental_vp_strided_load) {
        Stride = VPI->getOperand(PtrOpNo + 1);
        // Use the pointer alignment as the element alignment if the stride is
        // a multiple of the pointer alignment. Otherwise, the element
        // alignment should be Align(1).
        unsigned PointerAlign = Alignment.valueOrOne().value();
        if (!isa<ConstantInt>(Stride) ||
            cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
          Alignment = Align(1);
      }
      Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
                               VPI->getMaskParam(), VPI->getVectorLengthParam(),
                               Stride);
      break;
    }
    case Intrinsic::vp_gather:
    case Intrinsic::vp_scatter: {
      auto *VPI = cast<VPIntrinsic>(CI);
      unsigned IID = CI->getIntrinsicID();
      bool IsWrite = IID == Intrinsic::vp_scatter;
      unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = VPI->getPointerAlignment();
      Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
                               VPI->getMaskParam(),
                               VPI->getVectorLengthParam());
      break;
    }
    case Intrinsic::amdgcn_raw_buffer_load:
    case Intrinsic::amdgcn_raw_ptr_buffer_load:
    case Intrinsic::amdgcn_raw_buffer_load_format:
    case Intrinsic::amdgcn_raw_ptr_buffer_load_format:
    case Intrinsic::amdgcn_raw_tbuffer_load:
    case Intrinsic::amdgcn_raw_ptr_tbuffer_load:
    case Intrinsic::amdgcn_struct_buffer_load:
    case Intrinsic::amdgcn_struct_ptr_buffer_load:
    case Intrinsic::amdgcn_struct_buffer_load_format:
    case Intrinsic::amdgcn_struct_ptr_buffer_load_format:
    case Intrinsic::amdgcn_struct_tbuffer_load:
    case Intrinsic::amdgcn_struct_ptr_tbuffer_load:
    case Intrinsic::amdgcn_s_buffer_load:
    case Intrinsic::amdgcn_global_load_tr_b64:
    case Intrinsic::amdgcn_global_load_tr_b128: {
      unsigned PtrOpNo = 0;
      bool IsWrite = false;
      Type *Ty = CI->getType();
      Value *Ptr = CI->getArgOperand(PtrOpNo);
      MaybeAlign Alignment = Ptr->getPointerAlignment(DL);
      Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment);
      break;
    }
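    // For the buffer/tbuffer store intrinsics below, operand 0 is the data
    // being stored, so the buffer resource/pointer operand sits at index 1
    // (unlike the loads above, where it is operand 0).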
    case Intrinsic::amdgcn_raw_tbuffer_store:
    case Intrinsic::amdgcn_raw_ptr_tbuffer_store:
    case Intrinsic::amdgcn_raw_buffer_store:
    case Intrinsic::amdgcn_raw_ptr_buffer_store:
    case Intrinsic::amdgcn_raw_buffer_store_format:
    case Intrinsic::amdgcn_raw_ptr_buffer_store_format:
    case Intrinsic::amdgcn_struct_buffer_store:
    case Intrinsic::amdgcn_struct_ptr_buffer_store:
    case Intrinsic::amdgcn_struct_buffer_store_format:
    case Intrinsic::amdgcn_struct_ptr_buffer_store_format:
    case Intrinsic::amdgcn_struct_tbuffer_store:
    case Intrinsic::amdgcn_struct_ptr_tbuffer_store: {
      unsigned PtrOpNo = 1;
      bool IsWrite = true;
      Value *Ptr = CI->getArgOperand(PtrOpNo);
      Type *Ty = Ptr->getType();
      MaybeAlign Alignment = Ptr->getPointerAlignment(DL);
      Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment);
      break;
    }
    default:
      for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
        if (Type *Ty = CI->getParamByRefType(ArgNo)) {
          Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
        } else if (Type *Ty = CI->getParamByValType(ArgNo)) {
          Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
        }
      }
    }
  }
}
} // end namespace AMDGPU
} // end namespace llvm