File: llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
Warning: line 1512, column 26: Forming reference to null pointer
1 | //===- AddressSanitizer.cpp - memory error detector -----------------------===// | ||||
2 | // | ||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||
6 | // | ||||
7 | //===----------------------------------------------------------------------===// | ||||
8 | // | ||||
9 | // This file is a part of AddressSanitizer, an address basic correctness | ||||
10 | // checker. | ||||
11 | // Details of the algorithm: | ||||
12 | // https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm | ||||
13 | // | ||||
14 | // FIXME: This sanitizer does not yet handle scalable vectors | ||||
15 | // | ||||
16 | //===----------------------------------------------------------------------===// | ||||
17 | |||||
18 | #include "llvm/Transforms/Instrumentation/AddressSanitizer.h" | ||||
19 | #include "llvm/ADT/ArrayRef.h" | ||||
20 | #include "llvm/ADT/DenseMap.h" | ||||
21 | #include "llvm/ADT/DepthFirstIterator.h" | ||||
22 | #include "llvm/ADT/SmallPtrSet.h" | ||||
23 | #include "llvm/ADT/SmallVector.h" | ||||
24 | #include "llvm/ADT/Statistic.h" | ||||
25 | #include "llvm/ADT/StringExtras.h" | ||||
26 | #include "llvm/ADT/StringRef.h" | ||||
27 | #include "llvm/ADT/Triple.h" | ||||
28 | #include "llvm/ADT/Twine.h" | ||||
29 | #include "llvm/Analysis/MemoryBuiltins.h" | ||||
30 | #include "llvm/Analysis/StackSafetyAnalysis.h" | ||||
31 | #include "llvm/Analysis/TargetLibraryInfo.h" | ||||
32 | #include "llvm/Analysis/ValueTracking.h" | ||||
33 | #include "llvm/BinaryFormat/MachO.h" | ||||
34 | #include "llvm/IR/Argument.h" | ||||
35 | #include "llvm/IR/Attributes.h" | ||||
36 | #include "llvm/IR/BasicBlock.h" | ||||
37 | #include "llvm/IR/Comdat.h" | ||||
38 | #include "llvm/IR/Constant.h" | ||||
39 | #include "llvm/IR/Constants.h" | ||||
40 | #include "llvm/IR/DIBuilder.h" | ||||
41 | #include "llvm/IR/DataLayout.h" | ||||
42 | #include "llvm/IR/DebugInfoMetadata.h" | ||||
43 | #include "llvm/IR/DebugLoc.h" | ||||
44 | #include "llvm/IR/DerivedTypes.h" | ||||
45 | #include "llvm/IR/Dominators.h" | ||||
46 | #include "llvm/IR/Function.h" | ||||
47 | #include "llvm/IR/GlobalAlias.h" | ||||
48 | #include "llvm/IR/GlobalValue.h" | ||||
49 | #include "llvm/IR/GlobalVariable.h" | ||||
50 | #include "llvm/IR/IRBuilder.h" | ||||
51 | #include "llvm/IR/InlineAsm.h" | ||||
52 | #include "llvm/IR/InstIterator.h" | ||||
53 | #include "llvm/IR/InstVisitor.h" | ||||
54 | #include "llvm/IR/InstrTypes.h" | ||||
55 | #include "llvm/IR/Instruction.h" | ||||
56 | #include "llvm/IR/Instructions.h" | ||||
57 | #include "llvm/IR/IntrinsicInst.h" | ||||
58 | #include "llvm/IR/Intrinsics.h" | ||||
59 | #include "llvm/IR/LLVMContext.h" | ||||
60 | #include "llvm/IR/MDBuilder.h" | ||||
61 | #include "llvm/IR/Metadata.h" | ||||
62 | #include "llvm/IR/Module.h" | ||||
63 | #include "llvm/IR/Type.h" | ||||
64 | #include "llvm/IR/Use.h" | ||||
65 | #include "llvm/IR/Value.h" | ||||
66 | #include "llvm/InitializePasses.h" | ||||
67 | #include "llvm/MC/MCSectionMachO.h" | ||||
68 | #include "llvm/Pass.h" | ||||
69 | #include "llvm/Support/Casting.h" | ||||
70 | #include "llvm/Support/CommandLine.h" | ||||
71 | #include "llvm/Support/Debug.h" | ||||
72 | #include "llvm/Support/ErrorHandling.h" | ||||
73 | #include "llvm/Support/MathExtras.h" | ||||
74 | #include "llvm/Support/ScopedPrinter.h" | ||||
75 | #include "llvm/Support/raw_ostream.h" | ||||
76 | #include "llvm/Transforms/Instrumentation.h" | ||||
77 | #include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h" | ||||
78 | #include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h" | ||||
79 | #include "llvm/Transforms/Utils/ASanStackFrameLayout.h" | ||||
80 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" | ||||
81 | #include "llvm/Transforms/Utils/Local.h" | ||||
82 | #include "llvm/Transforms/Utils/ModuleUtils.h" | ||||
83 | #include "llvm/Transforms/Utils/PromoteMemToReg.h" | ||||
84 | #include <algorithm> | ||||
85 | #include <cassert> | ||||
86 | #include <cstddef> | ||||
87 | #include <cstdint> | ||||
88 | #include <iomanip> | ||||
89 | #include <limits> | ||||
90 | #include <memory> | ||||
91 | #include <sstream> | ||||
92 | #include <string> | ||||
93 | #include <tuple> | ||||
94 | |||||
95 | using namespace llvm; | ||||
96 | |||||
97 | #define DEBUG_TYPE "asan" | ||||
98 | |||||
99 | static const uint64_t kDefaultShadowScale = 3; | ||||
100 | static const uint64_t kDefaultShadowOffset32 = 1ULL << 29; | ||||
101 | static const uint64_t kDefaultShadowOffset64 = 1ULL << 44; | ||||
102 | static const uint64_t kDynamicShadowSentinel = | ||||
103 | std::numeric_limits<uint64_t>::max(); | ||||
104 | static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF; // < 2G. | ||||
105 | static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL; | ||||
106 | static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000; | ||||
107 | static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44; | ||||
108 | static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52; | ||||
109 | static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000; | ||||
110 | static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37; | ||||
111 | static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36; | ||||
112 | static const uint64_t kRISCV64_ShadowOffset64 = 0xd55550000; | ||||
113 | static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30; | ||||
114 | static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46; | ||||
115 | static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000; | ||||
116 | static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30; | ||||
117 | static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46; | ||||
118 | static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000; | ||||
119 | static const uint64_t kPS4CPU_ShadowOffset64 = 1ULL << 40; | ||||
120 | static const uint64_t kWindowsShadowOffset32 = 3ULL << 28; | ||||
121 | static const uint64_t kEmscriptenShadowOffset = 0; | ||||
122 | |||||
123 | // The shadow memory space is dynamically allocated. | ||||
124 | static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel; | ||||
125 | |||||
126 | static const size_t kMinStackMallocSize = 1 << 6; // 64B | ||||
127 | static const size_t kMaxStackMallocSize = 1 << 16; // 64K | ||||
128 | static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3; | ||||
129 | static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E; | ||||
130 | |||||
131 | const char kAsanModuleCtorName[] = "asan.module_ctor"; | ||||
132 | const char kAsanModuleDtorName[] = "asan.module_dtor"; | ||||
133 | static const uint64_t kAsanCtorAndDtorPriority = 1; | ||||
134 | // On Emscripten, the system needs more than one priority for constructors. | ||||
135 | static const uint64_t kAsanEmscriptenCtorAndDtorPriority = 50; | ||||
136 | const char kAsanReportErrorTemplate[] = "__asan_report_"; | ||||
137 | const char kAsanRegisterGlobalsName[] = "__asan_register_globals"; | ||||
138 | const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals"; | ||||
139 | const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals"; | ||||
140 | const char kAsanUnregisterImageGlobalsName[] = | ||||
141 | "__asan_unregister_image_globals"; | ||||
142 | const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals"; | ||||
143 | const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals"; | ||||
144 | const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init"; | ||||
145 | const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init"; | ||||
146 | const char kAsanInitName[] = "__asan_init"; | ||||
147 | const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v"; | ||||
148 | const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp"; | ||||
149 | const char kAsanPtrSub[] = "__sanitizer_ptr_sub"; | ||||
150 | const char kAsanHandleNoReturnName[] = "__asan_handle_no_return"; | ||||
151 | static const int kMaxAsanStackMallocSizeClass = 10; | ||||
152 | const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_"; | ||||
153 | const char kAsanStackMallocAlwaysNameTemplate[] = | ||||
154 | "__asan_stack_malloc_always_"; | ||||
155 | const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_"; | ||||
156 | const char kAsanGenPrefix[] = "___asan_gen_"; | ||||
157 | const char kODRGenPrefix[] = "__odr_asan_gen_"; | ||||
158 | const char kSanCovGenPrefix[] = "__sancov_gen_"; | ||||
159 | const char kAsanSetShadowPrefix[] = "__asan_set_shadow_"; | ||||
160 | const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory"; | ||||
161 | const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory"; | ||||
162 | |||||
163 | // ASan version script has __asan_* wildcard. Triple underscore prevents a | ||||
164 | // linker (gold) warning about attempting to export a local symbol. | ||||
165 | const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered"; | ||||
166 | |||||
167 | const char kAsanOptionDetectUseAfterReturn[] = | ||||
168 | "__asan_option_detect_stack_use_after_return"; | ||||
169 | |||||
170 | const char kAsanShadowMemoryDynamicAddress[] = | ||||
171 | "__asan_shadow_memory_dynamic_address"; | ||||
172 | |||||
173 | const char kAsanAllocaPoison[] = "__asan_alloca_poison"; | ||||
174 | const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison"; | ||||
175 | |||||
176 | const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared"; | ||||
177 | const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private"; | ||||
178 | |||||
179 | // Access sizes are powers of two: 1, 2, 4, 8, 16. | ||||
180 | static const size_t kNumberOfAccessSizes = 5; | ||||
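// Editor's note (illustrative, not in the original source): index i
// corresponds to an access of 1 << i bytes, so five indices cover the
// sizes 1, 2, 4, 8 and 16 listed above.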
181 | |||||
182 | static const uint64_t kAllocaRzSize = 32; | ||||
183 | |||||
184 | // ASanAccessInfo implementation constants. | ||||
185 | constexpr size_t kCompileKernelShift = 0; | ||||
186 | constexpr size_t kCompileKernelMask = 0x1; | ||||
187 | constexpr size_t kAccessSizeIndexShift = 1; | ||||
188 | constexpr size_t kAccessSizeIndexMask = 0xf; | ||||
189 | constexpr size_t kIsWriteShift = 5; | ||||
190 | constexpr size_t kIsWriteMask = 0x1; | ||||
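// Editor's illustration (not in the original source): the packed layout
// implied by the shift/mask constants above is
//   bit 0     : CompileKernel
//   bits 1..4 : AccessSizeIndex (log2 of the access size)
//   bit 5     : IsWrite
// so an 8-byte (index 3) userspace write packs to 0x26:
static_assert(((1 << kIsWriteShift) | (3 << kAccessSizeIndexShift)) == 0x26,
              "packed value for an 8-byte userspace write");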
191 | |||||
192 | // Command-line flags. | ||||
193 | |||||
194 | static cl::opt<bool> ClEnableKasan( | ||||
195 | "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), | ||||
196 | cl::Hidden, cl::init(false)); | ||||
197 | |||||
198 | static cl::opt<bool> ClRecover( | ||||
199 | "asan-recover", | ||||
200 | cl::desc("Enable recovery mode (continue-after-error)."), | ||||
201 | cl::Hidden, cl::init(false)); | ||||
202 | |||||
203 | static cl::opt<bool> ClInsertVersionCheck( | ||||
204 | "asan-guard-against-version-mismatch", | ||||
205 | cl::desc("Guard against compiler/runtime version mismatch."), | ||||
206 | cl::Hidden, cl::init(true)); | ||||
207 | |||||
208 | // This flag may need to be replaced with -f[no-]asan-reads. | ||||
209 | static cl::opt<bool> ClInstrumentReads("asan-instrument-reads", | ||||
210 | cl::desc("instrument read instructions"), | ||||
211 | cl::Hidden, cl::init(true)); | ||||
212 | |||||
213 | static cl::opt<bool> ClInstrumentWrites( | ||||
214 | "asan-instrument-writes", cl::desc("instrument write instructions"), | ||||
215 | cl::Hidden, cl::init(true)); | ||||
216 | |||||
217 | static cl::opt<bool> | ||||
218 | ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(false), | ||||
219 | cl::Hidden, cl::desc("Use Stack Safety analysis results"), | ||||
220 | cl::Optional); | ||||
221 | |||||
222 | static cl::opt<bool> ClInstrumentAtomics( | ||||
223 | "asan-instrument-atomics", | ||||
224 | cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, | ||||
225 | cl::init(true)); | ||||
226 | |||||
227 | static cl::opt<bool> | ||||
228 | ClInstrumentByval("asan-instrument-byval", | ||||
229 | cl::desc("instrument byval call arguments"), cl::Hidden, | ||||
230 | cl::init(true)); | ||||
231 | |||||
232 | static cl::opt<bool> ClAlwaysSlowPath( | ||||
233 | "asan-always-slow-path", | ||||
234 | cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, | ||||
235 | cl::init(false)); | ||||
236 | |||||
237 | static cl::opt<bool> ClForceDynamicShadow( | ||||
238 | "asan-force-dynamic-shadow", | ||||
239 | cl::desc("Load shadow address into a local variable for each function"), | ||||
240 | cl::Hidden, cl::init(false)); | ||||
241 | |||||
242 | static cl::opt<bool> | ||||
243 | ClWithIfunc("asan-with-ifunc", | ||||
244 | cl::desc("Access dynamic shadow through an ifunc global on " | ||||
245 | "platforms that support this"), | ||||
246 | cl::Hidden, cl::init(true)); | ||||
247 | |||||
248 | static cl::opt<bool> ClWithIfuncSuppressRemat( | ||||
249 | "asan-with-ifunc-suppress-remat", | ||||
250 | cl::desc("Suppress rematerialization of dynamic shadow address by passing " | ||||
251 | "it through inline asm in prologue."), | ||||
252 | cl::Hidden, cl::init(true)); | ||||
253 | |||||
254 | // This flag limits the number of instructions to be instrumented | ||||
255 | // in any given BB. Normally, this should be set to unlimited (INT_MAX), | ||||
256 | // but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily | ||||
257 | // set it to 10000. | ||||
258 | static cl::opt<int> ClMaxInsnsToInstrumentPerBB( | ||||
259 | "asan-max-ins-per-bb", cl::init(10000), | ||||
260 | cl::desc("maximal number of instructions to instrument in any given BB"), | ||||
261 | cl::Hidden); | ||||
262 | |||||
263 | // This flag may need to be replaced with -f[no]asan-stack. | ||||
264 | static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"), | ||||
265 | cl::Hidden, cl::init(true)); | ||||
266 | static cl::opt<uint32_t> ClMaxInlinePoisoningSize( | ||||
267 | "asan-max-inline-poisoning-size", | ||||
268 | cl::desc( | ||||
269 | "Inline shadow poisoning for blocks up to the given size in bytes."), | ||||
270 | cl::Hidden, cl::init(64)); | ||||
271 | |||||
272 | static cl::opt<AsanDetectStackUseAfterReturnMode> ClUseAfterReturn( | ||||
273 | "asan-use-after-return", | ||||
274 | cl::desc("Sets the mode of detection for stack-use-after-return."), | ||||
275 | cl::values( | ||||
276 | clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", | ||||
277 | "Never detect stack use after return."), | ||||
278 | clEnumValN( | ||||
279 | AsanDetectStackUseAfterReturnMode::Runtime, "runtime", | ||||
280 | "Detect stack use after return if " | ||||
281 | "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), | ||||
282 | clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", | ||||
283 | "Always detect stack use after return."), | ||||
284 | cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime)); | ||||
285 | |||||
286 | static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args", | ||||
287 | cl::desc("Create redzones for byval " | ||||
288 | "arguments (extra copy " | ||||
289 | "required)"), cl::Hidden, | ||||
290 | cl::init(true)); | ||||
291 | |||||
292 | static cl::opt<bool> ClUseAfterScope("asan-use-after-scope", | ||||
293 | cl::desc("Check stack-use-after-scope"), | ||||
294 | cl::Hidden, cl::init(false)); | ||||
295 | |||||
296 | // This flag may need to be replaced with -f[no]asan-globals. | ||||
297 | static cl::opt<bool> ClGlobals("asan-globals", | ||||
298 | cl::desc("Handle global objects"), cl::Hidden, | ||||
299 | cl::init(true)); | ||||
300 | |||||
301 | static cl::opt<bool> ClInitializers("asan-initialization-order", | ||||
302 | cl::desc("Handle C++ initializer order"), | ||||
303 | cl::Hidden, cl::init(true)); | ||||
304 | |||||
305 | static cl::opt<bool> ClInvalidPointerPairs( | ||||
306 | "asan-detect-invalid-pointer-pair", | ||||
307 | cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, | ||||
308 | cl::init(false)); | ||||
309 | |||||
310 | static cl::opt<bool> ClInvalidPointerCmp( | ||||
311 | "asan-detect-invalid-pointer-cmp", | ||||
312 | cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, | ||||
313 | cl::init(false)); | ||||
314 | |||||
315 | static cl::opt<bool> ClInvalidPointerSub( | ||||
316 | "asan-detect-invalid-pointer-sub", | ||||
317 | cl::desc("Instrument - operations with pointer operands"), cl::Hidden, | ||||
318 | cl::init(false)); | ||||
319 | |||||
320 | static cl::opt<unsigned> ClRealignStack( | ||||
321 | "asan-realign-stack", | ||||
322 | cl::desc("Realign stack to the value of this flag (power of two)"), | ||||
323 | cl::Hidden, cl::init(32)); | ||||
324 | |||||
325 | static cl::opt<int> ClInstrumentationWithCallsThreshold( | ||||
326 | "asan-instrumentation-with-call-threshold", | ||||
327 | cl::desc( | ||||
328 | "If the function being instrumented contains more than " | ||||
329 | "this number of memory accesses, use callbacks instead of " | ||||
330 | "inline checks (-1 means never use callbacks)."), | ||||
331 | cl::Hidden, cl::init(7000)); | ||||
332 | |||||
333 | static cl::opt<std::string> ClMemoryAccessCallbackPrefix( | ||||
334 | "asan-memory-access-callback-prefix", | ||||
335 | cl::desc("Prefix for memory access callbacks"), cl::Hidden, | ||||
336 | cl::init("__asan_")); | ||||
337 | |||||
338 | static cl::opt<bool> | ||||
339 | ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", | ||||
340 | cl::desc("instrument dynamic allocas"), | ||||
341 | cl::Hidden, cl::init(true)); | ||||
342 | |||||
343 | static cl::opt<bool> ClSkipPromotableAllocas( | ||||
344 | "asan-skip-promotable-allocas", | ||||
345 | cl::desc("Do not instrument promotable allocas"), cl::Hidden, | ||||
346 | cl::init(true)); | ||||
347 | |||||
348 | // These flags allow changing the shadow mapping. | ||||
349 | // The shadow mapping looks like | ||||
350 | // Shadow = (Mem >> scale) + offset | ||||
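// Editor's illustration (not in the original source): with the 64-bit
// defaults used in this file (scale 3, offset 1ULL << 44), the byte at
// 0x7fffffff0000 is shadowed at
//   (0x7fffffff0000 >> 3) + 0x100000000000 = 0x1fffffffe000,
// and each shadow byte describes 8 application bytes.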
351 | |||||
352 | static cl::opt<int> ClMappingScale("asan-mapping-scale", | ||||
353 | cl::desc("scale of asan shadow mapping"), | ||||
354 | cl::Hidden, cl::init(0)); | ||||
355 | |||||
356 | static cl::opt<uint64_t> | ||||
357 | ClMappingOffset("asan-mapping-offset", | ||||
358 | cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), | ||||
359 | cl::Hidden, cl::init(0)); | ||||
360 | |||||
361 | // Optimization flags. Not user visible, used mostly for testing | ||||
362 | // and benchmarking the tool. | ||||
363 | |||||
364 | static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"), | ||||
365 | cl::Hidden, cl::init(true)); | ||||
366 | |||||
367 | static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks", | ||||
368 | cl::desc("Optimize callbacks"), | ||||
369 | cl::Hidden, cl::init(false)); | ||||
370 | |||||
371 | static cl::opt<bool> ClOptSameTemp( | ||||
372 | "asan-opt-same-temp", cl::desc("Instrument the same temp just once"), | ||||
373 | cl::Hidden, cl::init(true)); | ||||
374 | |||||
375 | static cl::opt<bool> ClOptGlobals("asan-opt-globals", | ||||
376 | cl::desc("Don't instrument scalar globals"), | ||||
377 | cl::Hidden, cl::init(true)); | ||||
378 | |||||
379 | static cl::opt<bool> ClOptStack( | ||||
380 | "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), | ||||
381 | cl::Hidden, cl::init(false)); | ||||
382 | |||||
383 | static cl::opt<bool> ClDynamicAllocaStack( | ||||
384 | "asan-stack-dynamic-alloca", | ||||
385 | cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, | ||||
386 | cl::init(true)); | ||||
387 | |||||
388 | static cl::opt<uint32_t> ClForceExperiment( | ||||
389 | "asan-force-experiment", | ||||
390 | cl::desc("Force optimization experiment (for testing)"), cl::Hidden, | ||||
391 | cl::init(0)); | ||||
392 | |||||
393 | static cl::opt<bool> | ||||
394 | ClUsePrivateAlias("asan-use-private-alias", | ||||
395 | cl::desc("Use private aliases for global variables"), | ||||
396 | cl::Hidden, cl::init(false)); | ||||
397 | |||||
398 | static cl::opt<bool> | ||||
399 | ClUseOdrIndicator("asan-use-odr-indicator", | ||||
400 | cl::desc("Use odr indicators to improve ODR reporting"), | ||||
401 | cl::Hidden, cl::init(false)); | ||||
402 | |||||
403 | static cl::opt<bool> | ||||
404 | ClUseGlobalsGC("asan-globals-live-support", | ||||
405 | cl::desc("Use linker features to support dead " | ||||
406 | "code stripping of globals"), | ||||
407 | cl::Hidden, cl::init(true)); | ||||
408 | |||||
409 | // This is on by default even though there is a bug in gold: | ||||
410 | // https://sourceware.org/bugzilla/show_bug.cgi?id=19002 | ||||
411 | static cl::opt<bool> | ||||
412 | ClWithComdat("asan-with-comdat", | ||||
413 | cl::desc("Place ASan constructors in comdat sections"), | ||||
414 | cl::Hidden, cl::init(true)); | ||||
415 | |||||
416 | static cl::opt<AsanDtorKind> ClOverrideDestructorKind( | ||||
417 | "asan-destructor-kind", | ||||
418 | cl::desc("Sets the ASan destructor kind. The default is to use the value " | ||||
419 | "provided to the pass constructor"), | ||||
420 | cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), | ||||
421 | clEnumValN(AsanDtorKind::Global, "global", | ||||
422 | "Use global destructors")), | ||||
423 | cl::init(AsanDtorKind::Invalid), cl::Hidden); | ||||
424 | |||||
425 | // Debug flags. | ||||
426 | |||||
427 | static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, | ||||
428 | cl::init(0)); | ||||
429 | |||||
430 | static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"), | ||||
431 | cl::Hidden, cl::init(0)); | ||||
432 | |||||
433 | static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden, | ||||
434 | cl::desc("Debug func")); | ||||
435 | |||||
436 | static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), | ||||
437 | cl::Hidden, cl::init(-1)); | ||||
438 | |||||
439 | static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), | ||||
440 | cl::Hidden, cl::init(-1)); | ||||
441 | |||||
442 | STATISTIC(NumInstrumentedReads, "Number of instrumented reads"); | ||||
443 | STATISTIC(NumInstrumentedWrites, "Number of instrumented writes"); | ||||
444 | STATISTIC(NumOptimizedAccessesToGlobalVar, | ||||
445 | "Number of optimized accesses to global vars"); | ||||
446 | STATISTIC(NumOptimizedAccessesToStackVar, | ||||
447 | "Number of optimized accesses to stack vars"); | ||||
448 | |||||
449 | namespace { | ||||
450 | |||||
451 | /// This struct defines the shadow mapping using the rule: | ||||
452 | /// shadow = (mem >> Scale) ADD-or-OR Offset. | ||||
453 | /// If InGlobal is true, then | ||||
454 | /// extern char __asan_shadow[]; | ||||
455 | /// shadow = (mem >> Scale) + &__asan_shadow | ||||
456 | struct ShadowMapping { | ||||
457 | int Scale; | ||||
458 | uint64_t Offset; | ||||
459 | bool OrShadowOffset; | ||||
460 | bool InGlobal; | ||||
461 | }; | ||||
462 | |||||
463 | } // end anonymous namespace | ||||
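// Editor's illustration (not in the original source): a minimal sketch of
// how a ShadowMapping is applied, mirroring the memToShadow() declared
// further below:
//   uint64_t applyMapping(const ShadowMapping &M, uint64_t Addr) {
//     uint64_t Shifted = Addr >> M.Scale;
//     return M.OrShadowOffset ? (Shifted | M.Offset) : (Shifted + M.Offset);
//   }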
464 | |||||
465 | static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, | ||||
466 | bool IsKasan) { | ||||
467 | bool IsAndroid = TargetTriple.isAndroid(); | ||||
468 | bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS(); | ||||
469 | bool IsMacOS = TargetTriple.isMacOSX(); | ||||
470 | bool IsFreeBSD = TargetTriple.isOSFreeBSD(); | ||||
471 | bool IsNetBSD = TargetTriple.isOSNetBSD(); | ||||
472 | bool IsPS4CPU = TargetTriple.isPS4CPU(); | ||||
473 | bool IsLinux = TargetTriple.isOSLinux(); | ||||
474 | bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 || | ||||
475 | TargetTriple.getArch() == Triple::ppc64le; | ||||
476 | bool IsSystemZ = TargetTriple.getArch() == Triple::systemz; | ||||
477 | bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64; | ||||
478 | bool IsMIPS32 = TargetTriple.isMIPS32(); | ||||
479 | bool IsMIPS64 = TargetTriple.isMIPS64(); | ||||
480 | bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb(); | ||||
481 | bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64; | ||||
482 | bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64; | ||||
483 | bool IsWindows = TargetTriple.isOSWindows(); | ||||
484 | bool IsFuchsia = TargetTriple.isOSFuchsia(); | ||||
485 | bool IsEmscripten = TargetTriple.isOSEmscripten(); | ||||
486 | bool IsAMDGPU = TargetTriple.isAMDGPU(); | ||||
487 | |||||
488 | ShadowMapping Mapping; | ||||
489 | |||||
490 | Mapping.Scale = kDefaultShadowScale; | ||||
491 | if (ClMappingScale.getNumOccurrences() > 0) { | ||||
492 | Mapping.Scale = ClMappingScale; | ||||
493 | } | ||||
494 | |||||
495 | if (LongSize == 32) { | ||||
496 | if (IsAndroid) | ||||
497 | Mapping.Offset = kDynamicShadowSentinel; | ||||
498 | else if (IsMIPS32) | ||||
499 | Mapping.Offset = kMIPS32_ShadowOffset32; | ||||
500 | else if (IsFreeBSD) | ||||
501 | Mapping.Offset = kFreeBSD_ShadowOffset32; | ||||
502 | else if (IsNetBSD) | ||||
503 | Mapping.Offset = kNetBSD_ShadowOffset32; | ||||
504 | else if (IsIOS) | ||||
505 | Mapping.Offset = kDynamicShadowSentinel; | ||||
506 | else if (IsWindows) | ||||
507 | Mapping.Offset = kWindowsShadowOffset32; | ||||
508 | else if (IsEmscripten) | ||||
509 | Mapping.Offset = kEmscriptenShadowOffset; | ||||
510 | else | ||||
511 | Mapping.Offset = kDefaultShadowOffset32; | ||||
512 | } else { // LongSize == 64 | ||||
513 | // Fuchsia is always PIE, which means that the beginning of the address | ||||
514 | // space is always available. | ||||
515 | if (IsFuchsia) | ||||
516 | Mapping.Offset = 0; | ||||
517 | else if (IsPPC64) | ||||
518 | Mapping.Offset = kPPC64_ShadowOffset64; | ||||
519 | else if (IsSystemZ) | ||||
520 | Mapping.Offset = kSystemZ_ShadowOffset64; | ||||
521 | else if (IsFreeBSD && !IsMIPS64) { | ||||
522 | if (IsKasan) | ||||
523 | Mapping.Offset = kFreeBSDKasan_ShadowOffset64; | ||||
524 | else | ||||
525 | Mapping.Offset = kFreeBSD_ShadowOffset64; | ||||
526 | } else if (IsNetBSD) { | ||||
527 | if (IsKasan) | ||||
528 | Mapping.Offset = kNetBSDKasan_ShadowOffset64; | ||||
529 | else | ||||
530 | Mapping.Offset = kNetBSD_ShadowOffset64; | ||||
531 | } else if (IsPS4CPU) | ||||
532 | Mapping.Offset = kPS4CPU_ShadowOffset64; | ||||
533 | else if (IsLinux && IsX86_64) { | ||||
534 | if (IsKasan) | ||||
535 | Mapping.Offset = kLinuxKasan_ShadowOffset64; | ||||
536 | else | ||||
537 | Mapping.Offset = (kSmallX86_64ShadowOffsetBase & | ||||
538 | (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale)); | ||||
539 | } else if (IsWindows && IsX86_64) { | ||||
540 | Mapping.Offset = kWindowsShadowOffset64; | ||||
541 | } else if (IsMIPS64) | ||||
542 | Mapping.Offset = kMIPS64_ShadowOffset64; | ||||
543 | else if (IsIOS) | ||||
544 | Mapping.Offset = kDynamicShadowSentinel; | ||||
545 | else if (IsMacOS && IsAArch64) | ||||
546 | Mapping.Offset = kDynamicShadowSentinel; | ||||
547 | else if (IsAArch64) | ||||
548 | Mapping.Offset = kAArch64_ShadowOffset64; | ||||
549 | else if (IsRISCV64) | ||||
550 | Mapping.Offset = kRISCV64_ShadowOffset64; | ||||
551 | else if (IsAMDGPU) | ||||
552 | Mapping.Offset = (kSmallX86_64ShadowOffsetBase & | ||||
553 | (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale)); | ||||
554 | else | ||||
555 | Mapping.Offset = kDefaultShadowOffset64; | ||||
556 | } | ||||
557 | |||||
558 | if (ClForceDynamicShadow) { | ||||
559 | Mapping.Offset = kDynamicShadowSentinel; | ||||
560 | } | ||||
561 | |||||
562 | if (ClMappingOffset.getNumOccurrences() > 0) { | ||||
563 | Mapping.Offset = ClMappingOffset; | ||||
564 | } | ||||
565 | |||||
566 | // OR-ing the shadow offset is more efficient (at least on x86) if the | ||||
567 | // offset is a power of two, but on ppc64 we have to use add since the | ||||
568 | // shadow offset is not necessarily 1/8-th of the address space. On SystemZ, | ||||
569 | // we could OR the constant in a single instruction, but it's more | ||||
570 | // efficient to load it once and use indexed addressing. | ||||
571 | Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS4CPU && | ||||
572 | !IsRISCV64 && | ||||
573 | !(Mapping.Offset & (Mapping.Offset - 1)) && | ||||
574 | Mapping.Offset != kDynamicShadowSentinel; | ||||
575 | bool IsAndroidWithIfuncSupport = | ||||
576 | IsAndroid && !TargetTriple.isAndroidVersionLT(21); | ||||
577 | Mapping.InGlobal = ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb; | ||||
578 | |||||
579 | return Mapping; | ||||
580 | } | ||||
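// Editor's note (illustrative, not in the original source): the expression
// `!(Mapping.Offset & (Mapping.Offset - 1))` above is the usual
// power-of-two test, e.g.:
static_assert((0x100000000000ULL & (0x100000000000ULL - 1)) == 0,
              "1ULL << 44 is a power of two, so it passes the OR check");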
581 | |||||
582 | namespace llvm { | ||||
583 | void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, | ||||
584 | bool IsKasan, uint64_t *ShadowBase, | ||||
585 | int *MappingScale, bool *OrShadowOffset) { | ||||
586 | auto Mapping = getShadowMapping(TargetTriple, LongSize, IsKasan); | ||||
587 | *ShadowBase = Mapping.Offset; | ||||
588 | *MappingScale = Mapping.Scale; | ||||
589 | *OrShadowOffset = Mapping.OrShadowOffset; | ||||
590 | } | ||||
591 | |||||
592 | ASanAccessInfo::ASanAccessInfo(int32_t Packed) | ||||
593 | : Packed(Packed), | ||||
594 | AccessSizeIndex((Packed >> kAccessSizeIndexShift) & kAccessSizeIndexMask), | ||||
595 | IsWrite((Packed >> kIsWriteShift) & kIsWriteMask), | ||||
596 | CompileKernel((Packed >> kCompileKernelShift) & kCompileKernelMask) {} | ||||
597 | |||||
598 | ASanAccessInfo::ASanAccessInfo(bool IsWrite, bool CompileKernel, | ||||
599 | uint8_t AccessSizeIndex) | ||||
600 | : Packed((IsWrite << kIsWriteShift) + | ||||
601 | (CompileKernel << kCompileKernelShift) + | ||||
602 | (AccessSizeIndex << kAccessSizeIndexShift)), | ||||
603 | AccessSizeIndex(AccessSizeIndex), IsWrite(IsWrite), | ||||
604 | CompileKernel(CompileKernel) {} | ||||
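// Editor's illustration (not in the original source): packing a 4-byte
// kernel read gives Packed = (0 << 5) + (1 << 0) + (2 << 1) = 5, and
// ASanAccessInfo(5) decodes back to AccessSizeIndex = 2, IsWrite = 0,
// CompileKernel = 1.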
605 | |||||
606 | } // namespace llvm | ||||
607 | |||||
608 | static uint64_t getRedzoneSizeForScale(int MappingScale) { | ||||
609 | // Redzone used for stack and globals is at least 32 bytes. | ||||
610 | // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively. | ||||
611 | return std::max(32U, 1U << MappingScale); | ||||
612 | } | ||||
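// Editor's illustration (not in the original source): the default scale 3
// yields max(32, 1 << 3) = 32 bytes, while scales 6 and 7 yield 64 and 128
// bytes respectively, as the comment above says.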
613 | |||||
614 | static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) { | ||||
615 | if (TargetTriple.isOSEmscripten()) { | ||||
616 | return kAsanEmscriptenCtorAndDtorPriority; | ||||
617 | } else { | ||||
618 | return kAsanCtorAndDtorPriority; | ||||
619 | } | ||||
620 | } | ||||
621 | |||||
622 | namespace { | ||||
623 | |||||
624 | /// Module analysis for getting various metadata about the module. | ||||
625 | class ASanGlobalsMetadataWrapperPass : public ModulePass { | ||||
626 | public: | ||||
627 | static char ID; | ||||
628 | |||||
629 | ASanGlobalsMetadataWrapperPass() : ModulePass(ID) { | ||||
630 | initializeASanGlobalsMetadataWrapperPassPass( | ||||
631 | *PassRegistry::getPassRegistry()); | ||||
632 | } | ||||
633 | |||||
634 | bool runOnModule(Module &M) override { | ||||
635 | GlobalsMD = GlobalsMetadata(M); | ||||
636 | return false; | ||||
637 | } | ||||
638 | |||||
639 | StringRef getPassName() const override { | ||||
640 | return "ASanGlobalsMetadataWrapperPass"; | ||||
641 | } | ||||
642 | |||||
643 | void getAnalysisUsage(AnalysisUsage &AU) const override { | ||||
644 | AU.setPreservesAll(); | ||||
645 | } | ||||
646 | |||||
647 | GlobalsMetadata &getGlobalsMD() { return GlobalsMD; } | ||||
648 | |||||
649 | private: | ||||
650 | GlobalsMetadata GlobalsMD; | ||||
651 | }; | ||||
652 | |||||
653 | char ASanGlobalsMetadataWrapperPass::ID = 0; | ||||
654 | |||||
655 | /// AddressSanitizer: instrument the code in module to find memory bugs. | ||||
656 | struct AddressSanitizer { | ||||
657 | AddressSanitizer(Module &M, const GlobalsMetadata *GlobalsMD, | ||||
658 | const StackSafetyGlobalInfo *SSGI, | ||||
659 | bool CompileKernel = false, bool Recover = false, | ||||
660 | bool UseAfterScope = false, | ||||
661 | AsanDetectStackUseAfterReturnMode UseAfterReturn = | ||||
662 | AsanDetectStackUseAfterReturnMode::Runtime) | ||||
663 | : CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan | ||||
664 | : CompileKernel), | ||||
665 | Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover), | ||||
666 | UseAfterScope(UseAfterScope || ClUseAfterScope), | ||||
667 | UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn | ||||
668 | : UseAfterReturn), | ||||
669 | GlobalsMD(*GlobalsMD), SSGI(SSGI) { | ||||
670 | C = &(M.getContext()); | ||||
671 | LongSize = M.getDataLayout().getPointerSizeInBits(); | ||||
672 | IntptrTy = Type::getIntNTy(*C, LongSize); | ||||
673 | Int8PtrTy = Type::getInt8PtrTy(*C); | ||||
674 | Int32Ty = Type::getInt32Ty(*C); | ||||
675 | TargetTriple = Triple(M.getTargetTriple()); | ||||
676 | |||||
677 | Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel); | ||||
678 | |||||
679 | assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid); | ||||
680 | } | ||||
681 | |||||
682 | uint64_t getAllocaSizeInBytes(const AllocaInst &AI) const { | ||||
683 | uint64_t ArraySize = 1; | ||||
684 | if (AI.isArrayAllocation()) { | ||||
685 | const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize()); | ||||
686 | assert(CI && "non-constant array size"); | ||||
687 | ArraySize = CI->getZExtValue(); | ||||
688 | } | ||||
689 | Type *Ty = AI.getAllocatedType(); | ||||
690 | uint64_t SizeInBytes = | ||||
691 | AI.getModule()->getDataLayout().getTypeAllocSize(Ty); | ||||
692 | return SizeInBytes * ArraySize; | ||||
693 | } | ||||
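// Editor's illustration (not in the original source): for
// `%p = alloca [4 x i32], i64 8` this returns 16 * 8 = 128 bytes.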
694 | |||||
695 | /// Check if we want (and can) handle this alloca. | ||||
696 | bool isInterestingAlloca(const AllocaInst &AI); | ||||
697 | |||||
698 | bool ignoreAccess(Instruction *Inst, Value *Ptr); | ||||
699 | void getInterestingMemoryOperands( | ||||
700 | Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting); | ||||
701 | |||||
702 | void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, | ||||
703 | InterestingMemoryOperand &O, bool UseCalls, | ||||
704 | const DataLayout &DL); | ||||
705 | void instrumentPointerComparisonOrSubtraction(Instruction *I); | ||||
706 | void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore, | ||||
707 | Value *Addr, uint32_t TypeSize, bool IsWrite, | ||||
708 | Value *SizeArgument, bool UseCalls, uint32_t Exp); | ||||
709 | Instruction *instrumentAMDGPUAddress(Instruction *OrigIns, | ||||
710 | Instruction *InsertBefore, Value *Addr, | ||||
711 | uint32_t TypeSize, bool IsWrite, | ||||
712 | Value *SizeArgument); | ||||
713 | void instrumentUnusualSizeOrAlignment(Instruction *I, | ||||
714 | Instruction *InsertBefore, Value *Addr, | ||||
715 | uint32_t TypeSize, bool IsWrite, | ||||
716 | Value *SizeArgument, bool UseCalls, | ||||
717 | uint32_t Exp); | ||||
718 | Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, | ||||
719 | Value *ShadowValue, uint32_t TypeSize); | ||||
720 | Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr, | ||||
721 | bool IsWrite, size_t AccessSizeIndex, | ||||
722 | Value *SizeArgument, uint32_t Exp); | ||||
723 | void instrumentMemIntrinsic(MemIntrinsic *MI); | ||||
724 | Value *memToShadow(Value *Shadow, IRBuilder<> &IRB); | ||||
725 | bool suppressInstrumentationSiteForDebug(int &Instrumented); | ||||
726 | bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI); | ||||
727 | bool maybeInsertAsanInitAtFunctionEntry(Function &F); | ||||
728 | bool maybeInsertDynamicShadowAtFunctionEntry(Function &F); | ||||
729 | void markEscapedLocalAllocas(Function &F); | ||||
730 | |||||
731 | private: | ||||
732 | friend struct FunctionStackPoisoner; | ||||
733 | |||||
734 | void initializeCallbacks(Module &M); | ||||
735 | |||||
736 | bool LooksLikeCodeInBug11395(Instruction *I); | ||||
737 | bool GlobalIsLinkerInitialized(GlobalVariable *G); | ||||
738 | bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr, | ||||
739 | uint64_t TypeSize) const; | ||||
740 | |||||
741 | /// Helper to cleanup per-function state. | ||||
742 | struct FunctionStateRAII { | ||||
743 | AddressSanitizer *Pass; | ||||
744 | |||||
745 | FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) { | ||||
746 | assert(Pass->ProcessedAllocas.empty() && | ||||
747 | "last pass forgot to clear cache"); | ||||
748 | assert(!Pass->LocalDynamicShadow); | ||||
749 | } | ||||
750 | |||||
751 | ~FunctionStateRAII() { | ||||
752 | Pass->LocalDynamicShadow = nullptr; | ||||
753 | Pass->ProcessedAllocas.clear(); | ||||
754 | } | ||||
755 | }; | ||||
756 | |||||
757 | LLVMContext *C; | ||||
758 | Triple TargetTriple; | ||||
759 | int LongSize; | ||||
760 | bool CompileKernel; | ||||
761 | bool Recover; | ||||
762 | bool UseAfterScope; | ||||
763 | AsanDetectStackUseAfterReturnMode UseAfterReturn; | ||||
764 | Type *IntptrTy; | ||||
765 | Type *Int8PtrTy; | ||||
766 | Type *Int32Ty; | ||||
767 | ShadowMapping Mapping; | ||||
768 | FunctionCallee AsanHandleNoReturnFunc; | ||||
769 | FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction; | ||||
770 | Constant *AsanShadowGlobal; | ||||
771 | |||||
772 | // These arrays are indexed by AccessIsWrite, Experiment and log2(AccessSize). | ||||
773 | FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes]; | ||||
774 | FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes]; | ||||
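// Editor's note (illustrative, not in the original source): e.g., with
// experiment 0, AsanErrorCallback[1][0][2] ends up bound to the runtime's
// __asan_report_store4 (IsWrite = 1, access size 1 << 2 = 4 bytes).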
775 | |||||
776 | // These arrays are indexed by AccessIsWrite and Experiment. | ||||
777 | FunctionCallee AsanErrorCallbackSized[2][2]; | ||||
778 | FunctionCallee AsanMemoryAccessCallbackSized[2][2]; | ||||
779 | |||||
780 | FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset; | ||||
781 | Value *LocalDynamicShadow = nullptr; | ||||
782 | const GlobalsMetadata &GlobalsMD; | ||||
783 | const StackSafetyGlobalInfo *SSGI; | ||||
784 | DenseMap<const AllocaInst *, bool> ProcessedAllocas; | ||||
785 | |||||
786 | FunctionCallee AMDGPUAddressShared; | ||||
787 | FunctionCallee AMDGPUAddressPrivate; | ||||
788 | }; | ||||
789 | |||||
790 | class AddressSanitizerLegacyPass : public FunctionPass { | ||||
791 | public: | ||||
792 | static char ID; | ||||
793 | |||||
794 | explicit AddressSanitizerLegacyPass( | ||||
795 | bool CompileKernel = false, bool Recover = false, | ||||
796 | bool UseAfterScope = false, | ||||
797 | AsanDetectStackUseAfterReturnMode UseAfterReturn = | ||||
798 | AsanDetectStackUseAfterReturnMode::Runtime) | ||||
799 | : FunctionPass(ID), CompileKernel(CompileKernel), Recover(Recover), | ||||
800 | UseAfterScope(UseAfterScope), UseAfterReturn(UseAfterReturn) { | ||||
801 | initializeAddressSanitizerLegacyPassPass(*PassRegistry::getPassRegistry()); | ||||
802 | } | ||||
803 | |||||
804 | StringRef getPassName() const override { | ||||
805 | return "AddressSanitizerFunctionPass"; | ||||
806 | } | ||||
807 | |||||
808 | void getAnalysisUsage(AnalysisUsage &AU) const override { | ||||
809 | AU.addRequired<ASanGlobalsMetadataWrapperPass>(); | ||||
810 | if (ClUseStackSafety) | ||||
811 | AU.addRequired<StackSafetyGlobalInfoWrapperPass>(); | ||||
812 | AU.addRequired<TargetLibraryInfoWrapperPass>(); | ||||
813 | } | ||||
814 | |||||
815 | bool runOnFunction(Function &F) override { | ||||
816 | GlobalsMetadata &GlobalsMD = | ||||
817 | getAnalysis<ASanGlobalsMetadataWrapperPass>().getGlobalsMD(); | ||||
818 | const StackSafetyGlobalInfo *const SSGI = | ||||
819 | ClUseStackSafety | ||||
820 | ? &getAnalysis<StackSafetyGlobalInfoWrapperPass>().getResult() | ||||
821 | : nullptr; | ||||
822 | const TargetLibraryInfo *TLI = | ||||
823 | &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); | ||||
824 | AddressSanitizer ASan(*F.getParent(), &GlobalsMD, SSGI, CompileKernel, | ||||
825 | Recover, UseAfterScope, UseAfterReturn); | ||||
826 | return ASan.instrumentFunction(F, TLI); | ||||
827 | } | ||||
828 | |||||
829 | private: | ||||
830 | bool CompileKernel; | ||||
831 | bool Recover; | ||||
832 | bool UseAfterScope; | ||||
833 | AsanDetectStackUseAfterReturnMode UseAfterReturn; | ||||
834 | }; | ||||
835 | |||||
836 | class ModuleAddressSanitizer { | ||||
837 | public: | ||||
838 | ModuleAddressSanitizer(Module &M, const GlobalsMetadata *GlobalsMD, | ||||
839 | bool CompileKernel = false, bool Recover = false, | ||||
840 | bool UseGlobalsGC = true, bool UseOdrIndicator = false, | ||||
841 | AsanDtorKind DestructorKind = AsanDtorKind::Global) | ||||
842 | : GlobalsMD(*GlobalsMD), | ||||
843 | CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan | ||||
844 | : CompileKernel), | ||||
845 | Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover), | ||||
846 | UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel), | ||||
847 | // Enable aliases as they should have no downside with ODR indicators. | ||||
848 | UsePrivateAlias(UseOdrIndicator || ClUsePrivateAlias), | ||||
849 | UseOdrIndicator(UseOdrIndicator || ClUseOdrIndicator), | ||||
850 | // Not a typo: ClWithComdat is almost completely pointless without | ||||
851 | // ClUseGlobalsGC (because then it only works on modules without | ||||
852 | // globals, which are rare); it is a prerequisite for ClUseGlobalsGC; | ||||
853 | // and both suffer from gold PR19002 for which UseGlobalsGC constructor | ||||
854 | // argument is designed as workaround. Therefore, disable both | ||||
855 | // ClWithComdat and ClUseGlobalsGC unless the frontend says it's ok to | ||||
856 | // do globals-gc. | ||||
857 | UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel), | ||||
858 | DestructorKind(DestructorKind) { | ||||
859 | C = &(M.getContext()); | ||||
860 | int LongSize = M.getDataLayout().getPointerSizeInBits(); | ||||
861 | IntptrTy = Type::getIntNTy(*C, LongSize); | ||||
862 | TargetTriple = Triple(M.getTargetTriple()); | ||||
863 | Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel); | ||||
864 | |||||
865 | if (ClOverrideDestructorKind != AsanDtorKind::Invalid) | ||||
866 | this->DestructorKind = ClOverrideDestructorKind; | ||||
867 | assert(this->DestructorKind != AsanDtorKind::Invalid); | ||||
868 | } | ||||
869 | |||||
870 | bool instrumentModule(Module &); | ||||
871 | |||||
872 | private: | ||||
873 | void initializeCallbacks(Module &M); | ||||
874 | |||||
875 | bool InstrumentGlobals(IRBuilder<> &IRB, Module &M, bool *CtorComdat); | ||||
876 | void InstrumentGlobalsCOFF(IRBuilder<> &IRB, Module &M, | ||||
877 | ArrayRef<GlobalVariable *> ExtendedGlobals, | ||||
878 | ArrayRef<Constant *> MetadataInitializers); | ||||
879 | void InstrumentGlobalsELF(IRBuilder<> &IRB, Module &M, | ||||
880 | ArrayRef<GlobalVariable *> ExtendedGlobals, | ||||
881 | ArrayRef<Constant *> MetadataInitializers, | ||||
882 | const std::string &UniqueModuleId); | ||||
883 | void InstrumentGlobalsMachO(IRBuilder<> &IRB, Module &M, | ||||
884 | ArrayRef<GlobalVariable *> ExtendedGlobals, | ||||
885 | ArrayRef<Constant *> MetadataInitializers); | ||||
886 | void | ||||
887 | InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB, Module &M, | ||||
888 | ArrayRef<GlobalVariable *> ExtendedGlobals, | ||||
889 | ArrayRef<Constant *> MetadataInitializers); | ||||
890 | |||||
891 | GlobalVariable *CreateMetadataGlobal(Module &M, Constant *Initializer, | ||||
892 | StringRef OriginalName); | ||||
893 | void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata, | ||||
894 | StringRef InternalSuffix); | ||||
895 | Instruction *CreateAsanModuleDtor(Module &M); | ||||
896 | |||||
897 | const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const; | ||||
898 | bool shouldInstrumentGlobal(GlobalVariable *G) const; | ||||
899 | bool ShouldUseMachOGlobalsSection() const; | ||||
900 | StringRef getGlobalMetadataSection() const; | ||||
901 | void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName); | ||||
902 | void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName); | ||||
903 | uint64_t getMinRedzoneSizeForGlobal() const { | ||||
904 | return getRedzoneSizeForScale(Mapping.Scale); | ||||
905 | } | ||||
906 | uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const; | ||||
907 | int GetAsanVersion(const Module &M) const; | ||||
908 | |||||
909 | const GlobalsMetadata &GlobalsMD; | ||||
910 | bool CompileKernel; | ||||
911 | bool Recover; | ||||
912 | bool UseGlobalsGC; | ||||
913 | bool UsePrivateAlias; | ||||
914 | bool UseOdrIndicator; | ||||
915 | bool UseCtorComdat; | ||||
916 | AsanDtorKind DestructorKind; | ||||
917 | Type *IntptrTy; | ||||
918 | LLVMContext *C; | ||||
919 | Triple TargetTriple; | ||||
920 | ShadowMapping Mapping; | ||||
921 | FunctionCallee AsanPoisonGlobals; | ||||
922 | FunctionCallee AsanUnpoisonGlobals; | ||||
923 | FunctionCallee AsanRegisterGlobals; | ||||
924 | FunctionCallee AsanUnregisterGlobals; | ||||
925 | FunctionCallee AsanRegisterImageGlobals; | ||||
926 | FunctionCallee AsanUnregisterImageGlobals; | ||||
927 | FunctionCallee AsanRegisterElfGlobals; | ||||
928 | FunctionCallee AsanUnregisterElfGlobals; | ||||
929 | |||||
930 | Function *AsanCtorFunction = nullptr; | ||||
931 | Function *AsanDtorFunction = nullptr; | ||||
932 | }; | ||||
933 | |||||
934 | class ModuleAddressSanitizerLegacyPass : public ModulePass { | ||||
935 | public: | ||||
936 | static char ID; | ||||
937 | |||||
938 | explicit ModuleAddressSanitizerLegacyPass( | ||||
939 | bool CompileKernel = false, bool Recover = false, bool UseGlobalGC = true, | ||||
940 | bool UseOdrIndicator = false, | ||||
941 | AsanDtorKind DestructorKind = AsanDtorKind::Global) | ||||
942 | : ModulePass(ID), CompileKernel(CompileKernel), Recover(Recover), | ||||
943 | UseGlobalGC(UseGlobalGC), UseOdrIndicator(UseOdrIndicator), | ||||
944 | DestructorKind(DestructorKind) { | ||||
945 | initializeModuleAddressSanitizerLegacyPassPass( | ||||
946 | *PassRegistry::getPassRegistry()); | ||||
947 | } | ||||
948 | |||||
949 | StringRef getPassName() const override { return "ModuleAddressSanitizer"; } | ||||
950 | |||||
951 | void getAnalysisUsage(AnalysisUsage &AU) const override { | ||||
952 | AU.addRequired<ASanGlobalsMetadataWrapperPass>(); | ||||
953 | } | ||||
954 | |||||
955 | bool runOnModule(Module &M) override { | ||||
956 | GlobalsMetadata &GlobalsMD = | ||||
957 | getAnalysis<ASanGlobalsMetadataWrapperPass>().getGlobalsMD(); | ||||
958 | ModuleAddressSanitizer ASanModule(M, &GlobalsMD, CompileKernel, Recover, | ||||
959 | UseGlobalGC, UseOdrIndicator, | ||||
960 | DestructorKind); | ||||
961 | return ASanModule.instrumentModule(M); | ||||
962 | } | ||||
963 | |||||
964 | private: | ||||
965 | bool CompileKernel; | ||||
966 | bool Recover; | ||||
967 | bool UseGlobalGC; | ||||
968 | bool UseOdrIndicator; | ||||
969 | AsanDtorKind DestructorKind; | ||||
970 | }; | ||||
971 | |||||
972 | // Stack poisoning does not play well with exception handling. | ||||
973 | // When an exception is thrown, we essentially bypass the code | ||||
974 | // that unpoisons the stack. This is why the run-time library has | ||||
975 | // to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire | ||||
976 | // stack in the interceptor. This however does not work inside the | ||||
977 | // actual function which catches the exception. Most likely because the | ||||
978 | // compiler hoists the load of the shadow value somewhere too high. | ||||
979 | // This causes asan to report a non-existing bug on 453.povray. | ||||
980 | // It sounds like an LLVM bug. | ||||
981 | struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> { | ||||
982 | Function &F; | ||||
983 | AddressSanitizer &ASan; | ||||
984 | DIBuilder DIB; | ||||
985 | LLVMContext *C; | ||||
986 | Type *IntptrTy; | ||||
987 | Type *IntptrPtrTy; | ||||
988 | ShadowMapping Mapping; | ||||
989 | |||||
990 | SmallVector<AllocaInst *, 16> AllocaVec; | ||||
991 | SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp; | ||||
992 | SmallVector<Instruction *, 8> RetVec; | ||||
993 | |||||
994 | FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1], | ||||
995 | AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1]; | ||||
996 | FunctionCallee AsanSetShadowFunc[0x100] = {}; | ||||
997 | FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc; | ||||
998 | FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc; | ||||
999 | |||||
1000 | // Stores a place and arguments of poisoning/unpoisoning call for alloca. | ||||
1001 | struct AllocaPoisonCall { | ||||
1002 | IntrinsicInst *InsBefore; | ||||
1003 | AllocaInst *AI; | ||||
1004 | uint64_t Size; | ||||
1005 | bool DoPoison; | ||||
1006 | }; | ||||
1007 | SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec; | ||||
1008 | SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec; | ||||
1009 | bool HasUntracedLifetimeIntrinsic = false; | ||||
1010 | |||||
1011 | SmallVector<AllocaInst *, 1> DynamicAllocaVec; | ||||
1012 | SmallVector<IntrinsicInst *, 1> StackRestoreVec; | ||||
1013 | AllocaInst *DynamicAllocaLayout = nullptr; | ||||
1014 | IntrinsicInst *LocalEscapeCall = nullptr; | ||||
1015 | |||||
1016 | bool HasInlineAsm = false; | ||||
1017 | bool HasReturnsTwiceCall = false; | ||||
1018 | bool PoisonStack; | ||||
1019 | |||||
1020 | FunctionStackPoisoner(Function &F, AddressSanitizer &ASan) | ||||
1021 | : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false), | ||||
1022 | C(ASan.C), IntptrTy(ASan.IntptrTy), | ||||
1023 | IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping), | ||||
1024 | PoisonStack(ClStack && | ||||
1025 | !Triple(F.getParent()->getTargetTriple()).isAMDGPU()) {} | ||||
1026 | |||||
1027 | bool runOnFunction() { | ||||
1028 | if (!PoisonStack) | ||||
1029 | return false; | ||||
1030 | |||||
1031 | if (ClRedzoneByvalArgs) | ||||
1032 | copyArgsPassedByValToAllocas(); | ||||
1033 | |||||
1034 | // Collect alloca, ret, lifetime instructions etc. | ||||
1035 | for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB); | ||||
1036 | |||||
1037 | if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false; | ||||
1038 | |||||
1039 | initializeCallbacks(*F.getParent()); | ||||
1040 | |||||
1041 | if (HasUntracedLifetimeIntrinsic) { | ||||
1042 | // If there are lifetime intrinsics which couldn't be traced back to an | ||||
1043 | // alloca, we may not know exactly when a variable enters scope, and | ||||
1044 | // therefore should "fail safe" by not poisoning them. | ||||
1045 | StaticAllocaPoisonCallVec.clear(); | ||||
1046 | DynamicAllocaPoisonCallVec.clear(); | ||||
1047 | } | ||||
1048 | |||||
1049 | processDynamicAllocas(); | ||||
1050 | processStaticAllocas(); | ||||
1051 | |||||
1052 | if (ClDebugStack) { | ||||
1053 | LLVM_DEBUG(dbgs() << F); | ||||
1054 | } | ||||
1055 | return true; | ||||
1056 | } | ||||
1057 | |||||
1058 | // Arguments marked with the "byval" attribute are implicitly copied without | ||||
1059 | // using an alloca instruction. To produce redzones for those arguments, we | ||||
1060 | // copy them a second time into memory allocated with an alloca instruction. | ||||
1061 | void copyArgsPassedByValToAllocas(); | ||||
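// Editor's illustration (not in the original source): conceptually,
//   define void @f(ptr byval(%struct.S) %arg) { ...uses of %arg... }
// becomes
//   %copy = alloca %struct.S   ; gets redzones like any other alloca
//   call void @llvm.memcpy(... %copy, %arg, sizeof(%struct.S) ...)
//   ...uses of %arg replaced with %copy...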
1062 | |||||
1063 | // Finds all Alloca instructions and puts | ||||
1064 | // poisoned red zones around all of them. | ||||
1065 | // Then unpoison everything back before the function returns. | ||||
1066 | void processStaticAllocas(); | ||||
1067 | void processDynamicAllocas(); | ||||
1068 | |||||
1069 | void createDynamicAllocasInitStorage(); | ||||
1070 | |||||
1071 | // ----------------------- Visitors. | ||||
1072 | /// Collect all Ret instructions, or the musttail call instruction if it | ||||
1073 | /// precedes the return instruction. | ||||
1074 | void visitReturnInst(ReturnInst &RI) { | ||||
1075 | if (CallInst *CI = RI.getParent()->getTerminatingMustTailCall()) | ||||
1076 | RetVec.push_back(CI); | ||||
1077 | else | ||||
1078 | RetVec.push_back(&RI); | ||||
1079 | } | ||||
1080 | |||||
1081 | /// Collect all Resume instructions. | ||||
1082 | void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); } | ||||
1083 | |||||
1084 | /// Collect all CleanupReturnInst instructions. | ||||
1085 | void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); } | ||||
1086 | |||||
1087 | void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore, | ||||
1088 | Value *SavedStack) { | ||||
1089 | IRBuilder<> IRB(InstBefore); | ||||
1090 | Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy); | ||||
1091 | // When we insert __asan_allocas_unpoison before @llvm.stackrestore, we | ||||
1092 | // need to adjust extracted SP to compute the address of the most recent | ||||
1093 | // alloca. We have a special @llvm.get.dynamic.area.offset intrinsic for | ||||
1094 | // this purpose. | ||||
1095 | if (!isa<ReturnInst>(InstBefore)) { | ||||
1096 | Function *DynamicAreaOffsetFunc = Intrinsic::getDeclaration( | ||||
1097 | InstBefore->getModule(), Intrinsic::get_dynamic_area_offset, | ||||
1098 | {IntptrTy}); | ||||
1099 | |||||
1100 | Value *DynamicAreaOffset = IRB.CreateCall(DynamicAreaOffsetFunc, {}); | ||||
1101 | |||||
1102 | DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy), | ||||
1103 | DynamicAreaOffset); | ||||
1104 | } | ||||
1105 | |||||
1106 | IRB.CreateCall( | ||||
1107 | AsanAllocasUnpoisonFunc, | ||||
1108 | {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr}); | ||||
1109 | } | ||||
1110 | |||||
1111 | // Unpoison dynamic allocas redzones. | ||||
1112 | void unpoisonDynamicAllocas() { | ||||
1113 | for (Instruction *Ret : RetVec) | ||||
1114 | unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout); | ||||
1115 | |||||
1116 | for (Instruction *StackRestoreInst : StackRestoreVec) | ||||
1117 | unpoisonDynamicAllocasBeforeInst(StackRestoreInst, | ||||
1118 | StackRestoreInst->getOperand(0)); | ||||
1119 | } | ||||
1120 | |||||
1121 | // Put (and poison) redzones around a dynamic alloca call. To do this, we | ||||
1122 | // replace the call with a new one with changed parameters and replace all | ||||
1123 | // of its uses with the new address, so | ||||
1124 | // addr = alloca type, old_size, align | ||||
1125 | // is replaced by | ||||
1126 | // new_size = (old_size + additional_size) * sizeof(type) | ||||
1127 | // tmp = alloca i8, new_size, max(align, 32) | ||||
1128 | // addr = tmp + 32 (first 32 bytes are for the left redzone). | ||||
1129 | // additional_size is added so that the new allocation contains not only | ||||
1130 | // the requested memory, but also the left, partial and right redzones. | ||||
1131 | void handleDynamicAllocaCall(AllocaInst *AI); | ||||
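// Illustrative sketch (added annotation, not in the source): for a
// hypothetical dynamic alloca of i32 elements, the rewrite above turns
//
//   %addr = alloca i32, i64 %n, align 4
//
// into, roughly,
//
//   %size = ... ; %n * 4 plus left, partial and right redzone bytes
//   %tmp  = alloca i8, i64 %size, align 32
//   %addr = %tmp + 32 ; the first 32 bytes form the left redzone
//
// after which handleDynamicAllocaCall poisons the redzone shadow.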
1132 | |||||
1133 | /// Collect Alloca instructions we want (and can) handle. | ||||
1134 | void visitAllocaInst(AllocaInst &AI) { | ||||
1135 | if (!ASan.isInterestingAlloca(AI)) { | ||||
1136 | if (AI.isStaticAlloca()) { | ||||
1137 | // Skip over allocas that are present *before* the first instrumented | ||||
1138 | // alloca, we don't want to move those around. | ||||
1139 | if (AllocaVec.empty()) | ||||
1140 | return; | ||||
1141 | |||||
1142 | StaticAllocasToMoveUp.push_back(&AI); | ||||
1143 | } | ||||
1144 | return; | ||||
1145 | } | ||||
1146 | |||||
1147 | if (!AI.isStaticAlloca()) | ||||
1148 | DynamicAllocaVec.push_back(&AI); | ||||
1149 | else | ||||
1150 | AllocaVec.push_back(&AI); | ||||
1151 | } | ||||
1152 | |||||
1153 | /// Collect lifetime intrinsic calls to check for use-after-scope | ||||
1154 | /// errors. | ||||
1155 | void visitIntrinsicInst(IntrinsicInst &II) { | ||||
1156 | Intrinsic::ID ID = II.getIntrinsicID(); | ||||
1157 | if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II); | ||||
1158 | if (ID == Intrinsic::localescape) LocalEscapeCall = &II; | ||||
1159 | if (!ASan.UseAfterScope) | ||||
1160 | return; | ||||
1161 | if (!II.isLifetimeStartOrEnd()) | ||||
1162 | return; | ||||
1163 | // Found lifetime intrinsic, add ASan instrumentation if necessary. | ||||
1164 | auto *Size = cast<ConstantInt>(II.getArgOperand(0)); | ||||
1165 | // If size argument is undefined, don't do anything. | ||||
1166 | if (Size->isMinusOne()) return; | ||||
1167 | // Check that size doesn't saturate uint64_t and can | ||||
1168 | // be stored in IntptrTy. | ||||
1169 | const uint64_t SizeValue = Size->getValue().getLimitedValue(); | ||||
1170 | if (SizeValue == ~0ULL || | ||||
1171 | !ConstantInt::isValueValidForType(IntptrTy, SizeValue)) | ||||
1172 | return; | ||||
1173 | // Find alloca instruction that corresponds to llvm.lifetime argument. | ||||
1174 | // Currently we can only handle lifetime markers pointing to the | ||||
1175 | // beginning of the alloca. | ||||
1176 | AllocaInst *AI = findAllocaForValue(II.getArgOperand(1), true); | ||||
1177 | if (!AI) { | ||||
1178 | HasUntracedLifetimeIntrinsic = true; | ||||
1179 | return; | ||||
1180 | } | ||||
1181 | // We're interested only in allocas we can handle. | ||||
1182 | if (!ASan.isInterestingAlloca(*AI)) | ||||
1183 | return; | ||||
1184 | bool DoPoison = (ID == Intrinsic::lifetime_end); | ||||
1185 | AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison}; | ||||
1186 | if (AI->isStaticAlloca()) | ||||
1187 | StaticAllocaPoisonCallVec.push_back(APC); | ||||
1188 | else if (ClInstrumentDynamicAllocas) | ||||
1189 | DynamicAllocaPoisonCallVec.push_back(APC); | ||||
1190 | } | ||||
1191 | |||||
1192 | void visitCallBase(CallBase &CB) { | ||||
1193 | if (CallInst *CI = dyn_cast<CallInst>(&CB)) { | ||||
1194 | HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow; | ||||
1195 | HasReturnsTwiceCall |= CI->canReturnTwice(); | ||||
1196 | } | ||||
1197 | } | ||||
1198 | |||||
1199 | // ---------------------- Helpers. | ||||
1200 | void initializeCallbacks(Module &M); | ||||
1201 | |||||
1202 | // Copies bytes from ShadowBytes into shadow memory for indexes where | ||||
1203 | // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that | ||||
1204 | // ShadowBytes[i] is constantly zero and doesn't need to be overwritten. | ||||
1205 | void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes, | ||||
1206 | IRBuilder<> &IRB, Value *ShadowBase); | ||||
1207 | void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes, | ||||
1208 | size_t Begin, size_t End, IRBuilder<> &IRB, | ||||
1209 | Value *ShadowBase); | ||||
1210 | void copyToShadowInline(ArrayRef<uint8_t> ShadowMask, | ||||
1211 | ArrayRef<uint8_t> ShadowBytes, size_t Begin, | ||||
1212 | size_t End, IRBuilder<> &IRB, Value *ShadowBase); | ||||
1213 | |||||
1214 | void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison); | ||||
1215 | |||||
1216 | Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L, | ||||
1217 | bool Dynamic); | ||||
1218 | PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue, | ||||
1219 | Instruction *ThenTerm, Value *ValueIfFalse); | ||||
1220 | }; | ||||
1221 | |||||
1222 | } // end anonymous namespace | ||||
1223 | |||||
1224 | void LocationMetadata::parse(MDNode *MDN) { | ||||
1225 | assert(MDN->getNumOperands() == 3); | ||||
1226 | MDString *DIFilename = cast<MDString>(MDN->getOperand(0)); | ||||
1227 | Filename = DIFilename->getString(); | ||||
1228 | LineNo = mdconst::extract<ConstantInt>(MDN->getOperand(1))->getLimitedValue(); | ||||
1229 | ColumnNo = | ||||
1230 | mdconst::extract<ConstantInt>(MDN->getOperand(2))->getLimitedValue(); | ||||
1231 | } | ||||
1232 | |||||
1233 | // FIXME: It would be cleaner to attach the relevant metadata directly to the | ||||
1234 | // globals we want to sanitize and read it on each pass over a function, | ||||
1235 | // instead of reading module-level metadata up front. | ||||
1236 | GlobalsMetadata::GlobalsMetadata(Module &M) { | ||||
1237 | NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals"); | ||||
1238 | if (!Globals) | ||||
1239 | return; | ||||
1240 | for (auto MDN : Globals->operands()) { | ||||
1241 | // Metadata node contains the global and the fields of "Entry". | ||||
1242 | assert(MDN->getNumOperands() == 5); | ||||
1243 | auto *V = mdconst::extract_or_null<Constant>(MDN->getOperand(0)); | ||||
1244 | // The optimizer may optimize away a global entirely. | ||||
1245 | if (!V) | ||||
1246 | continue; | ||||
1247 | auto *StrippedV = V->stripPointerCasts(); | ||||
1248 | auto *GV = dyn_cast<GlobalVariable>(StrippedV); | ||||
1249 | if (!GV) | ||||
1250 | continue; | ||||
1251 | // We can already have an entry for GV if it was merged with another | ||||
1252 | // global. | ||||
1253 | Entry &E = Entries[GV]; | ||||
1254 | if (auto *Loc = cast_or_null<MDNode>(MDN->getOperand(1))) | ||||
1255 | E.SourceLoc.parse(Loc); | ||||
1256 | if (auto *Name = cast_or_null<MDString>(MDN->getOperand(2))) | ||||
1257 | E.Name = Name->getString(); | ||||
1258 | ConstantInt *IsDynInit = mdconst::extract<ConstantInt>(MDN->getOperand(3)); | ||||
1259 | E.IsDynInit |= IsDynInit->isOne(); | ||||
1260 | ConstantInt *IsExcluded = | ||||
1261 | mdconst::extract<ConstantInt>(MDN->getOperand(4)); | ||||
1262 | E.IsExcluded |= IsExcluded->isOne(); | ||||
1263 | } | ||||
1264 | } | ||||
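// For reference, an illustrative (hypothetical) module-level entry of the
// shape parsed above, in textual IR:
//
//   !llvm.asan.globals = !{!0}
//   !0 = !{[4 x i8]* @g, !1, !"g", i1 false, i1 false}
//   !1 = !{!"a.cpp", i32 3, i32 5} ; LocationMetadata: file, line, column
//
// Operand order: global, source location, name, is_dyn_init, is_excluded.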
1265 | |||||
1266 | AnalysisKey ASanGlobalsMetadataAnalysis::Key; | ||||
1267 | |||||
1268 | GlobalsMetadata ASanGlobalsMetadataAnalysis::run(Module &M, | ||||
1269 | ModuleAnalysisManager &AM) { | ||||
1270 | return GlobalsMetadata(M); | ||||
1271 | } | ||||
1272 | |||||
1273 | PreservedAnalyses AddressSanitizerPass::run(Function &F, | ||||
1274 | AnalysisManager<Function> &AM) { | ||||
1275 | auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F); | ||||
1276 | Module &M = *F.getParent(); | ||||
1277 | if (auto *R = MAMProxy.getCachedResult<ASanGlobalsMetadataAnalysis>(M)) { | ||||
1278 | const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F); | ||||
1279 | AddressSanitizer Sanitizer(M, R, nullptr, Options.CompileKernel, | ||||
1280 | Options.Recover, Options.UseAfterScope, | ||||
1281 | Options.UseAfterReturn); | ||||
1282 | if (Sanitizer.instrumentFunction(F, TLI)) | ||||
1283 | return PreservedAnalyses::none(); | ||||
1284 | return PreservedAnalyses::all(); | ||||
1285 | } | ||||
1286 | |||||
1287 | report_fatal_error( | ||||
1288 | "The ASanGlobalsMetadataAnalysis is required to run before " | ||||
1289 | "AddressSanitizer can run"); | ||||
1290 | return PreservedAnalyses::all(); | ||||
1291 | } | ||||
1292 | |||||
1293 | void AddressSanitizerPass::printPipeline( | ||||
1294 | raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) { | ||||
1295 | static_cast<PassInfoMixin<AddressSanitizerPass> *>(this)->printPipeline( | ||||
1296 | OS, MapClassName2PassName); | ||||
1297 | OS << "<"; | ||||
1298 | if (Options.CompileKernel) | ||||
1299 | OS << "kernel"; | ||||
1300 | OS << ">"; | ||||
1301 | } | ||||
1302 | |||||
1303 | void ModuleAddressSanitizerPass::printPipeline( | ||||
1304 | raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) { | ||||
1305 | static_cast<PassInfoMixin<ModuleAddressSanitizerPass> *>(this)->printPipeline( | ||||
1306 | OS, MapClassName2PassName); | ||||
1307 | OS << "<"; | ||||
1308 | if (Options.CompileKernel) | ||||
1309 | OS << "kernel"; | ||||
1310 | OS << ">"; | ||||
1311 | } | ||||
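// E.g. with Options.CompileKernel set, -print-pipeline-passes would render
// these as "asan<kernel>" and "asan-module<kernel>"; without it, the
// parameter list prints empty, e.g. "asan<>".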
1312 | |||||
1313 | ModuleAddressSanitizerPass::ModuleAddressSanitizerPass( | ||||
1314 | const AddressSanitizerOptions &Options, bool UseGlobalGC, | ||||
1315 | bool UseOdrIndicator, AsanDtorKind DestructorKind) | ||||
1316 | : Options(Options), UseGlobalGC(UseGlobalGC), | ||||
1317 | UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind) {} | ||||
1318 | |||||
1319 | PreservedAnalyses ModuleAddressSanitizerPass::run(Module &M, | ||||
1320 | ModuleAnalysisManager &MAM) { | ||||
1321 | GlobalsMetadata &GlobalsMD = MAM.getResult<ASanGlobalsMetadataAnalysis>(M); | ||||
1322 | ModuleAddressSanitizer ModuleSanitizer(M, &GlobalsMD, Options.CompileKernel, | ||||
1323 | Options.Recover, UseGlobalGC, | ||||
1324 | UseOdrIndicator, DestructorKind); | ||||
1325 | bool Modified = false; | ||||
1326 | auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); | ||||
1327 | const StackSafetyGlobalInfo *const SSGI = | ||||
1328 | ClUseStackSafety ? &MAM.getResult<StackSafetyGlobalAnalysis>(M) : nullptr; | ||||
1329 | for (Function &F : M) { | ||||
1330 | AddressSanitizer FunctionSanitizer( | ||||
1331 | M, &GlobalsMD, SSGI, Options.CompileKernel, Options.Recover, | ||||
1332 | Options.UseAfterScope, Options.UseAfterReturn); | ||||
1333 | const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F); | ||||
1334 | Modified |= FunctionSanitizer.instrumentFunction(F, &TLI); | ||||
1335 | } | ||||
1336 | Modified |= ModuleSanitizer.instrumentModule(M); | ||||
1337 | return Modified ? PreservedAnalyses::none() : PreservedAnalyses::all(); | ||||
1338 | } | ||||
1339 | |||||
1340 | INITIALIZE_PASS(ASanGlobalsMetadataWrapperPass, "asan-globals-md", | ||||
1341 |                 "Read metadata to mark which globals should be instrumented " | ||||
1342 |                 "when running ASan.", | ||||
1343 |                 false, true) | ||||
1344 | |||||
1345 | char AddressSanitizerLegacyPass::ID = 0; | ||||
1346 | |||||
1347 | INITIALIZE_PASS_BEGIN( | ||||
1348 |     AddressSanitizerLegacyPass, "asan", | ||||
1349 |     "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false, | ||||
1350 |     false) | ||||
1351 | INITIALIZE_PASS_DEPENDENCY(ASanGlobalsMetadataWrapperPass) | ||||
1352 | INITIALIZE_PASS_DEPENDENCY(StackSafetyGlobalInfoWrapperPass) | ||||
1353 | INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) | ||||
1354 | INITIALIZE_PASS_END( | ||||
1355 |     AddressSanitizerLegacyPass, "asan", | ||||
1356 |     "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false, | ||||
1357 |     false) | ||||
1358 | |||||
1359 | FunctionPass *llvm::createAddressSanitizerFunctionPass( | ||||
1360 | bool CompileKernel, bool Recover, bool UseAfterScope, | ||||
1361 | AsanDetectStackUseAfterReturnMode UseAfterReturn) { | ||||
1362 | assert(!CompileKernel || Recover); | ||||
1363 | return new AddressSanitizerLegacyPass(CompileKernel, Recover, UseAfterScope, | ||||
1364 | UseAfterReturn); | ||||
1365 | } | ||||
1366 | |||||
1367 | char ModuleAddressSanitizerLegacyPass::ID = 0; | ||||
1368 | |||||
1369 | INITIALIZE_PASS( | ||||
1370 |     ModuleAddressSanitizerLegacyPass, "asan-module", | ||||
1371 |     "AddressSanitizer: detects use-after-free and out-of-bounds bugs." | ||||
1372 |     "ModulePass", | ||||
1373 |     false, false) | ||||
1374 | |||||
1375 | ModulePass *llvm::createModuleAddressSanitizerLegacyPassPass( | ||||
1376 | bool CompileKernel, bool Recover, bool UseGlobalsGC, bool UseOdrIndicator, | ||||
1377 | AsanDtorKind Destructor) { | ||||
1378 | assert(!CompileKernel || Recover); | ||||
1379 | return new ModuleAddressSanitizerLegacyPass( | ||||
1380 | CompileKernel, Recover, UseGlobalsGC, UseOdrIndicator, Destructor); | ||||
1381 | } | ||||
1382 | |||||
1383 | static size_t TypeSizeToSizeIndex(uint32_t TypeSize) { | ||||
1384 | size_t Res = countTrailingZeros(TypeSize / 8); | ||||
1385 | assert(Res < kNumberOfAccessSizes); | ||||
1386 | return Res; | ||||
1387 | } | ||||
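// E.g. TypeSize values of 8, 16, 32, 64 and 128 bits map to indexes
// 0 through 4, used below to select among the per-size callbacks
// (__asan_report_load1 ... __asan_report_load16 and friends).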
1388 | |||||
1389 | /// Create a global describing a source location. | ||||
1390 | static GlobalVariable *createPrivateGlobalForSourceLoc(Module &M, | ||||
1391 | LocationMetadata MD) { | ||||
1392 | Constant *LocData[] = { | ||||
1393 | createPrivateGlobalForString(M, MD.Filename, true, kAsanGenPrefix), | ||||
1394 | ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.LineNo), | ||||
1395 | ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.ColumnNo), | ||||
1396 | }; | ||||
1397 | auto LocStruct = ConstantStruct::getAnon(LocData); | ||||
1398 | auto GV = new GlobalVariable(M, LocStruct->getType(), true, | ||||
1399 | GlobalValue::PrivateLinkage, LocStruct, | ||||
1400 | kAsanGenPrefix); | ||||
1401 | GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); | ||||
1402 | return GV; | ||||
1403 | } | ||||
1404 | |||||
1405 | /// Check if \p G has been created by a trusted compiler pass. | ||||
1406 | static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) { | ||||
1407 | // Do not instrument @llvm.global_ctors, @llvm.used, etc. | ||||
1408 | if (G->getName().startswith("llvm.")) | ||||
1409 | return true; | ||||
1410 | |||||
1411 | // Do not instrument asan globals. | ||||
1412 | if (G->getName().startswith(kAsanGenPrefix) || | ||||
1413 | G->getName().startswith(kSanCovGenPrefix) || | ||||
1414 | G->getName().startswith(kODRGenPrefix)) | ||||
1415 | return true; | ||||
1416 | |||||
1417 | // Do not instrument gcov counter arrays. | ||||
1418 | if (G->getName() == "__llvm_gcov_ctr") | ||||
1419 | return true; | ||||
1420 | |||||
1421 | return false; | ||||
1422 | } | ||||
1423 | |||||
1424 | static bool isUnsupportedAMDGPUAddrspace(Value *Addr) { | ||||
1425 | Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType()); | ||||
1426 | unsigned int AddrSpace = PtrTy->getPointerAddressSpace(); | ||||
1427 | if (AddrSpace == 3 || AddrSpace == 5) // LDS (local) and private (scratch) | ||||
1428 | return true; | ||||
1429 | return false; | ||||
1430 | } | ||||
1431 | |||||
1432 | Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) { | ||||
1433 | // Shadow >> scale | ||||
1434 | Shadow = IRB.CreateLShr(Shadow, Mapping.Scale); | ||||
1435 | if (Mapping.Offset == 0) return Shadow; | ||||
1436 | // (Shadow >> scale) | offset | ||||
1437 | Value *ShadowBase; | ||||
1438 | if (LocalDynamicShadow) | ||||
1439 | ShadowBase = LocalDynamicShadow; | ||||
1440 | else | ||||
1441 | ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset); | ||||
1442 | if (Mapping.OrShadowOffset) | ||||
1443 | return IRB.CreateOr(Shadow, ShadowBase); | ||||
1444 | else | ||||
1445 | return IRB.CreateAdd(Shadow, ShadowBase); | ||||
1446 | } | ||||
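// Added sketch (not part of this pass): the computation above with the
// default x86-64 parameters Scale = 3, Offset = 0x7fff8000 and
// OrShadowOffset = false; the name exampleMemToShadow is hypothetical.
#include <cstdint>
static inline uint64_t exampleMemToShadow(uint64_t Addr) {
  // (Addr >> Scale) + Offset; e.g. Addr = 0x10000 -> shadow byte 0x7fffa000.
  return (Addr >> 3) + 0x7fff8000ULL;
}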
1447 | |||||
1448 | // Instrument memset/memmove/memcpy | ||||
1449 | void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) { | ||||
1450 | IRBuilder<> IRB(MI); | ||||
1451 | if (isa<MemTransferInst>(MI)) { | ||||
1452 | IRB.CreateCall( | ||||
1453 | isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy, | ||||
1454 | {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()), | ||||
1455 | IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()), | ||||
1456 | IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)}); | ||||
1457 | } else if (isa<MemSetInst>(MI)) { | ||||
1458 | IRB.CreateCall( | ||||
1459 | AsanMemset, | ||||
1460 | {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()), | ||||
1461 | IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false), | ||||
1462 | IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)}); | ||||
1463 | } | ||||
1464 | MI->eraseFromParent(); | ||||
1465 | } | ||||
1466 | |||||
1467 | /// Check if we want (and can) handle this alloca. | ||||
1468 | bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) { | ||||
1469 | auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI); | ||||
1470 | |||||
1471 | if (PreviouslySeenAllocaInfo != ProcessedAllocas.end()) | ||||
1472 | return PreviouslySeenAllocaInfo->getSecond(); | ||||
1473 | |||||
1474 | bool IsInteresting = | ||||
1475 | (AI.getAllocatedType()->isSized() && | ||||
1476 | // alloca() may be called with 0 size, ignore it. | ||||
1477 | ((!AI.isStaticAlloca()) || getAllocaSizeInBytes(AI) > 0) && | ||||
1478 | // We are only interested in allocas not promotable to registers. | ||||
1479 | // Promotable allocas are common under -O0. | ||||
1480 | (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) && | ||||
1481 | // inalloca allocas are not treated as static, and we don't want | ||||
1482 | // dynamic alloca instrumentation for them as well. | ||||
1483 | !AI.isUsedWithInAlloca() && | ||||
1484 | // swifterror allocas are register promoted by ISel | ||||
1485 | !AI.isSwiftError()); | ||||
1486 | |||||
1487 | ProcessedAllocas[&AI] = IsInteresting; | ||||
1488 | return IsInteresting; | ||||
1489 | } | ||||
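// E.g. with ClSkipPromotableAllocas (the default), a simple scalar local
// that mem2reg could promote is cached as "not interesting", which keeps
// -O0 instrumented binaries fast: such accesses cannot violate memory.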
1490 | |||||
1491 | bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) { | ||||
1492 | // Instrument accesses from different address spaces only for AMDGPU. | ||||
1493 | Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType()); | ||||
1494 | if (PtrTy->getPointerAddressSpace() != 0 && | ||||
1495 | !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(Ptr))) | ||||
1496 | return true; | ||||
1497 | |||||
1498 | // Ignore swifterror addresses. | ||||
1499 | // swifterror memory addresses are mem2reg promoted by instruction | ||||
1500 | // selection. As such they cannot have regular uses like an instrumentation | ||||
1501 | // function and it makes no sense to track them as memory. | ||||
1502 | if (Ptr->isSwiftError()) | ||||
1503 | return true; | ||||
1504 | |||||
1505 | // Treat memory accesses to promotable allocas as non-interesting since they | ||||
1506 | // will not cause memory violations. This greatly speeds up the instrumented | ||||
1507 | // executable at -O0. | ||||
1508 | if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr)) | ||||
1509 | if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI)) | ||||
1510 | return true; | ||||
1511 | |||||
1512 | if (SSGI != nullptr && SSGI->stackAccessIsSafe(*Inst) && | ||||
1513 |     findAllocaForValue(Ptr)) | ||||
1514 | return true; | ||||
1515 | |||||
1516 | return false; | ||||
1517 | } | ||||
1518 | |||||
1519 | void AddressSanitizer::getInterestingMemoryOperands( | ||||
1520 | Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) { | ||||
1521 | // Skip memory accesses inserted by another instrumentation. | ||||
1522 | if (I->hasMetadata("nosanitize")) | ||||
1523 | return; | ||||
1524 | |||||
1525 | // Do not instrument the load fetching the dynamic shadow address. | ||||
1526 | if (LocalDynamicShadow == I) | ||||
1527 | return; | ||||
1528 | |||||
1529 | if (LoadInst *LI = dyn_cast<LoadInst>(I)) { | ||||
1530 | if (!ClInstrumentReads || ignoreAccess(LI, LI->getPointerOperand())) | ||||
1531 | return; | ||||
1532 | Interesting.emplace_back(I, LI->getPointerOperandIndex(), false, | ||||
1533 | LI->getType(), LI->getAlign()); | ||||
1534 | } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { | ||||
1535 | if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand())) | ||||
1536 | return; | ||||
1537 | Interesting.emplace_back(I, SI->getPointerOperandIndex(), true, | ||||
1538 | SI->getValueOperand()->getType(), SI->getAlign()); | ||||
1539 | } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) { | ||||
1540 | if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand())) | ||||
1541 | return; | ||||
1542 | Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true, | ||||
1543 | RMW->getValOperand()->getType(), None); | ||||
1544 | } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) { | ||||
1545 | if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand())) | ||||
1546 | return; | ||||
1547 | Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true, | ||||
1548 | XCHG->getCompareOperand()->getType(), None); | ||||
1549 | } else if (auto CI = dyn_cast<CallInst>(I)) { | ||||
1550 | if (CI->getIntrinsicID() == Intrinsic::masked_load || | ||||
1551 | CI->getIntrinsicID() == Intrinsic::masked_store) { | ||||
1552 | bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_store; | ||||
1553 | // Masked store has an initial operand for the value. | ||||
1554 | unsigned OpOffset = IsWrite ? 1 : 0; | ||||
1555 | if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads) | ||||
1556 | return; | ||||
1557 | |||||
1558 | auto BasePtr = CI->getOperand(OpOffset); | ||||
1559 | if (ignoreAccess(I, BasePtr)) | ||||
1560 | return; | ||||
1561 | Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType(); | ||||
1562 | MaybeAlign Alignment = Align(1); | ||||
1563 | // Otherwise no alignment guarantees. We probably got Undef. | ||||
1564 | if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset))) | ||||
1565 | Alignment = Op->getMaybeAlignValue(); | ||||
1566 | Value *Mask = CI->getOperand(2 + OpOffset); | ||||
1567 | Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask); | ||||
1568 | } else { | ||||
1569 | for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) { | ||||
1570 | if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) || | ||||
1571 | ignoreAccess(I, CI->getArgOperand(ArgNo))) | ||||
1572 | continue; | ||||
1573 | Type *Ty = CI->getParamByValType(ArgNo); | ||||
1574 | Interesting.emplace_back(I, ArgNo, false, Ty, Align(1)); | ||||
1575 | } | ||||
1576 | } | ||||
1577 | } | ||||
1578 | } | ||||
1579 | |||||
1580 | static bool isPointerOperand(Value *V) { | ||||
1581 | return V->getType()->isPointerTy() || isa<PtrToIntInst>(V); | ||||
1582 | } | ||||
1583 | |||||
1584 | // This is a rough heuristic; it may cause both false positives and | ||||
1585 | // false negatives. The proper implementation requires cooperation with | ||||
1586 | // the frontend. | ||||
1587 | static bool isInterestingPointerComparison(Instruction *I) { | ||||
1588 | if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) { | ||||
1589 | if (!Cmp->isRelational()) | ||||
1590 | return false; | ||||
1591 | } else { | ||||
1592 | return false; | ||||
1593 | } | ||||
1594 | return isPointerOperand(I->getOperand(0)) && | ||||
1595 | isPointerOperand(I->getOperand(1)); | ||||
1596 | } | ||||
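// E.g. "if (p < q)" on two unrelated char* values is a relational compare
// of two pointer operands and matches here; it is later routed to
// AsanPtrCmpFunction by instrumentPointerComparisonOrSubtraction below.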
1597 | |||||
1598 | // This is a rough heuristic; it may cause both false positives and | ||||
1599 | // false negatives. The proper implementation requires cooperation with | ||||
1600 | // the frontend. | ||||
1601 | static bool isInterestingPointerSubtraction(Instruction *I) { | ||||
1602 | if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) { | ||||
1603 | if (BO->getOpcode() != Instruction::Sub) | ||||
1604 | return false; | ||||
1605 | } else { | ||||
1606 | return false; | ||||
1607 | } | ||||
1608 | return isPointerOperand(I->getOperand(0)) && | ||||
1609 | isPointerOperand(I->getOperand(1)); | ||||
1610 | } | ||||
1611 | |||||
1612 | bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) { | ||||
1613 | // If a global variable does not have dynamic initialization we don't | ||||
1614 | // have to instrument it. However, if a global does not have initializer | ||||
1615 | // at all, we assume it has dynamic initializer (in other TU). | ||||
1616 | // | ||||
1617 | // FIXME: Metadata should be attached directly to the global instead | ||||
1618 | // of being added to llvm.asan.globals. | ||||
1619 | return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit; | ||||
1620 | } | ||||
1621 | |||||
1622 | void AddressSanitizer::instrumentPointerComparisonOrSubtraction( | ||||
1623 | Instruction *I) { | ||||
1624 | IRBuilder<> IRB(I); | ||||
1625 | FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction; | ||||
1626 | Value *Param[2] = {I->getOperand(0), I->getOperand(1)}; | ||||
1627 | for (Value *&i : Param) { | ||||
1628 | if (i->getType()->isPointerTy()) | ||||
1629 | i = IRB.CreatePointerCast(i, IntptrTy); | ||||
1630 | } | ||||
1631 | IRB.CreateCall(F, Param); | ||||
1632 | } | ||||
1633 | |||||
1634 | static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, | ||||
1635 | Instruction *InsertBefore, Value *Addr, | ||||
1636 | MaybeAlign Alignment, unsigned Granularity, | ||||
1637 | uint32_t TypeSize, bool IsWrite, | ||||
1638 | Value *SizeArgument, bool UseCalls, | ||||
1639 | uint32_t Exp) { | ||||
1640 | // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check | ||||
1641 | // if the data is properly aligned. | ||||
1642 | if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 || | ||||
1643 | TypeSize == 128) && | ||||
1644 | (!Alignment || *Alignment >= Granularity || *Alignment >= TypeSize / 8)) | ||||
1645 | return Pass->instrumentAddress(I, InsertBefore, Addr, TypeSize, IsWrite, | ||||
1646 | nullptr, UseCalls, Exp); | ||||
1647 | Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeSize, | ||||
1648 | IsWrite, nullptr, UseCalls, Exp); | ||||
1649 | } | ||||
1650 | |||||
1651 | static void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, | ||||
1652 | const DataLayout &DL, Type *IntptrTy, | ||||
1653 | Value *Mask, Instruction *I, | ||||
1654 | Value *Addr, MaybeAlign Alignment, | ||||
1655 | unsigned Granularity, Type *OpType, | ||||
1656 | bool IsWrite, Value *SizeArgument, | ||||
1657 | bool UseCalls, uint32_t Exp) { | ||||
1658 | auto *VTy = cast<FixedVectorType>(OpType); | ||||
1659 | uint64_t ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType()); | ||||
1660 | unsigned Num = VTy->getNumElements(); | ||||
1661 | auto Zero = ConstantInt::get(IntptrTy, 0); | ||||
1662 | for (unsigned Idx = 0; Idx < Num; ++Idx) { | ||||
1663 | Value *InstrumentedAddress = nullptr; | ||||
1664 | Instruction *InsertBefore = I; | ||||
1665 | if (auto *Vector = dyn_cast<ConstantVector>(Mask)) { | ||||
1666 | // dyn_cast as we might get UndefValue | ||||
1667 | if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) { | ||||
1668 | if (Masked->isZero()) | ||||
1669 | // Mask is constant false, so no instrumentation needed. | ||||
1670 | continue; | ||||
1671 | // If we have a true or undef value, fall through to doInstrumentAddress | ||||
1672 | // with InsertBefore == I | ||||
1673 | } | ||||
1674 | } else { | ||||
1675 | IRBuilder<> IRB(I); | ||||
1676 | Value *MaskElem = IRB.CreateExtractElement(Mask, Idx); | ||||
1677 | Instruction *ThenTerm = SplitBlockAndInsertIfThen(MaskElem, I, false); | ||||
1678 | InsertBefore = ThenTerm; | ||||
1679 | } | ||||
1680 | |||||
1681 | IRBuilder<> IRB(InsertBefore); | ||||
1682 | InstrumentedAddress = | ||||
1683 | IRB.CreateGEP(VTy, Addr, {Zero, ConstantInt::get(IntptrTy, Idx)}); | ||||
1684 | doInstrumentAddress(Pass, I, InsertBefore, InstrumentedAddress, Alignment, | ||||
1685 | Granularity, ElemTypeSize, IsWrite, SizeArgument, | ||||
1686 | UseCalls, Exp); | ||||
1687 | } | ||||
1688 | } | ||||
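// Net effect (sketch): for a masked <4 x i32> access, each lane whose mask
// bit is not provably false gets its own shadow check of &addr[i] for 4
// bytes, guarded by "if (mask[i])" when the mask is not a constant vector.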
1689 | |||||
1690 | void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, | ||||
1691 | InterestingMemoryOperand &O, bool UseCalls, | ||||
1692 | const DataLayout &DL) { | ||||
1693 | Value *Addr = O.getPtr(); | ||||
1694 | |||||
1695 | // Optimization experiments. | ||||
1696 | // The experiments can be used to evaluate potential optimizations that remove | ||||
1697 | // instrumentation (assess false negatives). Instead of completely removing | ||||
1698 | // some instrumentation, you set Exp to a non-zero value (mask of optimization | ||||
1699 | // experiments that want to remove instrumentation of this instruction). | ||||
1700 | // If Exp is non-zero, this pass will emit special calls into runtime | ||||
1701 | // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls | ||||
1702 | // make runtime terminate the program in a special way (with a different | ||||
1703 | // exit status). Then you run the new compiler on a buggy corpus, collect | ||||
1704 | // the special terminations (ideally, you don't see them at all -- no false | ||||
1705 | // negatives) and make the decision on the optimization. | ||||
1706 | uint32_t Exp = ClForceExperiment; | ||||
1707 | |||||
1708 | if (ClOpt && ClOptGlobals) { | ||||
1709 | // If initialization order checking is disabled, a simple access to a | ||||
1710 | // dynamically initialized global is always valid. | ||||
1711 | GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr)); | ||||
1712 | if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) && | ||||
1713 | isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) { | ||||
1714 | NumOptimizedAccessesToGlobalVar++; | ||||
1715 | return; | ||||
1716 | } | ||||
1717 | } | ||||
1718 | |||||
1719 | if (ClOpt && ClOptStack) { | ||||
1720 | // A direct inbounds access to a stack variable is always valid. | ||||
1721 | if (isa<AllocaInst>(getUnderlyingObject(Addr)) && | ||||
1722 | isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) { | ||||
1723 | NumOptimizedAccessesToStackVar++; | ||||
1724 | return; | ||||
1725 | } | ||||
1726 | } | ||||
1727 | |||||
1728 | if (O.IsWrite) | ||||
1729 | NumInstrumentedWrites++; | ||||
1730 | else | ||||
1731 | NumInstrumentedReads++; | ||||
1732 | |||||
1733 | unsigned Granularity = 1 << Mapping.Scale; | ||||
1734 | if (O.MaybeMask) { | ||||
1735 | instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.getInsn(), | ||||
1736 | Addr, O.Alignment, Granularity, O.OpType, | ||||
1737 | O.IsWrite, nullptr, UseCalls, Exp); | ||||
1738 | } else { | ||||
1739 | doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment, | ||||
1740 | Granularity, O.TypeSize, O.IsWrite, nullptr, UseCalls, | ||||
1741 | Exp); | ||||
1742 | } | ||||
1743 | } | ||||
1744 | |||||
1745 | Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore, | ||||
1746 | Value *Addr, bool IsWrite, | ||||
1747 | size_t AccessSizeIndex, | ||||
1748 | Value *SizeArgument, | ||||
1749 | uint32_t Exp) { | ||||
1750 | IRBuilder<> IRB(InsertBefore); | ||||
1751 | Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp); | ||||
1752 | CallInst *Call = nullptr; | ||||
1753 | if (SizeArgument) { | ||||
1754 | if (Exp == 0) | ||||
1755 | Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0], | ||||
1756 | {Addr, SizeArgument}); | ||||
1757 | else | ||||
1758 | Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1], | ||||
1759 | {Addr, SizeArgument, ExpVal}); | ||||
1760 | } else { | ||||
1761 | if (Exp == 0) | ||||
1762 | Call = | ||||
1763 | IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr); | ||||
1764 | else | ||||
1765 | Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex], | ||||
1766 | {Addr, ExpVal}); | ||||
1767 | } | ||||
1768 | |||||
1769 | Call->setCannotMerge(); | ||||
1770 | return Call; | ||||
1771 | } | ||||
1772 | |||||
1773 | Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, | ||||
1774 | Value *ShadowValue, | ||||
1775 | uint32_t TypeSize) { | ||||
1776 | size_t Granularity = static_cast<size_t>(1) << Mapping.Scale; | ||||
1777 | // Addr & (Granularity - 1) | ||||
1778 | Value *LastAccessedByte = | ||||
1779 | IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1)); | ||||
1780 | // (Addr & (Granularity - 1)) + size - 1 | ||||
1781 | if (TypeSize / 8 > 1) | ||||
1782 | LastAccessedByte = IRB.CreateAdd( | ||||
1783 | LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)); | ||||
1784 | // (uint8_t) ((Addr & (Granularity-1)) + size - 1) | ||||
1785 | LastAccessedByte = | ||||
1786 | IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false); | ||||
1787 | // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue | ||||
1788 | return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue); | ||||
1789 | } | ||||
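// Worked example: with Granularity = 8, a shadow value k in [1,7] means
// only the first k bytes of the granule are addressable, so a 4-byte
// access is bad iff (Addr & 7) + 4 - 1 >= k, which is exactly the
// comparison built above (signed, since poisoned shadow bytes are negative).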
1790 | |||||
1791 | Instruction *AddressSanitizer::instrumentAMDGPUAddress( | ||||
1792 | Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, | ||||
1793 | uint32_t TypeSize, bool IsWrite, Value *SizeArgument) { | ||||
1794 | // Do not instrument unsupported addrspaces. | ||||
1795 | if (isUnsupportedAMDGPUAddrspace(Addr)) | ||||
1796 | return nullptr; | ||||
1797 | Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType()); | ||||
1798 | // Follow host instrumentation for global and constant addresses. | ||||
1799 | if (PtrTy->getPointerAddressSpace() != 0) | ||||
1800 | return InsertBefore; | ||||
1801 | // Instrument generic addresses in supported address spaces. | ||||
1802 | IRBuilder<> IRB(InsertBefore); | ||||
1803 | Value *AddrLong = IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()); | ||||
1804 | Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {AddrLong}); | ||||
1805 | Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {AddrLong}); | ||||
1806 | Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate); | ||||
1807 | Value *Cmp = IRB.CreateICmpNE(IRB.getTrue(), IsSharedOrPrivate); | ||||
1808 | Value *AddrSpaceZeroLanding = | ||||
1809 | SplitBlockAndInsertIfThen(Cmp, InsertBefore, false); | ||||
1810 | InsertBefore = cast<Instruction>(AddrSpaceZeroLanding); | ||||
1811 | return InsertBefore; | ||||
1812 | } | ||||
1813 | |||||
1814 | void AddressSanitizer::instrumentAddress(Instruction *OrigIns, | ||||
1815 | Instruction *InsertBefore, Value *Addr, | ||||
1816 | uint32_t TypeSize, bool IsWrite, | ||||
1817 | Value *SizeArgument, bool UseCalls, | ||||
1818 | uint32_t Exp) { | ||||
1819 | if (TargetTriple.isAMDGPU()) { | ||||
1820 | InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr, | ||||
1821 | TypeSize, IsWrite, SizeArgument); | ||||
1822 | if (!InsertBefore) | ||||
1823 | return; | ||||
1824 | } | ||||
1825 | |||||
1826 | IRBuilder<> IRB(InsertBefore); | ||||
1827 | size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize); | ||||
1828 | const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex); | ||||
1829 | |||||
1830 | if (UseCalls && ClOptimizeCallbacks) { | ||||
1831 | const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex); | ||||
1832 | Module *M = IRB.GetInsertBlock()->getParent()->getParent(); | ||||
1833 | IRB.CreateCall( | ||||
1834 | Intrinsic::getDeclaration(M, Intrinsic::asan_check_memaccess), | ||||
1835 | {IRB.CreatePointerCast(Addr, Int8PtrTy), | ||||
1836 | ConstantInt::get(Int32Ty, AccessInfo.Packed)}); | ||||
1837 | return; | ||||
1838 | } | ||||
1839 | |||||
1840 | Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); | ||||
1841 | if (UseCalls) { | ||||
1842 | if (Exp == 0) | ||||
1843 | IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], | ||||
1844 | AddrLong); | ||||
1845 | else | ||||
1846 | IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex], | ||||
1847 | {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)}); | ||||
1848 | return; | ||||
1849 | } | ||||
1850 | |||||
1851 | Type *ShadowTy = | ||||
1852 | IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale)); | ||||
1853 | Type *ShadowPtrTy = PointerType::get(ShadowTy, 0); | ||||
1854 | Value *ShadowPtr = memToShadow(AddrLong, IRB); | ||||
1855 | Value *CmpVal = Constant::getNullValue(ShadowTy); | ||||
1856 | Value *ShadowValue = | ||||
1857 | IRB.CreateLoad(ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy)); | ||||
1858 | |||||
1859 | Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal); | ||||
1860 | size_t Granularity = 1ULL << Mapping.Scale; | ||||
1861 | Instruction *CrashTerm = nullptr; | ||||
1862 | |||||
1863 | if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) { | ||||
1864 | // We use branch weights for the slow path check, to indicate that the slow | ||||
1865 | // path is rarely taken. This seems to be the case for SPEC benchmarks. | ||||
1866 | Instruction *CheckTerm = SplitBlockAndInsertIfThen( | ||||
1867 | Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000)); | ||||
1868 | assert(cast<BranchInst>(CheckTerm)->isUnconditional()); | ||||
1869 | BasicBlock *NextBB = CheckTerm->getSuccessor(0); | ||||
1870 | IRB.SetInsertPoint(CheckTerm); | ||||
1871 | Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize); | ||||
1872 | if (Recover) { | ||||
1873 | CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false); | ||||
1874 | } else { | ||||
1875 | BasicBlock *CrashBlock = | ||||
1876 | BasicBlock::Create(*C, "", NextBB->getParent(), NextBB); | ||||
1877 | CrashTerm = new UnreachableInst(*C, CrashBlock); | ||||
1878 | BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2); | ||||
1879 | ReplaceInstWithInst(CheckTerm, NewTerm); | ||||
1880 | } | ||||
1881 | } else { | ||||
1882 | CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover); | ||||
1883 | } | ||||
1884 | |||||
1885 | Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite, | ||||
1886 | AccessSizeIndex, SizeArgument, Exp); | ||||
1887 | Crash->setDebugLoc(OrigIns->getDebugLoc()); | ||||
1888 | } | ||||
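// Pseudo-code for the inline check emitted above (a sketch; the slow-path
// byte test only applies when TypeSize < 8 * Granularity):
//   shadow = *(int8_t *)MemToShadow(Addr);
//   if (shadow != 0 && ((Addr & (Granularity - 1)) + size - 1) >= shadow)
//     __asan_report_...(Addr); // followed by unreachable unless Recover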
1889 | |||||
1890 | // Instrument unusual size or unusual alignment. | ||||
1891 | // We cannot do it with a single check, so we do a 1-byte check for the first | ||||
1892 | // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able | ||||
1893 | // to report the actual access size. | ||||
1894 | void AddressSanitizer::instrumentUnusualSizeOrAlignment( | ||||
1895 | Instruction *I, Instruction *InsertBefore, Value *Addr, uint32_t TypeSize, | ||||
1896 | bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) { | ||||
1897 | IRBuilder<> IRB(InsertBefore); | ||||
1898 | Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8); | ||||
1899 | Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); | ||||
1900 | if (UseCalls) { | ||||
1901 | if (Exp == 0) | ||||
1902 | IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0], | ||||
1903 | {AddrLong, Size}); | ||||
1904 | else | ||||
1905 | IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1], | ||||
1906 | {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)}); | ||||
1907 | } else { | ||||
1908 | Value *LastByte = IRB.CreateIntToPtr( | ||||
1909 | IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)), | ||||
1910 | Addr->getType()); | ||||
1911 | instrumentAddress(I, InsertBefore, Addr, 8, IsWrite, Size, false, Exp); | ||||
1912 | instrumentAddress(I, InsertBefore, LastByte, 8, IsWrite, Size, false, Exp); | ||||
1913 | } | ||||
1914 | } | ||||
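// E.g. a 10-byte access yields two 1-byte checks, on Addr and Addr + 9,
// each carrying Size so that a failure is reported through the *_n
// callbacks with the real access size.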
1915 | |||||
1916 | void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit, | ||||
1917 | GlobalValue *ModuleName) { | ||||
1918 | // Set up the arguments to our poison/unpoison functions. | ||||
1919 | IRBuilder<> IRB(&GlobalInit.front(), | ||||
1920 | GlobalInit.front().getFirstInsertionPt()); | ||||
1921 | |||||
1922 | // Add a call to poison all external globals before the given function starts. | ||||
1923 | Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy); | ||||
1924 | IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr); | ||||
1925 | |||||
1926 | // Add calls to unpoison all globals before each return instruction. | ||||
1927 | for (auto &BB : GlobalInit.getBasicBlockList()) | ||||
1928 | if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator())) | ||||
1929 | CallInst::Create(AsanUnpoisonGlobals, "", RI); | ||||
1930 | } | ||||
1931 | |||||
1932 | void ModuleAddressSanitizer::createInitializerPoisonCalls( | ||||
1933 | Module &M, GlobalValue *ModuleName) { | ||||
1934 | GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors"); | ||||
1935 | if (!GV) | ||||
1936 | return; | ||||
1937 | |||||
1938 | ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer()); | ||||
1939 | if (!CA) | ||||
1940 | return; | ||||
1941 | |||||
1942 | for (Use &OP : CA->operands()) { | ||||
1943 | if (isa<ConstantAggregateZero>(OP)) continue; | ||||
1944 | ConstantStruct *CS = cast<ConstantStruct>(OP); | ||||
1945 | |||||
1946 | // Must have a function or null ptr. | ||||
1947 | if (Function *F = dyn_cast<Function>(CS->getOperand(1))) { | ||||
1948 | if (F->getName() == kAsanModuleCtorName) continue; | ||||
1949 | auto *Priority = cast<ConstantInt>(CS->getOperand(0)); | ||||
1950 | // Don't instrument CTORs that will run before asan.module_ctor. | ||||
1951 | if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple)) | ||||
1952 | continue; | ||||
1953 | poisonOneInitializer(*F, ModuleName); | ||||
1954 | } | ||||
1955 | } | ||||
1956 | } | ||||
1957 | |||||
1958 | const GlobalVariable * | ||||
1959 | ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const { | ||||
1960 | // In case this function should be expanded to include rules that do not just | ||||
1961 | // apply when CompileKernel is true, either guard all existing rules with an | ||||
1962 | // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules | ||||
1963 | // should also apply to user space. | ||||
1964 | assert(CompileKernel && "Only expecting to be called when compiling kernel"); | ||||
1965 | |||||
1966 | const Constant *C = GA.getAliasee(); | ||||
1967 | |||||
1968 | // When compiling the kernel, globals that are aliased by symbols prefixed | ||||
1969 | // by "__" are special and cannot be padded with a redzone. | ||||
1970 | if (GA.getName().startswith("__")) | ||||
1971 | return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases()); | ||||
1972 | |||||
1973 | return nullptr; | ||||
1974 | } | ||||
1975 | |||||
1976 | bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const { | ||||
1977 | Type *Ty = G->getValueType(); | ||||
1978 | LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n"); | ||||
1979 | |||||
1980 | // FIXME: Metadata should be attached directly to the global instead | ||||
1981 | // of being added to llvm.asan.globals. | ||||
1982 | if (GlobalsMD.get(G).IsExcluded) return false; | ||||
1983 | if (!Ty->isSized()) return false; | ||||
1984 | if (!G->hasInitializer()) return false; | ||||
1985 | // Globals in address space 1 and 4 are supported for AMDGPU. | ||||
1986 | if (G->getAddressSpace() && | ||||
1987 | !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(G))) | ||||
1988 | return false; | ||||
1989 | if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals. | ||||
1990 | // Two problems with thread-locals: | ||||
1991 | // - The address of the main thread's copy can't be computed at link-time. | ||||
1992 | // - Need to poison all copies, not just the main thread's one. | ||||
1993 | if (G->isThreadLocal()) return false; | ||||
1994 | // For now, just ignore this Global if the alignment is large. | ||||
1995 | if (G->getAlignment() > getMinRedzoneSizeForGlobal()) return false; | ||||
1996 | |||||
1997 | // For non-COFF targets, only instrument globals known to be defined by this | ||||
1998 | // TU. | ||||
1999 | // FIXME: We can instrument comdat globals on ELF if we are using the | ||||
2000 | // GC-friendly metadata scheme. | ||||
2001 | if (!TargetTriple.isOSBinFormatCOFF()) { | ||||
2002 | if (!G->hasExactDefinition() || G->hasComdat()) | ||||
2003 | return false; | ||||
2004 | } else { | ||||
2005 | // On COFF, don't instrument non-ODR linkages. | ||||
2006 | if (G->isInterposable()) | ||||
2007 | return false; | ||||
2008 | } | ||||
2009 | |||||
2010 | // If a comdat is present, it must have a selection kind that implies ODR | ||||
2011 | // semantics: no duplicates, any, or exact match. | ||||
2012 | if (Comdat *C = G->getComdat()) { | ||||
2013 | switch (C->getSelectionKind()) { | ||||
2014 | case Comdat::Any: | ||||
2015 | case Comdat::ExactMatch: | ||||
2016 | case Comdat::NoDeduplicate: | ||||
2017 | break; | ||||
2018 | case Comdat::Largest: | ||||
2019 | case Comdat::SameSize: | ||||
2020 | return false; | ||||
2021 | } | ||||
2022 | } | ||||
2023 | |||||
2024 | if (G->hasSection()) { | ||||
2025 | // The kernel uses explicit sections for mostly special global variables | ||||
2026 | // that we should not instrument. E.g. the kernel may rely on their layout | ||||
2027 | // without redzones, or remove them at link time ("discard.*"), etc. | ||||
2028 | if (CompileKernel) | ||||
2029 | return false; | ||||
2030 | |||||
2031 | StringRef Section = G->getSection(); | ||||
2032 | |||||
2033 | // Globals from llvm.metadata aren't emitted; do not instrument them. | ||||
2034 | if (Section == "llvm.metadata") return false; | ||||
2035 | // Do not instrument globals from special LLVM sections. | ||||
2036 | if (Section.contains("__llvm") || Section.contains("__LLVM")) | ||||
2037 | return false; | ||||
2038 | |||||
2039 | // Do not instrument function pointers to initialization and termination | ||||
2040 | // routines: the dynamic linker will not properly handle redzones. | ||||
2041 | if (Section.startswith(".preinit_array") || | ||||
2042 | Section.startswith(".init_array") || | ||||
2043 | Section.startswith(".fini_array")) { | ||||
2044 | return false; | ||||
2045 | } | ||||
2046 | |||||
2047 | // Do not instrument user-defined sections (with names resembling | ||||
2048 | // valid C identifiers). | ||||
2049 | if (TargetTriple.isOSBinFormatELF()) { | ||||
2050 | if (llvm::all_of(Section, | ||||
2051 | [](char c) { return llvm::isAlnum(c) || c == '_'; })) | ||||
2052 | return false; | ||||
2053 | } | ||||
2054 | |||||
2055 | // On COFF, if the section name contains '$', it is highly likely that the | ||||
2056 | // user is using section sorting to create an array of globals similar to | ||||
2057 | // the way initialization callbacks are registered in .init_array and | ||||
2058 | // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones | ||||
2059 | // to such globals is counterproductive, because the intent is that they | ||||
2060 | // will form an array, and out-of-bounds accesses are expected. | ||||
2061 | // See https://github.com/google/sanitizers/issues/305 | ||||
2062 | // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx | ||||
2063 | if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) { | ||||
2064 | LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): " | ||||
2065 |                   << *G << "\n"); | ||||
2066 | return false; | ||||
2067 | } | ||||
2068 | |||||
2069 | if (TargetTriple.isOSBinFormatMachO()) { | ||||
2070 | StringRef ParsedSegment, ParsedSection; | ||||
2071 | unsigned TAA = 0, StubSize = 0; | ||||
2072 | bool TAAParsed; | ||||
2073 | cantFail(MCSectionMachO::ParseSectionSpecifier( | ||||
2074 | Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize)); | ||||
2075 | |||||
2076 | // Ignore the globals from the __OBJC section. The ObjC runtime assumes | ||||
2077 | // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to | ||||
2078 | // them. | ||||
2079 | if (ParsedSegment == "__OBJC" || | ||||
2080 | (ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) { | ||||
2081 | LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n"); | ||||
2082 | return false; | ||||
2083 | } | ||||
2084 | // See https://github.com/google/sanitizers/issues/32 | ||||
2085 | // Constant CFString instances are compiled in the following way: | ||||
2086 | // -- the string buffer is emitted into | ||||
2087 | // __TEXT,__cstring,cstring_literals | ||||
2088 | // -- the constant NSConstantString structure referencing that buffer | ||||
2089 | // is placed into __DATA,__cfstring | ||||
2090 | // Therefore there's no point in placing redzones into __DATA,__cfstring. | ||||
2091 | // Moreover, it causes the linker to crash on OS X 10.7 | ||||
2092 | if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") { | ||||
2093 | LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n"); | ||||
2094 | return false; | ||||
2095 | } | ||||
2096 | // The linker merges the contents of cstring_literals and removes the | ||||
2097 | // trailing zeroes. | ||||
2098 | if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) { | ||||
2099 | LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n"); | ||||
2100 | return false; | ||||
2101 | } | ||||
2102 | } | ||||
2103 | } | ||||
2104 | |||||
2105 | if (CompileKernel) { | ||||
2106 | // Globals that are prefixed by "__" are special and cannot be padded with a | ||||
2107 | // redzone. | ||||
2108 | if (G->getName().startswith("__")) | ||||
2109 | return false; | ||||
2110 | } | ||||
2111 | |||||
2112 | return true; | ||||
2113 | } | ||||
2114 | |||||
2115 | // On Mach-O platforms, we emit global metadata in a separate section of the | ||||
2116 | // binary in order to allow the linker to properly dead strip. This is only | ||||
2117 | // supported on recent versions of ld64. | ||||
2118 | bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const { | ||||
2119 | if (!TargetTriple.isOSBinFormatMachO()) | ||||
2120 | return false; | ||||
2121 | |||||
2122 | if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11)) | ||||
2123 | return true; | ||||
2124 | if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9)) | ||||
2125 | return true; | ||||
2126 | if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2)) | ||||
2127 | return true; | ||||
2128 | |||||
2129 | return false; | ||||
2130 | } | ||||
2131 | |||||
2132 | StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const { | ||||
2133 | switch (TargetTriple.getObjectFormat()) { | ||||
2134 | case Triple::COFF: return ".ASAN$GL"; | ||||
2135 | case Triple::ELF: return "asan_globals"; | ||||
2136 | case Triple::MachO: return "__DATA,__asan_globals,regular"; | ||||
2137 | case Triple::Wasm: | ||||
2138 | case Triple::GOFF: | ||||
2139 | case Triple::XCOFF: | ||||
2140 | report_fatal_error( | ||||
2141 | "ModuleAddressSanitizer not implemented for object file format"); | ||||
2142 | case Triple::UnknownObjectFormat: | ||||
2143 | break; | ||||
2144 | } | ||||
2145 | llvm_unreachable("unsupported object format"); | ||||
2146 | } | ||||
2147 | |||||
2148 | void ModuleAddressSanitizer::initializeCallbacks(Module &M) { | ||||
2149 | IRBuilder<> IRB(*C); | ||||
2150 | |||||
2151 | // Declare our poisoning and unpoisoning functions. | ||||
2152 | AsanPoisonGlobals = | ||||
2153 | M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy); | ||||
2154 | AsanUnpoisonGlobals = | ||||
2155 | M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy()); | ||||
2156 | |||||
2157 | // Declare functions that register/unregister globals. | ||||
2158 | AsanRegisterGlobals = M.getOrInsertFunction( | ||||
2159 | kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy); | ||||
2160 | AsanUnregisterGlobals = M.getOrInsertFunction( | ||||
2161 | kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy); | ||||
2162 | |||||
2163 | // Declare the functions that find globals in a shared object and then invoke | ||||
2164 | // the (un)register function on them. | ||||
2165 | AsanRegisterImageGlobals = M.getOrInsertFunction( | ||||
2166 | kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy); | ||||
2167 | AsanUnregisterImageGlobals = M.getOrInsertFunction( | ||||
2168 | kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy); | ||||
2169 | |||||
2170 | AsanRegisterElfGlobals = | ||||
2171 | M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(), | ||||
2172 | IntptrTy, IntptrTy, IntptrTy); | ||||
2173 | AsanUnregisterElfGlobals = | ||||
2174 | M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(), | ||||
2175 | IntptrTy, IntptrTy, IntptrTy); | ||||
2176 | } | ||||
2177 | |||||
2178 | // Put the metadata and the instrumented global in the same group. This ensures | ||||
2179 | // that the metadata is discarded if the instrumented global is discarded. | ||||
2180 | void ModuleAddressSanitizer::SetComdatForGlobalMetadata( | ||||
2181 | GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) { | ||||
2182 | Module &M = *G->getParent(); | ||||
2183 | Comdat *C = G->getComdat(); | ||||
2184 | if (!C) { | ||||
2185 | if (!G->hasName()) { | ||||
2186 | // If G is unnamed, it must be internal. Give it an artificial name | ||||
2187 | // so we can put it in a comdat. | ||||
2188 | assert(G->hasLocalLinkage()); | ||||
2189 | G->setName(Twine(kAsanGenPrefix) + "_anon_global"); | ||||
2190 | } | ||||
2191 | |||||
2192 | if (!InternalSuffix.empty() && G->hasLocalLinkage()) { | ||||
2193 | std::string Name = std::string(G->getName()); | ||||
2194 | Name += InternalSuffix; | ||||
2195 | C = M.getOrInsertComdat(Name); | ||||
2196 | } else { | ||||
2197 | C = M.getOrInsertComdat(G->getName()); | ||||
2198 | } | ||||
2199 | |||||
2200 | // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private | ||||
2201 | // linkage to internal linkage so that a symbol table entry is emitted. This | ||||
2202 | // is necessary in order to create the comdat group. | ||||
2203 | if (TargetTriple.isOSBinFormatCOFF()) { | ||||
2204 | C->setSelectionKind(Comdat::NoDeduplicate); | ||||
2205 | if (G->hasPrivateLinkage()) | ||||
2206 | G->setLinkage(GlobalValue::InternalLinkage); | ||||
2207 | } | ||||
2208 | G->setComdat(C); | ||||
2209 | } | ||||
2210 | |||||
2211 | assert(G->hasComdat()); | ||||
2212 | Metadata->setComdat(G->getComdat()); | ||||
2213 | } | ||||
2214 | |||||
2215 | // Create a separate metadata global and put it in the appropriate ASan | ||||
2216 | // global registration section. | ||||
2217 | GlobalVariable * | ||||
2218 | ModuleAddressSanitizer::CreateMetadataGlobal(Module &M, Constant *Initializer, | ||||
2219 | StringRef OriginalName) { | ||||
2220 | auto Linkage = TargetTriple.isOSBinFormatMachO() | ||||
2221 | ? GlobalVariable::InternalLinkage | ||||
2222 | : GlobalVariable::PrivateLinkage; | ||||
2223 | GlobalVariable *Metadata = new GlobalVariable( | ||||
2224 | M, Initializer->getType(), false, Linkage, Initializer, | ||||
2225 | Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName)); | ||||
2226 | Metadata->setSection(getGlobalMetadataSection()); | ||||
2227 | return Metadata; | ||||
2228 | } | ||||
2229 | |||||
2230 | Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor(Module &M) { | ||||
2231 | AsanDtorFunction = Function::createWithDefaultAttr( | ||||
2232 | FunctionType::get(Type::getVoidTy(*C), false), | ||||
2233 | GlobalValue::InternalLinkage, 0, kAsanModuleDtorName, &M); | ||||
2234 | AsanDtorFunction->addFnAttr(Attribute::NoUnwind); | ||||
2235 | // Ensure Dtor cannot be discarded, even if in a comdat. | ||||
2236 | appendToUsed(M, {AsanDtorFunction}); | ||||
2237 | BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction); | ||||
2238 | |||||
2239 | return ReturnInst::Create(*C, AsanDtorBB); | ||||
2240 | } | ||||
2241 | |||||
2242 | void ModuleAddressSanitizer::InstrumentGlobalsCOFF( | ||||
2243 | IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals, | ||||
2244 | ArrayRef<Constant *> MetadataInitializers) { | ||||
2245 | assert(ExtendedGlobals.size() == MetadataInitializers.size()); | ||||
2246 | auto &DL = M.getDataLayout(); | ||||
2247 | |||||
2248 | SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size()); | ||||
2249 | for (size_t i = 0; i < ExtendedGlobals.size(); i++) { | ||||
2250 | Constant *Initializer = MetadataInitializers[i]; | ||||
2251 | GlobalVariable *G = ExtendedGlobals[i]; | ||||
2252 | GlobalVariable *Metadata = | ||||
2253 | CreateMetadataGlobal(M, Initializer, G->getName()); | ||||
2254 | MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G)); | ||||
2255 | Metadata->setMetadata(LLVMContext::MD_associated, MD); | ||||
2256 | MetadataGlobals[i] = Metadata; | ||||
2257 | |||||
2258 | // The MSVC linker always inserts padding when linking incrementally. We | ||||
2259 | // cope with that by aligning each struct to its size, which must be a power | ||||
2260 | // of two. | ||||
2261 | unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType()); | ||||
2262 | assert(isPowerOf2_32(SizeOfGlobalStruct) && | ||||
2263 |        "global metadata will not be padded appropriately"); | ||||
2264 | Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct)); | ||||
2265 | |||||
2266 | SetComdatForGlobalMetadata(G, Metadata, ""); | ||||
2267 | } | ||||
2268 | |||||
2269 | // Update llvm.compiler.used, adding the new metadata globals. This is | ||||
2270 | // needed so that during LTO these variables stay alive. | ||||
2271 | if (!MetadataGlobals.empty()) | ||||
2272 | appendToCompilerUsed(M, MetadataGlobals); | ||||
2273 | } | ||||
2274 | |||||
2275 | void ModuleAddressSanitizer::InstrumentGlobalsELF( | ||||
2276 | IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals, | ||||
2277 | ArrayRef<Constant *> MetadataInitializers, | ||||
2278 | const std::string &UniqueModuleId) { | ||||
2279 | assert(ExtendedGlobals.size() == MetadataInitializers.size()); | ||||
2280 | |||||
2281 | // Putting globals in a comdat changes the semantics and can potentially | ||||
2282 | // cause false-negative ODR violations at link time. If ODR indicators are | ||||
2283 | // used, we keep the comdat sections, as link-time ODR violations will be | ||||
2284 | // detected on the ODR indicator symbols. | ||||
2285 | bool UseComdatForGlobalsGC = UseOdrIndicator; | ||||
2286 | |||||
2287 | SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size()); | ||||
2288 | for (size_t i = 0; i < ExtendedGlobals.size(); i++) { | ||||
2289 | GlobalVariable *G = ExtendedGlobals[i]; | ||||
2290 | GlobalVariable *Metadata = | ||||
2291 | CreateMetadataGlobal(M, MetadataInitializers[i], G->getName()); | ||||
2292 | MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G)); | ||||
2293 | Metadata->setMetadata(LLVMContext::MD_associated, MD); | ||||
2294 | MetadataGlobals[i] = Metadata; | ||||
2295 | |||||
2296 | if (UseComdatForGlobalsGC) | ||||
2297 | SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId); | ||||
2298 | } | ||||
2299 | |||||
2300 | // Update llvm.compiler.used, adding the new metadata globals. This is | ||||
2301 | // needed so that during LTO these variables stay alive. | ||||
2302 | if (!MetadataGlobals.empty()) | ||||
2303 | appendToCompilerUsed(M, MetadataGlobals); | ||||
2304 | |||||
2305 | // RegisteredFlag serves two purposes. First, we can pass it to dladdr() | ||||
2306 | // to look up the loaded image that contains it. Second, we can store in it | ||||
2307 | // whether registration has already occurred, to prevent duplicate | ||||
2308 | // registration. | ||||
2309 | // | ||||
2310 | // Common linkage ensures that there is only one global per shared library. | ||||
2311 | GlobalVariable *RegisteredFlag = new GlobalVariable( | ||||
2312 | M, IntptrTy, false, GlobalVariable::CommonLinkage, | ||||
2313 | ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName); | ||||
2314 | RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility); | ||||
2315 | |||||
2316 | // Create start and stop symbols. | ||||
2317 | GlobalVariable *StartELFMetadata = new GlobalVariable( | ||||
2318 | M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr, | ||||
2319 | "__start_" + getGlobalMetadataSection()); | ||||
2320 | StartELFMetadata->setVisibility(GlobalVariable::HiddenVisibility); | ||||
2321 | GlobalVariable *StopELFMetadata = new GlobalVariable( | ||||
2322 | M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr, | ||||
2323 | "__stop_" + getGlobalMetadataSection()); | ||||
2324 | StopELFMetadata->setVisibility(GlobalVariable::HiddenVisibility); | ||||
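// For reference: on ELF, a section whose name is a valid C identifier (such
// as "asan_globals") gets linker-synthesized __start_<section> and
// __stop_<section> symbols delimiting its contents. The two extern-weak
// globals above bind to __start_asan_globals / __stop_asan_globals, which is
// what gives the runtime the bounds of every metadata entry in the section.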
2325 | |||||
2326 | // Create a call to register the globals with the runtime. | ||||
2327 | IRB.CreateCall(AsanRegisterElfGlobals, | ||||
2328 | {IRB.CreatePointerCast(RegisteredFlag, IntptrTy), | ||||
2329 | IRB.CreatePointerCast(StartELFMetadata, IntptrTy), | ||||
2330 | IRB.CreatePointerCast(StopELFMetadata, IntptrTy)}); | ||||
2331 | |||||
2332 | // We also need to unregister globals at the end, e.g., when a shared library | ||||
2333 | // gets closed. | ||||
2334 | if (DestructorKind != AsanDtorKind::None) { | ||||
2335 | IRBuilder<> IrbDtor(CreateAsanModuleDtor(M)); | ||||
2336 | IrbDtor.CreateCall(AsanUnregisterElfGlobals, | ||||
2337 | {IRB.CreatePointerCast(RegisteredFlag, IntptrTy), | ||||
2338 | IRB.CreatePointerCast(StartELFMetadata, IntptrTy), | ||||
2339 | IRB.CreatePointerCast(StopELFMetadata, IntptrTy)}); | ||||
2340 | } | ||||
2341 | } | ||||
2342 | |||||
2343 | void ModuleAddressSanitizer::InstrumentGlobalsMachO( | ||||
2344 | IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals, | ||||
2345 | ArrayRef<Constant *> MetadataInitializers) { | ||||
2346 | assert(ExtendedGlobals.size() == MetadataInitializers.size()); | ||||
2347 | |||||
2348 | // On recent Mach-O platforms, use a structure which binds the liveness of | ||||
2349 | // the global variable to the metadata struct. Keep a list of the "Liveness" | ||||
2350 | // GVs created so that they can be added to llvm.compiler.used. | ||||
2351 | StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy); | ||||
2352 | SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size()); | ||||
2353 | |||||
2354 | for (size_t i = 0; i < ExtendedGlobals.size(); i++) { | ||||
2355 | Constant *Initializer = MetadataInitializers[i]; | ||||
2356 | GlobalVariable *G = ExtendedGlobals[i]; | ||||
2357 | GlobalVariable *Metadata = | ||||
2358 | CreateMetadataGlobal(M, Initializer, G->getName()); | ||||
2359 | |||||
2360 | // On recent Mach-O platforms, we emit the global metadata in a way that | ||||
2361 | // allows the linker to properly strip dead globals. | ||||
2362 | auto LivenessBinder = | ||||
2363 | ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u), | ||||
2364 | ConstantExpr::getPointerCast(Metadata, IntptrTy)); | ||||
2365 | GlobalVariable *Liveness = new GlobalVariable( | ||||
2366 | M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder, | ||||
2367 | Twine("__asan_binder_") + G->getName()); | ||||
2368 | Liveness->setSection("__DATA,__asan_liveness,regular,live_support"); | ||||
2369 | LivenessGlobals[i] = Liveness; | ||||
2370 | } | ||||
2371 | |||||
2372 | // Update llvm.compiler.used, adding the new liveness globals. This is | ||||
2373 | // needed so that during LTO these variables stay alive. The alternative | ||||
2374 | // would be to have the linker handle the LTO symbols, but libLTO's | ||||
2375 | // current API does not expose access to the section for each symbol. | ||||
2376 | if (!LivenessGlobals.empty()) | ||||
2377 | appendToCompilerUsed(M, LivenessGlobals); | ||||
2378 | |||||
2379 | // RegisteredFlag serves two purposes. First, we can pass it to dladdr() | ||||
2380 | // to look up the loaded image that contains it. Second, we can store in it | ||||
2381 | // whether registration has already occurred, to prevent duplicate | ||||
2382 | // registration. | ||||
2383 | // | ||||
2384 | // Common linkage ensures that there is only one global per shared library. | ||||
2385 | GlobalVariable *RegisteredFlag = new GlobalVariable( | ||||
2386 | M, IntptrTy, false, GlobalVariable::CommonLinkage, | ||||
2387 | ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName); | ||||
2388 | RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility); | ||||
2389 | |||||
2390 | IRB.CreateCall(AsanRegisterImageGlobals, | ||||
2391 | {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)}); | ||||
2392 | |||||
2393 | // We also need to unregister globals at the end, e.g., when a shared library | ||||
2394 | // gets closed. | ||||
2395 | if (DestructorKind != AsanDtorKind::None) { | ||||
2396 | IRBuilder<> IrbDtor(CreateAsanModuleDtor(M)); | ||||
2397 | IrbDtor.CreateCall(AsanUnregisterImageGlobals, | ||||
2398 | {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)}); | ||||
2399 | } | ||||
2400 | } | ||||
2401 | |||||
2402 | void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray( | ||||
2403 | IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals, | ||||
2404 | ArrayRef<Constant *> MetadataInitializers) { | ||||
2405 | assert(ExtendedGlobals.size() == MetadataInitializers.size()); | ||||
2406 | unsigned N = ExtendedGlobals.size(); | ||||
2407 | assert(N > 0); | ||||
2408 | |||||
2409 | // On platforms that don't have a custom metadata section, we emit an array | ||||
2410 | // of global metadata structures. | ||||
2411 | ArrayType *ArrayOfGlobalStructTy = | ||||
2412 | ArrayType::get(MetadataInitializers[0]->getType(), N); | ||||
2413 | auto AllGlobals = new GlobalVariable( | ||||
2414 | M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage, | ||||
2415 | ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), ""); | ||||
2416 | if (Mapping.Scale > 3) | ||||
2417 | AllGlobals->setAlignment(Align(1ULL << Mapping.Scale)); | ||||
2418 | |||||
2419 | IRB.CreateCall(AsanRegisterGlobals, | ||||
2420 | {IRB.CreatePointerCast(AllGlobals, IntptrTy), | ||||
2421 | ConstantInt::get(IntptrTy, N)}); | ||||
2422 | |||||
2423 | // We also need to unregister globals at the end, e.g., when a shared library | ||||
2424 | // gets closed. | ||||
2425 | if (DestructorKind != AsanDtorKind::None) { | ||||
2426 | IRBuilder<> IrbDtor(CreateAsanModuleDtor(M)); | ||||
2427 | IrbDtor.CreateCall(AsanUnregisterGlobals, | ||||
2428 | {IRB.CreatePointerCast(AllGlobals, IntptrTy), | ||||
2429 | ConstantInt::get(IntptrTy, N)}); | ||||
2430 | } | ||||
2431 | } | ||||
2432 | |||||
2433 | // This function replaces all global variables with new variables that have | ||||
2434 | // trailing redzones. It also creates a function that poisons | ||||
2435 | // redzones and inserts this function into llvm.global_ctors. | ||||
2436 | // Sets *CtorComdat to true if the global registration code emitted into the | ||||
2437 | // asan constructor is comdat-compatible. | ||||
2438 | bool ModuleAddressSanitizer::InstrumentGlobals(IRBuilder<> &IRB, Module &M, | ||||
2439 | bool *CtorComdat) { | ||||
2440 | *CtorComdat = false; | ||||
2441 | |||||
2442 | // Build the set of globals that are aliased by some GA, where | ||||
2443 | // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable. | ||||
2444 | SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions; | ||||
2445 | if (CompileKernel) { | ||||
2446 | for (auto &GA : M.aliases()) { | ||||
2447 | if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA)) | ||||
2448 | AliasedGlobalExclusions.insert(GV); | ||||
2449 | } | ||||
2450 | } | ||||
2451 | |||||
2452 | SmallVector<GlobalVariable *, 16> GlobalsToChange; | ||||
2453 | for (auto &G : M.globals()) { | ||||
2454 | if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G)) | ||||
2455 | GlobalsToChange.push_back(&G); | ||||
2456 | } | ||||
2457 | |||||
2458 | size_t n = GlobalsToChange.size(); | ||||
2459 | if (n == 0) { | ||||
2460 | *CtorComdat = true; | ||||
2461 | return false; | ||||
2462 | } | ||||
2463 | |||||
2464 | auto &DL = M.getDataLayout(); | ||||
2465 | |||||
2466 | // A global is described by a structure | ||||
2467 | // size_t beg; | ||||
2468 | // size_t size; | ||||
2469 | // size_t size_with_redzone; | ||||
2470 | // const char *name; | ||||
2471 | // const char *module_name; | ||||
2472 | // size_t has_dynamic_init; | ||||
2473 | // void *source_location; | ||||
2474 | // size_t odr_indicator; | ||||
2475 | // We initialize an array of such structures and pass it to a run-time call. | ||||
2476 | StructType *GlobalStructTy = | ||||
2477 | StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy, | ||||
2478 | IntptrTy, IntptrTy, IntptrTy); | ||||
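// For orientation, a minimal C-style sketch of one such descriptor as the
// runtime might see it (field names follow the comment above; the
// authoritative definition lives in compiler-rt and may differ in detail):
//
//   struct GlobalDescriptor {      // hypothetical name; every field is
//     uintptr_t beg;               //   pointer-sized. Address of the global.
//     uintptr_t size;              // Payload size in bytes.
//     uintptr_t size_with_redzone; // Payload plus trailing redzone.
//     uintptr_t name;              // const char * with the global's name.
//     uintptr_t module_name;       // const char * identifying the module.
//     uintptr_t has_dynamic_init;  // Nonzero if dynamically initialized.
//     uintptr_t source_location;   // Source-location blob, or null.
//     uintptr_t odr_indicator;     // Address of the ODR indicator symbol.
//   };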
2479 | SmallVector<GlobalVariable *, 16> NewGlobals(n); | ||||
2480 | SmallVector<Constant *, 16> Initializers(n); | ||||
2481 | |||||
2482 | bool HasDynamicallyInitializedGlobals = false; | ||||
2483 | |||||
2484 | // We shouldn't merge identical module names, as this string serves as a | ||||
2485 | // unique module ID at runtime. | ||||
2486 | GlobalVariable *ModuleName = createPrivateGlobalForString( | ||||
2487 | M, M.getModuleIdentifier(), /*AllowMerging*/ false, kAsanGenPrefix); | ||||
2488 | |||||
2489 | for (size_t i = 0; i < n; i++) { | ||||
2490 | GlobalVariable *G = GlobalsToChange[i]; | ||||
2491 | |||||
2492 | // FIXME: Metadata should be attached directly to the global instead | ||||
2493 | // of being added to llvm.asan.globals. | ||||
2494 | auto MD = GlobalsMD.get(G); | ||||
2495 | StringRef NameForGlobal = G->getName(); | ||||
2496 | // Create string holding the global name (use global name from metadata | ||||
2497 | // if it's available, otherwise just write the name of the global variable). | ||||
2498 | GlobalVariable *Name = createPrivateGlobalForString( | ||||
2499 | M, MD.Name.empty() ? NameForGlobal : MD.Name, | ||||
2500 | /*AllowMerging*/ true, kAsanGenPrefix); | ||||
2501 | |||||
2502 | Type *Ty = G->getValueType(); | ||||
2503 | const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty); | ||||
2504 | const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes); | ||||
2505 | Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize); | ||||
2506 | |||||
2507 | StructType *NewTy = StructType::get(Ty, RightRedZoneTy); | ||||
2508 | Constant *NewInitializer = ConstantStruct::get( | ||||
2509 | NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy)); | ||||
2510 | |||||
2511 | // Create a new global variable with enough space for a redzone. | ||||
2512 | GlobalValue::LinkageTypes Linkage = G->getLinkage(); | ||||
2513 | if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage) | ||||
2514 | Linkage = GlobalValue::InternalLinkage; | ||||
2515 | GlobalVariable *NewGlobal = new GlobalVariable( | ||||
2516 | M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G, | ||||
2517 | G->getThreadLocalMode(), G->getAddressSpace()); | ||||
2518 | NewGlobal->copyAttributesFrom(G); | ||||
2519 | NewGlobal->setComdat(G->getComdat()); | ||||
2520 | NewGlobal->setAlignment(MaybeAlign(getMinRedzoneSizeForGlobal())); | ||||
2521 | // Don't fold globals with redzones. The ODR violation detector and redzone | ||||
2522 | // poisoning implicitly create a dependence on the global's address, so it | ||||
2523 | // is no longer valid for it to be marked unnamed_addr. | ||||
2524 | NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None); | ||||
2525 | |||||
2526 | // Move null-terminated C strings to "__asan_cstring" section on Darwin. | ||||
2527 | if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() && | ||||
2528 | G->isConstant()) { | ||||
2529 | auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer()); | ||||
2530 | if (Seq && Seq->isCString()) | ||||
2531 | NewGlobal->setSection("__TEXT,__asan_cstring,regular"); | ||||
2532 | } | ||||
2533 | |||||
2534 | // Transfer the debug info and type metadata. The payload starts at offset | ||||
2535 | // zero so we can copy the metadata over as is. | ||||
2536 | NewGlobal->copyMetadata(G, 0); | ||||
2537 | |||||
2538 | Value *Indices2[2]; | ||||
2539 | Indices2[0] = IRB.getInt32(0); | ||||
2540 | Indices2[1] = IRB.getInt32(0); | ||||
2541 | |||||
2542 | G->replaceAllUsesWith( | ||||
2543 | ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true)); | ||||
2544 | NewGlobal->takeName(G); | ||||
2545 | G->eraseFromParent(); | ||||
2546 | NewGlobals[i] = NewGlobal; | ||||
2547 | |||||
2548 | Constant *SourceLoc; | ||||
2549 | if (!MD.SourceLoc.empty()) { | ||||
2550 | auto SourceLocGlobal = createPrivateGlobalForSourceLoc(M, MD.SourceLoc); | ||||
2551 | SourceLoc = ConstantExpr::getPointerCast(SourceLocGlobal, IntptrTy); | ||||
2552 | } else { | ||||
2553 | SourceLoc = ConstantInt::get(IntptrTy, 0); | ||||
2554 | } | ||||
2555 | |||||
2556 | Constant *ODRIndicator = ConstantExpr::getNullValue(IRB.getInt8PtrTy()); | ||||
2557 | GlobalValue *InstrumentedGlobal = NewGlobal; | ||||
2558 | |||||
2559 | bool CanUsePrivateAliases = | ||||
2560 | TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() || | ||||
2561 | TargetTriple.isOSBinFormatWasm(); | ||||
2562 | if (CanUsePrivateAliases && UsePrivateAlias) { | ||||
2563 | // Create a local alias for NewGlobal to avoid a crash on ODR violations | ||||
2564 | // between instrumented and non-instrumented libraries. | ||||
2565 | InstrumentedGlobal = | ||||
2566 | GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal); | ||||
2567 | } | ||||
2568 | |||||
2569 | // ODR violations should not happen for local linkage. | ||||
2570 | if (NewGlobal->hasLocalLinkage()) { | ||||
2571 | ODRIndicator = ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1), | ||||
2572 | IRB.getInt8PtrTy()); | ||||
2573 | } else if (UseOdrIndicator) { | ||||
2574 | // With local aliases, we need to provide another externally visible | ||||
2575 | // symbol __odr_asan_XXX to detect ODR violations. | ||||
2576 | auto *ODRIndicatorSym = | ||||
2577 | new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage, | ||||
2578 | Constant::getNullValue(IRB.getInt8Ty()), | ||||
2579 | kODRGenPrefix + NameForGlobal, nullptr, | ||||
2580 | NewGlobal->getThreadLocalMode()); | ||||
2581 | |||||
2582 | // Set meaningful attributes for indicator symbol. | ||||
2583 | ODRIndicatorSym->setVisibility(NewGlobal->getVisibility()); | ||||
2584 | ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass()); | ||||
2585 | ODRIndicatorSym->setAlignment(Align(1)); | ||||
2586 | ODRIndicator = ODRIndicatorSym; | ||||
2587 | } | ||||
2588 | |||||
2589 | Constant *Initializer = ConstantStruct::get( | ||||
2590 | GlobalStructTy, | ||||
2591 | ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy), | ||||
2592 | ConstantInt::get(IntptrTy, SizeInBytes), | ||||
2593 | ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize), | ||||
2594 | ConstantExpr::getPointerCast(Name, IntptrTy), | ||||
2595 | ConstantExpr::getPointerCast(ModuleName, IntptrTy), | ||||
2596 | ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc, | ||||
2597 | ConstantExpr::getPointerCast(ODRIndicator, IntptrTy)); | ||||
2598 | |||||
2599 | if (ClInitializers && MD.IsDynInit) HasDynamicallyInitializedGlobals = true; | ||||
2600 | |||||
2601 | LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n"); | ||||
2602 | |||||
2603 | Initializers[i] = Initializer; | ||||
2604 | } | ||||
2605 | |||||
2606 | // Add instrumented globals to the llvm.compiler.used list to prevent LTO | ||||
2607 | // from ConstantMerge'ing them. | ||||
2608 | SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList; | ||||
2609 | for (size_t i = 0; i < n; i++) { | ||||
2610 | GlobalVariable *G = NewGlobals[i]; | ||||
2611 | if (G->getName().empty()) continue; | ||||
2612 | GlobalsToAddToUsedList.push_back(G); | ||||
2613 | } | ||||
2614 | appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList)); | ||||
2615 | |||||
2616 | std::string ELFUniqueModuleId = | ||||
2617 | (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) ? getUniqueModuleId(&M) | ||||
2618 | : ""; | ||||
2619 | |||||
2620 | if (!ELFUniqueModuleId.empty()) { | ||||
2621 | InstrumentGlobalsELF(IRB, M, NewGlobals, Initializers, ELFUniqueModuleId); | ||||
2622 | *CtorComdat = true; | ||||
2623 | } else if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) { | ||||
2624 | InstrumentGlobalsCOFF(IRB, M, NewGlobals, Initializers); | ||||
2625 | } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) { | ||||
2626 | InstrumentGlobalsMachO(IRB, M, NewGlobals, Initializers); | ||||
2627 | } else { | ||||
2628 | InstrumentGlobalsWithMetadataArray(IRB, M, NewGlobals, Initializers); | ||||
2629 | } | ||||
2630 | |||||
2631 | // Create calls for poisoning before initializers run and unpoisoning after. | ||||
2632 | if (HasDynamicallyInitializedGlobals) | ||||
2633 | createInitializerPoisonCalls(M, ModuleName); | ||||
2634 | |||||
2635 | LLVM_DEBUG(dbgs() << M); | ||||
2636 | return true; | ||||
2637 | } | ||||
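// To illustrate the overall transformation (a hand-written sketch assuming a
// 32-byte minimum redzone, not actual compiler output):
//
//   @g = global [4 x i8] c"abc\00"
//
// becomes roughly
//
//   @g = global { [4 x i8], [28 x i8] }
//            { [4 x i8] c"abc\00", [28 x i8] zeroinitializer }, align 32
//
// with every former use of @g rewritten to a getelementptr selecting the
// first field, and one descriptor for @g registered with the runtime by the
// generated module constructor.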
2638 | |||||
2639 | uint64_t | ||||
2640 | ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const { | ||||
2641 | constexpr uint64_t kMaxRZ = 1 << 18; | ||||
2642 | const uint64_t MinRZ = getMinRedzoneSizeForGlobal(); | ||||
2643 | |||||
2644 | uint64_t RZ = 0; | ||||
2645 | if (SizeInBytes <= MinRZ / 2) { | ||||
2646 | // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is | ||||
2647 | // at least 32 bytes, optimize when SizeInBytes is less than or equal to | ||||
2648 | // half of MinRZ. | ||||
2649 | RZ = MinRZ - SizeInBytes; | ||||
2650 | } else { | ||||
2651 | // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes. | ||||
2652 | RZ = std::max(MinRZ, std::min(kMaxRZ, (SizeInBytes / MinRZ / 4) * MinRZ)); | ||||
2653 | |||||
2654 | // Round up to multiple of MinRZ. | ||||
2655 | if (SizeInBytes % MinRZ) | ||||
2656 | RZ += MinRZ - (SizeInBytes % MinRZ); | ||||
2657 | } | ||||
2658 | |||||
2659 | assert((RZ + SizeInBytes) % MinRZ == 0); | ||||
2660 | |||||
2661 | return RZ; | ||||
2662 | } | ||||
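// Two worked examples, assuming the common minimum redzone of 32 bytes:
//  - SizeInBytes = 10: 10 <= 32 / 2, so RZ = 32 - 10 = 22 and the padded
//    object occupies exactly one 32-byte granule.
//  - SizeInBytes = 100: RZ = max(32, min(1 << 18, (100 / 32 / 4) * 32))
//    = max(32, 0) = 32; the remainder 100 % 32 == 4 then adds 32 - 4 = 28,
//    giving RZ = 60, and 100 + 60 == 160 is again a multiple of 32.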
2663 | |||||
2664 | int ModuleAddressSanitizer::GetAsanVersion(const Module &M) const { | ||||
2665 | int LongSize = M.getDataLayout().getPointerSizeInBits(); | ||||
2666 | bool isAndroid = Triple(M.getTargetTriple()).isAndroid(); | ||||
2667 | int Version = 8; | ||||
2668 | // 32-bit Android is one version ahead because of the switch to dynamic | ||||
2669 | // shadow. | ||||
2670 | Version += (LongSize == 32 && isAndroid); | ||||
2671 | return Version; | ||||
2672 | } | ||||
2673 | |||||
2674 | bool ModuleAddressSanitizer::instrumentModule(Module &M) { | ||||
2675 | initializeCallbacks(M); | ||||
2676 | |||||
2677 | // Create a module constructor. A destructor is created lazily because not | ||||
2678 | // all platforms and not all modules need it. | ||||
2679 | if (CompileKernel) { | ||||
2680 | // The kernel always builds with its own runtime, and therefore does not | ||||
2681 | // need the init and version check calls. | ||||
2682 | AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName); | ||||
2683 | } else { | ||||
2684 | std::string AsanVersion = std::to_string(GetAsanVersion(M)); | ||||
2685 | std::string VersionCheckName = | ||||
2686 | ClInsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : ""; | ||||
2687 | std::tie(AsanCtorFunction, std::ignore) = | ||||
2688 | createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName, | ||||
2689 | kAsanInitName, /*InitArgTypes=*/{}, | ||||
2690 | /*InitArgs=*/{}, VersionCheckName); | ||||
2691 | } | ||||
2692 | |||||
2693 | bool CtorComdat = true; | ||||
2694 | if (ClGlobals) { | ||||
2695 | IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator()); | ||||
2696 | InstrumentGlobals(IRB, M, &CtorComdat); | ||||
2697 | } | ||||
2698 | |||||
2699 | const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple); | ||||
2700 | |||||
2701 | // Put the constructor and destructor in comdat if both | ||||
2702 | // (1) global instrumentation is not TU-specific | ||||
2703 | // (2) target is ELF. | ||||
2704 | if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) { | ||||
2705 | AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName)); | ||||
2706 | appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction); | ||||
2707 | if (AsanDtorFunction) { | ||||
2708 | AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName)); | ||||
2709 | appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction); | ||||
2710 | } | ||||
2711 | } else { | ||||
2712 | appendToGlobalCtors(M, AsanCtorFunction, Priority); | ||||
2713 | if (AsanDtorFunction) | ||||
2714 | appendToGlobalDtors(M, AsanDtorFunction, Priority); | ||||
2715 | } | ||||
2716 | |||||
2717 | return true; | ||||
2718 | } | ||||
2719 | |||||
2720 | void AddressSanitizer::initializeCallbacks(Module &M) { | ||||
2721 | IRBuilder<> IRB(*C); | ||||
2722 | // Create __asan_report* callbacks. | ||||
2723 | // IsWrite, TypeSize and Exp are encoded in the function name. | ||||
2724 | for (int Exp = 0; Exp < 2; Exp++) { | ||||
2725 | for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) { | ||||
2726 | const std::string TypeStr = AccessIsWrite ? "store" : "load"; | ||||
2727 | const std::string ExpStr = Exp ? "exp_" : ""; | ||||
2728 | const std::string EndingStr = Recover ? "_noabort" : ""; | ||||
2729 | |||||
2730 | SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy}; | ||||
2731 | SmallVector<Type *, 2> Args1{1, IntptrTy}; | ||||
2732 | if (Exp) { | ||||
2733 | Type *ExpType = Type::getInt32Ty(*C); | ||||
2734 | Args2.push_back(ExpType); | ||||
2735 | Args1.push_back(ExpType); | ||||
2736 | } | ||||
2737 | AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction( | ||||
2738 | kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr, | ||||
2739 | FunctionType::get(IRB.getVoidTy(), Args2, false)); | ||||
2740 | |||||
2741 | AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction( | ||||
2742 | ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr, | ||||
2743 | FunctionType::get(IRB.getVoidTy(), Args2, false)); | ||||
2744 | |||||
2745 | for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; | ||||
2746 | AccessSizeIndex++) { | ||||
2747 | const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex); | ||||
2748 | AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] = | ||||
2749 | M.getOrInsertFunction( | ||||
2750 | kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr, | ||||
2751 | FunctionType::get(IRB.getVoidTy(), Args1, false)); | ||||
2752 | |||||
2753 | AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] = | ||||
2754 | M.getOrInsertFunction( | ||||
2755 | ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr, | ||||
2756 | FunctionType::get(IRB.getVoidTy(), Args1, false)); | ||||
2757 | } | ||||
2758 | } | ||||
2759 | } | ||||
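// With the default "__asan_" callback prefix and the "__asan_report_" error
// template, the loops above produce names such as __asan_report_load4,
// __asan_report_store_n, __asan_load8 and __asan_storeN, adding an "exp_"
// infix for the experimental (Exp != 0) flavors and a "_noabort" suffix in
// recover mode.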
2760 | |||||
2761 | const std::string MemIntrinCallbackPrefix = | ||||
2762 | CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix; | ||||
2763 | AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove", | ||||
2764 | IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), | ||||
2765 | IRB.getInt8PtrTy(), IntptrTy); | ||||
2766 | AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", | ||||
2767 | IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), | ||||
2768 | IRB.getInt8PtrTy(), IntptrTy); | ||||
2769 | AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset", | ||||
2770 | IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), | ||||
2771 | IRB.getInt32Ty(), IntptrTy); | ||||
2772 | |||||
2773 | AsanHandleNoReturnFunc = | ||||
2774 | M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy()); | ||||
2775 | |||||
2776 | AsanPtrCmpFunction = | ||||
2777 | M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy); | ||||
2778 | AsanPtrSubFunction = | ||||
2779 | M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy); | ||||
2780 | if (Mapping.InGlobal) | ||||
2781 | AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow", | ||||
2782 | ArrayType::get(IRB.getInt8Ty(), 0)); | ||||
2783 | |||||
2784 | AMDGPUAddressShared = M.getOrInsertFunction( | ||||
2785 | kAMDGPUAddressSharedName, IRB.getInt1Ty(), IRB.getInt8PtrTy()); | ||||
2786 | AMDGPUAddressPrivate = M.getOrInsertFunction( | ||||
2787 | kAMDGPUAddressPrivateName, IRB.getInt1Ty(), IRB.getInt8PtrTy()); | ||||
2788 | } | ||||
2789 | |||||
2790 | bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) { | ||||
2791 | // For each NSObject descendant having a +load method, this method is invoked | ||||
2792 | // by the ObjC runtime before any of the static constructors is called. | ||||
2793 | // Therefore we need to instrument such methods with a call to __asan_init | ||||
2794 | // at the beginning in order to initialize our runtime before any access to | ||||
2795 | // the shadow memory. | ||||
2796 | // We cannot just ignore these methods, because they may call other | ||||
2797 | // instrumented functions. | ||||
2798 | if (F.getName().find(" load]") != std::string::npos) { | ||||
2799 | FunctionCallee AsanInitFunction = | ||||
2800 | declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {}); | ||||
2801 | IRBuilder<> IRB(&F.front(), F.front().begin()); | ||||
2802 | IRB.CreateCall(AsanInitFunction, {}); | ||||
2803 | return true; | ||||
2804 | } | ||||
2805 | return false; | ||||
2806 | } | ||||
2807 | |||||
2808 | bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) { | ||||
2809 | // Generate code only when dynamic addressing is needed. | ||||
2810 | if (Mapping.Offset != kDynamicShadowSentinel) | ||||
2811 | return false; | ||||
2812 | |||||
2813 | IRBuilder<> IRB(&F.front().front()); | ||||
2814 | if (Mapping.InGlobal) { | ||||
2815 | if (ClWithIfuncSuppressRemat) { | ||||
2816 | // An empty inline asm with input reg == output reg. | ||||
2817 | // An opaque pointer-to-int cast, basically. | ||||
2818 | InlineAsm *Asm = InlineAsm::get( | ||||
2819 | FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false), | ||||
2820 | StringRef(""), StringRef("=r,0"), | ||||
2821 | /*hasSideEffects=*/false); | ||||
2822 | LocalDynamicShadow = | ||||
2823 | IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow"); | ||||
2824 | } else { | ||||
2825 | LocalDynamicShadow = | ||||
2826 | IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow"); | ||||
2827 | } | ||||
2828 | } else { | ||||
2829 | Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal( | ||||
2830 | kAsanShadowMemoryDynamicAddress, IntptrTy); | ||||
2831 | LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress); | ||||
2832 | } | ||||
2833 | return true; | ||||
2834 | } | ||||
2835 | |||||
2836 | void AddressSanitizer::markEscapedLocalAllocas(Function &F) { | ||||
2837 | // Find the one possible call to llvm.localescape and pre-mark allocas passed | ||||
2838 | // to it as uninteresting. This assumes we haven't started processing allocas | ||||
2839 | // yet. This check is done up front because iterating the use list in | ||||
2840 | // isInterestingAlloca would be algorithmically slower. | ||||
2841 | assert(ProcessedAllocas.empty() && "must process localescape before allocas"); | ||||
2842 | |||||
2843 | // Try to get the declaration of llvm.localescape. If it's not in the module, | ||||
2844 | // we can exit early. | ||||
2845 | if (!F.getParent()->getFunction("llvm.localescape")) return; | ||||
2846 | |||||
2847 | // Look for a call to llvm.localescape in the entry block. It can't be in | ||||
2848 | // any other block. | ||||
2849 | for (Instruction &I : F.getEntryBlock()) { | ||||
2850 | IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I); | ||||
2851 | if (II && II->getIntrinsicID() == Intrinsic::localescape) { | ||||
2852 | // We found a call. Mark all the allocas passed in as uninteresting. | ||||
2853 | for (Value *Arg : II->args()) { | ||||
2854 | AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts()); | ||||
2855 | assert(AI && AI->isStaticAlloca() && | ||||
2856 |        "non-static alloca arg to localescape"); | ||||
2857 | ProcessedAllocas[AI] = false; | ||||
2858 | } | ||||
2859 | break; | ||||
2860 | } | ||||
2861 | } | ||||
2862 | } | ||||
2863 | |||||
2864 | bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) { | ||||
2865 | bool ShouldInstrument = | ||||
2866 | ClDebugMin < 0 || ClDebugMax < 0 || | ||||
2867 | (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax); | ||||
2868 | Instrumented++; | ||||
2869 | return !ShouldInstrument; | ||||
2870 | } | ||||
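// Usage sketch: ClDebugMin/ClDebugMax default to negative values, which
// disables the filter; setting both restricts instrumentation to the sites
// whose running index falls inside [ClDebugMin, ClDebugMax], a cheap way to
// bisect which individual instrumentation site triggers a problem.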
2871 | |||||
2872 | bool AddressSanitizer::instrumentFunction(Function &F, | ||||
2873 | const TargetLibraryInfo *TLI) { | ||||
2874 | if (F.empty()) | ||||
2875 | return false; | ||||
2876 | if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false; | ||||
2877 | if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) return false; | ||||
2878 | if (F.getName().startswith("__asan_")) return false; | ||||
2879 | |||||
2880 | bool FunctionModified = false; | ||||
2881 | |||||
2882 | // If needed, insert __asan_init before checking for SanitizeAddress attr. | ||||
2883 | // This function needs to be called even if the function body is not | ||||
2884 | // instrumented. | ||||
2885 | if (maybeInsertAsanInitAtFunctionEntry(F)) | ||||
2886 | FunctionModified = true; | ||||
2887 | |||||
2888 | // Leave if the function doesn't need instrumentation. | ||||
2889 | if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified; | ||||
2890 | |||||
2891 | LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n"); | ||||
2892 | |||||
2893 | initializeCallbacks(*F.getParent()); | ||||
2894 | |||||
2895 | FunctionStateRAII CleanupObj(this); | ||||
2896 | |||||
2897 | FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F); | ||||
2898 | |||||
2899 | // We can't instrument allocas used with llvm.localescape. Only static allocas | ||||
2900 | // can be passed to that intrinsic. | ||||
2901 | markEscapedLocalAllocas(F); | ||||
2902 | |||||
2903 | // We want to instrument every address only once per basic block (unless there | ||||
2904 | // are calls between uses). | ||||
2905 | SmallPtrSet<Value *, 16> TempsToInstrument; | ||||
2906 | SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument; | ||||
2907 | SmallVector<MemIntrinsic *, 16> IntrinToInstrument; | ||||
2908 | SmallVector<Instruction *, 8> NoReturnCalls; | ||||
2909 | SmallVector<BasicBlock *, 16> AllBlocks; | ||||
2910 | SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts; | ||||
2911 | int NumAllocas = 0; | ||||
2912 | |||||
2913 | // Fill the set of memory operations to instrument. | ||||
2914 | for (auto &BB : F) { | ||||
2915 | AllBlocks.push_back(&BB); | ||||
2916 | TempsToInstrument.clear(); | ||||
2917 | int NumInsnsPerBB = 0; | ||||
2918 | for (auto &Inst : BB) { | ||||
2919 | if (LooksLikeCodeInBug11395(&Inst)) return false; | ||||
2920 | SmallVector<InterestingMemoryOperand, 1> InterestingOperands; | ||||
2921 | getInterestingMemoryOperands(&Inst, InterestingOperands); | ||||
2922 | |||||
2923 | if (!InterestingOperands.empty()) { | ||||
2924 | for (auto &Operand : InterestingOperands) { | ||||
2925 | if (ClOpt && ClOptSameTemp) { | ||||
2926 | Value *Ptr = Operand.getPtr(); | ||||
2927 | // If we have a mask, skip instrumentation if we've already | ||||
2928 | // instrumented the full object. But don't add to TempsToInstrument | ||||
2929 | // because we might get another load/store with a different mask. | ||||
2930 | if (Operand.MaybeMask) { | ||||
2931 | if (TempsToInstrument.count(Ptr)) | ||||
2932 | continue; // We've seen this (whole) temp in the current BB. | ||||
2933 | } else { | ||||
2934 | if (!TempsToInstrument.insert(Ptr).second) | ||||
2935 | continue; // We've seen this temp in the current BB. | ||||
2936 | } | ||||
2937 | } | ||||
2938 | OperandsToInstrument.push_back(Operand); | ||||
2939 | NumInsnsPerBB++; | ||||
2940 | } | ||||
2941 | } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) && | ||||
2942 | isInterestingPointerComparison(&Inst)) || | ||||
2943 | ((ClInvalidPointerPairs || ClInvalidPointerSub) && | ||||
2944 | isInterestingPointerSubtraction(&Inst))) { | ||||
2945 | PointerComparisonsOrSubtracts.push_back(&Inst); | ||||
2946 | } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) { | ||||
2947 | // ok, take it. | ||||
2948 | IntrinToInstrument.push_back(MI); | ||||
2949 | NumInsnsPerBB++; | ||||
2950 | } else { | ||||
2951 | if (isa<AllocaInst>(Inst)) NumAllocas++; | ||||
2952 | if (auto *CB = dyn_cast<CallBase>(&Inst)) { | ||||
2953 | // A call inside BB. | ||||
2954 | TempsToInstrument.clear(); | ||||
2955 | if (CB->doesNotReturn() && !CB->hasMetadata("nosanitize")) | ||||
2956 | NoReturnCalls.push_back(CB); | ||||
2957 | } | ||||
2958 | if (CallInst *CI = dyn_cast<CallInst>(&Inst)) | ||||
2959 | maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI); | ||||
2960 | } | ||||
2961 | if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break; | ||||
2962 | } | ||||
2963 | } | ||||
2964 | |||||
2965 | bool UseCalls = (ClInstrumentationWithCallsThreshold >= 0 && | ||||
2966 | OperandsToInstrument.size() + IntrinToInstrument.size() > | ||||
2967 | (unsigned)ClInstrumentationWithCallsThreshold); | ||||
2968 | const DataLayout &DL = F.getParent()->getDataLayout(); | ||||
2969 | ObjectSizeOpts ObjSizeOpts; | ||||
2970 | ObjSizeOpts.RoundToAlign = true; | ||||
2971 | ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(), ObjSizeOpts); | ||||
2972 | |||||
2973 | // Instrument. | ||||
2974 | int NumInstrumented = 0; | ||||
2975 | for (auto &Operand : OperandsToInstrument) { | ||||
2976 | if (!suppressInstrumentationSiteForDebug(NumInstrumented)) | ||||
2977 | instrumentMop(ObjSizeVis, Operand, UseCalls, | ||||
2978 | F.getParent()->getDataLayout()); | ||||
2979 | FunctionModified = true; | ||||
2980 | } | ||||
2981 | for (auto Inst : IntrinToInstrument) { | ||||
2982 | if (!suppressInstrumentationSiteForDebug(NumInstrumented)) | ||||
2983 | instrumentMemIntrinsic(Inst); | ||||
2984 | FunctionModified = true; | ||||
2985 | } | ||||
2986 | |||||
2987 | FunctionStackPoisoner FSP(F, *this); | ||||
2988 | bool ChangedStack = FSP.runOnFunction(); | ||||
2989 | |||||
2990 | // We must unpoison the stack before NoReturn calls (throw, _exit, etc). | ||||
2991 | // See e.g. https://github.com/google/sanitizers/issues/37 | ||||
2992 | for (auto CI : NoReturnCalls) { | ||||
2993 | IRBuilder<> IRB(CI); | ||||
2994 | IRB.CreateCall(AsanHandleNoReturnFunc, {}); | ||||
2995 | } | ||||
2996 | |||||
2997 | for (auto Inst : PointerComparisonsOrSubtracts) { | ||||
2998 | instrumentPointerComparisonOrSubtraction(Inst); | ||||
2999 | FunctionModified = true; | ||||
3000 | } | ||||
3001 | |||||
3002 | if (ChangedStack || !NoReturnCalls.empty()) | ||||
3003 | FunctionModified = true; | ||||
3004 | |||||
3005 | LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " " | ||||
3006 |                   << F << "\n"); | ||||
3007 | |||||
3008 | return FunctionModified; | ||||
3009 | } | ||||
3010 | |||||
3011 | // Workaround for bug 11395: we don't want to instrument the stack in functions | ||||
3012 | // with large assembly blobs (32-bit only), otherwise reg alloc may crash. | ||||
3013 | // FIXME: remove once the bug 11395 is fixed. | ||||
3014 | bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) { | ||||
3015 | if (LongSize != 32) return false; | ||||
3016 | CallInst *CI = dyn_cast<CallInst>(I); | ||||
3017 | if (!CI || !CI->isInlineAsm()) return false; | ||||
3018 | if (CI->arg_size() <= 5) | ||||
3019 | return false; | ||||
3020 | // We have inline assembly with quite a few arguments. | ||||
3021 | return true; | ||||
3022 | } | ||||
3023 | |||||
void FunctionStackPoisoner::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always ||
      ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
    const char *MallocNameTemplate =
        ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always
            ? kAsanStackMallocAlwaysNameTemplate
            : kAsanStackMallocNameTemplate;
    for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) {
      std::string Suffix = itostr(Index);
      AsanStackMallocFunc[Index] = M.getOrInsertFunction(
          MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
      AsanStackFreeFunc[Index] =
          M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
                                IRB.getVoidTy(), IntptrTy, IntptrTy);
    }
  }
  if (ASan.UseAfterScope) {
    AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
        kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
    AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
        kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
  }

  for (size_t Val : {0x00, 0xf1, 0xf2, 0xf3, 0xf5, 0xf8}) {
    std::ostringstream Name;
    Name << kAsanSetShadowPrefix;
    Name << std::setw(2) << std::setfill('0') << std::hex << Val;
    AsanSetShadowFunc[Val] =
        M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
  }

  AsanAllocaPoisonFunc = M.getOrInsertFunction(
      kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
  AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
      kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
}

void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                                               ArrayRef<uint8_t> ShadowBytes,
                                               size_t Begin, size_t End,
                                               IRBuilder<> &IRB,
                                               Value *ShadowBase) {
  if (Begin >= End)
    return;

  const size_t LargestStoreSizeInBytes =
      std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);

  const bool IsLittleEndian = F.getParent()->getDataLayout().isLittleEndian();

  // Poison the given range in shadow using the largest store size that fits,
  // without leading and trailing zeros in ShadowMask. Zeros never change, so
  // they need neither poisoning nor un-poisoning. Still, we don't mind if
  // some of them end up in the middle of a store.
  for (size_t i = Begin; i < End;) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      ++i;
      continue;
    }

    size_t StoreSizeInBytes = LargestStoreSizeInBytes;
    // Fit store size into the range.
    while (StoreSizeInBytes > End - i)
      StoreSizeInBytes /= 2;

    // Minimize store size by trimming trailing zeros.
    for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
      while (j <= StoreSizeInBytes / 2)
        StoreSizeInBytes /= 2;
    }

    uint64_t Val = 0;
    for (size_t j = 0; j < StoreSizeInBytes; j++) {
      if (IsLittleEndian)
        Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
      else
        Val = (Val << 8) | ShadowBytes[i + j];
    }

    Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
    Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
    IRB.CreateAlignedStore(
        Poison, IRB.CreateIntToPtr(Ptr, Poison->getType()->getPointerTo()),
        Align(1));

    i += StoreSizeInBytes;
  }
}
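
// Worked example (little-endian target, 8-byte words): for ShadowBytes
// {0xf1, 0xf2, 0xf3, 0xf5} with all four mask bytes set, StoreSizeInBytes is
// halved from 8 to 4 to fit the range, and the loop above emits a single
// unaligned 4-byte store of 0xf5f3f2f1 at ShadowBase + 0 instead of four
// 1-byte stores.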

void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
                                         ArrayRef<uint8_t> ShadowBytes,
                                         IRBuilder<> &IRB, Value *ShadowBase) {
  copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
}

void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
                                         ArrayRef<uint8_t> ShadowBytes,
                                         size_t Begin, size_t End,
                                         IRBuilder<> &IRB, Value *ShadowBase) {
  assert(ShadowMask.size() == ShadowBytes.size());
  size_t Done = Begin;
  for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      continue;
    }
    uint8_t Val = ShadowBytes[i];
    if (!AsanSetShadowFunc[Val])
      continue;

    // Skip same values.
    for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
    }

    if (j - i >= ClMaxInlinePoisoningSize) {
      copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
      IRB.CreateCall(AsanSetShadowFunc[Val],
                     {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
                      ConstantInt::get(IntptrTy, j - i)});
      Done = j;
    }
  }

  copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
}
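
// In other words: long runs of one shadow value (at least
// ClMaxInlinePoisoningSize bytes; this is a cl::opt, 64 by default) are
// handed to the matching __asan_set_shadow_<XX> runtime callback, and the
// stretches between such runs are emitted as inline stores by
// copyToShadowInline.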

// Fake stack allocator (asan_fake_stack.h) has 11 size classes
// for every power of 2 from kMinStackMallocSize to kMaxAsanStackMallocSizeClass
static int StackMallocSizeClass(uint64_t LocalStackSize) {
  assert(LocalStackSize <= kMaxStackMallocSize);
  uint64_t MaxSize = kMinStackMallocSize;
  for (int i = 0;; i++, MaxSize *= 2)
    if (LocalStackSize <= MaxSize) return i;
  llvm_unreachable("impossible LocalStackSize");
}
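
// For example, assuming kMinStackMallocSize == 64: frames of up to 64 bytes
// map to class 0, 65..128 bytes to class 1, and so on, doubling up to class
// 10 (kMaxAsanStackMallocSizeClass) for frames of up to 64 KiB.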

void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
  Instruction *CopyInsertPoint = &F.front().front();
  if (CopyInsertPoint == ASan.LocalDynamicShadow) {
    // Insert after the dynamic shadow location is determined
    CopyInsertPoint = CopyInsertPoint->getNextNode();
    assert(CopyInsertPoint);
  }
  IRBuilder<> IRB(CopyInsertPoint);
  const DataLayout &DL = F.getParent()->getDataLayout();
  for (Argument &Arg : F.args()) {
    if (Arg.hasByValAttr()) {
      Type *Ty = Arg.getParamByValType();
      const Align Alignment =
          DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);

      AllocaInst *AI = IRB.CreateAlloca(
          Ty, nullptr,
          (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
              ".byval");
      AI->setAlignment(Alignment);
      Arg.replaceAllUsesWith(AI);

      uint64_t AllocSize = DL.getTypeAllocSize(Ty);
      IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
    }
  }
}
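
// Illustrative effect on the IR (a sketch, not the exact output): a byval
// argument
//   define void @f(ptr byval(%struct.S) align 8 %s)
// gains, at the top of the entry block,
//   %s.byval = alloca %struct.S, align 8
//   call void @llvm.memcpy...(ptr %s.byval, ptr %s, i64 <sizeof S>, ...)
// and all prior uses of %s are rewritten to %s.byval, so the private copy
// (which the poisoner can surround with redzones) is what the function
// actually touches.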

PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
                                          Value *ValueIfTrue,
                                          Instruction *ThenTerm,
                                          Value *ValueIfFalse) {
  PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
  BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
  PHI->addIncoming(ValueIfFalse, CondBlock);
  BasicBlock *ThenBlock = ThenTerm->getParent();
  PHI->addIncoming(ValueIfTrue, ThenBlock);
  return PHI;
}

Value *FunctionStackPoisoner::createAllocaForLayout(
    IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
  AllocaInst *Alloca;
  if (Dynamic) {
    Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
                              ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
                              "MyAlloca");
  } else {
    Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
                              nullptr, "MyAlloca");
    assert(Alloca->isStaticAlloca());
  }
  assert((ClRealignStack & (ClRealignStack - 1)) == 0);
  uint64_t FrameAlignment =
      std::max(L.FrameAlignment, uint64_t(ClRealignStack));
  Alloca->setAlignment(Align(FrameAlignment));
  return IRB.CreatePointerCast(Alloca, IntptrTy);
}

void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
  BasicBlock &FirstBB = *F.begin();
  IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
  DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
  IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
  DynamicAllocaLayout->setAlignment(Align(32));
}

void FunctionStackPoisoner::processDynamicAllocas() {
  if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
    assert(DynamicAllocaPoisonCallVec.empty());
    return;
  }

  // Insert poison calls for lifetime intrinsics for dynamic allocas.
  for (const auto &APC : DynamicAllocaPoisonCallVec) {
    assert(APC.InsBefore);
    assert(APC.AI);
    assert(ASan.isInterestingAlloca(*APC.AI));
    assert(!APC.AI->isStaticAlloca());

    IRBuilder<> IRB(APC.InsBefore);
    poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
    // No matching unpoison is needed here: dynamic allocas are unpoisoned
    // unconditionally below in unpoisonDynamicAllocas.
  }

  // Handle dynamic allocas.
  createDynamicAllocasInitStorage();
  for (auto &AI : DynamicAllocaVec)
    handleDynamicAllocaCall(AI);
  unpoisonDynamicAllocas();
}

/// Collect instructions in the entry block after \p InsBefore which initialize
/// permanent storage for a function argument. These instructions must remain in
/// the entry block so that uninitialized values do not appear in backtraces. An
/// added benefit is that this conserves spill slots. This does not move stores
/// before instrumented / "interesting" allocas.
static void findStoresToUninstrumentedArgAllocas(
    AddressSanitizer &ASan, Instruction &InsBefore,
    SmallVectorImpl<Instruction *> &InitInsts) {
  Instruction *Start = InsBefore.getNextNonDebugInstruction();
  for (Instruction *It = Start; It; It = It->getNextNonDebugInstruction()) {
    // Argument initialization looks like:
    // 1) store <Argument>, <Alloca> OR
    // 2) <CastArgument> = cast <Argument> to ...
    //    store <CastArgument> to <Alloca>
    // Do not consider any other kind of instruction.
    //
    // Note: This covers all known cases, but may not be exhaustive. An
    // alternative to pattern-matching stores is to DFS over all Argument uses:
    // this might be more general, but is probably much more complicated.
    if (isa<AllocaInst>(It) || isa<CastInst>(It))
      continue;
    if (auto *Store = dyn_cast<StoreInst>(It)) {
      // The store destination must be an alloca that isn't interesting for
      // ASan to instrument. These are moved up before InsBefore, and they're
      // not interesting because allocas for arguments can be mem2reg'd.
      auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
      if (!Alloca || ASan.isInterestingAlloca(*Alloca))
        continue;

      Value *Val = Store->getValueOperand();
      bool IsDirectArgInit = isa<Argument>(Val);
      bool IsArgInitViaCast =
          isa<CastInst>(Val) &&
          isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
          // Check that the cast appears directly before the store. Otherwise
          // moving the cast before InsBefore may break the IR.
          Val == It->getPrevNonDebugInstruction();
      bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
      if (!IsArgInit)
        continue;

      if (IsArgInitViaCast)
        InitInsts.push_back(cast<Instruction>(Val));
      InitInsts.push_back(Store);
      continue;
    }

    // Do not reorder past unknown instructions: argument initialization should
    // only involve casts and stores.
    return;
  }
}
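
// Illustrative match for pattern (2) above, in entry-block IR (sketch):
//   %x.addr = alloca i64            ; not "interesting" to ASan
//   %conv = sext i32 %x to i64      ; cast of an Argument, right before...
//   store i64 %conv, ptr %x.addr    ; ...the store that initializes it
// Both %conv and the store would be appended to InitInsts.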

void FunctionStackPoisoner::processStaticAllocas() {
  if (AllocaVec.empty()) {
    assert(StaticAllocaPoisonCallVec.empty());
    return;
  }

  int StackMallocIdx = -1;
  DebugLoc EntryDebugLocation;
  if (auto SP = F.getSubprogram())
    EntryDebugLocation =
        DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP);

  Instruction *InsBefore = AllocaVec[0];
  IRBuilder<> IRB(InsBefore);

  // Make sure non-instrumented allocas stay in the entry block. Otherwise,
  // debug info is broken, because only entry-block allocas are treated as
  // regular stack slots.
  auto InsBeforeB = InsBefore->getParent();
  assert(InsBeforeB == &F.getEntryBlock());
  for (auto *AI : StaticAllocasToMoveUp)
    if (AI->getParent() == InsBeforeB)
      AI->moveBefore(InsBefore);

  // Move stores of arguments into entry-block allocas as well. This prevents
  // extra stack slots from being generated (to house the argument values until
  // they can be stored into the allocas). This also prevents uninitialized
  // values from being shown in backtraces.
  SmallVector<Instruction *, 8> ArgInitInsts;
  findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts);
  for (Instruction *ArgInitInst : ArgInitInsts)
    ArgInitInst->moveBefore(InsBefore);

  // If we have a call to llvm.localescape, keep it in the entry block.
  if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore);

  SmallVector<ASanStackVariableDescription, 16> SVD;
  SVD.reserve(AllocaVec.size());
  for (AllocaInst *AI : AllocaVec) {
    ASanStackVariableDescription D = {AI->getName().data(),
                                      ASan.getAllocaSizeInBytes(*AI),
                                      0,
                                      AI->getAlignment(),
                                      AI,
                                      0,
                                      0};
    SVD.push_back(D);
  }

  // Minimal header size (left redzone) is 4 pointers,
  // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
  uint64_t Granularity = 1ULL << Mapping.Scale;
  uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity);
  const ASanStackFrameLayout &L =
      ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);
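
  // The resulting layout interleaves the variables with redzones, roughly:
  //   [header / left redzone][var 1][redzone][var 2]...[right redzone]
  // spanning L.FrameSize bytes in total, with every boundary aligned to
  // Granularity (each shadow byte covers Granularity bytes of the frame).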

  // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
  DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap;
  for (auto &Desc : SVD)
    AllocaToSVDMap[Desc.AI] = &Desc;

  // Update SVD with information from lifetime intrinsics.
  for (const auto &APC : StaticAllocaPoisonCallVec) {
    assert(APC.InsBefore);
    assert(APC.AI);
    assert(ASan.isInterestingAlloca(*APC.AI));
    assert(APC.AI->isStaticAlloca());

    ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
    Desc.LifetimeSize = Desc.Size;
    if (const DILocation *FnLoc = EntryDebugLocation.get()) {
      if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
        if (LifetimeLoc->getFile() == FnLoc->getFile())
          if (unsigned Line = LifetimeLoc->getLine())
            Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
      }
    }
  }

  auto DescriptionString = ComputeASanStackFrameDescription(SVD);
  LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
  uint64_t LocalStackSize = L.FrameSize;
  bool DoStackMalloc =
      ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never &&
      !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize;
  bool DoDynamicAlloca = ClDynamicAllocaStack;
  // Don't do dynamic alloca or stack malloc if:
  // 1) There is inline asm: too often it makes assumptions on which registers
  //    are available.
  // 2) There is a returns_twice call (typically setjmp), which is
  //    optimization-hostile, and doesn't play well with introduced indirect
  //    register-relative calculation of local variable addresses.
  DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
  DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;

  Value *StaticAlloca =
      DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);

  Value *FakeStack;
  Value *LocalStackBase;
  Value *LocalStackBaseAlloca;
  uint8_t DIExprFlags = DIExpression::ApplyOffset;

  if (DoStackMalloc) {
    LocalStackBaseAlloca =
        IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
    if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
      // void *FakeStack = __asan_option_detect_stack_use_after_return
      //     ? __asan_stack_malloc_N(LocalStackSize)
      //     : nullptr;
      // void *LocalStackBase = (FakeStack) ? FakeStack :
      //                        alloca(LocalStackSize);
      Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
          kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
      Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
          IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
          Constant::getNullValue(IRB.getInt32Ty()));
      Instruction *Term =
          SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
      IRBuilder<> IRBIf(Term);
      StackMallocIdx = StackMallocSizeClass(LocalStackSize);
      assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
      Value *FakeStackValue =
          IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx],
                           ConstantInt::get(IntptrTy, LocalStackSize));
      IRB.SetInsertPoint(InsBefore);
      FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
                            ConstantInt::get(IntptrTy, 0));
    } else {
      // assert(ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always)
      // void *FakeStack = __asan_stack_malloc_N(LocalStackSize);
      // void *LocalStackBase = (FakeStack) ? FakeStack :
      //                        alloca(LocalStackSize);
      StackMallocIdx = StackMallocSizeClass(LocalStackSize);
      FakeStack = IRB.CreateCall(AsanStackMallocFunc[StackMallocIdx],
                                 ConstantInt::get(IntptrTy, LocalStackSize));
    }
    Value *NoFakeStack =
        IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
    Instruction *Term =
        SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
    IRBuilder<> IRBIf(Term);
    Value *AllocaValue =
        DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;

    IRB.SetInsertPoint(InsBefore);
    LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
    IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
    DIExprFlags |= DIExpression::DerefBefore;
  } else {
    // void *FakeStack = nullptr;
    // void *LocalStackBase = alloca(LocalStackSize);
    FakeStack = ConstantInt::get(IntptrTy, 0);
    LocalStackBase =
        DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
    LocalStackBaseAlloca = LocalStackBase;
  }

  // It shouldn't matter whether we pass an `alloca` or a `ptrtoint` as the
  // dbg.declare address operand, but passing a `ptrtoint` seems to confuse
  // later passes and can result in dropped variable coverage in debug info.
  Value *LocalStackBaseAllocaPtr =
      isa<PtrToIntInst>(LocalStackBaseAlloca)
          ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand()
          : LocalStackBaseAlloca;
  assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) &&
         "Variable descriptions relative to ASan stack base will be dropped");

  // Replace Alloca instructions with base+offset.
  for (const auto &Desc : SVD) {
    AllocaInst *AI = Desc.AI;
    replaceDbgDeclare(AI, LocalStackBaseAllocaPtr, DIB, DIExprFlags,
                      Desc.Offset);
    Value *NewAllocaPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
        AI->getType());
    AI->replaceAllUsesWith(NewAllocaPtr);
  }

  // The left-most redzone has enough space for at least 4 pointers.
  // Write the Magic value to redzone[0].
  Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
  IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
                  BasePlus0);
  // Write the frame description constant to redzone[1].
  Value *BasePlus1 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase,
                    ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
      IntptrPtrTy);
  GlobalVariable *StackDescriptionGlobal =
      createPrivateGlobalForString(*F.getParent(), DescriptionString,
                                   /*AllowMerging*/ true, kAsanGenPrefix);
  Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
  IRB.CreateStore(Description, BasePlus1);
  // Write the PC to redzone[2].
  Value *BasePlus2 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase,
                    ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
      IntptrPtrTy);
  IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
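
  // At this point the frame header words are (with ws = ASan.LongSize / 8):
  //   base + 0*ws: kCurrentStackFrameMagic
  //   base + 1*ws: pointer to the frame description string
  //   base + 2*ws: address of the enclosing function
  // which the runtime can decode when reporting a stack bug in this frame.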

  const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);

  // Poison the stack red zones at the entry.
  Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
  // As the mask we must use the most poisoned case: red zones plus
  // after-scope bytes. As the bytes we can use either the same or just the
  // red zones.
  copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);

  if (!StaticAllocaPoisonCallVec.empty()) {
    const auto &ShadowInScope = GetShadowBytes(SVD, L);

    // Poison static allocas near lifetime intrinsics.
    for (const auto &APC : StaticAllocaPoisonCallVec) {
      const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
      assert(Desc.Offset % L.Granularity == 0);
      size_t Begin = Desc.Offset / L.Granularity;
      size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;

      IRBuilder<> IRB(APC.InsBefore);
      copyToShadow(ShadowAfterScope,
                   APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
                   IRB, ShadowBase);
    }
  }

  SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
  SmallVector<uint8_t, 64> ShadowAfterReturn;

  // (Un)poison the stack before all ret instructions.
  for (Instruction *Ret : RetVec) {
    IRBuilder<> IRBRet(Ret);
    // Mark the current frame as retired.
    IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
                       BasePlus0);
    if (DoStackMalloc) {
      assert(StackMallocIdx >= 0);
      // if FakeStack != 0  // LocalStackBase == FakeStack
      //     // In use-after-return mode, poison the whole stack frame.
      //     if StackMallocIdx <= 4
      //         // For small sizes inline the whole thing:
      //         memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
      //         **SavedFlagPtr(FakeStack) = 0
      //     else
      //         __asan_stack_free_N(FakeStack, LocalStackSize)
      // else
      //     <This is not a fake stack; unpoison the redzones>
      Value *Cmp =
          IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
      Instruction *ThenTerm, *ElseTerm;
      SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);

      IRBuilder<> IRBPoison(ThenTerm);
      if (StackMallocIdx <= 4) {
        int ClassSize = kMinStackMallocSize << StackMallocIdx;
        ShadowAfterReturn.resize(ClassSize / L.Granularity,
                                 kAsanStackUseAfterReturnMagic);
        copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
                     ShadowBase);
        Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
            FakeStack,
            ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
        Value *SavedFlagPtr = IRBPoison.CreateLoad(
            IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
        IRBPoison.CreateStore(
            Constant::getNullValue(IRBPoison.getInt8Ty()),
            IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
      } else {
        // For larger frames call __asan_stack_free_*.
        IRBPoison.CreateCall(
            AsanStackFreeFunc[StackMallocIdx],
            {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
      }

      IRBuilder<> IRBElse(ElseTerm);
      copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
    } else {
      copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
    }
  }

  // We are done. Remove the old unused alloca instructions.
  for (auto AI : AllocaVec) AI->eraseFromParent();
}

void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
                                         IRBuilder<> &IRB, bool DoPoison) {
  // For now just insert the call to ASan runtime.
  Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
  Value *SizeArg = ConstantInt::get(IntptrTy, Size);
  IRB.CreateCall(
      DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
      {AddrArg, SizeArg});
}

// Handling llvm.lifetime intrinsics for a given %alloca:
// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
//     invalid accesses) and unpoison it for llvm.lifetime.start (the memory
//     could be poisoned by a previous llvm.lifetime.end instruction, as the
//     variable may go in and out of scope several times, e.g. in loops).
// (3) if we poisoned at least one %alloca in a function,
//     unpoison the whole stack frame at function exit.
void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
  IRBuilder<> IRB(AI);

  const uint64_t Alignment = std::max(kAllocaRzSize, AI->getAlignment());
  const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;

  Value *Zero = Constant::getNullValue(IntptrTy);
  Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
  Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);

  // We need to extend the alloca with additional memory to place the
  // redzones. OldSize is the number of allocated blocks of ElementSize
  // bytes, so the allocated size in bytes is OldSize * ElementSize.
  const unsigned ElementSize =
      F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType());
  Value *OldSize =
      IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
                    ConstantInt::get(IntptrTy, ElementSize));

  // PartialSize = OldSize % 32
  Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);

  // Misalign = kAllocaRzSize - PartialSize;
  Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);

  // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
  Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
  Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);

  // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
  // Alignment is added to locate the left redzone, PartialPadding for a
  // possible partial redzone, and kAllocaRzSize for the right redzone.
  Value *AdditionalChunkSize = IRB.CreateAdd(
      ConstantInt::get(IntptrTy, Alignment + kAllocaRzSize), PartialPadding);

  Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);

  // Insert a new alloca with the new NewSize and Alignment params.
  AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
  NewAlloca->setAlignment(Align(Alignment));

  // NewAddress = Address + Alignment
  Value *NewAddress = IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
                                    ConstantInt::get(IntptrTy, Alignment));

  // Insert an __asan_alloca_poison call for the newly created alloca.
  IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});

  // Store the last alloca's address to DynamicAllocaLayout. We'll need it
  // for unpoisoning.
  IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);

  Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());

  // Replace all uses of the address returned by the original alloca with
  // NewAddressPtr.
  AI->replaceAllUsesWith(NewAddressPtr);

  // We are done. Erase the old alloca from its parent.
  AI->eraseFromParent();
}
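
// Worked example, assuming kAllocaRzSize == 32: an 8-byte-aligned dynamic
// alloca of 40 bytes gets Alignment = 32, PartialSize = 40 & 31 = 8,
// Misalign = 24, PartialPadding = 24, so NewSize = 40 + (32 + 32) + 24 = 128:
//   [32-byte left RZ][40 user bytes][24-byte partial RZ][32-byte right RZ]
// and __asan_alloca_poison(NewAddress, 40) poisons everything except the
// user bytes.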

// isSafeAccess returns true if Addr is always inbounds with respect to its
// base object. For example, it is a field access or an array access with
// constant inbounds index.
bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
                                    Value *Addr, uint64_t TypeSize) const {
  SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
  if (!ObjSizeVis.bothKnown(SizeOffset)) return false;
  uint64_t Size = SizeOffset.first.getZExtValue();
  int64_t Offset = SizeOffset.second.getSExtValue();
  // Three checks are required to ensure safety:
  // . Offset >= 0  (since the offset is given from the base ptr)
  // . Size >= Offset  (unsigned)
  // . Size - Offset >= NeededSize  (unsigned)
  return Offset >= 0 && Size >= uint64_t(Offset) &&
         Size - uint64_t(Offset) >= TypeSize / 8;
}
//===-- llvm/Instruction.h - Instruction class definition -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the Instruction class, which is the
// base class for all of the LLVM instructions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_INSTRUCTION_H
#define LLVM_IR_INSTRUCTION_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Bitfields.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/SymbolTableListTraits.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

namespace llvm {

class BasicBlock;
class FastMathFlags;
class MDNode;
class Module;
struct AAMDNodes;

template <> struct ilist_alloc_traits<Instruction> {
  static inline void deleteNode(Instruction *V);
};

class Instruction : public User,
                    public ilist_node_with_parent<Instruction, BasicBlock> {
  BasicBlock *Parent;
  DebugLoc DbgLoc; // 'dbg' Metadata cache.

  /// Relative order of this instruction in its parent basic block. Used for
  /// O(1) local dominance checks between instructions.
  mutable unsigned Order = 0;

protected:
  // The 15 first bits of `Value::SubclassData` are available for subclasses of
  // `Instruction` to use.
  using OpaqueField = Bitfield::Element<uint16_t, 0, 15>;

  // Template alias so that all Instructions storing alignment use the same
  // definition.
  // Valid alignments are powers of two from 2^0 to 2^MaxAlignmentExponent =
  // 2^32. We store them as Log2(Alignment), so we need 6 bits to encode the 33
  // possible values.
  template <unsigned Offset>
  using AlignmentBitfieldElementT =
      typename Bitfield::Element<unsigned, Offset, 6,
                                 Value::MaxAlignmentExponent>;
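
  // For example, an alignment of 1 << 30 bytes is stored as the field value
  // 30, and a stored field value of 0 decodes back to an alignment of 1.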

  template <unsigned Offset>
  using BoolBitfieldElementT = typename Bitfield::Element<bool, Offset, 1>;

  template <unsigned Offset>
  using AtomicOrderingBitfieldElementT =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

private:
  // The last bit is used to store whether the instruction has metadata
  // attached or not.
  using HasMetadataField = Bitfield::Element<bool, 15, 1>;

protected:
  ~Instruction(); // Use deleteValue() to delete a generic Instruction.

public:
  Instruction(const Instruction &) = delete;
  Instruction &operator=(const Instruction &) = delete;

  /// Specialize the methods defined in Value, as we know that an instruction
  /// can only be used by other instructions.
  Instruction *user_back() { return cast<Instruction>(*user_begin()); }
  const Instruction *user_back() const {
    return cast<Instruction>(*user_begin());
  }

  inline const BasicBlock *getParent() const { return Parent; }
  inline BasicBlock *getParent() { return Parent; }

  /// Return the module owning the function this instruction belongs to
  /// or nullptr if the function does not have a module.
  ///
  /// Note: this is undefined behavior if the instruction does not have a
  /// parent, or the parent basic block does not have a parent function.
  const Module *getModule() const;
  Module *getModule() {
    return const_cast<Module *>(
        static_cast<const Instruction *>(this)->getModule());
  }

  /// Return the function this instruction belongs to.
  ///
  /// Note: it is undefined behavior to call this on an instruction not
  /// currently inserted into a function.
  const Function *getFunction() const;
  Function *getFunction() {
    return const_cast<Function *>(
        static_cast<const Instruction *>(this)->getFunction());
  }

  /// This method unlinks 'this' from the containing basic block, but does not
  /// delete it.
  void removeFromParent();

  /// This method unlinks 'this' from the containing basic block and deletes it.
  ///
  /// \returns an iterator pointing to the element after the erased one
  SymbolTableList<Instruction>::iterator eraseFromParent();

  /// Insert an unlinked instruction into a basic block immediately before
  /// the specified instruction.
  void insertBefore(Instruction *InsertPos);

  /// Insert an unlinked instruction into a basic block immediately after the
  /// specified instruction.
  void insertAfter(Instruction *InsertPos);

  /// Unlink this instruction from its current basic block and insert it into
  /// the basic block that MovePos lives in, right before MovePos.
  void moveBefore(Instruction *MovePos);

  /// Unlink this instruction and insert into BB before I.
  ///
  /// \pre I is a valid iterator into BB.
  void moveBefore(BasicBlock &BB, SymbolTableList<Instruction>::iterator I);

  /// Unlink this instruction from its current basic block and insert it into
  /// the basic block that MovePos lives in, right after MovePos.
  void moveAfter(Instruction *MovePos);

  /// Given an instruction Other in the same basic block as this instruction,
  /// return true if this instruction comes before Other. In the worst case,
  /// this takes linear time in the number of instructions in the block. The
  /// results are cached, so in common cases when the block remains unmodified,
  /// it takes constant time.
  bool comesBefore(const Instruction *Other) const;

  //===--------------------------------------------------------------------===//
  // Subclass classification.
  //===--------------------------------------------------------------------===//

  /// Returns a member of one of the enums like Instruction::Add.
  unsigned getOpcode() const { return getValueID() - InstructionVal; }

  const char *getOpcodeName() const { return getOpcodeName(getOpcode()); }
  bool isTerminator() const { return isTerminator(getOpcode()); }
  bool isUnaryOp() const { return isUnaryOp(getOpcode()); }
  bool isBinaryOp() const { return isBinaryOp(getOpcode()); }
  bool isIntDivRem() const { return isIntDivRem(getOpcode()); }
  bool isShift() const { return isShift(getOpcode()); }
  bool isCast() const { return isCast(getOpcode()); }
  bool isFuncletPad() const { return isFuncletPad(getOpcode()); }
  bool isExceptionalTerminator() const {
    return isExceptionalTerminator(getOpcode());
  }

  /// It checks if this instruction is the only user of at least one of
  /// its operands.
  bool isOnlyUserOfAnyOperand();

  bool isIndirectTerminator() const {
    return isIndirectTerminator(getOpcode());
  }

  static const char *getOpcodeName(unsigned OpCode);

  static inline bool isTerminator(unsigned OpCode) {
    return OpCode >= TermOpsBegin && OpCode < TermOpsEnd;
  }

  static inline bool isUnaryOp(unsigned Opcode) {
    return Opcode >= UnaryOpsBegin && Opcode < UnaryOpsEnd;
  }
  static inline bool isBinaryOp(unsigned Opcode) {
    return Opcode >= BinaryOpsBegin && Opcode < BinaryOpsEnd;
  }

  static inline bool isIntDivRem(unsigned Opcode) {
    return Opcode == UDiv || Opcode == SDiv || Opcode == URem || Opcode == SRem;
  }

  /// Determine if the Opcode is one of the shift instructions.
  static inline bool isShift(unsigned Opcode) {
    return Opcode >= Shl && Opcode <= AShr;
  }

  /// Return true if this is a logical shift left or a logical shift right.
  inline bool isLogicalShift() const {
    return getOpcode() == Shl || getOpcode() == LShr;
  }

  /// Return true if this is an arithmetic shift right.
  inline bool isArithmeticShift() const {
    return getOpcode() == AShr;
  }

  /// Determine if the Opcode is and/or/xor.
  static inline bool isBitwiseLogicOp(unsigned Opcode) {
    return Opcode == And || Opcode == Or || Opcode == Xor;
  }

  /// Return true if this is and/or/xor.
  inline bool isBitwiseLogicOp() const {
    return isBitwiseLogicOp(getOpcode());
  }

  /// Determine if the OpCode is one of the CastInst instructions.
  static inline bool isCast(unsigned OpCode) {
    return OpCode >= CastOpsBegin && OpCode < CastOpsEnd;
  }

  /// Determine if the OpCode is one of the FuncletPadInst instructions.
  static inline bool isFuncletPad(unsigned OpCode) {
    return OpCode >= FuncletPadOpsBegin && OpCode < FuncletPadOpsEnd;
  }

  /// Returns true if the OpCode is a terminator related to exception handling.
  static inline bool isExceptionalTerminator(unsigned OpCode) {
    switch (OpCode) {
    case Instruction::CatchSwitch:
    case Instruction::CatchRet:
    case Instruction::CleanupRet:
    case Instruction::Invoke:
    case Instruction::Resume:
      return true;
    default:
      return false;
    }
  }

  /// Returns true if the OpCode is a terminator with indirect targets.
  static inline bool isIndirectTerminator(unsigned OpCode) {
    switch (OpCode) {
    case Instruction::IndirectBr:
    case Instruction::CallBr:
      return true;
    default:
      return false;
    }
  }

  //===--------------------------------------------------------------------===//
  // Metadata manipulation.
  //===--------------------------------------------------------------------===//

  /// Return true if this instruction has any metadata attached to it.
  bool hasMetadata() const { return DbgLoc || Value::hasMetadata(); }

  /// Return true if this instruction has metadata attached to it other than a
  /// debug location.
  bool hasMetadataOtherThanDebugLoc() const { return Value::hasMetadata(); }

  /// Return true if this instruction has the given type of metadata attached.
  bool hasMetadata(unsigned KindID) const {
    return getMetadata(KindID) != nullptr;
  }

  /// Return true if this instruction has the given type of metadata attached.
  bool hasMetadata(StringRef Kind) const {
    return getMetadata(Kind) != nullptr;
  }

  /// Get the metadata of given kind attached to this Instruction.
  /// If the metadata is not found then return null.
  MDNode *getMetadata(unsigned KindID) const {
    if (!hasMetadata()) return nullptr;
    return getMetadataImpl(KindID);
  }

  /// Get the metadata of given kind attached to this Instruction.
  /// If the metadata is not found then return null.
  MDNode *getMetadata(StringRef Kind) const {
    if (!hasMetadata()) return nullptr;
    return getMetadataImpl(Kind);
  }

  /// Get all metadata attached to this Instruction. The first element of each
  /// pair returned is the KindID, the second element is the metadata value.
  /// This list is returned sorted by the KindID.
  void
  getAllMetadata(SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const {
    if (hasMetadata())
      getAllMetadataImpl(MDs);
  }

  /// This does the same thing as getAllMetadata, except that it filters out
  /// the debug location.
  void getAllMetadataOtherThanDebugLoc(
      SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const {
    Value::getAllMetadata(MDs);
  }

  /// Set the metadata of the specified kind to the specified node. This updates
  /// or replaces metadata if already present, or removes it if Node is null.
  void setMetadata(unsigned KindID, MDNode *Node);
  void setMetadata(StringRef Kind, MDNode *Node);

  /// Copy metadata from \p SrcInst to this instruction. \p WL, if not empty,
  /// specifies the list of meta data that needs to be copied. If \p WL is
  /// empty, all meta data will be copied.
  void copyMetadata(const Instruction &SrcInst,
                    ArrayRef<unsigned> WL = ArrayRef<unsigned>());

  /// If the instruction has "branch_weights" MD_prof metadata and the MDNode
  /// has three operands (including name string), swap the order of the
  /// metadata.
  void swapProfMetadata();

  /// Drop all unknown metadata except for debug locations.
  /// @{
  /// Passes are required to drop metadata they don't understand. This is a
  /// convenience method for passes to do so.
  /// dropUndefImplyingAttrsAndUnknownMetadata should be used instead of
  /// this API if the Instruction being modified is a call.
  void dropUnknownNonDebugMetadata(ArrayRef<unsigned> KnownIDs);
  void dropUnknownNonDebugMetadata() {
    return dropUnknownNonDebugMetadata(None);
  }
  void dropUnknownNonDebugMetadata(unsigned ID1) {
    return dropUnknownNonDebugMetadata(makeArrayRef(ID1));
  }
  void dropUnknownNonDebugMetadata(unsigned ID1, unsigned ID2) {
    unsigned IDs[] = {ID1, ID2};
    return dropUnknownNonDebugMetadata(IDs);
  }
  /// @}
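
  // Usage sketch (hypothetical pass code): keep only !tbaa and drop every
  // other unknown, non-debug metadata kind on an instruction I:
  //   I.dropUnknownNonDebugMetadata(LLVMContext::MD_tbaa);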

  /// Adds an !annotation metadata node with \p Annotation to this instruction.
  /// If this instruction already has !annotation metadata, append \p Annotation
  /// to the existing node.
  void addAnnotationMetadata(StringRef Annotation);

  /// Returns the AA metadata for this instruction.
  AAMDNodes getAAMetadata() const;

  /// Sets the AA metadata on this instruction from the AAMDNodes structure.
  void setAAMetadata(const AAMDNodes &N);

  /// Retrieve the raw weight values of a conditional branch or select.
  /// Returns true on success with profile weights filled in.
  /// Returns false if no metadata or invalid metadata was found.
  bool extractProfMetadata(uint64_t &TrueVal, uint64_t &FalseVal) const;
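
  // Usage sketch (caller side, hypothetical): read the branch weights off a
  // conditional branch BI, falling back when no valid !prof data exists:
  //   uint64_t TrueWeight, FalseWeight;
  //   if (BI->extractProfMetadata(TrueWeight, FalseWeight)) {
  //     // ... use the weights ...
  //   }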
360 | |
361 | /// Retrieve total raw weight values of a branch. |
362 | /// Returns true on success with profile total weights filled in. |
363 | /// Returns false if no metadata was found. |
364 | bool extractProfTotalWeight(uint64_t &TotalVal) const; |
365 | |
366 | /// Set the debug location information for this instruction. |
367 | void setDebugLoc(DebugLoc Loc) { DbgLoc = std::move(Loc); } |
368 | |
369 | /// Return the debug location for this node as a DebugLoc. |
370 | const DebugLoc &getDebugLoc() const { return DbgLoc; } |
371 | |
372 | /// Set or clear the nuw flag on this instruction, which must be an operator |
373 | /// which supports this flag. See LangRef.html for the meaning of this flag. |
374 | void setHasNoUnsignedWrap(bool b = true); |
375 | |
376 | /// Set or clear the nsw flag on this instruction, which must be an operator |
377 | /// which supports this flag. See LangRef.html for the meaning of this flag. |
378 | void setHasNoSignedWrap(bool b = true); |
379 | |
380 | /// Set or clear the exact flag on this instruction, which must be an operator |
381 | /// which supports this flag. See LangRef.html for the meaning of this flag. |
382 | void setIsExact(bool b = true); |
383 | |
384 | /// Determine whether the no unsigned wrap flag is set. |
385 | bool hasNoUnsignedWrap() const; |
386 | |
387 | /// Determine whether the no signed wrap flag is set. |
388 | bool hasNoSignedWrap() const; |
389 | |
390 | /// Return true if this operator has flags which may cause this instruction |
391 | /// to evaluate to poison despite having non-poison inputs. |
392 | bool hasPoisonGeneratingFlags() const; |
393 | |
394 | /// Drops flags that may cause this instruction to evaluate to poison despite |
395 | /// having non-poison inputs. |
396 | void dropPoisonGeneratingFlags(); |
397 | |
398 | /// This function drops non-debug unknown metadata (through |
399 | /// dropUnknownNonDebugMetadata). For calls, it also drops parameter and |
400 | /// return attributes that can cause undefined behaviour. Both of these should |
401 | /// be done by passes which move instructions in IR. |
402 | void |
403 | dropUndefImplyingAttrsAndUnknownMetadata(ArrayRef<unsigned> KnownIDs = {}); |
404 | |
405 | /// Determine whether the exact flag is set. |
406 | bool isExact() const; |
407 | |
  /// Set or clear all fast-math-flags on this instruction, which must be an
  /// operator which supports this flag. See LangRef.html for the meaning of
  /// this flag.
  void setFast(bool B);

  /// Set or clear the reassociation flag on this instruction, which must be
  /// an operator which supports this flag. See LangRef.html for the meaning
  /// of this flag.
  void setHasAllowReassoc(bool B);

  /// Set or clear the no-nans flag on this instruction, which must be an
  /// operator which supports this flag. See LangRef.html for the meaning of
  /// this flag.
  void setHasNoNaNs(bool B);

  /// Set or clear the no-infs flag on this instruction, which must be an
  /// operator which supports this flag. See LangRef.html for the meaning of
  /// this flag.
  void setHasNoInfs(bool B);

  /// Set or clear the no-signed-zeros flag on this instruction, which must
  /// be an operator which supports this flag. See LangRef.html for the
  /// meaning of this flag.
  void setHasNoSignedZeros(bool B);

  /// Set or clear the allow-reciprocal flag on this instruction, which must
  /// be an operator which supports this flag. See LangRef.html for the
  /// meaning of this flag.
  void setHasAllowReciprocal(bool B);

  /// Set or clear the allow-contract flag on this instruction, which must be
  /// an operator which supports this flag. See LangRef.html for the meaning
  /// of this flag.
  void setHasAllowContract(bool B);

  /// Set or clear the approximate-math-functions flag on this instruction,
  /// which must be an operator which supports this flag. See LangRef.html
  /// for the meaning of this flag.
  void setHasApproxFunc(bool B);

  /// Convenience function for setting multiple fast-math flags on this
  /// instruction, which must be an operator which supports these flags. See
  /// LangRef.html for the meaning of these flags.
  void setFastMathFlags(FastMathFlags FMF);

  /// Convenience function for transferring all fast-math flag values to this
  /// instruction, which must be an operator which supports these flags. See
  /// LangRef.html for the meaning of these flags.
  void copyFastMathFlags(FastMathFlags FMF);

  /// Determine whether all fast-math-flags are set.
  bool isFast() const;

  /// Determine whether the allow-reassociation flag is set.
  bool hasAllowReassoc() const;

  /// Determine whether the no-NaNs flag is set.
  bool hasNoNaNs() const;

  /// Determine whether the no-infs flag is set.
  bool hasNoInfs() const;

  /// Determine whether the no-signed-zeros flag is set.
  bool hasNoSignedZeros() const;

  /// Determine whether the allow-reciprocal flag is set.
  bool hasAllowReciprocal() const;

  /// Determine whether the allow-contract flag is set.
  bool hasAllowContract() const;

  /// Determine whether the approximate-math-functions flag is set.
  bool hasApproxFunc() const;

  /// Convenience function for getting all the fast-math flags, which must be
  /// an operator which supports these flags. See LangRef.html for the
  /// meaning of these flags.
  FastMathFlags getFastMathFlags() const;

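  // Example (editor's sketch): enabling a specific subset of fast-math flags
  // rather than the all-or-nothing setFast(). 'I' is a hypothetical
  // floating-point Instruction.
  //
  //   FastMathFlags FMF;
  //   FMF.setAllowReassoc();
  //   FMF.setAllowContract();
  //   if (isa<FPMathOperator>(I))
  //     I->setFastMathFlags(FMF);   // overwrites any existing flags
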
  /// Copy I's fast-math flags.
  void copyFastMathFlags(const Instruction *I);

  /// Convenience method to copy supported exact, fast-math, and (optionally)
  /// wrapping flags from V to this instruction.
  void copyIRFlags(const Value *V, bool IncludeWrapFlags = true);

  /// Logical 'and' of any supported wrapping, exact, and fast-math flags of
  /// V and this instruction.
  void andIRFlags(const Value *V);

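  // Example (editor's sketch): when folding two instructions 'X' and 'Y'
  // (hypothetical) into a replacement 'Folded', the conservative choice is
  // the intersection of their flags.
  //
  //   Folded->copyIRFlags(X);   // start from X's nuw/nsw/exact/fast-math
  //   Folded->andIRFlags(Y);    // keep only the flags Y also has
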
  /// Merge two debug locations and apply the result to this instruction. If
  /// the instruction is a CallInst, we need to traverse the inline chain to
  /// find the common scope. This is not efficient for N-way merging, as each
  /// time you merge two locations you need to rebuild the hashmap to find
  /// the common scope. However, we still choose this API because:
  ///  1) Simplicity: it takes two locations instead of a list of locations.
  ///  2) In the worst case, it increases the complexity from O(N*I) to
  ///     O(2*N*I), where N is the number of instructions to merge and I is
  ///     the maximum depth of the inline stack. So it is still linear.
  ///  3) Merging of call instructions should be extremely rare in real
  ///     applications, so N-way merging should not be on a hot code path.
  /// The DebugLoc attached to this instruction will be overwritten by the
  /// merged DebugLoc.
  void applyMergedLocation(const DILocation *LocA, const DILocation *LocB);

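  // Example (editor's sketch): when sinking a common instruction out of two
  // blocks, merge the two locations instead of arbitrarily picking one;
  // 'Sunk', 'A' and 'B' are hypothetical instructions (DebugLoc converts to
  // DILocation*).
  //
  //   Sunk->applyMergedLocation(A->getDebugLoc(), B->getDebugLoc());
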
  /// Updates the debug location given that the instruction has been hoisted
  /// from a block to a predecessor of that block.
  /// Note: it is undefined behavior to call this on an instruction not
  /// currently inserted into a function.
  void updateLocationAfterHoist();

  /// Drop the instruction's debug location. This does not guarantee removal
  /// of the !dbg source location attachment, as it must set a line 0 location
  /// with scope information attached on call instructions. To guarantee
  /// removal of the !dbg attachment, use the \ref setDebugLoc() API.
  /// Note: it is undefined behavior to call this on an instruction not
  /// currently inserted into a function.
  void dropLocation();

private:
  // These are all implemented in Metadata.cpp.
  MDNode *getMetadataImpl(unsigned KindID) const;
  MDNode *getMetadataImpl(StringRef Kind) const;
  void
  getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned, MDNode *>> &) const;

public:
  //===--------------------------------------------------------------------===//
  // Predicates and helper methods.
  //===--------------------------------------------------------------------===//

  /// Return true if the instruction is associative:
  ///
  ///   Associative operators satisfy: x op (y op z) === (x op y) op z
  ///
  /// In LLVM, the Add, Mul, And, Or, and Xor operators are associative.
  ///
  bool isAssociative() const LLVM_READONLY;
  static bool isAssociative(unsigned Opcode) {
    return Opcode == And || Opcode == Or || Opcode == Xor ||
           Opcode == Add || Opcode == Mul;
  }

  /// Return true if the instruction is commutative:
  ///
  ///   Commutative operators satisfy: (x op y) === (y op x)
  ///
  /// In LLVM, these are the commutative operators, plus SetEQ and SetNE, when
  /// applied to any type.
  ///
  bool isCommutative() const LLVM_READONLY;
  static bool isCommutative(unsigned Opcode) {
    switch (Opcode) {
    case Add: case FAdd:
    case Mul: case FMul:
    case And: case Or: case Xor:
      return true;
    default:
      return false;
    }
  }

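  // Example (editor's sketch): a reassociation-style transform can query
  // these predicates before reordering the operands of a hypothetical
  // BinaryOperator 'BO'.
  //
  //   if (BO->isAssociative() && BO->isCommutative())
  //     BO->swapOperands();
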
  /// Return true if the instruction is idempotent:
  ///
  ///   Idempotent operators satisfy: x op x === x
  ///
  /// In LLVM, the And and Or operators are idempotent.
  ///
  bool isIdempotent() const { return isIdempotent(getOpcode()); }
  static bool isIdempotent(unsigned Opcode) {
    return Opcode == And || Opcode == Or;
  }

  /// Return true if the instruction is nilpotent:
  ///
  ///   Nilpotent operators satisfy: x op x === Id,
  ///
  /// where Id is the identity for the operator, i.e. a constant such that
  ///   x op Id === x and Id op x === x for all x.
  ///
  /// In LLVM, the Xor operator is nilpotent.
  ///
  bool isNilpotent() const { return isNilpotent(getOpcode()); }
  static bool isNilpotent(unsigned Opcode) {
    return Opcode == Xor;
  }

  /// Return true if this instruction may modify memory.
  bool mayWriteToMemory() const;

  /// Return true if this instruction may read memory.
  bool mayReadFromMemory() const;

  /// Return true if this instruction may read or write memory.
  bool mayReadOrWriteMemory() const {
    return mayReadFromMemory() || mayWriteToMemory();
  }

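  // Example (editor's sketch): legality checks for code motion often start
  // with these queries; an instruction that touches no memory, cannot throw,
  // and always returns is a candidate for hoisting.
  //
  //   bool SafeWrtMemory =
  //       !I->mayReadOrWriteMemory() && !I->mayThrow() && I->willReturn();
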
  /// Return true if this instruction has an AtomicOrdering of unordered or
  /// higher.
  bool isAtomic() const;

  /// Return true if this atomic instruction loads from memory.
  bool hasAtomicLoad() const;

  /// Return true if this atomic instruction stores to memory.
  bool hasAtomicStore() const;

  /// Return true if this instruction has a volatile memory access.
  bool isVolatile() const;

  /// Return true if this instruction may throw an exception.
  bool mayThrow() const;

  /// Return true if this instruction behaves like a memory fence: it can load
  /// from or store to memory without being given a specific memory location.
  bool isFenceLike() const {
    switch (getOpcode()) {
    default:
      return false;
    // This list should be kept in sync with the list in mayWriteToMemory for
    // all opcodes which don't have a memory location.
    case Instruction::Fence:
    case Instruction::CatchPad:
    case Instruction::CatchRet:
    case Instruction::Call:
    case Instruction::Invoke:
      return true;
    }
  }

  /// Return true if the instruction may have side effects.
  ///
  /// Side effects are:
  ///  * Writing to memory.
  ///  * Unwinding.
  ///  * Not returning (e.g. an infinite loop).
  ///
  /// Note that this does not consider malloc and alloca to have side
  /// effects because the newly allocated memory is completely invisible to
  /// instructions which don't use the returned value. For cases where this
  /// matters, isSafeToSpeculativelyExecute may be more appropriate.
  bool mayHaveSideEffects() const;

  /// Return true if the instruction can be removed if the result is unused.
  ///
  /// When constant folding, some instructions cannot be removed even if their
  /// results are unused. Specifically, terminator instructions and calls that
  /// may have side effects cannot be removed without semantically changing
  /// the generated program.
  bool isSafeToRemove() const;

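  // Example (editor's sketch): the classic dead-code-elimination guard.
  //
  //   if (I->use_empty() && I->isSafeToRemove())
  //     I->eraseFromParent();   // delete the trivially dead instruction
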
  /// Return true if the instruction will return (unwinding is considered as
  /// a form of returning control flow here).
  bool willReturn() const;

  /// Return true if the instruction is a variety of EH-block.
  bool isEHPad() const {
    switch (getOpcode()) {
    case Instruction::CatchSwitch:
    case Instruction::CatchPad:
    case Instruction::CleanupPad:
    case Instruction::LandingPad:
      return true;
    default:
      return false;
    }
  }

  /// Return true if the instruction is a llvm.lifetime.start or
  /// llvm.lifetime.end marker.
  bool isLifetimeStartOrEnd() const;

  /// Return true if the instruction is a llvm.launder.invariant.group or
  /// llvm.strip.invariant.group.
  bool isLaunderOrStripInvariantGroup() const;

  /// Return true if the instruction is a DbgInfoIntrinsic or PseudoProbeInst.
  bool isDebugOrPseudoInst() const;

  /// Return a pointer to the next non-debug instruction in the same basic
  /// block as 'this', or nullptr if no such instruction exists. Skip any
  /// pseudo operations if \c SkipPseudoOp is true.
  const Instruction *
  getNextNonDebugInstruction(bool SkipPseudoOp = false) const;
  Instruction *getNextNonDebugInstruction(bool SkipPseudoOp = false) {
    return const_cast<Instruction *>(
        static_cast<const Instruction *>(this)->getNextNonDebugInstruction(
            SkipPseudoOp));
  }

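  // Example (editor's sketch): stepping to the next "real" instruction,
  // skipping debug intrinsics and pseudo probes.
  //
  //   if (const Instruction *Next =
  //           I->getNextNonDebugInstruction(/*SkipPseudoOp=*/true))
  //     visit(*Next);   // 'visit' is a hypothetical callback
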
  /// Return a pointer to the previous non-debug instruction in the same basic
  /// block as 'this', or nullptr if no such instruction exists. Skip any
  /// pseudo operations if \c SkipPseudoOp is true.
  const Instruction *
  getPrevNonDebugInstruction(bool SkipPseudoOp = false) const;
  Instruction *getPrevNonDebugInstruction(bool SkipPseudoOp = false) {
    return const_cast<Instruction *>(
        static_cast<const Instruction *>(this)->getPrevNonDebugInstruction(
            SkipPseudoOp));
  }

  /// Create a copy of 'this' instruction that is identical in all ways except
  /// the following:
  ///   * The instruction has no parent
  ///   * The instruction has no name
  ///
  Instruction *clone() const;

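  // Example (editor's sketch): clone() neither inserts the copy nor names
  // it, so the caller must place it explicitly. 'InsertPt' is hypothetical.
  //
  //   Instruction *Copy = I->clone();
  //   Copy->insertBefore(InsertPt);
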
  /// Return true if the specified instruction is exactly identical to the
  /// current one. This means that all operands match and any extra
  /// information (e.g. load is volatile) agrees.
  bool isIdenticalTo(const Instruction *I) const;

  /// This is like isIdenticalTo, except that it ignores the
  /// SubclassOptionalData flags, which may specify conditions under which the
  /// instruction's result is undefined.
  bool isIdenticalToWhenDefined(const Instruction *I) const;

  /// When checking for operation equivalence (using isSameOperationAs) it is
  /// sometimes useful to ignore certain attributes.
  enum OperationEquivalenceFlags {
    /// Check for equivalence ignoring load/store alignment.
    CompareIgnoringAlignment = 1 << 0,
    /// Check for equivalence treating a type and a vector of that type
    /// as equivalent.
    CompareUsingScalarTypes = 1 << 1
  };

  /// This function determines if the specified instruction executes the same
  /// operation as the current one. This means that the opcodes, type, operand
  /// types and any other factors affecting the operation must be the same.
  /// This is similar to isIdenticalTo except the operands themselves don't
  /// have to be identical.
  /// @returns true if the specified instruction is the same operation as
  /// the current one.
  bool isSameOperationAs(const Instruction *I, unsigned flags = 0) const;

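  // Example (editor's sketch): comparing two loads 'LA' and 'LB'
  // (hypothetical) while ignoring their alignment.
  //
  //   if (LA->isSameOperationAs(LB, Instruction::CompareIgnoringAlignment))
  //     mergeLoads(LA, LB);   // 'mergeLoads' is a hypothetical helper
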
  /// Return true if there are any uses of this instruction in blocks other
  /// than the specified block. Note that PHI nodes are considered to evaluate
  /// their operands in the corresponding predecessor block.
  bool isUsedOutsideOfBlock(const BasicBlock *BB) const;

  /// Return the number of successors that this instruction has. The
  /// instruction must be a terminator.
  unsigned getNumSuccessors() const;

  /// Return the specified successor. This instruction must be a terminator.
  BasicBlock *getSuccessor(unsigned Idx) const;

  /// Update the specified successor to point at the provided block. This
  /// instruction must be a terminator.
  void setSuccessor(unsigned Idx, BasicBlock *BB);

  /// Replace the specified successor OldBB with the provided block. This
  /// instruction must be a terminator.
  void replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB);

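  // Example (editor's sketch): retargeting every edge from OldBB to NewBB on
  // a terminator 'T' (all names hypothetical).
  //
  //   for (unsigned i = 0, e = T->getNumSuccessors(); i != e; ++i)
  //     if (T->getSuccessor(i) == OldBB)
  //       T->setSuccessor(i, NewBB);
  //   // equivalently: T->replaceSuccessorWith(OldBB, NewBB);
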
  /// Methods for supporting type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() >= Value::InstructionVal;
  }

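  // Example (editor's sketch): classof is what drives isa<> and dyn_cast<>,
  // so client code rarely calls it directly. 'V' is a hypothetical Value*.
  //
  //   if (auto *I = dyn_cast<Instruction>(V))
  //     errs() << "defined in " << I->getParent()->getName() << "\n";
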
  //----------------------------------------------------------------------
  // Exported enumerations.
  //
  enum TermOps { // These terminate basic blocks
#define FIRST_TERM_INST(N) TermOpsBegin = N,
#define HANDLE_TERM_INST(N, OPC, CLASS) OPC = N,
#define LAST_TERM_INST(N) TermOpsEnd = N+1
#include "llvm/IR/Instruction.def"
  };

  enum UnaryOps {
#define FIRST_UNARY_INST(N) UnaryOpsBegin = N,
#define HANDLE_UNARY_INST(N, OPC, CLASS) OPC = N,
#define LAST_UNARY_INST(N) UnaryOpsEnd = N+1
#include "llvm/IR/Instruction.def"
  };

  enum BinaryOps {
#define FIRST_BINARY_INST(N) BinaryOpsBegin = N,
#define HANDLE_BINARY_INST(N, OPC, CLASS) OPC = N,
#define LAST_BINARY_INST(N) BinaryOpsEnd = N+1
#include "llvm/IR/Instruction.def"
  };

  enum MemoryOps {
#define FIRST_MEMORY_INST(N) MemoryOpsBegin = N,
#define HANDLE_MEMORY_INST(N, OPC, CLASS) OPC = N,
#define LAST_MEMORY_INST(N) MemoryOpsEnd = N+1
#include "llvm/IR/Instruction.def"
  };

  enum CastOps {
#define FIRST_CAST_INST(N) CastOpsBegin = N,
#define HANDLE_CAST_INST(N, OPC, CLASS) OPC = N,
#define LAST_CAST_INST(N) CastOpsEnd = N+1
#include "llvm/IR/Instruction.def"
  };

  enum FuncletPadOps {
#define FIRST_FUNCLETPAD_INST(N) FuncletPadOpsBegin = N,
#define HANDLE_FUNCLETPAD_INST(N, OPC, CLASS) OPC = N,
#define LAST_FUNCLETPAD_INST(N) FuncletPadOpsEnd = N+1
#include "llvm/IR/Instruction.def"
  };

  enum OtherOps {
#define FIRST_OTHER_INST(N) OtherOpsBegin = N,
#define HANDLE_OTHER_INST(N, OPC, CLASS) OPC = N,
#define LAST_OTHER_INST(N) OtherOpsEnd = N+1
#include "llvm/IR/Instruction.def"
  };

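  // Example (editor's sketch): the FIRST_/LAST_ macros above expand to
  // half-open opcode ranges, so category tests reduce to a range check.
  //
  //   unsigned Op = I->getOpcode();
  //   bool IsCast = Op >= Instruction::CastOpsBegin &&
  //                 Op < Instruction::CastOpsEnd;
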
private:
  friend class SymbolTableListTraits<Instruction>;
  friend class BasicBlock; // For renumbering.

  // Shadow Value::setValueSubclassData with a private forwarding method so
  // that subclasses cannot accidentally use it.
  void setValueSubclassData(unsigned short D) {
    Value::setValueSubclassData(D);
  }

  unsigned short getSubclassDataFromValue() const {
    return Value::getSubclassDataFromValue();
  }

  void setParent(BasicBlock *P);

protected:
  // Instruction subclasses can stick up to 15 bits of stuff into the
  // SubclassData field of the instruction with these members.

  template <typename BitfieldElement>
  typename BitfieldElement::Type getSubclassData() const {
    static_assert(
        std::is_same<BitfieldElement, HasMetadataField>::value ||
            !Bitfield::isOverlapping<BitfieldElement, HasMetadataField>(),
        "Must not overlap with the metadata bit");
    return Bitfield::get<BitfieldElement>(getSubclassDataFromValue());
  }

  template <typename BitfieldElement>
  void setSubclassData(typename BitfieldElement::Type Value) {
    static_assert(
        std::is_same<BitfieldElement, HasMetadataField>::value ||
            !Bitfield::isOverlapping<BitfieldElement, HasMetadataField>(),
        "Must not overlap with the metadata bit");
    auto Storage = getSubclassDataFromValue();
    Bitfield::set<BitfieldElement>(Storage, Value);
    setValueSubclassData(Storage);
  }

  Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
              Instruction *InsertBefore = nullptr);
  Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
              BasicBlock *InsertAtEnd);

private:
  /// Create a copy of this instruction.
  Instruction *cloneImpl() const;
};

inline void ilist_alloc_traits<Instruction>::deleteNode(Instruction *V) {
  V->deleteValue();
}

} // end namespace llvm

#endif // LLVM_IR_INSTRUCTION_H