LLVM 20.0.0git
DataFlowSanitizer.cpp
Go to the documentation of this file.
1//===- DataFlowSanitizer.cpp - dynamic data flow analysis -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file is a part of DataFlowSanitizer, a generalised dynamic data flow
11/// analysis.
12///
13/// Unlike other Sanitizer tools, this tool is not designed to detect a specific
14/// class of bugs on its own. Instead, it provides a generic dynamic data flow
15/// analysis framework to be used by clients to help detect application-specific
16/// issues within their own code.
17///
18/// The analysis is based on automatic propagation of data flow labels (also
19/// known as taint labels) through a program as it performs computation.
20///
21/// Argument and return value labels are passed through TLS variables
22/// __dfsan_arg_tls and __dfsan_retval_tls.
23///
24/// Each byte of application memory is backed by a shadow memory byte. The
25/// shadow byte can represent up to 8 labels. On Linux/x86_64, memory is then
26/// laid out as follows:
27///
28/// +--------------------+ 0x800000000000 (top of memory)
29/// | application 3 |
30/// +--------------------+ 0x700000000000
31/// | invalid |
32/// +--------------------+ 0x610000000000
33/// | origin 1 |
34/// +--------------------+ 0x600000000000
35/// | application 2 |
36/// +--------------------+ 0x510000000000
37/// | shadow 1 |
38/// +--------------------+ 0x500000000000
39/// | invalid |
40/// +--------------------+ 0x400000000000
41/// | origin 3 |
42/// +--------------------+ 0x300000000000
43/// | shadow 3 |
44/// +--------------------+ 0x200000000000
45/// | origin 2 |
46/// +--------------------+ 0x110000000000
47/// | invalid |
48/// +--------------------+ 0x100000000000
49/// | shadow 2 |
50/// +--------------------+ 0x010000000000
51/// | application 1 |
52/// +--------------------+ 0x000000000000
53///
54/// MEM_TO_SHADOW(mem) = mem ^ 0x500000000000
55/// SHADOW_TO_ORIGIN(shadow) = shadow + 0x100000000000
56///
57/// For more information, please refer to the design document:
58/// http://clang.llvm.org/docs/DataFlowSanitizerDesign.html
59//
60//===----------------------------------------------------------------------===//
61
63#include "llvm/ADT/DenseMap.h"
64#include "llvm/ADT/DenseSet.h"
68#include "llvm/ADT/StringRef.h"
69#include "llvm/ADT/StringSet.h"
70#include "llvm/ADT/iterator.h"
75#include "llvm/IR/Argument.h"
77#include "llvm/IR/Attributes.h"
78#include "llvm/IR/BasicBlock.h"
79#include "llvm/IR/Constant.h"
80#include "llvm/IR/Constants.h"
81#include "llvm/IR/DataLayout.h"
83#include "llvm/IR/Dominators.h"
84#include "llvm/IR/Function.h"
85#include "llvm/IR/GlobalAlias.h"
86#include "llvm/IR/GlobalValue.h"
88#include "llvm/IR/IRBuilder.h"
89#include "llvm/IR/InstVisitor.h"
90#include "llvm/IR/InstrTypes.h"
91#include "llvm/IR/Instruction.h"
94#include "llvm/IR/MDBuilder.h"
95#include "llvm/IR/Module.h"
96#include "llvm/IR/PassManager.h"
97#include "llvm/IR/Type.h"
98#include "llvm/IR/User.h"
99#include "llvm/IR/Value.h"
101#include "llvm/Support/Casting.h"
110#include <algorithm>
111#include <cassert>
112#include <cstddef>
113#include <cstdint>
114#include <memory>
115#include <set>
116#include <string>
117#include <utility>
118#include <vector>
119
120using namespace llvm;
121
122// This must be consistent with ShadowWidthBits.
124
126
// The size of TLS variables. These constants must be kept in sync with the ones
// in dfsan.cpp.
static const unsigned ArgTLSSize = 800;    // Bytes reserved for argument shadows.
static const unsigned RetvalTLSSize = 800; // Bytes reserved for the return shadow.
131
132// The -dfsan-preserve-alignment flag controls whether this pass assumes that
133// alignment requirements provided by the input IR are correct. For example,
134// if the input IR contains a load with alignment 8, this flag will cause
135// the shadow load to have alignment 16. This flag is disabled by default as
136// we have unfortunately encountered too much code (including Clang itself;
137// see PR14291) which performs misaligned access.
139 "dfsan-preserve-alignment",
140 cl::desc("respect alignment requirements provided by input IR"), cl::Hidden,
141 cl::init(false));
142
143// The ABI list files control how shadow parameters are passed. The pass treats
144// every function labelled "uninstrumented" in the ABI list file as conforming
145// to the "native" (i.e. unsanitized) ABI. Unless the ABI list contains
146// additional annotations for those functions, a call to one of those functions
147// will produce a warning message, as the labelling behaviour of the function is
148// unknown. The other supported annotations for uninstrumented functions are
149// "functional" and "discard", which are described below under
150// DataFlowSanitizer::WrapperKind.
151// Functions will often be labelled with both "uninstrumented" and one of
152// "functional" or "discard". This will leave the function unchanged by this
153// pass, and create a wrapper function that will call the original.
154//
155// Instrumented functions can also be annotated as "force_zero_labels", which
156// will make all shadow and return values set zero labels.
157// Functions should never be labelled with both "force_zero_labels" and
// "uninstrumented" or any of the uninstrumented wrapper kinds.
160 "dfsan-abilist",
161 cl::desc("File listing native ABI functions and how the pass treats them"),
162 cl::Hidden);
163
164// Controls whether the pass includes or ignores the labels of pointers in load
165// instructions.
167 "dfsan-combine-pointer-labels-on-load",
168 cl::desc("Combine the label of the pointer with the label of the data when "
169 "loading from memory."),
170 cl::Hidden, cl::init(true));
171
172// Controls whether the pass includes or ignores the labels of pointers in
173// stores instructions.
175 "dfsan-combine-pointer-labels-on-store",
176 cl::desc("Combine the label of the pointer with the label of the data when "
177 "storing in memory."),
178 cl::Hidden, cl::init(false));
179
180// Controls whether the pass propagates labels of offsets in GEP instructions.
182 "dfsan-combine-offset-labels-on-gep",
183 cl::desc(
184 "Combine the label of the offset with the label of the pointer when "
185 "doing pointer arithmetic."),
186 cl::Hidden, cl::init(true));
187
189 "dfsan-combine-taint-lookup-table",
190 cl::desc(
191 "When dfsan-combine-offset-labels-on-gep and/or "
192 "dfsan-combine-pointer-labels-on-load are false, this flag can "
193 "be used to re-enable combining offset and/or pointer taint when "
194 "loading specific constant global variables (i.e. lookup tables)."),
195 cl::Hidden);
196
198 "dfsan-debug-nonzero-labels",
199 cl::desc("Insert calls to __dfsan_nonzero_label on observing a parameter, "
200 "load or return with a nonzero label"),
201 cl::Hidden);
202
203// Experimental feature that inserts callbacks for certain data events.
204// Currently callbacks are only inserted for loads, stores, memory transfers
205// (i.e. memcpy and memmove), and comparisons.
206//
207// If this flag is set to true, the user must provide definitions for the
208// following callback functions:
209// void __dfsan_load_callback(dfsan_label Label, void* addr);
210// void __dfsan_store_callback(dfsan_label Label, void* addr);
211// void __dfsan_mem_transfer_callback(dfsan_label *Start, size_t Len);
212// void __dfsan_cmp_callback(dfsan_label CombinedLabel);
214 "dfsan-event-callbacks",
215 cl::desc("Insert calls to __dfsan_*_callback functions on data events."),
216 cl::Hidden, cl::init(false));
217
218// Experimental feature that inserts callbacks for conditionals, including:
219// conditional branch, switch, select.
220// This must be true for dfsan_set_conditional_callback() to have effect.
222 "dfsan-conditional-callbacks",
223 cl::desc("Insert calls to callback functions on conditionals."), cl::Hidden,
224 cl::init(false));
225
226// Experimental feature that inserts callbacks for data reaching a function,
// either via function arguments or loads.
228// This must be true for dfsan_set_reaches_function_callback() to have effect.
230 "dfsan-reaches-function-callbacks",
231 cl::desc("Insert calls to callback functions on data reaching a function."),
232 cl::Hidden, cl::init(false));
233
234// Controls whether the pass tracks the control flow of select instructions.
236 "dfsan-track-select-control-flow",
237 cl::desc("Propagate labels from condition values of select instructions "
238 "to results."),
239 cl::Hidden, cl::init(true));
240
241// TODO: This default value follows MSan. DFSan may use a different value.
243 "dfsan-instrument-with-call-threshold",
244 cl::desc("If the function being instrumented requires more than "
245 "this number of origin stores, use callbacks instead of "
246 "inline checks (-1 means never use callbacks)."),
247 cl::Hidden, cl::init(3500));
248
249// Controls how to track origins.
250// * 0: do not track origins.
251// * 1: track origins at memory store operations.
252// * 2: track origins at memory load and store operations.
253// TODO: track callsites.
254static cl::opt<int> ClTrackOrigins("dfsan-track-origins",
255 cl::desc("Track origins of labels"),
256 cl::Hidden, cl::init(0));
257
259 "dfsan-ignore-personality-routine",
260 cl::desc("If a personality routine is marked uninstrumented from the ABI "
261 "list, do not create a wrapper for it."),
262 cl::Hidden, cl::init(false));
263
265 // Types of GlobalVariables are always pointer types.
266 Type *GType = G.getValueType();
267 // For now we support excluding struct types only.
268 if (StructType *SGType = dyn_cast<StructType>(GType)) {
269 if (!SGType->isLiteral())
270 return SGType->getName();
271 }
272 return "<unknown type>";
273}
274
namespace {

// Memory map parameters used in application-to-shadow address calculation.
//   Offset = (Addr & ~AndMask) ^ XorMask
//   Shadow = ShadowBase + Offset
//   Origin = (OriginBase + Offset) & ~3ULL
struct MemoryMapParams {
  uint64_t AndMask;
  uint64_t XorMask;
  uint64_t ShadowBase;
  uint64_t OriginBase;
};

} // end anonymous namespace

// Platform-specific mapping constants. These must agree with the memory
// layout documented at the top of this file and with the runtime in dfsan.cpp.
// NOLINTBEGIN(readability-identifier-naming)
// aarch64 Linux
const MemoryMapParams Linux_AArch64_MemoryMapParams = {
    0,               // AndMask (not used)
    0x0B00000000000, // XorMask
    0,               // ShadowBase (not used)
    0x0200000000000, // OriginBase
};

// x86_64 Linux
const MemoryMapParams Linux_X86_64_MemoryMapParams = {
    0,              // AndMask (not used)
    0x500000000000, // XorMask
    0,              // ShadowBase (not used)
    0x100000000000, // OriginBase
};

// loongarch64 Linux
const MemoryMapParams Linux_LoongArch64_MemoryMapParams = {
    0,              // AndMask (not used)
    0x500000000000, // XorMask
    0,              // ShadowBase (not used)
    0x100000000000, // OriginBase
};
// NOLINTEND(readability-identifier-naming)
315
316namespace {
317
318class DFSanABIList {
319 std::unique_ptr<SpecialCaseList> SCL;
320
321public:
322 DFSanABIList() = default;
323
324 void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); }
325
326 /// Returns whether either this function or its source file are listed in the
327 /// given category.
328 bool isIn(const Function &F, StringRef Category) const {
329 return isIn(*F.getParent(), Category) ||
330 SCL->inSection("dataflow", "fun", F.getName(), Category);
331 }
332
333 /// Returns whether this global alias is listed in the given category.
334 ///
335 /// If GA aliases a function, the alias's name is matched as a function name
336 /// would be. Similarly, aliases of globals are matched like globals.
337 bool isIn(const GlobalAlias &GA, StringRef Category) const {
338 if (isIn(*GA.getParent(), Category))
339 return true;
340
341 if (isa<FunctionType>(GA.getValueType()))
342 return SCL->inSection("dataflow", "fun", GA.getName(), Category);
343
344 return SCL->inSection("dataflow", "global", GA.getName(), Category) ||
345 SCL->inSection("dataflow", "type", getGlobalTypeString(GA),
346 Category);
347 }
348
349 /// Returns whether this module is listed in the given category.
350 bool isIn(const Module &M, StringRef Category) const {
351 return SCL->inSection("dataflow", "src", M.getModuleIdentifier(), Category);
352 }
353};
354
355/// TransformedFunction is used to express the result of transforming one
356/// function type into another. This struct is immutable. It holds metadata
357/// useful for updating calls of the old function to the new type.
358struct TransformedFunction {
359 TransformedFunction(FunctionType *OriginalType, FunctionType *TransformedType,
360 const std::vector<unsigned> &ArgumentIndexMapping)
361 : OriginalType(OriginalType), TransformedType(TransformedType),
362 ArgumentIndexMapping(ArgumentIndexMapping) {}
363
364 // Disallow copies.
365 TransformedFunction(const TransformedFunction &) = delete;
366 TransformedFunction &operator=(const TransformedFunction &) = delete;
367
368 // Allow moves.
369 TransformedFunction(TransformedFunction &&) = default;
370 TransformedFunction &operator=(TransformedFunction &&) = default;
371
372 /// Type of the function before the transformation.
373 FunctionType *OriginalType;
374
375 /// Type of the function after the transformation.
377
378 /// Transforming a function may change the position of arguments. This
379 /// member records the mapping from each argument's old position to its new
380 /// position. Argument positions are zero-indexed. If the transformation
381 /// from F to F' made the first argument of F into the third argument of F',
382 /// then ArgumentIndexMapping[0] will equal 2.
383 std::vector<unsigned> ArgumentIndexMapping;
384};
385
386/// Given function attributes from a call site for the original function,
387/// return function attributes appropriate for a call to the transformed
388/// function.
390transformFunctionAttributes(const TransformedFunction &TransformedFunction,
391 LLVMContext &Ctx, AttributeList CallSiteAttrs) {
392
393 // Construct a vector of AttributeSet for each function argument.
394 std::vector<llvm::AttributeSet> ArgumentAttributes(
395 TransformedFunction.TransformedType->getNumParams());
396
397 // Copy attributes from the parameter of the original function to the
398 // transformed version. 'ArgumentIndexMapping' holds the mapping from
399 // old argument position to new.
400 for (unsigned I = 0, IE = TransformedFunction.ArgumentIndexMapping.size();
401 I < IE; ++I) {
402 unsigned TransformedIndex = TransformedFunction.ArgumentIndexMapping[I];
403 ArgumentAttributes[TransformedIndex] = CallSiteAttrs.getParamAttrs(I);
404 }
405
406 // Copy annotations on varargs arguments.
407 for (unsigned I = TransformedFunction.OriginalType->getNumParams(),
408 IE = CallSiteAttrs.getNumAttrSets();
409 I < IE; ++I) {
410 ArgumentAttributes.push_back(CallSiteAttrs.getParamAttrs(I));
411 }
412
413 return AttributeList::get(Ctx, CallSiteAttrs.getFnAttrs(),
414 CallSiteAttrs.getRetAttrs(),
415 llvm::ArrayRef(ArgumentAttributes));
416}
417
418class DataFlowSanitizer {
419 friend struct DFSanFunction;
420 friend class DFSanVisitor;
421
422 enum { ShadowWidthBits = 8, ShadowWidthBytes = ShadowWidthBits / 8 };
423
424 enum { OriginWidthBits = 32, OriginWidthBytes = OriginWidthBits / 8 };
425
426 /// How should calls to uninstrumented functions be handled?
427 enum WrapperKind {
428 /// This function is present in an uninstrumented form but we don't know
429 /// how it should be handled. Print a warning and call the function anyway.
430 /// Don't label the return value.
431 WK_Warning,
432
433 /// This function does not write to (user-accessible) memory, and its return
434 /// value is unlabelled.
435 WK_Discard,
436
437 /// This function does not write to (user-accessible) memory, and the label
438 /// of its return value is the union of the label of its arguments.
439 WK_Functional,
440
441 /// Instead of calling the function, a custom wrapper __dfsw_F is called,
442 /// where F is the name of the function. This function may wrap the
443 /// original function or provide its own implementation. WK_Custom uses an
444 /// extra pointer argument to return the shadow. This allows the wrapped
445 /// form of the function type to be expressed in C.
446 WK_Custom
447 };
448
449 Module *Mod;
450 LLVMContext *Ctx;
451 Type *Int8Ptr;
452 IntegerType *OriginTy;
453 PointerType *OriginPtrTy;
454 ConstantInt *ZeroOrigin;
455 /// The shadow type for all primitive types and vector types.
456 IntegerType *PrimitiveShadowTy;
457 PointerType *PrimitiveShadowPtrTy;
458 IntegerType *IntptrTy;
459 ConstantInt *ZeroPrimitiveShadow;
460 Constant *ArgTLS;
461 ArrayType *ArgOriginTLSTy;
462 Constant *ArgOriginTLS;
463 Constant *RetvalTLS;
464 Constant *RetvalOriginTLS;
465 FunctionType *DFSanUnionLoadFnTy;
466 FunctionType *DFSanLoadLabelAndOriginFnTy;
467 FunctionType *DFSanUnimplementedFnTy;
468 FunctionType *DFSanWrapperExternWeakNullFnTy;
469 FunctionType *DFSanSetLabelFnTy;
470 FunctionType *DFSanNonzeroLabelFnTy;
471 FunctionType *DFSanVarargWrapperFnTy;
472 FunctionType *DFSanConditionalCallbackFnTy;
473 FunctionType *DFSanConditionalCallbackOriginFnTy;
474 FunctionType *DFSanReachesFunctionCallbackFnTy;
475 FunctionType *DFSanReachesFunctionCallbackOriginFnTy;
476 FunctionType *DFSanCmpCallbackFnTy;
477 FunctionType *DFSanLoadStoreCallbackFnTy;
478 FunctionType *DFSanMemTransferCallbackFnTy;
479 FunctionType *DFSanChainOriginFnTy;
480 FunctionType *DFSanChainOriginIfTaintedFnTy;
481 FunctionType *DFSanMemOriginTransferFnTy;
482 FunctionType *DFSanMemShadowOriginTransferFnTy;
483 FunctionType *DFSanMemShadowOriginConditionalExchangeFnTy;
484 FunctionType *DFSanMaybeStoreOriginFnTy;
485 FunctionCallee DFSanUnionLoadFn;
486 FunctionCallee DFSanLoadLabelAndOriginFn;
487 FunctionCallee DFSanUnimplementedFn;
488 FunctionCallee DFSanWrapperExternWeakNullFn;
489 FunctionCallee DFSanSetLabelFn;
490 FunctionCallee DFSanNonzeroLabelFn;
491 FunctionCallee DFSanVarargWrapperFn;
492 FunctionCallee DFSanLoadCallbackFn;
493 FunctionCallee DFSanStoreCallbackFn;
494 FunctionCallee DFSanMemTransferCallbackFn;
495 FunctionCallee DFSanConditionalCallbackFn;
496 FunctionCallee DFSanConditionalCallbackOriginFn;
497 FunctionCallee DFSanReachesFunctionCallbackFn;
498 FunctionCallee DFSanReachesFunctionCallbackOriginFn;
499 FunctionCallee DFSanCmpCallbackFn;
500 FunctionCallee DFSanChainOriginFn;
501 FunctionCallee DFSanChainOriginIfTaintedFn;
502 FunctionCallee DFSanMemOriginTransferFn;
503 FunctionCallee DFSanMemShadowOriginTransferFn;
504 FunctionCallee DFSanMemShadowOriginConditionalExchangeFn;
505 FunctionCallee DFSanMaybeStoreOriginFn;
506 SmallPtrSet<Value *, 16> DFSanRuntimeFunctions;
507 MDNode *ColdCallWeights;
508 MDNode *OriginStoreWeights;
509 DFSanABIList ABIList;
510 DenseMap<Value *, Function *> UnwrappedFnMap;
511 AttributeMask ReadOnlyNoneAttrs;
512 StringSet<> CombineTaintLookupTableNames;
513
514 /// Memory map parameters used in calculation mapping application addresses
515 /// to shadow addresses and origin addresses.
516 const MemoryMapParams *MapParams;
517
518 Value *getShadowOffset(Value *Addr, IRBuilder<> &IRB);
519 Value *getShadowAddress(Value *Addr, BasicBlock::iterator Pos);
520 Value *getShadowAddress(Value *Addr, BasicBlock::iterator Pos,
521 Value *ShadowOffset);
522 std::pair<Value *, Value *> getShadowOriginAddress(Value *Addr,
523 Align InstAlignment,
525 bool isInstrumented(const Function *F);
526 bool isInstrumented(const GlobalAlias *GA);
527 bool isForceZeroLabels(const Function *F);
528 TransformedFunction getCustomFunctionType(FunctionType *T);
529 WrapperKind getWrapperKind(Function *F);
530 void addGlobalNameSuffix(GlobalValue *GV);
531 void buildExternWeakCheckIfNeeded(IRBuilder<> &IRB, Function *F);
532 Function *buildWrapperFunction(Function *F, StringRef NewFName,
534 FunctionType *NewFT);
535 void initializeCallbackFunctions(Module &M);
536 void initializeRuntimeFunctions(Module &M);
537 bool initializeModule(Module &M);
538
539 /// Advances \p OriginAddr to point to the next 32-bit origin and then loads
540 /// from it. Returns the origin's loaded value.
541 Value *loadNextOrigin(BasicBlock::iterator Pos, Align OriginAlign,
542 Value **OriginAddr);
543
544 /// Returns whether the given load byte size is amenable to inlined
545 /// optimization patterns.
546 bool hasLoadSizeForFastPath(uint64_t Size);
547
548 /// Returns whether the pass tracks origins. Supports only TLS ABI mode.
549 bool shouldTrackOrigins();
550
551 /// Returns a zero constant with the shadow type of OrigTy.
552 ///
553 /// getZeroShadow({T1,T2,...}) = {getZeroShadow(T1),getZeroShadow(T2,...}
554 /// getZeroShadow([n x T]) = [n x getZeroShadow(T)]
555 /// getZeroShadow(other type) = i16(0)
556 Constant *getZeroShadow(Type *OrigTy);
557 /// Returns a zero constant with the shadow type of V's type.
558 Constant *getZeroShadow(Value *V);
559
560 /// Checks if V is a zero shadow.
561 bool isZeroShadow(Value *V);
562
563 /// Returns the shadow type of OrigTy.
564 ///
565 /// getShadowTy({T1,T2,...}) = {getShadowTy(T1),getShadowTy(T2),...}
566 /// getShadowTy([n x T]) = [n x getShadowTy(T)]
567 /// getShadowTy(other type) = i16
568 Type *getShadowTy(Type *OrigTy);
569 /// Returns the shadow type of V's type.
570 Type *getShadowTy(Value *V);
571
572 const uint64_t NumOfElementsInArgOrgTLS = ArgTLSSize / OriginWidthBytes;
573
574public:
575 DataFlowSanitizer(const std::vector<std::string> &ABIListFiles);
576
577 bool runImpl(Module &M,
579};
580
581struct DFSanFunction {
582 DataFlowSanitizer &DFS;
583 Function *F;
584 DominatorTree DT;
585 bool IsNativeABI;
586 bool IsForceZeroLabels;
588 AllocaInst *LabelReturnAlloca = nullptr;
589 AllocaInst *OriginReturnAlloca = nullptr;
590 DenseMap<Value *, Value *> ValShadowMap;
591 DenseMap<Value *, Value *> ValOriginMap;
594
595 struct PHIFixupElement {
596 PHINode *Phi;
597 PHINode *ShadowPhi;
598 PHINode *OriginPhi;
599 };
600 std::vector<PHIFixupElement> PHIFixups;
601
602 DenseSet<Instruction *> SkipInsts;
603 std::vector<Value *> NonZeroChecks;
604
605 struct CachedShadow {
606 BasicBlock *Block; // The block where Shadow is defined.
607 Value *Shadow;
608 };
609 /// Maps a value to its latest shadow value in terms of domination tree.
610 DenseMap<std::pair<Value *, Value *>, CachedShadow> CachedShadows;
611 /// Maps a value to its latest collapsed shadow value it was converted to in
612 /// terms of domination tree. When ClDebugNonzeroLabels is on, this cache is
613 /// used at a post process where CFG blocks are split. So it does not cache
614 /// BasicBlock like CachedShadows, but uses domination between values.
615 DenseMap<Value *, Value *> CachedCollapsedShadows;
617
618 DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI,
619 bool IsForceZeroLabels, TargetLibraryInfo &TLI)
620 : DFS(DFS), F(F), IsNativeABI(IsNativeABI),
621 IsForceZeroLabels(IsForceZeroLabels), TLI(TLI) {
622 DT.recalculate(*F);
623 }
624
625 /// Computes the shadow address for a given function argument.
626 ///
627 /// Shadow = ArgTLS+ArgOffset.
628 Value *getArgTLS(Type *T, unsigned ArgOffset, IRBuilder<> &IRB);
629
630 /// Computes the shadow address for a return value.
631 Value *getRetvalTLS(Type *T, IRBuilder<> &IRB);
632
633 /// Computes the origin address for a given function argument.
634 ///
635 /// Origin = ArgOriginTLS[ArgNo].
636 Value *getArgOriginTLS(unsigned ArgNo, IRBuilder<> &IRB);
637
638 /// Computes the origin address for a return value.
639 Value *getRetvalOriginTLS();
640
641 Value *getOrigin(Value *V);
642 void setOrigin(Instruction *I, Value *Origin);
643 /// Generates IR to compute the origin of the last operand with a taint label.
644 Value *combineOperandOrigins(Instruction *Inst);
645 /// Before the instruction Pos, generates IR to compute the last origin with a
646 /// taint label. Labels and origins are from vectors Shadows and Origins
647 /// correspondingly. The generated IR is like
648 /// Sn-1 != Zero ? On-1: ... S2 != Zero ? O2: S1 != Zero ? O1: O0
649 /// When Zero is nullptr, it uses ZeroPrimitiveShadow. Otherwise it can be
650 /// zeros with other bitwidths.
651 Value *combineOrigins(const std::vector<Value *> &Shadows,
652 const std::vector<Value *> &Origins,
653 BasicBlock::iterator Pos, ConstantInt *Zero = nullptr);
654
655 Value *getShadow(Value *V);
656 void setShadow(Instruction *I, Value *Shadow);
657 /// Generates IR to compute the union of the two given shadows, inserting it
658 /// before Pos. The combined value is with primitive type.
659 Value *combineShadows(Value *V1, Value *V2, BasicBlock::iterator Pos);
660 /// Combines the shadow values of V1 and V2, then converts the combined value
661 /// with primitive type into a shadow value with the original type T.
662 Value *combineShadowsThenConvert(Type *T, Value *V1, Value *V2,
664 Value *combineOperandShadows(Instruction *Inst);
665
666 /// Generates IR to load shadow and origin corresponding to bytes [\p
667 /// Addr, \p Addr + \p Size), where addr has alignment \p
668 /// InstAlignment, and take the union of each of those shadows. The returned
669 /// shadow always has primitive type.
670 ///
671 /// When tracking loads is enabled, the returned origin is a chain at the
672 /// current stack if the returned shadow is tainted.
673 std::pair<Value *, Value *> loadShadowOrigin(Value *Addr, uint64_t Size,
674 Align InstAlignment,
676
677 void storePrimitiveShadowOrigin(Value *Addr, uint64_t Size,
678 Align InstAlignment, Value *PrimitiveShadow,
679 Value *Origin, BasicBlock::iterator Pos);
680 /// Applies PrimitiveShadow to all primitive subtypes of T, returning
681 /// the expanded shadow value.
682 ///
683 /// EFP({T1,T2, ...}, PS) = {EFP(T1,PS),EFP(T2,PS),...}
684 /// EFP([n x T], PS) = [n x EFP(T,PS)]
685 /// EFP(other types, PS) = PS
686 Value *expandFromPrimitiveShadow(Type *T, Value *PrimitiveShadow,
688 /// Collapses Shadow into a single primitive shadow value, unioning all
689 /// primitive shadow values in the process. Returns the final primitive
690 /// shadow value.
691 ///
692 /// CTP({V1,V2, ...}) = UNION(CFP(V1,PS),CFP(V2,PS),...)
693 /// CTP([V1,V2,...]) = UNION(CFP(V1,PS),CFP(V2,PS),...)
694 /// CTP(other types, PS) = PS
695 Value *collapseToPrimitiveShadow(Value *Shadow, BasicBlock::iterator Pos);
696
697 void storeZeroPrimitiveShadow(Value *Addr, uint64_t Size, Align ShadowAlign,
699
700 Align getShadowAlign(Align InstAlignment);
701
702 // If ClConditionalCallbacks is enabled, insert a callback after a given
703 // branch instruction using the given conditional expression.
704 void addConditionalCallbacksIfEnabled(Instruction &I, Value *Condition);
705
706 // If ClReachesFunctionCallbacks is enabled, insert a callback for each
707 // argument and load instruction.
708 void addReachesFunctionCallbacksIfEnabled(IRBuilder<> &IRB, Instruction &I,
709 Value *Data);
710
711 bool isLookupTableConstant(Value *P);
712
713private:
714 /// Collapses the shadow with aggregate type into a single primitive shadow
715 /// value.
716 template <class AggregateType>
717 Value *collapseAggregateShadow(AggregateType *AT, Value *Shadow,
718 IRBuilder<> &IRB);
719
720 Value *collapseToPrimitiveShadow(Value *Shadow, IRBuilder<> &IRB);
721
722 /// Returns the shadow value of an argument A.
723 Value *getShadowForTLSArgument(Argument *A);
724
725 /// The fast path of loading shadows.
726 std::pair<Value *, Value *>
727 loadShadowFast(Value *ShadowAddr, Value *OriginAddr, uint64_t Size,
728 Align ShadowAlign, Align OriginAlign, Value *FirstOrigin,
730
731 Align getOriginAlign(Align InstAlignment);
732
733 /// Because 4 contiguous bytes share one 4-byte origin, the most accurate load
734 /// is __dfsan_load_label_and_origin. This function returns the union of all
735 /// labels and the origin of the first taint label. However this is an
736 /// additional call with many instructions. To ensure common cases are fast,
737 /// checks if it is possible to load labels and origins without using the
738 /// callback function.
739 ///
740 /// When enabling tracking load instructions, we always use
741 /// __dfsan_load_label_and_origin to reduce code size.
742 bool useCallbackLoadLabelAndOrigin(uint64_t Size, Align InstAlignment);
743
744 /// Returns a chain at the current stack with previous origin V.
745 Value *updateOrigin(Value *V, IRBuilder<> &IRB);
746
747 /// Returns a chain at the current stack with previous origin V if Shadow is
748 /// tainted.
749 Value *updateOriginIfTainted(Value *Shadow, Value *Origin, IRBuilder<> &IRB);
750
751 /// Creates an Intptr = Origin | Origin << 32 if Intptr's size is 64. Returns
752 /// Origin otherwise.
753 Value *originToIntptr(IRBuilder<> &IRB, Value *Origin);
754
755 /// Stores Origin into the address range [StoreOriginAddr, StoreOriginAddr +
756 /// Size).
757 void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *StoreOriginAddr,
758 uint64_t StoreOriginSize, Align Alignment);
759
760 /// Stores Origin in terms of its Shadow value.
761 /// * Do not write origins for zero shadows because we do not trace origins
762 /// for untainted sinks.
763 /// * Use __dfsan_maybe_store_origin if there are too many origin store
764 /// instrumentations.
765 void storeOrigin(BasicBlock::iterator Pos, Value *Addr, uint64_t Size,
766 Value *Shadow, Value *Origin, Value *StoreOriginAddr,
767 Align InstAlignment);
768
769 /// Convert a scalar value to an i1 by comparing with 0.
770 Value *convertToBool(Value *V, IRBuilder<> &IRB, const Twine &Name = "");
771
772 bool shouldInstrumentWithCall();
773
774 /// Generates IR to load shadow and origin corresponding to bytes [\p
775 /// Addr, \p Addr + \p Size), where addr has alignment \p
776 /// InstAlignment, and take the union of each of those shadows. The returned
777 /// shadow always has primitive type.
778 std::pair<Value *, Value *>
779 loadShadowOriginSansLoadTracking(Value *Addr, uint64_t Size,
780 Align InstAlignment,
782 int NumOriginStores = 0;
783};
784
785class DFSanVisitor : public InstVisitor<DFSanVisitor> {
786public:
787 DFSanFunction &DFSF;
788
789 DFSanVisitor(DFSanFunction &DFSF) : DFSF(DFSF) {}
790
791 const DataLayout &getDataLayout() const {
792 return DFSF.F->getDataLayout();
793 }
794
795 // Combines shadow values and origins for all of I's operands.
796 void visitInstOperands(Instruction &I);
797
800 void visitBitCastInst(BitCastInst &BCI);
801 void visitCastInst(CastInst &CI);
802 void visitCmpInst(CmpInst &CI);
805 void visitLoadInst(LoadInst &LI);
806 void visitStoreInst(StoreInst &SI);
809 void visitReturnInst(ReturnInst &RI);
810 void visitLibAtomicLoad(CallBase &CB);
811 void visitLibAtomicStore(CallBase &CB);
812 void visitLibAtomicExchange(CallBase &CB);
813 void visitLibAtomicCompareExchange(CallBase &CB);
814 void visitCallBase(CallBase &CB);
815 void visitPHINode(PHINode &PN);
825 void visitBranchInst(BranchInst &BR);
826 void visitSwitchInst(SwitchInst &SW);
827
828private:
829 void visitCASOrRMW(Align InstAlignment, Instruction &I);
830
831 // Returns false when this is an invoke of a custom function.
832 bool visitWrappedCallBase(Function &F, CallBase &CB);
833
834 // Combines origins for all of I's operands.
835 void visitInstOperandOrigins(Instruction &I);
836
837 void addShadowArguments(Function &F, CallBase &CB, std::vector<Value *> &Args,
838 IRBuilder<> &IRB);
839
840 void addOriginArguments(Function &F, CallBase &CB, std::vector<Value *> &Args,
841 IRBuilder<> &IRB);
842
843 Value *makeAddAcquireOrderingTable(IRBuilder<> &IRB);
844 Value *makeAddReleaseOrderingTable(IRBuilder<> &IRB);
845};
846
847bool LibAtomicFunction(const Function &F) {
848 // This is a bit of a hack because TargetLibraryInfo is a function pass.
849 // The DFSan pass would need to be refactored to be function pass oriented
850 // (like MSan is) in order to fit together nicely with TargetLibraryInfo.
851 // We need this check to prevent them from being instrumented, or wrapped.
852 // Match on name and number of arguments.
853 if (!F.hasName() || F.isVarArg())
854 return false;
855 switch (F.arg_size()) {
856 case 4:
857 return F.getName() == "__atomic_load" || F.getName() == "__atomic_store";
858 case 5:
859 return F.getName() == "__atomic_exchange";
860 case 6:
861 return F.getName() == "__atomic_compare_exchange";
862 default:
863 return false;
864 }
865}
866
867} // end anonymous namespace
868
// Constructs the pass, merging the explicitly supplied ABI-list files with
// those given via the -dfsan-abilist command-line option.
DataFlowSanitizer::DataFlowSanitizer(
    const std::vector<std::string> &ABIListFiles) {
  // NOTE(review): std::move on a const reference degrades to a copy; kept
  // as-is since taking the parameter by value would change the interface.
  std::vector<std::string> AllABIListFiles(std::move(ABIListFiles));
  llvm::append_range(AllABIListFiles, ClABIListFiles);
  // FIXME: should we propagate vfs::FileSystem to this constructor?
  ABIList.set(

    CombineTaintLookupTableNames.insert(v);
}
880
881TransformedFunction DataFlowSanitizer::getCustomFunctionType(FunctionType *T) {
882 SmallVector<Type *, 4> ArgTypes;
883
884 // Some parameters of the custom function being constructed are
885 // parameters of T. Record the mapping from parameters of T to
886 // parameters of the custom function, so that parameter attributes
887 // at call sites can be updated.
888 std::vector<unsigned> ArgumentIndexMapping;
889 for (unsigned I = 0, E = T->getNumParams(); I != E; ++I) {
890 Type *ParamType = T->getParamType(I);
891 ArgumentIndexMapping.push_back(ArgTypes.size());
892 ArgTypes.push_back(ParamType);
893 }
894 for (unsigned I = 0, E = T->getNumParams(); I != E; ++I)
895 ArgTypes.push_back(PrimitiveShadowTy);
896 if (T->isVarArg())
897 ArgTypes.push_back(PrimitiveShadowPtrTy);
898 Type *RetType = T->getReturnType();
899 if (!RetType->isVoidTy())
900 ArgTypes.push_back(PrimitiveShadowPtrTy);
901
902 if (shouldTrackOrigins()) {
903 for (unsigned I = 0, E = T->getNumParams(); I != E; ++I)
904 ArgTypes.push_back(OriginTy);
905 if (T->isVarArg())
906 ArgTypes.push_back(OriginPtrTy);
907 if (!RetType->isVoidTy())
908 ArgTypes.push_back(OriginPtrTy);
909 }
910
911 return TransformedFunction(
912 T, FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg()),
913 ArgumentIndexMapping);
914}
915
916bool DataFlowSanitizer::isZeroShadow(Value *V) {
917 Type *T = V->getType();
918 if (!isa<ArrayType>(T) && !isa<StructType>(T)) {
919 if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
920 return CI->isZero();
921 return false;
922 }
923
924 return isa<ConstantAggregateZero>(V);
925}
926
927bool DataFlowSanitizer::hasLoadSizeForFastPath(uint64_t Size) {
928 uint64_t ShadowSize = Size * ShadowWidthBytes;
929 return ShadowSize % 8 == 0 || ShadowSize == 4;
930}
931
// Returns whether origin tracking is enabled. The flag value is latched on
// first call, so the answer is stable for the rest of the compilation.
bool DataFlowSanitizer::shouldTrackOrigins() {
  static const bool ShouldTrackOrigins = ClTrackOrigins;
  return ShouldTrackOrigins;
}
936
937Constant *DataFlowSanitizer::getZeroShadow(Type *OrigTy) {
938 if (!isa<ArrayType>(OrigTy) && !isa<StructType>(OrigTy))
939 return ZeroPrimitiveShadow;
940 Type *ShadowTy = getShadowTy(OrigTy);
941 return ConstantAggregateZero::get(ShadowTy);
942}
943
// Convenience overload: the zero shadow for V's type.
Constant *DataFlowSanitizer::getZeroShadow(Value *V) {
  return getZeroShadow(V->getType());
}
947
    Value *Shadow, SmallVector<unsigned, 4> &Indices, Type *SubShadowTy,
    Value *PrimitiveShadow, IRBuilder<> &IRB) {
  // Leaf case: write the primitive shadow directly at the current index path.
  if (!isa<ArrayType>(SubShadowTy) && !isa<StructType>(SubShadowTy))
    return IRB.CreateInsertValue(Shadow, PrimitiveShadow, Indices);

  // Array: recurse into every element, extending the index path.
  if (ArrayType *AT = dyn_cast<ArrayType>(SubShadowTy)) {
    for (unsigned Idx = 0; Idx < AT->getNumElements(); Idx++) {
      Indices.push_back(Idx);
          Shadow, Indices, AT->getElementType(), PrimitiveShadow, IRB);
      Indices.pop_back();
    }
    return Shadow;
  }

  // Struct: recurse into every field, extending the index path.
  if (StructType *ST = dyn_cast<StructType>(SubShadowTy)) {
    for (unsigned Idx = 0; Idx < ST->getNumElements(); Idx++) {
      Indices.push_back(Idx);
          Shadow, Indices, ST->getElementType(Idx), PrimitiveShadow, IRB);
      Indices.pop_back();
    }
    return Shadow;
  }
  llvm_unreachable("Unexpected shadow type");
}
975
976bool DFSanFunction::shouldInstrumentWithCall() {
977 return ClInstrumentWithCallThreshold >= 0 &&
978 NumOriginStores >= ClInstrumentWithCallThreshold;
979}
980
// Expands PrimitiveShadow to the (possibly aggregate) shadow type of T,
// writing the primitive value into every leaf position, and caches the
// result so it can later be collapsed back without emitting IR.
Value *DFSanFunction::expandFromPrimitiveShadow(Type *T, Value *PrimitiveShadow,
  Type *ShadowTy = DFS.getShadowTy(T);

  // Non-aggregate shadow types need no expansion.
  if (!isa<ArrayType>(ShadowTy) && !isa<StructType>(ShadowTy))
    return PrimitiveShadow;

  // A zero primitive shadow expands to the all-zero aggregate constant.
  if (DFS.isZeroShadow(PrimitiveShadow))
    return DFS.getZeroShadow(ShadowTy);

  IRBuilder<> IRB(Pos->getParent(), Pos);
  Value *Shadow = UndefValue::get(ShadowTy);
  Shadow = expandFromPrimitiveShadowRecursive(Shadow, Indices, ShadowTy,
                                              PrimitiveShadow, IRB);

  // Caches the primitive shadow value that built the shadow value.
  CachedCollapsedShadows[Shadow] = PrimitiveShadow;
  return Shadow;
}
1001
1002template <class AggregateType>
1003Value *DFSanFunction::collapseAggregateShadow(AggregateType *AT, Value *Shadow,
1004 IRBuilder<> &IRB) {
1005 if (!AT->getNumElements())
1006 return DFS.ZeroPrimitiveShadow;
1007
1008 Value *FirstItem = IRB.CreateExtractValue(Shadow, 0);
1009 Value *Aggregator = collapseToPrimitiveShadow(FirstItem, IRB);
1010
1011 for (unsigned Idx = 1; Idx < AT->getNumElements(); Idx++) {
1012 Value *ShadowItem = IRB.CreateExtractValue(Shadow, Idx);
1013 Value *ShadowInner = collapseToPrimitiveShadow(ShadowItem, IRB);
1014 Aggregator = IRB.CreateOr(Aggregator, ShadowInner);
1015 }
1016 return Aggregator;
1017}
1018
1019Value *DFSanFunction::collapseToPrimitiveShadow(Value *Shadow,
1020 IRBuilder<> &IRB) {
1021 Type *ShadowTy = Shadow->getType();
1022 if (!isa<ArrayType>(ShadowTy) && !isa<StructType>(ShadowTy))
1023 return Shadow;
1024 if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy))
1025 return collapseAggregateShadow<>(AT, Shadow, IRB);
1026 if (StructType *ST = dyn_cast<StructType>(ShadowTy))
1027 return collapseAggregateShadow<>(ST, Shadow, IRB);
1028 llvm_unreachable("Unexpected shadow type");
1029}
1030
// As the IRBuilder overload, but inserts before Pos and memoizes the result
// per Shadow, reusing a previously computed collapse when it dominates Pos.
Value *DFSanFunction::collapseToPrimitiveShadow(Value *Shadow,
  Type *ShadowTy = Shadow->getType();
  if (!isa<ArrayType>(ShadowTy) && !isa<StructType>(ShadowTy))
    return Shadow;

  // Checks if the cached collapsed shadow value dominates Pos.
  Value *&CS = CachedCollapsedShadows[Shadow];
  if (CS && DT.dominates(CS, Pos))
    return CS;

  IRBuilder<> IRB(Pos->getParent(), Pos);
  Value *PrimitiveShadow = collapseToPrimitiveShadow(Shadow, IRB);
  // Caches the converted primitive shadow value.
  CS = PrimitiveShadow;
  return PrimitiveShadow;
}
1048
// Inserts, before I, a call to the conditional callback passing the shadow
// (and, when origin tracking is on, the origin) of the condition value.
// Returns early when the feature is disabled (guard elided in this view).
void DFSanFunction::addConditionalCallbacksIfEnabled(Instruction &I,
                                                     Value *Condition) {
    return;
  }
  IRBuilder<> IRB(&I);
  Value *CondShadow = getShadow(Condition);
  CallInst *CI;
  if (DFS.shouldTrackOrigins()) {
    Value *CondOrigin = getOrigin(Condition);
    CI = IRB.CreateCall(DFS.DFSanConditionalCallbackOriginFn,
                        {CondShadow, CondOrigin});
  } else {
    CI = IRB.CreateCall(DFS.DFSanConditionalCallbackFn, {CondShadow});
  }
  // The shadow argument is passed zero-extended.
  CI->addParamAttr(0, Attribute::ZExt);
}
1066
// Inserts a call reporting that Data (typically a function argument)
// reached the current function, passing its collapsed shadow plus file,
// line, and function name for diagnostics. Returns early when the feature
// is disabled (guard elided in this view).
void DFSanFunction::addReachesFunctionCallbacksIfEnabled(IRBuilder<> &IRB,
                                                         Instruction &I,
                                                         Value *Data) {
    return;
  }
  const DebugLoc &dbgloc = I.getDebugLoc();
  // Aggregate shadows are collapsed to a single primitive label.
  Value *DataShadow = collapseToPrimitiveShadow(getShadow(Data), IRB);
  ConstantInt *CILine;
  llvm::Value *FilePathPtr;

  if (dbgloc.get() == nullptr) {
    // No debug location: fall back to line 0 and the module's source file.
    CILine = llvm::ConstantInt::get(I.getContext(), llvm::APInt(32, 0));
    FilePathPtr = IRB.CreateGlobalString(
        I.getFunction()->getParent()->getSourceFileName());
  } else {
    CILine = llvm::ConstantInt::get(I.getContext(),
                                    llvm::APInt(32, dbgloc.getLine()));
    FilePathPtr = IRB.CreateGlobalString(dbgloc->getFilename());
  }

  llvm::Value *FunctionNamePtr =
      IRB.CreateGlobalString(I.getFunction()->getName());

  CallInst *CB;
  std::vector<Value *> args;

  if (DFS.shouldTrackOrigins()) {
    Value *DataOrigin = getOrigin(Data);
    args = { DataShadow, DataOrigin, FilePathPtr, CILine, FunctionNamePtr };
    CB = IRB.CreateCall(DFS.DFSanReachesFunctionCallbackOriginFn, args);
  } else {
    args = { DataShadow, FilePathPtr, CILine, FunctionNamePtr };
    CB = IRB.CreateCall(DFS.DFSanReachesFunctionCallbackFn, args);
  }
  // The shadow argument is passed zero-extended.
  CB->addParamAttr(0, Attribute::ZExt);
  CB->setDebugLoc(dbgloc);
}
1105
// Maps an application type to its shadow type: arrays and structs map
// element-wise to aggregates of shadow types; everything else (integers,
// vectors, unsized types) maps to the primitive shadow integer type.
Type *DataFlowSanitizer::getShadowTy(Type *OrigTy) {
  if (!OrigTy->isSized())
    return PrimitiveShadowTy;
  if (isa<IntegerType>(OrigTy))
    return PrimitiveShadowTy;
  // Note: vectors get a single primitive shadow, not a vector of shadows.
  if (isa<VectorType>(OrigTy))
    return PrimitiveShadowTy;
  if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy))
    return ArrayType::get(getShadowTy(AT->getElementType()),
                          AT->getNumElements());
  if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
    for (unsigned I = 0, N = ST->getNumElements(); I < N; ++I)
      Elements.push_back(getShadowTy(ST->getElementType(I)));
    return StructType::get(*Ctx, Elements);
  }
  return PrimitiveShadowTy;
}
1124
// Convenience overload: the shadow type for V's type.
Type *DataFlowSanitizer::getShadowTy(Value *V) {
  return getShadowTy(V->getType());
}
1128
// Caches module-level state: selects the target memory-map parameters,
// creates the common shadow/origin types and constants, and builds the
// function types for every DFSan runtime and callback entry point.
// Always returns true (reported as "changed").
bool DataFlowSanitizer::initializeModule(Module &M) {
  Triple TargetTriple(M.getTargetTriple());
  const DataLayout &DL = M.getDataLayout();

  // Only Linux on a known 64-bit architecture has a defined shadow layout.
  if (TargetTriple.getOS() != Triple::Linux)
    report_fatal_error("unsupported operating system");
  switch (TargetTriple.getArch()) {
  case Triple::aarch64:
    MapParams = &Linux_AArch64_MemoryMapParams;
    break;
  case Triple::x86_64:
    MapParams = &Linux_X86_64_MemoryMapParams;
    break;
    break;
  default:
    report_fatal_error("unsupported architecture");
  }

  // Common types and zero constants used throughout instrumentation.
  Mod = &M;
  Ctx = &M.getContext();
  Int8Ptr = PointerType::getUnqual(*Ctx);
  OriginTy = IntegerType::get(*Ctx, OriginWidthBits);
  OriginPtrTy = PointerType::getUnqual(OriginTy);
  PrimitiveShadowTy = IntegerType::get(*Ctx, ShadowWidthBits);
  PrimitiveShadowPtrTy = PointerType::getUnqual(PrimitiveShadowTy);
  IntptrTy = DL.getIntPtrType(*Ctx);
  ZeroPrimitiveShadow = ConstantInt::getSigned(PrimitiveShadowTy, 0);
  ZeroOrigin = ConstantInt::getSigned(OriginTy, 0);

  // Function types for the runtime entry points declared in
  // initializeRuntimeFunctions / initializeCallbackFunctions.
  Type *DFSanUnionLoadArgs[2] = {PrimitiveShadowPtrTy, IntptrTy};
  DFSanUnionLoadFnTy = FunctionType::get(PrimitiveShadowTy, DFSanUnionLoadArgs,
                                         /*isVarArg=*/false);
  Type *DFSanLoadLabelAndOriginArgs[2] = {Int8Ptr, IntptrTy};
  DFSanLoadLabelAndOriginFnTy =
      FunctionType::get(IntegerType::get(*Ctx, 64), DFSanLoadLabelAndOriginArgs,
                        /*isVarArg=*/false);
  DFSanUnimplementedFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), PointerType::getUnqual(*Ctx), /*isVarArg=*/false);
  Type *DFSanWrapperExternWeakNullArgs[2] = {Int8Ptr, Int8Ptr};
  DFSanWrapperExternWeakNullFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), DFSanWrapperExternWeakNullArgs,
                        /*isVarArg=*/false);
  Type *DFSanSetLabelArgs[4] = {PrimitiveShadowTy, OriginTy,
                                PointerType::getUnqual(*Ctx), IntptrTy};
  DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx),
                                        DFSanSetLabelArgs, /*isVarArg=*/false);
  DFSanNonzeroLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx), {},
                                            /*isVarArg=*/false);
  DFSanVarargWrapperFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), PointerType::getUnqual(*Ctx), /*isVarArg=*/false);
  DFSanConditionalCallbackFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), PrimitiveShadowTy,
                        /*isVarArg=*/false);
  Type *DFSanConditionalCallbackOriginArgs[2] = {PrimitiveShadowTy, OriginTy};
  DFSanConditionalCallbackOriginFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), DFSanConditionalCallbackOriginArgs,
      /*isVarArg=*/false);
  Type *DFSanReachesFunctionCallbackArgs[4] = {PrimitiveShadowTy, Int8Ptr,
                                               OriginTy, Int8Ptr};
  DFSanReachesFunctionCallbackFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), DFSanReachesFunctionCallbackArgs,
                        /*isVarArg=*/false);
  Type *DFSanReachesFunctionCallbackOriginArgs[5] = {
      PrimitiveShadowTy, OriginTy, Int8Ptr, OriginTy, Int8Ptr};
  DFSanReachesFunctionCallbackOriginFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), DFSanReachesFunctionCallbackOriginArgs,
      /*isVarArg=*/false);
  DFSanCmpCallbackFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), PrimitiveShadowTy,
                        /*isVarArg=*/false);
  DFSanChainOriginFnTy =
      FunctionType::get(OriginTy, OriginTy, /*isVarArg=*/false);
  Type *DFSanChainOriginIfTaintedArgs[2] = {PrimitiveShadowTy, OriginTy};
  DFSanChainOriginIfTaintedFnTy = FunctionType::get(
      OriginTy, DFSanChainOriginIfTaintedArgs, /*isVarArg=*/false);
  Type *DFSanMaybeStoreOriginArgs[4] = {IntegerType::get(*Ctx, ShadowWidthBits),
                                        Int8Ptr, IntptrTy, OriginTy};
  DFSanMaybeStoreOriginFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), DFSanMaybeStoreOriginArgs, /*isVarArg=*/false);
  Type *DFSanMemOriginTransferArgs[3] = {Int8Ptr, Int8Ptr, IntptrTy};
  DFSanMemOriginTransferFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), DFSanMemOriginTransferArgs, /*isVarArg=*/false);
  Type *DFSanMemShadowOriginTransferArgs[3] = {Int8Ptr, Int8Ptr, IntptrTy};
  DFSanMemShadowOriginTransferFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), DFSanMemShadowOriginTransferArgs,
                        /*isVarArg=*/false);
  Type *DFSanMemShadowOriginConditionalExchangeArgs[5] = {
      IntegerType::get(*Ctx, 8), Int8Ptr, Int8Ptr, Int8Ptr, IntptrTy};
  DFSanMemShadowOriginConditionalExchangeFnTy = FunctionType::get(
      Type::getVoidTy(*Ctx), DFSanMemShadowOriginConditionalExchangeArgs,
      /*isVarArg=*/false);
  Type *DFSanLoadStoreCallbackArgs[2] = {PrimitiveShadowTy, Int8Ptr};
  DFSanLoadStoreCallbackFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), DFSanLoadStoreCallbackArgs,
                        /*isVarArg=*/false);
  Type *DFSanMemTransferCallbackArgs[2] = {PrimitiveShadowPtrTy, IntptrTy};
  DFSanMemTransferCallbackFnTy =
      FunctionType::get(Type::getVoidTy(*Ctx), DFSanMemTransferCallbackArgs,
                        /*isVarArg=*/false);

  // Branch-weight metadata marking runtime slow paths as unlikely.
  ColdCallWeights = MDBuilder(*Ctx).createUnlikelyBranchWeights();
  OriginStoreWeights = MDBuilder(*Ctx).createUnlikelyBranchWeights();
  return true;
}
1235
// A function is instrumented unless the ABI list marks it "uninstrumented".
bool DataFlowSanitizer::isInstrumented(const Function *F) {
  return !ABIList.isIn(*F, "uninstrumented");
}
1239
// Same check for global aliases.
bool DataFlowSanitizer::isInstrumented(const GlobalAlias *GA) {
  return !ABIList.isIn(*GA, "uninstrumented");
}
1243
// True if the ABI list forces all labels in F to zero.
bool DataFlowSanitizer::isForceZeroLabels(const Function *F) {
  return ABIList.isIn(*F, "force_zero_labels");
}
1247
1248DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) {
1249 if (ABIList.isIn(*F, "functional"))
1250 return WK_Functional;
1251 if (ABIList.isIn(*F, "discard"))
1252 return WK_Discard;
1253 if (ABIList.isIn(*F, "custom"))
1254 return WK_Custom;
1255
1256 return WK_Warning;
1257}
1258
1259void DataFlowSanitizer::addGlobalNameSuffix(GlobalValue *GV) {
1260 std::string GVName = std::string(GV->getName()), Suffix = ".dfsan";
1261 GV->setName(GVName + Suffix);
1262
1263 // Try to change the name of the function in module inline asm. We only do
1264 // this for specific asm directives, currently only ".symver", to try to avoid
1265 // corrupting asm which happens to contain the symbol name as a substring.
1266 // Note that the substitution for .symver assumes that the versioned symbol
1267 // also has an instrumented name.
1268 std::string Asm = GV->getParent()->getModuleInlineAsm();
1269 std::string SearchStr = ".symver " + GVName + ",";
1270 size_t Pos = Asm.find(SearchStr);
1271 if (Pos != std::string::npos) {
1272 Asm.replace(Pos, SearchStr.size(), ".symver " + GVName + Suffix + ",");
1273 Pos = Asm.find('@');
1274
1275 if (Pos == std::string::npos)
1276 report_fatal_error(Twine("unsupported .symver: ", Asm));
1277
1278 Asm.replace(Pos, 1, Suffix + "@");
1279 GV->getParent()->setModuleInlineAsm(Asm);
1280 }
1281}
1282
1283void DataFlowSanitizer::buildExternWeakCheckIfNeeded(IRBuilder<> &IRB,
1284 Function *F) {
1285 // If the function we are wrapping was ExternWeak, it may be null.
1286 // The original code before calling this wrapper may have checked for null,
1287 // but replacing with a known-to-not-be-null wrapper can break this check.
1288 // When replacing uses of the extern weak function with the wrapper we try
1289 // to avoid replacing uses in conditionals, but this is not perfect.
1290 // In the case where we fail, and accidentally optimize out a null check
1291 // for a extern weak function, add a check here to help identify the issue.
1292 if (GlobalValue::isExternalWeakLinkage(F->getLinkage())) {
1293 std::vector<Value *> Args;
1294 Args.push_back(F);
1295 Args.push_back(IRB.CreateGlobalString(F->getName()));
1296 IRB.CreateCall(DFSanWrapperExternWeakNullFn, Args);
1297 }
1298}
1299
// Creates a function named NewFName of type NewFT that forwards to F,
// copying F's attributes. Vararg functions cannot be forwarded, so their
// wrapper instead reports the call via the vararg-wrapper runtime hook and
// ends in unreachable.
Function *
DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName,
                                        FunctionType *NewFT) {
  FunctionType *FT = F->getFunctionType();
  Function *NewF = Function::Create(NewFT, NewFLink, F->getAddressSpace(),
                                    NewFName, F->getParent());
  NewF->copyAttributesFrom(F);
      NewFT->getReturnType(), NewF->getAttributes().getRetAttrs()));

  BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", NewF);
  if (F->isVarArg()) {
    NewF->removeFnAttr("split-stack");
    CallInst::Create(DFSanVarargWrapperFn,
                     IRBuilder<>(BB).CreateGlobalString(F->getName()), "", BB);
    new UnreachableInst(*Ctx, BB);
  } else {
    // Forward all of the wrapper's arguments to F and return its result.
    auto ArgIt = pointer_iterator<Argument *>(NewF->arg_begin());
    std::vector<Value *> Args(ArgIt, ArgIt + FT->getNumParams());

    CallInst *CI = CallInst::Create(F, Args, "", BB);
    if (FT->getReturnType()->isVoidTy())
      ReturnInst::Create(*Ctx, BB);
    else
      ReturnInst::Create(*Ctx, CI, BB);
  }

  return NewF;
}
1330
// Initialize DataFlowSanitizer runtime functions and declare them in the module
void DataFlowSanitizer::initializeRuntimeFunctions(Module &M) {
  LLVMContext &C = M.getContext();
  // Shadow arguments and returns are narrower than the ABI slot, so the
  // relevant parameters/returns are marked zero-extended.
  {
    AL = AL.addFnAttribute(C, Attribute::NoUnwind);
    AL = AL.addFnAttribute(
    AL = AL.addRetAttribute(C, Attribute::ZExt);
    DFSanUnionLoadFn =
        Mod->getOrInsertFunction("__dfsan_union_load", DFSanUnionLoadFnTy, AL);
  }
  {
    AL = AL.addFnAttribute(C, Attribute::NoUnwind);
    AL = AL.addFnAttribute(
    AL = AL.addRetAttribute(C, Attribute::ZExt);
    DFSanLoadLabelAndOriginFn = Mod->getOrInsertFunction(
        "__dfsan_load_label_and_origin", DFSanLoadLabelAndOriginFnTy, AL);
  }
  DFSanUnimplementedFn =
      Mod->getOrInsertFunction("__dfsan_unimplemented", DFSanUnimplementedFnTy);
  DFSanWrapperExternWeakNullFn = Mod->getOrInsertFunction(
      "__dfsan_wrapper_extern_weak_null", DFSanWrapperExternWeakNullFnTy);
  {
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
    DFSanSetLabelFn =
        Mod->getOrInsertFunction("__dfsan_set_label", DFSanSetLabelFnTy, AL);
  }
  DFSanNonzeroLabelFn =
      Mod->getOrInsertFunction("__dfsan_nonzero_label", DFSanNonzeroLabelFnTy);
  DFSanVarargWrapperFn = Mod->getOrInsertFunction("__dfsan_vararg_wrapper",
                                                  DFSanVarargWrapperFnTy);
  {
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addRetAttribute(M.getContext(), Attribute::ZExt);
    DFSanChainOriginFn = Mod->getOrInsertFunction("__dfsan_chain_origin",
                                                  DFSanChainOriginFnTy, AL);
  }
  {
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
    AL = AL.addRetAttribute(M.getContext(), Attribute::ZExt);
    DFSanChainOriginIfTaintedFn = Mod->getOrInsertFunction(
        "__dfsan_chain_origin_if_tainted", DFSanChainOriginIfTaintedFnTy, AL);
  }
  DFSanMemOriginTransferFn = Mod->getOrInsertFunction(
      "__dfsan_mem_origin_transfer", DFSanMemOriginTransferFnTy);

  DFSanMemShadowOriginTransferFn = Mod->getOrInsertFunction(
      "__dfsan_mem_shadow_origin_transfer", DFSanMemShadowOriginTransferFnTy);

  DFSanMemShadowOriginConditionalExchangeFn =
      Mod->getOrInsertFunction("__dfsan_mem_shadow_origin_conditional_exchange",
                               DFSanMemShadowOriginConditionalExchangeFnTy);

  {
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    AL = AL.addParamAttribute(M.getContext(), 3, Attribute::ZExt);
    DFSanMaybeStoreOriginFn = Mod->getOrInsertFunction(
        "__dfsan_maybe_store_origin", DFSanMaybeStoreOriginFnTy, AL);
  }

  // Record every runtime/callback callee so runImpl can exclude them from
  // instrumentation. NOTE(review): the DFSan*CallbackFn members referenced
  // below are set up in initializeCallbackFunctions, which runImpl calls
  // before this function.
  DFSanRuntimeFunctions.insert(
      DFSanUnionLoadFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanLoadLabelAndOriginFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanUnimplementedFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanWrapperExternWeakNullFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanSetLabelFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanNonzeroLabelFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanVarargWrapperFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanLoadCallbackFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanStoreCallbackFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanMemTransferCallbackFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanConditionalCallbackFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanConditionalCallbackOriginFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanReachesFunctionCallbackFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanReachesFunctionCallbackOriginFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanCmpCallbackFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanChainOriginFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanChainOriginIfTaintedFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanMemOriginTransferFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanMemShadowOriginTransferFn.getCallee()->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanMemShadowOriginConditionalExchangeFn.getCallee()
          ->stripPointerCasts());
  DFSanRuntimeFunctions.insert(
      DFSanMaybeStoreOriginFn.getCallee()->stripPointerCasts());
}
1444
// Initializes event callback functions and declare them in the module
void DataFlowSanitizer::initializeCallbackFunctions(Module &M) {
  // Each callback takes a primitive shadow as its first argument, marked
  // zero-extended.
  {
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    DFSanLoadCallbackFn = Mod->getOrInsertFunction(
        "__dfsan_load_callback", DFSanLoadStoreCallbackFnTy, AL);
  }
  {
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    DFSanStoreCallbackFn = Mod->getOrInsertFunction(
        "__dfsan_store_callback", DFSanLoadStoreCallbackFnTy, AL);
  }
  DFSanMemTransferCallbackFn = Mod->getOrInsertFunction(
      "__dfsan_mem_transfer_callback", DFSanMemTransferCallbackFnTy);
  {
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    DFSanCmpCallbackFn = Mod->getOrInsertFunction("__dfsan_cmp_callback",
                                                  DFSanCmpCallbackFnTy, AL);
  }
  {
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    DFSanConditionalCallbackFn = Mod->getOrInsertFunction(
        "__dfsan_conditional_callback", DFSanConditionalCallbackFnTy, AL);
  }
  {
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    DFSanConditionalCallbackOriginFn =
        Mod->getOrInsertFunction("__dfsan_conditional_callback_origin",
                                 DFSanConditionalCallbackOriginFnTy, AL);
  }
  {
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    DFSanReachesFunctionCallbackFn =
        Mod->getOrInsertFunction("__dfsan_reaches_function_callback",
                                 DFSanReachesFunctionCallbackFnTy, AL);
  }
  {
    AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
    DFSanReachesFunctionCallbackOriginFn =
        Mod->getOrInsertFunction("__dfsan_reaches_function_callback_origin",
                                 DFSanReachesFunctionCallbackOriginFnTy, AL);
  }
}
1495
1496bool DataFlowSanitizer::runImpl(
1498 initializeModule(M);
1499
1500 if (ABIList.isIn(M, "skip"))
1501 return false;
1502
1503 const unsigned InitialGlobalSize = M.global_size();
1504 const unsigned InitialModuleSize = M.size();
1505
1506 bool Changed = false;
1507
1508 auto GetOrInsertGlobal = [this, &Changed](StringRef Name,
1509 Type *Ty) -> Constant * {
1510 Constant *C = Mod->getOrInsertGlobal(Name, Ty);
1511 if (GlobalVariable *G = dyn_cast<GlobalVariable>(C)) {
1512 Changed |= G->getThreadLocalMode() != GlobalVariable::InitialExecTLSModel;
1513 G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
1514 }
1515 return C;
1516 };
1517
1518 // These globals must be kept in sync with the ones in dfsan.cpp.
1519 ArgTLS =
1520 GetOrInsertGlobal("__dfsan_arg_tls",
1521 ArrayType::get(Type::getInt64Ty(*Ctx), ArgTLSSize / 8));
1522 RetvalTLS = GetOrInsertGlobal(
1523 "__dfsan_retval_tls",
1524 ArrayType::get(Type::getInt64Ty(*Ctx), RetvalTLSSize / 8));
1525 ArgOriginTLSTy = ArrayType::get(OriginTy, NumOfElementsInArgOrgTLS);
1526 ArgOriginTLS = GetOrInsertGlobal("__dfsan_arg_origin_tls", ArgOriginTLSTy);
1527 RetvalOriginTLS = GetOrInsertGlobal("__dfsan_retval_origin_tls", OriginTy);
1528
1529 (void)Mod->getOrInsertGlobal("__dfsan_track_origins", OriginTy, [&] {
1530 Changed = true;
1531 return new GlobalVariable(
1532 M, OriginTy, true, GlobalValue::WeakODRLinkage,
1533 ConstantInt::getSigned(OriginTy,
1534 shouldTrackOrigins() ? ClTrackOrigins : 0),
1535 "__dfsan_track_origins");
1536 });
1537
1538 initializeCallbackFunctions(M);
1539 initializeRuntimeFunctions(M);
1540
1541 std::vector<Function *> FnsToInstrument;
1542 SmallPtrSet<Function *, 2> FnsWithNativeABI;
1543 SmallPtrSet<Function *, 2> FnsWithForceZeroLabel;
1544 SmallPtrSet<Constant *, 1> PersonalityFns;
1545 for (Function &F : M)
1546 if (!F.isIntrinsic() && !DFSanRuntimeFunctions.contains(&F) &&
1547 !LibAtomicFunction(F) &&
1548 !F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation)) {
1549 FnsToInstrument.push_back(&F);
1550 if (F.hasPersonalityFn())
1551 PersonalityFns.insert(F.getPersonalityFn()->stripPointerCasts());
1552 }
1553
1555 for (auto *C : PersonalityFns) {
1556 assert(isa<Function>(C) && "Personality routine is not a function!");
1557 Function *F = cast<Function>(C);
1558 if (!isInstrumented(F))
1559 llvm::erase(FnsToInstrument, F);
1560 }
1561 }
1562
1563 // Give function aliases prefixes when necessary, and build wrappers where the
1564 // instrumentedness is inconsistent.
1565 for (GlobalAlias &GA : llvm::make_early_inc_range(M.aliases())) {
1566 // Don't stop on weak. We assume people aren't playing games with the
1567 // instrumentedness of overridden weak aliases.
1568 auto *F = dyn_cast<Function>(GA.getAliaseeObject());
1569 if (!F)
1570 continue;
1571
1572 bool GAInst = isInstrumented(&GA), FInst = isInstrumented(F);
1573 if (GAInst && FInst) {
1574 addGlobalNameSuffix(&GA);
1575 } else if (GAInst != FInst) {
1576 // Non-instrumented alias of an instrumented function, or vice versa.
1577 // Replace the alias with a native-ABI wrapper of the aliasee. The pass
1578 // below will take care of instrumenting it.
1579 Function *NewF =
1580 buildWrapperFunction(F, "", GA.getLinkage(), F->getFunctionType());
1581 GA.replaceAllUsesWith(NewF);
1582 NewF->takeName(&GA);
1583 GA.eraseFromParent();
1584 FnsToInstrument.push_back(NewF);
1585 }
1586 }
1587
1588 // TODO: This could be more precise.
1589 ReadOnlyNoneAttrs.addAttribute(Attribute::Memory);
1590
1591 // First, change the ABI of every function in the module. ABI-listed
1592 // functions keep their original ABI and get a wrapper function.
1593 for (std::vector<Function *>::iterator FI = FnsToInstrument.begin(),
1594 FE = FnsToInstrument.end();
1595 FI != FE; ++FI) {
1596 Function &F = **FI;
1597 FunctionType *FT = F.getFunctionType();
1598
1599 bool IsZeroArgsVoidRet = (FT->getNumParams() == 0 && !FT->isVarArg() &&
1600 FT->getReturnType()->isVoidTy());
1601
1602 if (isInstrumented(&F)) {
1603 if (isForceZeroLabels(&F))
1604 FnsWithForceZeroLabel.insert(&F);
1605
1606 // Instrumented functions get a '.dfsan' suffix. This allows us to more
1607 // easily identify cases of mismatching ABIs. This naming scheme is
1608 // mangling-compatible (see Itanium ABI), using a vendor-specific suffix.
1609 addGlobalNameSuffix(&F);
1610 } else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) {
1611 // Build a wrapper function for F. The wrapper simply calls F, and is
1612 // added to FnsToInstrument so that any instrumentation according to its
1613 // WrapperKind is done in the second pass below.
1614
1615 // If the function being wrapped has local linkage, then preserve the
1616 // function's linkage in the wrapper function.
1617 GlobalValue::LinkageTypes WrapperLinkage =
1618 F.hasLocalLinkage() ? F.getLinkage()
1620
1621 Function *NewF = buildWrapperFunction(
1622 &F,
1623 (shouldTrackOrigins() ? std::string("dfso$") : std::string("dfsw$")) +
1624 std::string(F.getName()),
1625 WrapperLinkage, FT);
1626 NewF->removeFnAttrs(ReadOnlyNoneAttrs);
1627
1628 // Extern weak functions can sometimes be null at execution time.
1629 // Code will sometimes check if an extern weak function is null.
1630 // This could look something like:
1631 // declare extern_weak i8 @my_func(i8)
1632 // br i1 icmp ne (i8 (i8)* @my_func, i8 (i8)* null), label %use_my_func,
1633 // label %avoid_my_func
1634 // The @"dfsw$my_func" wrapper is never null, so if we replace this use
1635 // in the comparison, the icmp will simplify to false and we have
1636 // accidentally optimized away a null check that is necessary.
1637 // This can lead to a crash when the null extern_weak my_func is called.
1638 //
1639 // To prevent (the most common pattern of) this problem,
1640 // do not replace uses in comparisons with the wrapper.
1641 // We definitely want to replace uses in call instructions.
1642 // Other uses (e.g. store the function address somewhere) might be
1643 // called or compared or both - this case may not be handled correctly.
1644 // We will default to replacing with wrapper in cases we are unsure.
1645 auto IsNotCmpUse = [](Use &U) -> bool {
1646 User *Usr = U.getUser();
1647 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1648 // This is the most common case for icmp ne null
1649 if (CE->getOpcode() == Instruction::ICmp) {
1650 return false;
1651 }
1652 }
1653 if (Instruction *I = dyn_cast<Instruction>(Usr)) {
1654 if (I->getOpcode() == Instruction::ICmp) {
1655 return false;
1656 }
1657 }
1658 return true;
1659 };
1660 F.replaceUsesWithIf(NewF, IsNotCmpUse);
1661
1662 UnwrappedFnMap[NewF] = &F;
1663 *FI = NewF;
1664
1665 if (!F.isDeclaration()) {
1666 // This function is probably defining an interposition of an
1667 // uninstrumented function and hence needs to keep the original ABI.
1668 // But any functions it may call need to use the instrumented ABI, so
1669 // we instrument it in a mode which preserves the original ABI.
1670 FnsWithNativeABI.insert(&F);
1671
1672 // This code needs to rebuild the iterators, as they may be invalidated
1673 // by the push_back, taking care that the new range does not include
1674 // any functions added by this code.
1675 size_t N = FI - FnsToInstrument.begin(),
1676 Count = FE - FnsToInstrument.begin();
1677 FnsToInstrument.push_back(&F);
1678 FI = FnsToInstrument.begin() + N;
1679 FE = FnsToInstrument.begin() + Count;
1680 }
1681 // Hopefully, nobody will try to indirectly call a vararg
1682 // function... yet.
1683 } else if (FT->isVarArg()) {
1684 UnwrappedFnMap[&F] = &F;
1685 *FI = nullptr;
1686 }
1687 }
1688
1689 for (Function *F : FnsToInstrument) {
1690 if (!F || F->isDeclaration())
1691 continue;
1692
1694
1695 DFSanFunction DFSF(*this, F, FnsWithNativeABI.count(F),
1696 FnsWithForceZeroLabel.count(F), GetTLI(*F));
1697
1699 // Add callback for arguments reaching this function.
1700 for (auto &FArg : F->args()) {
1701 Instruction *Next = &F->getEntryBlock().front();
1702 Value *FArgShadow = DFSF.getShadow(&FArg);
1703 if (isZeroShadow(FArgShadow))
1704 continue;
1705 if (Instruction *FArgShadowInst = dyn_cast<Instruction>(FArgShadow)) {
1706 Next = FArgShadowInst->getNextNode();
1707 }
1708 if (shouldTrackOrigins()) {
1709 if (Instruction *Origin =
1710 dyn_cast<Instruction>(DFSF.getOrigin(&FArg))) {
1711 // Ensure IRB insertion point is after loads for shadow and origin.
1712 Instruction *OriginNext = Origin->getNextNode();
1713 if (Next->comesBefore(OriginNext)) {
1714 Next = OriginNext;
1715 }
1716 }
1717 }
1718 IRBuilder<> IRB(Next);
1719 DFSF.addReachesFunctionCallbacksIfEnabled(IRB, *Next, &FArg);
1720 }
1721 }
1722
1723 // DFSanVisitor may create new basic blocks, which confuses df_iterator.
1724 // Build a copy of the list before iterating over it.
1725 SmallVector<BasicBlock *, 4> BBList(depth_first(&F->getEntryBlock()));
1726
1727 for (BasicBlock *BB : BBList) {
1728 Instruction *Inst = &BB->front();
1729 while (true) {
1730 // DFSanVisitor may split the current basic block, changing the current
1731 // instruction's next pointer and moving the next instruction to the
1732 // tail block from which we should continue.
1733 Instruction *Next = Inst->getNextNode();
1734 // DFSanVisitor may delete Inst, so keep track of whether it was a
1735 // terminator.
1736 bool IsTerminator = Inst->isTerminator();
1737 if (!DFSF.SkipInsts.count(Inst))
1738 DFSanVisitor(DFSF).visit(Inst);
1739 if (IsTerminator)
1740 break;
1741 Inst = Next;
1742 }
1743 }
1744
1745 // We will not necessarily be able to compute the shadow for every phi node
1746 // until we have visited every block. Therefore, the code that handles phi
1747 // nodes adds them to the PHIFixups list so that they can be properly
1748 // handled here.
1749 for (DFSanFunction::PHIFixupElement &P : DFSF.PHIFixups) {
1750 for (unsigned Val = 0, N = P.Phi->getNumIncomingValues(); Val != N;
1751 ++Val) {
1752 P.ShadowPhi->setIncomingValue(
1753 Val, DFSF.getShadow(P.Phi->getIncomingValue(Val)));
1754 if (P.OriginPhi)
1755 P.OriginPhi->setIncomingValue(
1756 Val, DFSF.getOrigin(P.Phi->getIncomingValue(Val)));
1757 }
1758 }
1759
1760 // -dfsan-debug-nonzero-labels will split the CFG in all kinds of crazy
1761 // places (i.e. instructions in basic blocks we haven't even begun visiting
1762 // yet). To make our life easier, do this work in a pass after the main
1763 // instrumentation.
1765 for (Value *V : DFSF.NonZeroChecks) {
1767 if (Instruction *I = dyn_cast<Instruction>(V))
1768 Pos = std::next(I->getIterator());
1769 else
1770 Pos = DFSF.F->getEntryBlock().begin();
1771 while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos))
1772 Pos = std::next(Pos->getIterator());
1773 IRBuilder<> IRB(Pos->getParent(), Pos);
1774 Value *PrimitiveShadow = DFSF.collapseToPrimitiveShadow(V, Pos);
1775 Value *Ne =
1776 IRB.CreateICmpNE(PrimitiveShadow, DFSF.DFS.ZeroPrimitiveShadow);
1777 BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
1778 Ne, Pos, /*Unreachable=*/false, ColdCallWeights));
1779 IRBuilder<> ThenIRB(BI);
1780 ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn, {});
1781 }
1782 }
1783 }
1784
1785 return Changed || !FnsToInstrument.empty() ||
1786 M.global_size() != InitialGlobalSize || M.size() != InitialModuleSize;
1787}
1788
1789Value *DFSanFunction::getArgTLS(Type *T, unsigned ArgOffset, IRBuilder<> &IRB) {
1790 Value *Base = IRB.CreatePointerCast(DFS.ArgTLS, DFS.IntptrTy);
1791 if (ArgOffset)
1792 Base = IRB.CreateAdd(Base, ConstantInt::get(DFS.IntptrTy, ArgOffset));
1793 return IRB.CreateIntToPtr(Base, PointerType::get(DFS.getShadowTy(T), 0),
1794 "_dfsarg");
1795}
1796
1797Value *DFSanFunction::getRetvalTLS(Type *T, IRBuilder<> &IRB) {
1798 return IRB.CreatePointerCast(
1799 DFS.RetvalTLS, PointerType::get(DFS.getShadowTy(T), 0), "_dfsret");
1800}
1801
// Returns the TLS global that holds the origin of the return value.
Value *DFSanFunction::getRetvalOriginTLS() { return DFS.RetvalOriginTLS; }
1803
// Returns a pointer to the ArgNo'th slot of the argument-origin TLS array.
Value *DFSanFunction::getArgOriginTLS(unsigned ArgNo, IRBuilder<> &IRB) {
  return IRB.CreateConstInBoundsGEP2_64(DFS.ArgOriginTLSTy, DFS.ArgOriginTLS, 0,
                                        ArgNo, "_dfsarg_o");
}
1808
1809Value *DFSanFunction::getOrigin(Value *V) {
1810 assert(DFS.shouldTrackOrigins());
1811 if (!isa<Argument>(V) && !isa<Instruction>(V))
1812 return DFS.ZeroOrigin;
1813 Value *&Origin = ValOriginMap[V];
1814 if (!Origin) {
1815 if (Argument *A = dyn_cast<Argument>(V)) {
1816 if (IsNativeABI)
1817 return DFS.ZeroOrigin;
1818 if (A->getArgNo() < DFS.NumOfElementsInArgOrgTLS) {
1819 Instruction *ArgOriginTLSPos = &*F->getEntryBlock().begin();
1820 IRBuilder<> IRB(ArgOriginTLSPos);
1821 Value *ArgOriginPtr = getArgOriginTLS(A->getArgNo(), IRB);
1822 Origin = IRB.CreateLoad(DFS.OriginTy, ArgOriginPtr);
1823 } else {
1824 // Overflow
1825 Origin = DFS.ZeroOrigin;
1826 }
1827 } else {
1828 Origin = DFS.ZeroOrigin;
1829 }
1830 }
1831 return Origin;
1832}
1833
// Records Origin as the origin of instruction I. No-op unless origin tracking
// is enabled; each instruction may be assigned an origin at most once.
void DFSanFunction::setOrigin(Instruction *I, Value *Origin) {
  if (!DFS.shouldTrackOrigins())
    return;
  assert(!ValOriginMap.count(I));
  assert(Origin->getType() == DFS.OriginTy);
  ValOriginMap[I] = Origin;
}
1841
// Computes the shadow for formal argument A by locating its slot in the
// __dfsan_arg_tls buffer and loading from it at the top of the entry block.
// Falls back to the zero shadow when the slot would overflow ArgTLSSize.
// NOTE(review): the alignment argument of the final CreateAlignedLoad call is
// not visible in this rendering — confirm against the upstream source.
Value *DFSanFunction::getShadowForTLSArgument(Argument *A) {
  unsigned ArgOffset = 0;
  const DataLayout &DL = F->getDataLayout();
  for (auto &FArg : F->args()) {
    // Unsized arguments occupy no TLS space.
    if (!FArg.getType()->isSized()) {
      if (A == &FArg)
        break;
      continue;
    }

    unsigned Size = DL.getTypeAllocSize(DFS.getShadowTy(&FArg));
    if (A != &FArg) {
      // Skip over earlier arguments, keeping each slot aligned.
      ArgOffset += alignTo(Size, ShadowTLSAlignment);
      if (ArgOffset > ArgTLSSize)
        break; // ArgTLS overflows, uses a zero shadow.
      continue;
    }

    if (ArgOffset + Size > ArgTLSSize)
      break; // ArgTLS overflows, uses a zero shadow.

    Instruction *ArgTLSPos = &*F->getEntryBlock().begin();
    IRBuilder<> IRB(ArgTLSPos);
    Value *ArgShadowPtr = getArgTLS(FArg.getType(), ArgOffset, IRB);
    return IRB.CreateAlignedLoad(DFS.getShadowTy(&FArg), ArgShadowPtr,
  }

  return DFS.getZeroShadow(A);
}
1872
1873Value *DFSanFunction::getShadow(Value *V) {
1874 if (!isa<Argument>(V) && !isa<Instruction>(V))
1875 return DFS.getZeroShadow(V);
1876 if (IsForceZeroLabels)
1877 return DFS.getZeroShadow(V);
1878 Value *&Shadow = ValShadowMap[V];
1879 if (!Shadow) {
1880 if (Argument *A = dyn_cast<Argument>(V)) {
1881 if (IsNativeABI)
1882 return DFS.getZeroShadow(V);
1883 Shadow = getShadowForTLSArgument(A);
1884 NonZeroChecks.push_back(Shadow);
1885 } else {
1886 Shadow = DFS.getZeroShadow(V);
1887 }
1888 }
1889 return Shadow;
1890}
1891
// Records Shadow as the shadow of instruction I; each instruction may be
// assigned a shadow at most once.
void DFSanFunction::setShadow(Instruction *I, Value *Shadow) {
  assert(!ValShadowMap.count(I));
  ValShadowMap[I] = Shadow;
}
1896
1897/// Compute the integer shadow offset that corresponds to a given
1898/// application address.
1899///
1900/// Offset = (Addr & ~AndMask) ^ XorMask
1901Value *DataFlowSanitizer::getShadowOffset(Value *Addr, IRBuilder<> &IRB) {
1902 assert(Addr != RetvalTLS && "Reinstrumenting?");
1903 Value *OffsetLong = IRB.CreatePointerCast(Addr, IntptrTy);
1904
1905 uint64_t AndMask = MapParams->AndMask;
1906 if (AndMask)
1907 OffsetLong =
1908 IRB.CreateAnd(OffsetLong, ConstantInt::get(IntptrTy, ~AndMask));
1909
1910 uint64_t XorMask = MapParams->XorMask;
1911 if (XorMask)
1912 OffsetLong = IRB.CreateXor(OffsetLong, ConstantInt::get(IntptrTy, XorMask));
1913 return OffsetLong;
1914}
1915
// Computes both the shadow address and — when origin tracking is enabled —
// the origin address backing the given application address.
std::pair<Value *, Value *>
DataFlowSanitizer::getShadowOriginAddress(Value *Addr, Align InstAlignment,
  // Returns ((Addr & shadow_mask) + origin_base - shadow_base) & ~4UL
  IRBuilder<> IRB(Pos->getParent(), Pos);
  Value *ShadowOffset = getShadowOffset(Addr, IRB);
  Value *ShadowLong = ShadowOffset;
  uint64_t ShadowBase = MapParams->ShadowBase;
  if (ShadowBase != 0) {
    ShadowLong =
        IRB.CreateAdd(ShadowLong, ConstantInt::get(IntptrTy, ShadowBase));
  }
  // Shadow memory is addressed in ShadowWidthBits-wide cells.
  IntegerType *ShadowTy = IntegerType::get(*Ctx, ShadowWidthBits);
  Value *ShadowPtr =
      IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
  Value *OriginPtr = nullptr;
  if (shouldTrackOrigins()) {
    // The origin address reuses the shadow offset, rebased into the origin
    // region.
    Value *OriginLong = ShadowOffset;
    uint64_t OriginBase = MapParams->OriginBase;
    if (OriginBase != 0)
      OriginLong =
          IRB.CreateAdd(OriginLong, ConstantInt::get(IntptrTy, OriginBase));
    const Align Alignment = llvm::assumeAligned(InstAlignment.value());
    // When alignment is >= 4, Addr must be aligned to 4, otherwise it is UB.
    // So Mask is unnecessary.
    if (Alignment < MinOriginAlignment) {
      // NOTE(review): the definition of Mask is not visible in this rendering.
      OriginLong = IRB.CreateAnd(OriginLong, ConstantInt::get(IntptrTy, ~Mask));
    }
    OriginPtr = IRB.CreateIntToPtr(OriginLong, OriginPtrTy);
  }
  return std::make_pair(ShadowPtr, OriginPtr);
}
1949
// Converts an already-computed shadow offset into a typed shadow pointer.
Value *DataFlowSanitizer::getShadowAddress(Value *Addr,
                                           Value *ShadowOffset) {
  IRBuilder<> IRB(Pos->getParent(), Pos);
  return IRB.CreateIntToPtr(ShadowOffset, PrimitiveShadowPtrTy);
}
1956
// Computes the shadow address for an application address Addr, inserting the
// offset computation before Pos.
Value *DataFlowSanitizer::getShadowAddress(Value *Addr,
  IRBuilder<> IRB(Pos->getParent(), Pos);
  Value *ShadowOffset = getShadowOffset(Addr, IRB);
  return getShadowAddress(Addr, Pos, ShadowOffset);
}
1963
// Unions two shadows and expands the primitive result back to shadow type T.
Value *DFSanFunction::combineShadowsThenConvert(Type *T, Value *V1, Value *V2,
  Value *PrimitiveValue = combineShadows(V1, V2, Pos);
  return expandFromPrimitiveShadow(T, PrimitiveValue, Pos);
}
1969
// Generates IR to compute the union of the two given shadows, inserting it
// before Pos. The combined value is with primitive type.
Value *DFSanFunction::combineShadows(Value *V1, Value *V2,
  // Trivial cases: a zero shadow or identical operands need no union IR.
  if (DFS.isZeroShadow(V1))
    return collapseToPrimitiveShadow(V2, Pos);
  if (DFS.isZeroShadow(V2))
    return collapseToPrimitiveShadow(V1, Pos);
  if (V1 == V2)
    return collapseToPrimitiveShadow(V1, Pos);

  // If the known label set of one shadow subsumes the other's, the union is
  // redundant; reuse the larger shadow.
  auto V1Elems = ShadowElements.find(V1);
  auto V2Elems = ShadowElements.find(V2);
  if (V1Elems != ShadowElements.end() && V2Elems != ShadowElements.end()) {
    if (std::includes(V1Elems->second.begin(), V1Elems->second.end(),
                      V2Elems->second.begin(), V2Elems->second.end())) {
      return collapseToPrimitiveShadow(V1, Pos);
    }
    if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
                      V1Elems->second.begin(), V1Elems->second.end())) {
      return collapseToPrimitiveShadow(V2, Pos);
    }
  } else if (V1Elems != ShadowElements.end()) {
    if (V1Elems->second.count(V2))
      return collapseToPrimitiveShadow(V1, Pos);
  } else if (V2Elems != ShadowElements.end()) {
    if (V2Elems->second.count(V1))
      return collapseToPrimitiveShadow(V2, Pos);
  }

  // Reuse a previously computed union when its defining block dominates Pos.
  // The cache key is order-normalized so (V1,V2) and (V2,V1) share an entry.
  auto Key = std::make_pair(V1, V2);
  if (V1 > V2)
    std::swap(Key.first, Key.second);
  CachedShadow &CCS = CachedShadows[Key];
  if (CCS.Block && DT.dominates(CCS.Block, Pos->getParent()))
    return CCS.Shadow;

  // Converts inputs shadows to shadows with primitive types.
  Value *PV1 = collapseToPrimitiveShadow(V1, Pos);
  Value *PV2 = collapseToPrimitiveShadow(V2, Pos);

  IRBuilder<> IRB(Pos->getParent(), Pos);
  CCS.Block = Pos->getParent();
  CCS.Shadow = IRB.CreateOr(PV1, PV2);

  // Record the union's label set so later calls can use the subsumption
  // checks above.
  std::set<Value *> UnionElems;
  if (V1Elems != ShadowElements.end()) {
    UnionElems = V1Elems->second;
  } else {
    UnionElems.insert(V1);
  }
  if (V2Elems != ShadowElements.end()) {
    UnionElems.insert(V2Elems->second.begin(), V2Elems->second.end());
  } else {
    UnionElems.insert(V2);
  }
  ShadowElements[CCS.Shadow] = std::move(UnionElems);

  return CCS.Shadow;
}
2030
2031// A convenience function which folds the shadows of each of the operands
2032// of the provided instruction Inst, inserting the IR before Inst. Returns
2033// the computed union Value.
2034Value *DFSanFunction::combineOperandShadows(Instruction *Inst) {
2035 if (Inst->getNumOperands() == 0)
2036 return DFS.getZeroShadow(Inst);
2037
2038 Value *Shadow = getShadow(Inst->getOperand(0));
2039 for (unsigned I = 1, N = Inst->getNumOperands(); I < N; ++I)
2040 Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(I)),
2041 Inst->getIterator());
2042
2043 return expandFromPrimitiveShadow(Inst->getType(), Shadow,
2044 Inst->getIterator());
2045}
2046
// Computes and records the shadow (and, when tracking is enabled, origin) of
// I from the shadows/origins of its operands.
void DFSanVisitor::visitInstOperands(Instruction &I) {
  Value *CombinedShadow = DFSF.combineOperandShadows(&I);
  DFSF.setShadow(&I, CombinedShadow);
  visitInstOperandOrigins(I);
}
2052
// Selects a single origin from the parallel Shadows/Origins vectors: at run
// time the origin of the last operand whose shadow compares unequal to Zero
// wins, implemented with a chain of selects. Statically-zero origins are
// skipped; if nothing remains, the zero origin is returned.
Value *DFSanFunction::combineOrigins(const std::vector<Value *> &Shadows,
                                     const std::vector<Value *> &Origins,
                                     ConstantInt *Zero) {
  assert(Shadows.size() == Origins.size());
  size_t Size = Origins.size();
  if (Size == 0)
    return DFS.ZeroOrigin;
  Value *Origin = nullptr;
  // Default comparison value when the caller did not supply one.
  if (!Zero)
    Zero = DFS.ZeroPrimitiveShadow;
  for (size_t I = 0; I != Size; ++I) {
    Value *OpOrigin = Origins[I];
    Constant *ConstOpOrigin = dyn_cast<Constant>(OpOrigin);
    // A constant-zero origin can never be chosen; skip it entirely.
    if (ConstOpOrigin && ConstOpOrigin->isNullValue())
      continue;
    if (!Origin) {
      Origin = OpOrigin;
      continue;
    }
    // Later operands override earlier ones when their shadow is tainted.
    Value *OpShadow = Shadows[I];
    Value *PrimitiveShadow = collapseToPrimitiveShadow(OpShadow, Pos);
    IRBuilder<> IRB(Pos->getParent(), Pos);
    Value *Cond = IRB.CreateICmpNE(PrimitiveShadow, Zero);
    Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
  }
  return Origin ? Origin : DFS.ZeroOrigin;
}
2081
2082Value *DFSanFunction::combineOperandOrigins(Instruction *Inst) {
2083 size_t Size = Inst->getNumOperands();
2084 std::vector<Value *> Shadows(Size);
2085 std::vector<Value *> Origins(Size);
2086 for (unsigned I = 0; I != Size; ++I) {
2087 Shadows[I] = getShadow(Inst->getOperand(I));
2088 Origins[I] = getOrigin(Inst->getOperand(I));
2089 }
2090 return combineOrigins(Shadows, Origins, Inst->getIterator());
2091}
2092
// Computes and records the origin of I from its operands; no-op when origin
// tracking is disabled.
void DFSanVisitor::visitInstOperandOrigins(Instruction &I) {
  if (!DFSF.DFS.shouldTrackOrigins())
    return;
  Value *CombinedOrigin = DFSF.combineOperandOrigins(&I);
  DFSF.setOrigin(&I, CombinedOrigin);
}
2099
2100Align DFSanFunction::getShadowAlign(Align InstAlignment) {
2101 const Align Alignment = ClPreserveAlignment ? InstAlignment : Align(1);
2102 return Align(Alignment.value() * DFS.ShadowWidthBytes);
2103}
2104
2105Align DFSanFunction::getOriginAlign(Align InstAlignment) {
2106 const Align Alignment = llvm::assumeAligned(InstAlignment.value());
2107 return Align(std::max(MinOriginAlignment, Alignment));
2108}
2109
2110bool DFSanFunction::isLookupTableConstant(Value *P) {
2111 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P->stripPointerCasts()))
2112 if (GV->isConstant() && GV->hasName())
2113 return DFS.CombineTaintLookupTableNames.count(GV->getName());
2114
2115 return false;
2116}
2117
2118bool DFSanFunction::useCallbackLoadLabelAndOrigin(uint64_t Size,
2119 Align InstAlignment) {
2120 // When enabling tracking load instructions, we always use
2121 // __dfsan_load_label_and_origin to reduce code size.
2122 if (ClTrackOrigins == 2)
2123 return true;
2124
2125 assert(Size != 0);
2126 // * if Size == 1, it is sufficient to load its origin aligned at 4.
2127 // * if Size == 2, we assume most cases Addr % 2 == 0, so it is sufficient to
2128 // load its origin aligned at 4. If not, although origins may be lost, it
2129 // should not happen very often.
2130 // * if align >= 4, Addr must be aligned to 4, otherwise it is UB. When
2131 // Size % 4 == 0, it is more efficient to load origins without callbacks.
2132 // * Otherwise we use __dfsan_load_label_and_origin.
2133 // This should ensure that common cases run efficiently.
2134 if (Size <= 2)
2135 return false;
2136
2137 const Align Alignment = llvm::assumeAligned(InstAlignment.value());
2138 return Alignment < MinOriginAlignment || !DFS.hasLoadSizeForFastPath(Size);
2139}
2140
2141Value *DataFlowSanitizer::loadNextOrigin(BasicBlock::iterator Pos,
2142 Align OriginAlign,
2143 Value **OriginAddr) {
2144 IRBuilder<> IRB(Pos->getParent(), Pos);
2145 *OriginAddr =
2146 IRB.CreateGEP(OriginTy, *OriginAddr, ConstantInt::get(IntptrTy, 1));
2147 return IRB.CreateAlignedLoad(OriginTy, *OriginAddr, OriginAlign);
2148}
2149
// Fast-path shadow load: reads Size bytes' worth of shadow memory as one or
// more 32/64-bit "wide shadow" words, ORs the words together, then folds the
// combined word down to a single primitive shadow with logN shift/ORs.
// Per-word origins are collected for combineOrigins when origin tracking is
// enabled.
// NOTE(review): the final argument of the combineOrigins call near the end is
// not visible in this rendering — confirm against the upstream source.
std::pair<Value *, Value *> DFSanFunction::loadShadowFast(
    Value *ShadowAddr, Value *OriginAddr, uint64_t Size, Align ShadowAlign,
    Align OriginAlign, Value *FirstOrigin, BasicBlock::iterator Pos) {
  const bool ShouldTrackOrigins = DFS.shouldTrackOrigins();
  const uint64_t ShadowSize = Size * DFS.ShadowWidthBytes;

  assert(Size >= 4 && "Not large enough load size for fast path!");

  // Used for origin tracking.
  std::vector<Value *> Shadows;
  std::vector<Value *> Origins;

  // Load instructions in LLVM can have arbitrary byte sizes (e.g., 3, 12, 20)
  // but this function is only used in a subset of cases that make it possible
  // to optimize the instrumentation.
  //
  // Specifically, when the shadow size in bytes (i.e., loaded bytes x shadow
  // per byte) is either:
  // - a multiple of 8 (common)
  // - equal to 4 (only for load32)
  //
  // For the second case, we can fit the wide shadow in a 32-bit integer. In all
  // other cases, we use a 64-bit integer to hold the wide shadow.
  Type *WideShadowTy =
      ShadowSize == 4 ? Type::getInt32Ty(*DFS.Ctx) : Type::getInt64Ty(*DFS.Ctx);

  IRBuilder<> IRB(Pos->getParent(), Pos);
  Value *CombinedWideShadow =
      IRB.CreateAlignedLoad(WideShadowTy, ShadowAddr, ShadowAlign);

  unsigned WideShadowBitWidth = WideShadowTy->getIntegerBitWidth();
  const uint64_t BytesPerWideShadow = WideShadowBitWidth / DFS.ShadowWidthBits;

  auto AppendWideShadowAndOrigin = [&](Value *WideShadow, Value *Origin) {
    if (BytesPerWideShadow > 4) {
      assert(BytesPerWideShadow == 8);
      // The wide shadow relates to two origin pointers: one for the first four
      // application bytes, and one for the latest four. We use a left shift to
      // get just the shadow bytes that correspond to the first origin pointer,
      // and then the entire shadow for the second origin pointer (which will be
      // chosen by combineOrigins() iff the least-significant half of the wide
      // shadow was empty but the other half was not).
      Value *WideShadowLo = IRB.CreateShl(
          WideShadow, ConstantInt::get(WideShadowTy, WideShadowBitWidth / 2));
      Shadows.push_back(WideShadow);
      Origins.push_back(DFS.loadNextOrigin(Pos, OriginAlign, &OriginAddr));

      Shadows.push_back(WideShadowLo);
      Origins.push_back(Origin);
    } else {
      Shadows.push_back(WideShadow);
      Origins.push_back(Origin);
    }
  };

  if (ShouldTrackOrigins)
    AppendWideShadowAndOrigin(CombinedWideShadow, FirstOrigin);

  // First OR all the WideShadows (i.e., 64bit or 32bit shadow chunks) linearly;
  // then OR individual shadows within the combined WideShadow by binary ORing.
  // This is fewer instructions than ORing shadows individually, since it
  // needs logN shift/or instructions (N being the bytes of the combined wide
  // shadow).
  for (uint64_t ByteOfs = BytesPerWideShadow; ByteOfs < Size;
       ByteOfs += BytesPerWideShadow) {
    ShadowAddr = IRB.CreateGEP(WideShadowTy, ShadowAddr,
                               ConstantInt::get(DFS.IntptrTy, 1));
    Value *NextWideShadow =
        IRB.CreateAlignedLoad(WideShadowTy, ShadowAddr, ShadowAlign);
    CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, NextWideShadow);
    if (ShouldTrackOrigins) {
      Value *NextOrigin = DFS.loadNextOrigin(Pos, OriginAlign, &OriginAddr);
      AppendWideShadowAndOrigin(NextWideShadow, NextOrigin);
    }
  }
  // Fold the wide word onto itself until one primitive shadow remains.
  for (unsigned Width = WideShadowBitWidth / 2; Width >= DFS.ShadowWidthBits;
       Width >>= 1) {
    Value *ShrShadow = IRB.CreateLShr(CombinedWideShadow, Width);
    CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, ShrShadow);
  }
  return {IRB.CreateTrunc(CombinedWideShadow, DFS.PrimitiveShadowTy),
          ShouldTrackOrigins
              ? combineOrigins(Shadows, Origins, Pos,
              : DFS.ZeroOrigin};
}
2236
// Loads the shadow (and, when enabled, origin) for [Addr, Addr+Size) without
// the -dfsan-track-origins=2 chaining step (see loadShadowOrigin for that).
// Chooses between: direct loads for non-escaping allocas, a constant zero for
// provably-constant memory, the runtime callback, an inline fast path, and a
// __dfsan_union_load fallback.
std::pair<Value *, Value *> DFSanFunction::loadShadowOriginSansLoadTracking(
    Value *Addr, uint64_t Size, Align InstAlignment, BasicBlock::iterator Pos) {
  const bool ShouldTrackOrigins = DFS.shouldTrackOrigins();

  // Non-escaped loads.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
    const auto SI = AllocaShadowMap.find(AI);
    if (SI != AllocaShadowMap.end()) {
      // The alloca's shadow lives in a dedicated shadow alloca; load it
      // directly instead of going through shadow memory.
      IRBuilder<> IRB(Pos->getParent(), Pos);
      Value *ShadowLI = IRB.CreateLoad(DFS.PrimitiveShadowTy, SI->second);
      const auto OI = AllocaOriginMap.find(AI);
      assert(!ShouldTrackOrigins || OI != AllocaOriginMap.end());
      return {ShadowLI, ShouldTrackOrigins
                            ? IRB.CreateLoad(DFS.OriginTy, OI->second)
                            : nullptr};
    }
  }

  // Load from constant addresses.
  // NOTE(review): the declaration/population of Objs (the underlying objects
  // of Addr) is not visible in this rendering — confirm against upstream.
  bool AllConstants = true;
  for (const Value *Obj : Objs) {
    if (isa<Function>(Obj) || isa<BlockAddress>(Obj))
      continue;
    if (isa<GlobalVariable>(Obj) && cast<GlobalVariable>(Obj)->isConstant())
      continue;

    AllConstants = false;
    break;
  }
  // Constant memory can never carry taint; no shadow load needed.
  if (AllConstants)
    return {DFS.ZeroPrimitiveShadow,
            ShouldTrackOrigins ? DFS.ZeroOrigin : nullptr};

  if (Size == 0)
    return {DFS.ZeroPrimitiveShadow,
            ShouldTrackOrigins ? DFS.ZeroOrigin : nullptr};

  // Use callback to load if this is not an optimizable case for origin
  // tracking.
  if (ShouldTrackOrigins &&
      useCallbackLoadLabelAndOrigin(Size, InstAlignment)) {
    IRBuilder<> IRB(Pos->getParent(), Pos);
    CallInst *Call =
        IRB.CreateCall(DFS.DFSanLoadLabelAndOriginFn,
                       {Addr, ConstantInt::get(DFS.IntptrTy, Size)});
    Call->addRetAttr(Attribute::ZExt);
    // The callback packs label and origin into one integer: label in the
    // high bits (above OriginWidthBits), origin in the low bits.
    return {IRB.CreateTrunc(IRB.CreateLShr(Call, DFS.OriginWidthBits),
                            DFS.PrimitiveShadowTy),
            IRB.CreateTrunc(Call, DFS.OriginTy)};
  }

  // Other cases that support loading shadows or origins in a fast way.
  Value *ShadowAddr, *OriginAddr;
  std::tie(ShadowAddr, OriginAddr) =
      DFS.getShadowOriginAddress(Addr, InstAlignment, Pos);

  const Align ShadowAlign = getShadowAlign(InstAlignment);
  const Align OriginAlign = getOriginAlign(InstAlignment);
  Value *Origin = nullptr;
  if (ShouldTrackOrigins) {
    IRBuilder<> IRB(Pos->getParent(), Pos);
    Origin = IRB.CreateAlignedLoad(DFS.OriginTy, OriginAddr, OriginAlign);
  }

  // When the byte size is small enough, we can load the shadow directly with
  // just a few instructions.
  switch (Size) {
  case 1: {
    LoadInst *LI = new LoadInst(DFS.PrimitiveShadowTy, ShadowAddr, "", Pos);
    LI->setAlignment(ShadowAlign);
    return {LI, Origin};
  }
  case 2: {
    // Two adjacent shadow cells, loaded separately and unioned.
    IRBuilder<> IRB(Pos->getParent(), Pos);
    Value *ShadowAddr1 = IRB.CreateGEP(DFS.PrimitiveShadowTy, ShadowAddr,
                                       ConstantInt::get(DFS.IntptrTy, 1));
    Value *Load =
        IRB.CreateAlignedLoad(DFS.PrimitiveShadowTy, ShadowAddr, ShadowAlign);
    Value *Load1 =
        IRB.CreateAlignedLoad(DFS.PrimitiveShadowTy, ShadowAddr1, ShadowAlign);
    return {combineShadows(Load, Load1, Pos), Origin};
  }
  }
  bool HasSizeForFastPath = DFS.hasLoadSizeForFastPath(Size);

  if (HasSizeForFastPath)
    return loadShadowFast(ShadowAddr, OriginAddr, Size, ShadowAlign,
                          OriginAlign, Origin, Pos);

  // Fallback: let the runtime union an arbitrary-sized shadow range.
  IRBuilder<> IRB(Pos->getParent(), Pos);
  CallInst *FallbackCall = IRB.CreateCall(
      DFS.DFSanUnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
  FallbackCall->addRetAttr(Attribute::ZExt);
  return {FallbackCall, Origin};
}
2334
// Loads the shadow and origin for [Addr, Addr+Size). With
// -dfsan-track-origins=2 the origin is additionally chained whenever the
// loaded shadow may be tainted.
std::pair<Value *, Value *>
DFSanFunction::loadShadowOrigin(Value *Addr, uint64_t Size, Align InstAlignment,
  Value *PrimitiveShadow, *Origin;
  std::tie(PrimitiveShadow, Origin) =
      loadShadowOriginSansLoadTracking(Addr, Size, InstAlignment, Pos);
  if (DFS.shouldTrackOrigins()) {
    if (ClTrackOrigins == 2) {
      IRBuilder<> IRB(Pos->getParent(), Pos);
      auto *ConstantShadow = dyn_cast<Constant>(PrimitiveShadow);
      // Skip the runtime chain call when the shadow is statically zero.
      if (!ConstantShadow || !ConstantShadow->isZeroValue())
        Origin = updateOriginIfTainted(PrimitiveShadow, Origin, IRB);
    }
  }
  return {PrimitiveShadow, Origin};
}
2351
  // Maps an atomic ordering to one at least as strong as Acquire (keeping
  // NotAtomic untouched), so the shadow load that follows an atomic
  // application load observes fully-initialized shadow.
  switch (AO) {
  case AtomicOrdering::NotAtomic:
    return AtomicOrdering::NotAtomic;
  case AtomicOrdering::Unordered:
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::Acquire:
    return AtomicOrdering::Acquire;
  case AtomicOrdering::Release:
  case AtomicOrdering::AcquireRelease:
    return AtomicOrdering::AcquireRelease;
  case AtomicOrdering::SequentiallyConsistent:
    return AtomicOrdering::SequentiallyConsistent;
  }
  llvm_unreachable("Unknown ordering");
}
2368
  // Walks V through GEPs, bitcasts, and global aliases to the underlying
  // pointer value; non-pointer values are returned unchanged.
  // NOTE(review): the function signature and the declaration of Visited are
  // not visible in this rendering — confirm against the upstream source.
  if (!V->getType()->isPointerTy())
    return V;

  // DFSan pass should be running on valid IR, but we'll
  // keep a seen set to ensure there are no issues.
  Visited.insert(V);
  do {
    if (auto *GEP = dyn_cast<GEPOperator>(V)) {
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
      // A bitcast may leave the pointer domain; stop there.
      if (!V->getType()->isPointerTy())
        return V;
    } else if (isa<GlobalAlias>(V)) {
      V = cast<GlobalAlias>(V)->getAliasee();
    }
    // Terminate as soon as a value repeats (including when no rule above
    // changed V).
  } while (Visited.insert(V).second);

  return V;
}
2391
// Instruments a load: reads the shadow/origin for the loaded bytes, unions in
// the pointer's shadow when pointer-label combining applies, records the
// result, and emits the optional load-event callback.
// NOTE(review): several lines (atomic ordering update, the declaration of
// Pos, the pointer-label condition, and the declaration of Addr) are not
// visible in this rendering — confirm against the upstream source.
void DFSanVisitor::visitLoadInst(LoadInst &LI) {
  auto &DL = LI.getDataLayout();
  uint64_t Size = DL.getTypeStoreSize(LI.getType());
  // Zero-sized loads carry no data and therefore no taint.
  if (Size == 0) {
    DFSF.setShadow(&LI, DFSF.DFS.getZeroShadow(&LI));
    DFSF.setOrigin(&LI, DFSF.DFS.ZeroOrigin);
    return;
  }

  // When an application load is atomic, increase atomic ordering between
  // atomic application loads and stores to ensure happen-before order; load
  // shadow data after application data; store zero shadow data before
  // application data. This ensure shadow loads return either labels of the
  // initial application data or zeros.
  if (LI.isAtomic())

  BasicBlock::iterator AfterLi = std::next(LI.getIterator());
  // For atomic loads the shadow load must come after the application load.
  if (LI.isAtomic())
    Pos = std::next(Pos);

  std::vector<Value *> Shadows;
  std::vector<Value *> Origins;
  Value *PrimitiveShadow, *Origin;
  std::tie(PrimitiveShadow, Origin) =
      DFSF.loadShadowOrigin(LI.getPointerOperand(), Size, LI.getAlign(), Pos);
  const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
  if (ShouldTrackOrigins) {
    Shadows.push_back(PrimitiveShadow);
    Origins.push_back(Origin);
  }
      DFSF.isLookupTableConstant(
    // Fold the pointer operand's shadow (and origin) into the result.
    Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
    PrimitiveShadow = DFSF.combineShadows(PrimitiveShadow, PtrShadow, Pos);
    if (ShouldTrackOrigins) {
      Shadows.push_back(PtrShadow);
      Origins.push_back(DFSF.getOrigin(LI.getPointerOperand()));
    }
  }
  if (!DFSF.DFS.isZeroShadow(PrimitiveShadow))
    DFSF.NonZeroChecks.push_back(PrimitiveShadow);

  Value *Shadow =
      DFSF.expandFromPrimitiveShadow(LI.getType(), PrimitiveShadow, Pos);
  DFSF.setShadow(&LI, Shadow);

  if (ShouldTrackOrigins) {
    DFSF.setOrigin(&LI, DFSF.combineOrigins(Shadows, Origins, Pos));
  }

  // Optional callback notifying the runtime of every instrumented load.
  if (ClEventCallbacks) {
    IRBuilder<> IRB(Pos->getParent(), Pos);
    CallInst *CI =
        IRB.CreateCall(DFSF.DFS.DFSanLoadCallbackFn, {PrimitiveShadow, Addr});
    CI->addParamAttr(0, Attribute::ZExt);
  }

  IRBuilder<> IRB(AfterLi->getParent(), AfterLi);
  DFSF.addReachesFunctionCallbacksIfEnabled(IRB, LI, &LI);
}
2456
2457Value *DFSanFunction::updateOriginIfTainted(Value *Shadow, Value *Origin,
2458 IRBuilder<> &IRB) {
2459 assert(DFS.shouldTrackOrigins());
2460 return IRB.CreateCall(DFS.DFSanChainOriginIfTaintedFn, {Shadow, Origin});
2461}
2462
2463Value *DFSanFunction::updateOrigin(Value *V, IRBuilder<> &IRB) {
2464 if (!DFS.shouldTrackOrigins())
2465 return V;
2466 return IRB.CreateCall(DFS.DFSanChainOriginFn, V);
2467}
2468
2469Value *DFSanFunction::originToIntptr(IRBuilder<> &IRB, Value *Origin) {
2470 const unsigned OriginSize = DataFlowSanitizer::OriginWidthBytes;
2471 const DataLayout &DL = F->getDataLayout();
2472 unsigned IntptrSize = DL.getTypeStoreSize(DFS.IntptrTy);
2473 if (IntptrSize == OriginSize)
2474 return Origin;
2475 assert(IntptrSize == OriginSize * 2);
2476 Origin = IRB.CreateIntCast(Origin, DFS.IntptrTy, /* isSigned */ false);
2477 return IRB.CreateOr(Origin, IRB.CreateShl(Origin, OriginSize * 8));
2478}
2479
2480void DFSanFunction::paintOrigin(IRBuilder<> &IRB, Value *Origin,
2481 Value *StoreOriginAddr,
2482 uint64_t StoreOriginSize, Align Alignment) {
2483 const unsigned OriginSize = DataFlowSanitizer::OriginWidthBytes;
2484 const DataLayout &DL = F->getDataLayout();
2485 const Align IntptrAlignment = DL.getABITypeAlign(DFS.IntptrTy);
2486 unsigned IntptrSize = DL.getTypeStoreSize(DFS.IntptrTy);
2487 assert(IntptrAlignment >= MinOriginAlignment);
2488 assert(IntptrSize >= OriginSize);
2489
2490 unsigned Ofs = 0;
2491 Align CurrentAlignment = Alignment;
2492 if (Alignment >= IntptrAlignment && IntptrSize > OriginSize) {
2493 Value *IntptrOrigin = originToIntptr(IRB, Origin);
2494 Value *IntptrStoreOriginPtr = IRB.CreatePointerCast(
2495 StoreOriginAddr, PointerType::get(DFS.IntptrTy, 0));
2496 for (unsigned I = 0; I < StoreOriginSize / IntptrSize; ++I) {
2497 Value *Ptr =
2498 I ? IRB.CreateConstGEP1_32(DFS.IntptrTy, IntptrStoreOriginPtr, I)
2499 : IntptrStoreOriginPtr;
2500 IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
2501 Ofs += IntptrSize / OriginSize;
2502 CurrentAlignment = IntptrAlignment;
2503 }
2504 }
2505
2506 for (unsigned I = Ofs; I < (StoreOriginSize + OriginSize - 1) / OriginSize;
2507 ++I) {
2508 Value *GEP = I ? IRB.CreateConstGEP1_32(DFS.OriginTy, StoreOriginAddr, I)
2509 : StoreOriginAddr;
2510 IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
2511 CurrentAlignment = MinOriginAlignment;
2512 }
2513}
2514
2515Value *DFSanFunction::convertToBool(Value *V, IRBuilder<> &IRB,
2516 const Twine &Name) {
2517 Type *VTy = V->getType();
2518 assert(VTy->isIntegerTy());
2519 if (VTy->getIntegerBitWidth() == 1)
2520 // Just converting a bool to a bool, so do nothing.
2521 return V;
2522 return IRB.CreateICmpNE(V, ConstantInt::get(VTy, 0), Name);
2523}
2524
// Conditionally writes Origin into the origin slots backing [Addr, Addr+Size):
// only when the collapsed shadow may be nonzero, since origins are not traced
// for untainted sinks.
// NOTE(review): the line assigning the result of SplitBlockAndInsertIfThen
// (presumably to CheckTerm) is not visible in this rendering — confirm
// against the upstream source.
void DFSanFunction::storeOrigin(BasicBlock::iterator Pos, Value *Addr,
                                uint64_t Size, Value *Shadow, Value *Origin,
                                Value *StoreOriginAddr, Align InstAlignment) {
  // Do not write origins for zero shadows because we do not trace origins for
  // untainted sinks.
  const Align OriginAlignment = getOriginAlign(InstAlignment);
  Value *CollapsedShadow = collapseToPrimitiveShadow(Shadow, Pos);
  IRBuilder<> IRB(Pos->getParent(), Pos);
  if (auto *ConstantShadow = dyn_cast<Constant>(CollapsedShadow)) {
    // Shadow known at compile time: either paint unconditionally or skip.
    if (!ConstantShadow->isZeroValue())
      paintOrigin(IRB, updateOrigin(Origin, IRB), StoreOriginAddr, Size,
                  OriginAlignment);
    return;
  }

  if (shouldInstrumentWithCall()) {
    // Let the runtime decide whether to store the origin.
    IRB.CreateCall(
        DFS.DFSanMaybeStoreOriginFn,
        {CollapsedShadow, Addr, ConstantInt::get(DFS.IntptrTy, Size), Origin});
  } else {
    // Branch on shadow != 0 and paint origins only on the tainted path.
    Value *Cmp = convertToBool(CollapsedShadow, IRB, "_dfscmp");
    DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
        Cmp, &*IRB.GetInsertPoint(), false, DFS.OriginStoreWeights, &DTU);
    IRBuilder<> IRBNew(CheckTerm);
    paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), StoreOriginAddr, Size,
                OriginAlignment);
    ++NumOriginStores;
  }
}
2555
// Clear the shadow bytes backing [Addr, Addr+Size) with a single wide
// integer store. Origins are deliberately left untouched (zero shadow
// means untainted, so any stale origin is never consulted).
2556void DFSanFunction::storeZeroPrimitiveShadow(Value *Addr, uint64_t Size,
2557 Align ShadowAlign,
 // NOTE(review): the parameter line (presumably `BasicBlock::iterator
 // Pos) {`) appears to be missing from this excerpt — confirm upstream.
2559 IRBuilder<> IRB(Pos->getParent(), Pos);
 // One integer of Size * ShadowWidthBits bits covers the whole range.
2560 IntegerType *ShadowTy =
2561 IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits);
2562 Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
2563 Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
2564 IRB.CreateAlignedStore(ExtZeroShadow, ShadowAddr, ShadowAlign);
2565 // Do not write origins for 0 shadows because we do not trace origins for
2566 // untainted sinks.
2567}
2568
// Write PrimitiveShadow over the shadow bytes backing [Addr, Addr+Size),
// and the matching origin when tracking is enabled. Fast paths: allocas
// with a dedicated shadow slot get a single store; a zero shadow is
// delegated to storeZeroPrimitiveShadow. Otherwise the shadow is splatted
// into 8-element vectors and stored vector-at-a-time, with a scalar tail.
2569void DFSanFunction::storePrimitiveShadowOrigin(Value *Addr, uint64_t Size,
2570 Align InstAlignment,
2571 Value *PrimitiveShadow,
2572 Value *Origin,
 // NOTE(review): the final parameter line (presumably `BasicBlock::iterator
 // Pos) {`) appears to be missing from this excerpt — confirm upstream.
2574 const bool ShouldTrackOrigins = DFS.shouldTrackOrigins() && Origin;
2575
 // Fast path: allocas promoted to a private shadow slot need no shadow
 // memory computation at all.
2576 if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
2577 const auto SI = AllocaShadowMap.find(AI);
2578 if (SI != AllocaShadowMap.end()) {
2579 IRBuilder<> IRB(Pos->getParent(), Pos);
2580 IRB.CreateStore(PrimitiveShadow, SI->second);
2581
2582 // Do not write origins for 0 shadows because we do not trace origins for
2583 // untainted sinks.
2584 if (ShouldTrackOrigins && !DFS.isZeroShadow(PrimitiveShadow)) {
2585 const auto OI = AllocaOriginMap.find(AI);
2586 assert(OI != AllocaOriginMap.end() && Origin);
2587 IRB.CreateStore(Origin, OI->second);
2588 }
2589 return;
2590 }
2591 }
2592
2593 const Align ShadowAlign = getShadowAlign(InstAlignment);
2594 if (DFS.isZeroShadow(PrimitiveShadow)) {
2595 storeZeroPrimitiveShadow(Addr, Size, ShadowAlign, Pos);
2596 return;
2597 }
2598
2599 IRBuilder<> IRB(Pos->getParent(), Pos);
2600 Value *ShadowAddr, *OriginAddr;
2601 std::tie(ShadowAddr, OriginAddr) =
2602 DFS.getShadowOriginAddress(Addr, InstAlignment, Pos);
2603
 // Splat the primitive shadow into an 8-lane vector so large ranges are
 // covered with few stores (8 lanes * shadow width must fit in 128 bits).
2604 const unsigned ShadowVecSize = 8;
2605 assert(ShadowVecSize * DFS.ShadowWidthBits <= 128 &&
2606 "Shadow vector is too large!");
2607
2608 uint64_t Offset = 0;
2609 uint64_t LeftSize = Size;
2610 if (LeftSize >= ShadowVecSize) {
2611 auto *ShadowVecTy =
2612 FixedVectorType::get(DFS.PrimitiveShadowTy, ShadowVecSize);
2613 Value *ShadowVec = PoisonValue::get(ShadowVecTy);
2614 for (unsigned I = 0; I != ShadowVecSize; ++I) {
2615 ShadowVec = IRB.CreateInsertElement(
2616 ShadowVec, PrimitiveShadow,
2617 ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), I));
2618 }
2619 do {
2620 Value *CurShadowVecAddr =
2621 IRB.CreateConstGEP1_32(ShadowVecTy, ShadowAddr, Offset);
2622 IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign);
2623 LeftSize -= ShadowVecSize;
2624 ++Offset;
2625 } while (LeftSize >= ShadowVecSize);
 // Convert the vector-sized offset back to element units for the tail loop.
2626 Offset *= ShadowVecSize;
2627 }
 // Scalar tail: remaining elements one shadow store at a time.
2628 while (LeftSize > 0) {
2629 Value *CurShadowAddr =
2630 IRB.CreateConstGEP1_32(DFS.PrimitiveShadowTy, ShadowAddr, Offset);
2631 IRB.CreateAlignedStore(PrimitiveShadow, CurShadowAddr, ShadowAlign);
2632 --LeftSize;
2633 ++Offset;
2634 }
2635
2636 if (ShouldTrackOrigins) {
2637 storeOrigin(Pos, Addr, Size, PrimitiveShadow, Origin, OriginAddr,
2638 InstAlignment);
2639 }
2640}
2641
 // NOTE(review): the function signature line (presumably
 // `static AtomicOrdering addReleaseOrdering(AtomicOrdering AO) {`) is
 // missing from this excerpt — confirm against upstream.
 // Maps an atomic ordering to the weakest ordering that is at least as
 // strong AND includes release semantics, so shadow stores emitted before
 // the instruction cannot be reordered after it.
2643 switch (AO) {
2644 case AtomicOrdering::NotAtomic:
2645 return AtomicOrdering::NotAtomic;
2646 case AtomicOrdering::Unordered:
2647 case AtomicOrdering::Monotonic:
2648 case AtomicOrdering::Release:
2649 return AtomicOrdering::Release;
2650 case AtomicOrdering::Acquire:
2651 case AtomicOrdering::AcquireRelease:
2652 return AtomicOrdering::AcquireRelease;
2653 case AtomicOrdering::SequentiallyConsistent:
2654 return AtomicOrdering::SequentiallyConsistent;
2655 }
2656 llvm_unreachable("Unknown ordering");
2657}
2658
// Instrument an application store: write the stored value's (collapsed)
// shadow — optionally combined with the pointer's shadow — and origin to
// shadow/origin memory. Atomic stores get zero shadow (written before the
// data, with ordering raised to at least release) so racing shadow loads
// never observe stale labels.
2659void DFSanVisitor::visitStoreInst(StoreInst &SI) {
2660 auto &DL = SI.getDataLayout();
2661 Value *Val = SI.getValueOperand();
2662 uint64_t Size = DL.getTypeStoreSize(Val->getType());
2663 if (Size == 0)
2664 return;
2665
2666 // When an application store is atomic, increase atomic ordering between
2667 // atomic application loads and stores to ensure happen-before order; load
2668 // shadow data after application data; store zero shadow data before
2669 // application data. This ensure shadow loads return either labels of the
2670 // initial application data or zeros.
2671 if (SI.isAtomic())
2672 SI.setOrdering(addReleaseOrdering(SI.getOrdering()));
2673
 // Origins are not tracked across atomic stores (their shadow is zero).
2674 const bool ShouldTrackOrigins =
2675 DFSF.DFS.shouldTrackOrigins() && !SI.isAtomic();
2676 std::vector<Value *> Shadows;
2677 std::vector<Value *> Origins;
2678
2679 Value *Shadow =
2680 SI.isAtomic() ? DFSF.DFS.getZeroShadow(Val) : DFSF.getShadow(Val);
2681
2682 if (ShouldTrackOrigins) {
2683 Shadows.push_back(Shadow);
2684 Origins.push_back(DFSF.getOrigin(Val));
2685 }
2686
2687 Value *PrimitiveShadow;
 // NOTE(review): the guard line (presumably
 // `if (ClCombinePointerLabelsOnStore) {`) appears to be missing from this
 // excerpt — the open brace matched by line 2695 confirms a conditional
 // block here; verify against upstream.
2689 Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
2690 if (ShouldTrackOrigins) {
2691 Shadows.push_back(PtrShadow);
2692 Origins.push_back(DFSF.getOrigin(SI.getPointerOperand()));
2693 }
2694 PrimitiveShadow = DFSF.combineShadows(Shadow, PtrShadow, SI.getIterator());
2695 } else {
2696 PrimitiveShadow = DFSF.collapseToPrimitiveShadow(Shadow, SI.getIterator());
2697 }
2698 Value *Origin = nullptr;
2699 if (ShouldTrackOrigins)
2700 Origin = DFSF.combineOrigins(Shadows, Origins, SI.getIterator());
2701 DFSF.storePrimitiveShadowOrigin(SI.getPointerOperand(), Size, SI.getAlign(),
2702 PrimitiveShadow, Origin, SI.getIterator());
 // Optionally inform the event-callback runtime about this store.
2703 if (ClEventCallbacks) {
2704 IRBuilder<> IRB(&SI);
2705 Value *Addr = SI.getPointerOperand();
2706 CallInst *CI =
2707 IRB.CreateCall(DFSF.DFS.DFSanStoreCallbackFn, {PrimitiveShadow, Addr});
2708 CI->addParamAttr(0, Attribute::ZExt);
2709 }
2710}
2711
2712void DFSanVisitor::visitCASOrRMW(Align InstAlignment, Instruction &I) {
2713 assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
2714
2715 Value *Val = I.getOperand(1);
2716 const auto &DL = I.getDataLayout();
2717 uint64_t Size = DL.getTypeStoreSize(Val->getType());
2718 if (Size == 0)
2719 return;
2720
2721 // Conservatively set data at stored addresses and return with zero shadow to
2722 // prevent shadow data races.
2723 IRBuilder<> IRB(&I);
2724 Value *Addr = I.getOperand(0);
2725 const Align ShadowAlign = DFSF.getShadowAlign(InstAlignment);
2726 DFSF.storeZeroPrimitiveShadow(Addr, Size, ShadowAlign, I.getIterator());
2727 DFSF.setShadow(&I, DFSF.DFS.getZeroShadow(&I));
2728 DFSF.setOrigin(&I, DFSF.DFS.ZeroOrigin);
2729}
2730
2731void DFSanVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {
2732 visitCASOrRMW(I.getAlign(), I);
2733 // TODO: The ordering change follows MSan. It is possible not to change
2734 // ordering because we always set and use 0 shadows.
2735 I.setOrdering(addReleaseOrdering(I.getOrdering()));
2736}
2737
2738void DFSanVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
2739 visitCASOrRMW(I.getAlign(), I);
2740 // TODO: The ordering change follows MSan. It is possible not to change
2741 // ordering because we always set and use 0 shadows.
2742 I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
2743}
2744
2745void DFSanVisitor::visitUnaryOperator(UnaryOperator &UO) {
2746 visitInstOperands(UO);
2747}
2748
2749void DFSanVisitor::visitBinaryOperator(BinaryOperator &BO) {
2750 visitInstOperands(BO);
2751}
2752
2753void DFSanVisitor::visitBitCastInst(BitCastInst &BCI) {
2754 // Special case: if this is the bitcast (there is exactly 1 allowed) between
2755 // a musttail call and a ret, don't instrument. New instructions are not
2756 // allowed after a musttail call.
2757 if (auto *CI = dyn_cast<CallInst>(BCI.getOperand(0)))
2758 if (CI->isMustTailCall())
2759 return;
2760 visitInstOperands(BCI);
2761}
2762
2763void DFSanVisitor::visitCastInst(CastInst &CI) { visitInstOperands(CI); }
2764
2765void DFSanVisitor::visitCmpInst(CmpInst &CI) {
2766 visitInstOperands(CI);
2767 if (ClEventCallbacks) {
2768 IRBuilder<> IRB(&CI);
2769 Value *CombinedShadow = DFSF.getShadow(&CI);
2770 CallInst *CallI =
2771 IRB.CreateCall(DFSF.DFS.DFSanCmpCallbackFn, CombinedShadow);
2772 CallI->addParamAttr(0, Attribute::ZExt);
2773 }
2774}
2775
2776void DFSanVisitor::visitLandingPadInst(LandingPadInst &LPI) {
2777 // We do not need to track data through LandingPadInst.
2778 //
2779 // For the C++ exceptions, if a value is thrown, this value will be stored
2780 // in a memory location provided by __cxa_allocate_exception(...) (on the
2781 // throw side) or __cxa_begin_catch(...) (on the catch side).
2782 // This memory will have a shadow, so with the loads and stores we will be
2783 // able to propagate labels on data thrown through exceptions, without any
2784 // special handling of the LandingPadInst.
2785 //
2786 // The second element in the pair result of the LandingPadInst is a
2787 // register value, but it is for a type ID and should never be tainted.
2788 DFSF.setShadow(&LPI, DFSF.DFS.getZeroShadow(&LPI));
2789 DFSF.setOrigin(&LPI, DFSF.DFS.ZeroOrigin);
2790}
2791
// GEP: by default only the base pointer's shadow/origin flows to the
// result; offsets are ignored (unless the lookup-table/offset options
// re-enable full operand combining below).
2792void DFSanVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
 // NOTE(review): the condition lines (presumably
 // `if (ClCombineOffsetLabelsOnGEP ||` and the stripped-pointer-operand
 // argument closing `))) {`) appear to be missing from this excerpt —
 // confirm against upstream.
2794 DFSF.isLookupTableConstant(
2796 visitInstOperands(GEPI);
2797 return;
2798 }
2799
2800 // Only propagate shadow/origin of base pointer value but ignore those of
2801 // offset operands.
2802 Value *BasePointer = GEPI.getPointerOperand();
2803 DFSF.setShadow(&GEPI, DFSF.getShadow(BasePointer));
2804 if (DFSF.DFS.shouldTrackOrigins())
2805 DFSF.setOrigin(&GEPI, DFSF.getOrigin(BasePointer));
2806}
2807
2808void DFSanVisitor::visitExtractElementInst(ExtractElementInst &I) {
2809 visitInstOperands(I);
2810}
2811
2812void DFSanVisitor::visitInsertElementInst(InsertElementInst &I) {
2813 visitInstOperands(I);
2814}
2815
2816void DFSanVisitor::visitShuffleVectorInst(ShuffleVectorInst &I) {
2817 visitInstOperands(I);
2818}
2819
2820void DFSanVisitor::visitExtractValueInst(ExtractValueInst &I) {
2821 IRBuilder<> IRB(&I);
2822 Value *Agg = I.getAggregateOperand();
2823 Value *AggShadow = DFSF.getShadow(Agg);
2824 Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
2825 DFSF.setShadow(&I, ResShadow);
2826 visitInstOperandOrigins(I);
2827}
2828
2829void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) {
2830 IRBuilder<> IRB(&I);
2831 Value *AggShadow = DFSF.getShadow(I.getAggregateOperand());
2832 Value *InsShadow = DFSF.getShadow(I.getInsertedValueOperand());
2833 Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
2834 DFSF.setShadow(&I, Res);
2835 visitInstOperandOrigins(I);
2836}
2837
2838void DFSanVisitor::visitAllocaInst(AllocaInst &I) {
2839 bool AllLoadsStores = true;
2840 for (User *U : I.users()) {
2841 if (isa<LoadInst>(U))
2842 continue;
2843
2844 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
2845 if (SI->getPointerOperand() == &I)
2846 continue;
2847 }
2848
2849 AllLoadsStores = false;
2850 break;
2851 }
2852 if (AllLoadsStores) {
2853 IRBuilder<> IRB(&I);
2854 DFSF.AllocaShadowMap[&I] = IRB.CreateAlloca(DFSF.DFS.PrimitiveShadowTy);
2855 if (DFSF.DFS.shouldTrackOrigins()) {
2856 DFSF.AllocaOriginMap[&I] =
2857 IRB.CreateAlloca(DFSF.DFS.OriginTy, nullptr, "_dfsa");
2858 }
2859 }
2860 DFSF.setShadow(&I, DFSF.DFS.ZeroPrimitiveShadow);
2861 DFSF.setOrigin(&I, DFSF.DFS.ZeroOrigin);
2862}
2863
// select: the result's shadow is a select over the operand shadows (or a
// plain union for vector conditions, where per-lane selection of shadows
// is not expressible). With ClTrackSelectControlFlow the condition's
// shadow is additionally folded into the result.
2864void DFSanVisitor::visitSelectInst(SelectInst &I) {
2865 Value *CondShadow = DFSF.getShadow(I.getCondition());
2866 Value *TrueShadow = DFSF.getShadow(I.getTrueValue());
2867 Value *FalseShadow = DFSF.getShadow(I.getFalseValue());
2868 Value *ShadowSel = nullptr;
2869 const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
2870 std::vector<Value *> Shadows;
2871 std::vector<Value *> Origins;
2872 Value *TrueOrigin =
2873 ShouldTrackOrigins ? DFSF.getOrigin(I.getTrueValue()) : nullptr;
2874 Value *FalseOrigin =
2875 ShouldTrackOrigins ? DFSF.getOrigin(I.getFalseValue()) : nullptr;
2876
2877 DFSF.addConditionalCallbacksIfEnabled(I, I.getCondition());
2878
 // Vector condition: per-lane selects on shadows aren't modeled; union both
 // arms' shadows instead.
2879 if (isa<VectorType>(I.getCondition()->getType())) {
2880 ShadowSel = DFSF.combineShadowsThenConvert(I.getType(), TrueShadow,
2881 FalseShadow, I.getIterator());
2882 if (ShouldTrackOrigins) {
2883 Shadows.push_back(TrueShadow);
2884 Shadows.push_back(FalseShadow);
2885 Origins.push_back(TrueOrigin);
2886 Origins.push_back(FalseOrigin);
2887 }
2888 } else {
 // Identical arm shadows need no select instruction.
2889 if (TrueShadow == FalseShadow) {
2890 ShadowSel = TrueShadow;
2891 if (ShouldTrackOrigins) {
2892 Shadows.push_back(TrueShadow);
2893 Origins.push_back(TrueOrigin);
2894 }
2895 } else {
 // Mirror the application select on shadows (and origins).
2896 ShadowSel = SelectInst::Create(I.getCondition(), TrueShadow, FalseShadow,
2897 "", I.getIterator());
2898 if (ShouldTrackOrigins) {
2899 Shadows.push_back(ShadowSel);
2900 Origins.push_back(SelectInst::Create(I.getCondition(), TrueOrigin,
2901 FalseOrigin, "", I.getIterator()));
2902 }
2903 }
2904 }
2905 DFSF.setShadow(&I, ClTrackSelectControlFlow ? DFSF.combineShadowsThenConvert(
2906 I.getType(), CondShadow,
2907 ShadowSel, I.getIterator())
2908 : ShadowSel);
2909 if (ShouldTrackOrigins) {
 // NOTE(review): a line is missing from this excerpt here (presumably
 // `if (ClTrackSelectControlFlow) {`, matching the extra closing brace at
 // line 2915) — confirm against upstream before editing.
2911 Shadows.push_back(CondShadow);
2912 Origins.push_back(DFSF.getOrigin(I.getCondition()));
2913 }
2914 DFSF.setOrigin(&I, DFSF.combineOrigins(Shadows, Origins, I.getIterator()));
2915 }
2916}
2917
2918void DFSanVisitor::visitMemSetInst(MemSetInst &I) {
2919 IRBuilder<> IRB(&I);
2920 Value *ValShadow = DFSF.getShadow(I.getValue());
2921 Value *ValOrigin = DFSF.DFS.shouldTrackOrigins()
2922 ? DFSF.getOrigin(I.getValue())
2923 : DFSF.DFS.ZeroOrigin;
2924 IRB.CreateCall(DFSF.DFS.DFSanSetLabelFn,
2925 {ValShadow, ValOrigin, I.getDest(),
2926 IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
2927}
2928
2929void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
2930 IRBuilder<> IRB(&I);
2931
2932 // CopyOrMoveOrigin transfers origins by refering to their shadows. So we
2933 // need to move origins before moving shadows.
2934 if (DFSF.DFS.shouldTrackOrigins()) {
2935 IRB.CreateCall(
2936 DFSF.DFS.DFSanMemOriginTransferFn,
2937 {I.getArgOperand(0), I.getArgOperand(1),
2938 IRB.CreateIntCast(I.getArgOperand(2), DFSF.DFS.IntptrTy, false)});
2939 }
2940
2941 Value *DestShadow = DFSF.DFS.getShadowAddress(I.getDest(), I.getIterator());
2942 Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), I.getIterator());
2943 Value *LenShadow =
2944 IRB.CreateMul(I.getLength(), ConstantInt::get(I.getLength()->getType(),
2945 DFSF.DFS.ShadowWidthBytes));
2946 auto *MTI = cast<MemTransferInst>(
2947 IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
2948 {DestShadow, SrcShadow, LenShadow, I.getVolatileCst()}));
2949 MTI->setDestAlignment(DFSF.getShadowAlign(I.getDestAlign().valueOrOne()));
2950 MTI->setSourceAlignment(DFSF.getShadowAlign(I.getSourceAlign().valueOrOne()));
2951 if (ClEventCallbacks) {
2952 IRB.CreateCall(
2953 DFSF.DFS.DFSanMemTransferCallbackFn,
2954 {DestShadow, IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
2955 }
2956}
2957
2958void DFSanVisitor::visitBranchInst(BranchInst &BR) {
2959 if (!BR.isConditional())
2960 return;
2961
2962 DFSF.addConditionalCallbacksIfEnabled(BR, BR.getCondition());
2963}
2964
2965void DFSanVisitor::visitSwitchInst(SwitchInst &SW) {
2966 DFSF.addConditionalCallbacksIfEnabled(SW, SW.getCondition());
2967}
2968
2969static bool isAMustTailRetVal(Value *RetVal) {
2970 // Tail call may have a bitcast between return.
2971 if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
2972 RetVal = I->getOperand(0);
2973 }
2974 if (auto *I = dyn_cast<CallInst>(RetVal)) {
2975 return I->isMustTailCall();
2976 }
2977 return false;
2978}
2979
// Return: store the return value's shadow into __dfsan_retval_tls (and its
// origin into the retval-origin TLS slot) for the caller to pick up.
// Native-ABI functions and musttail returns are left uninstrumented.
2980void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
2981 if (!DFSF.IsNativeABI && RI.getReturnValue()) {
2982 // Don't emit the instrumentation for musttail call returns.
 // NOTE(review): the guard line (presumably
 // `if (isAMustTailRetVal(RI.getReturnValue()))`) appears to be missing
 // from this excerpt — confirm against upstream.
2984 return;
2985
2986 Value *S = DFSF.getShadow(RI.getReturnValue());
2987 IRBuilder<> IRB(&RI);
2988 Type *RT = DFSF.F->getFunctionType()->getReturnType();
2989 unsigned Size = getDataLayout().getTypeAllocSize(DFSF.DFS.getShadowTy(RT));
2990 if (Size <= RetvalTLSSize) {
2991 // If the size overflows, stores nothing. At callsite, oversized return
2992 // shadows are set to zero.
2993 IRB.CreateAlignedStore(S, DFSF.getRetvalTLS(RT, IRB), ShadowTLSAlignment);
2994 }
2995 if (DFSF.DFS.shouldTrackOrigins()) {
2996 Value *O = DFSF.getOrigin(RI.getReturnValue());
2997 IRB.CreateStore(O, DFSF.getRetvalOriginTLS());
2998 }
2999 }
3000}
3001
3002void DFSanVisitor::addShadowArguments(Function &F, CallBase &CB,
3003 std::vector<Value *> &Args,
3004 IRBuilder<> &IRB) {
3005 FunctionType *FT = F.getFunctionType();
3006
3007 auto *I = CB.arg_begin();
3008
3009 // Adds non-variable argument shadows.
3010 for (unsigned N = FT->getNumParams(); N != 0; ++I, --N)
3011 Args.push_back(
3012 DFSF.collapseToPrimitiveShadow(DFSF.getShadow(*I), CB.getIterator()));
3013
3014 // Adds variable argument shadows.
3015 if (FT->isVarArg()) {
3016 auto *LabelVATy = ArrayType::get(DFSF.DFS.PrimitiveShadowTy,
3017 CB.arg_size() - FT->getNumParams());
3018 auto *LabelVAAlloca =
3019 new AllocaInst(LabelVATy, getDataLayout().getAllocaAddrSpace(),
3020 "labelva", DFSF.F->getEntryBlock().begin());
3021
3022 for (unsigned N = 0; I != CB.arg_end(); ++I, ++N) {
3023 auto *LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, N);
3024 IRB.CreateStore(
3025 DFSF.collapseToPrimitiveShadow(DFSF.getShadow(*I), CB.getIterator()),
3026 LabelVAPtr);
3027 }
3028
3029 Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0));
3030 }
3031
3032 // Adds the return value shadow.
3033 if (!FT->getReturnType()->isVoidTy()) {
3034 if (!DFSF.LabelReturnAlloca) {
3035 DFSF.LabelReturnAlloca = new AllocaInst(
3036 DFSF.DFS.PrimitiveShadowTy, getDataLayout().getAllocaAddrSpace(),
3037 "labelreturn", DFSF.F->getEntryBlock().begin());
3038 }
3039 Args.push_back(DFSF.LabelReturnAlloca);
3040 }
3041}
3042
3043void DFSanVisitor::addOriginArguments(Function &F, CallBase &CB,
3044 std::vector<Value *> &Args,
3045 IRBuilder<> &IRB) {
3046 FunctionType *FT = F.getFunctionType();
3047
3048 auto *I = CB.arg_begin();
3049
3050 // Add non-variable argument origins.
3051 for (unsigned N = FT->getNumParams(); N != 0; ++I, --N)
3052 Args.push_back(DFSF.getOrigin(*I));
3053
3054 // Add variable argument origins.
3055 if (FT->isVarArg()) {
3056 auto *OriginVATy =
3057 ArrayType::get(DFSF.DFS.OriginTy, CB.arg_size() - FT->getNumParams());
3058 auto *OriginVAAlloca =
3059 new AllocaInst(OriginVATy, getDataLayout().getAllocaAddrSpace(),
3060 "originva", DFSF.F->getEntryBlock().begin());
3061
3062 for (unsigned N = 0; I != CB.arg_end(); ++I, ++N) {
3063 auto *OriginVAPtr = IRB.CreateStructGEP(OriginVATy, OriginVAAlloca, N);
3064 IRB.CreateStore(DFSF.getOrigin(*I), OriginVAPtr);
3065 }
3066
3067 Args.push_back(IRB.CreateStructGEP(OriginVATy, OriginVAAlloca, 0));
3068 }
3069
3070 // Add the return value origin.
3071 if (!FT->getReturnType()->isVoidTy()) {
3072 if (!DFSF.OriginReturnAlloca) {
3073 DFSF.OriginReturnAlloca = new AllocaInst(
3074 DFSF.DFS.OriginTy, getDataLayout().getAllocaAddrSpace(),
3075 "originreturn", DFSF.F->getEntryBlock().begin());
3076 }
3077 Args.push_back(DFSF.OriginReturnAlloca);
3078 }
3079}
3080
// Handle a call to a function listed in the ABI list. Returns true when the
// call was fully handled here. WK_Warning/WK_Discard/WK_Functional retarget
// the call and assign zero/combined shadows; WK_Custom rewrites a CallInst
// into a call of the __dfsw_/__dfso_ wrapper with shadow (and origin)
// arguments appended, then replaces and erases the original call. Invokes
// of custom functions are not handled (returns false).
3081bool DFSanVisitor::visitWrappedCallBase(Function &F, CallBase &CB) {
3082 IRBuilder<> IRB(&CB);
3083 switch (DFSF.DFS.getWrapperKind(&F)) {
3084 case DataFlowSanitizer::WK_Warning:
3085 CB.setCalledFunction(&F);
3086 IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn,
3087 IRB.CreateGlobalString(F.getName()));
3088 DFSF.DFS.buildExternWeakCheckIfNeeded(IRB, &F);
3089 DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
3090 DFSF.setOrigin(&CB, DFSF.DFS.ZeroOrigin);
3091 return true;
3092 case DataFlowSanitizer::WK_Discard:
3093 CB.setCalledFunction(&F);
3094 DFSF.DFS.buildExternWeakCheckIfNeeded(IRB, &F);
3095 DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
3096 DFSF.setOrigin(&CB, DFSF.DFS.ZeroOrigin);
3097 return true;
3098 case DataFlowSanitizer::WK_Functional:
3099 CB.setCalledFunction(&F);
3100 DFSF.DFS.buildExternWeakCheckIfNeeded(IRB, &F);
3101 visitInstOperands(CB);
3102 return true;
 // NOTE(review): upstream opens a brace-scoped block on this case label;
 // the `{` appears to have been dropped by extraction — confirm upstream.
3103 case DataFlowSanitizer::WK_Custom:
3104 // Don't try to handle invokes of custom functions, it's too complicated.
3105 // Instead, invoke the dfsw$ wrapper, which will in turn call the __dfsw_
3106 // wrapper.
3107 CallInst *CI = dyn_cast<CallInst>(&CB);
3108 if (!CI)
3109 return false;
3110
3111 const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
3112 FunctionType *FT = F.getFunctionType();
3113 TransformedFunction CustomFn = DFSF.DFS.getCustomFunctionType(FT);
 // Origin-tracking builds use the __dfso_ wrapper family instead of __dfsw_.
3114 std::string CustomFName = ShouldTrackOrigins ? "__dfso_" : "__dfsw_";
3115 CustomFName += F.getName();
3116 FunctionCallee CustomF = DFSF.DFS.Mod->getOrInsertFunction(
3117 CustomFName, CustomFn.TransformedType);
3118 if (Function *CustomFn = dyn_cast<Function>(CustomF.getCallee())) {
3119 CustomFn->copyAttributesFrom(&F);
3120
3121 // Custom functions returning non-void will write to the return label.
3122 if (!FT->getReturnType()->isVoidTy()) {
3123 CustomFn->removeFnAttrs(DFSF.DFS.ReadOnlyNoneAttrs);
3124 }
3125 }
3126
3127 std::vector<Value *> Args;
3128
3129 // Adds non-variable arguments.
3130 auto *I = CB.arg_begin();
3131 for (unsigned N = FT->getNumParams(); N != 0; ++I, --N) {
3132 Args.push_back(*I);
3133 }
3134
3135 // Adds shadow arguments.
3136 const unsigned ShadowArgStart = Args.size();
3137 addShadowArguments(F, CB, Args, IRB);
3138
3139 // Adds origin arguments.
3140 const unsigned OriginArgStart = Args.size();
3141 if (ShouldTrackOrigins)
3142 addOriginArguments(F, CB, Args, IRB);
3143
3144 // Adds variable arguments.
3145 append_range(Args, drop_begin(CB.args(), FT->getNumParams()));
3146
3147 CallInst *CustomCI = IRB.CreateCall(CustomF, Args);
3148 CustomCI->setCallingConv(CI->getCallingConv());
 // Remap the original call's attributes onto the transformed signature.
3149 CustomCI->setAttributes(transformFunctionAttributes(
3150 CustomFn, CI->getContext(), CI->getAttributes()));
3151
3152 // Update the parameter attributes of the custom call instruction to
3153 // zero extend the shadow parameters. This is required for targets
3154 // which consider PrimitiveShadowTy an illegal type.
3155 for (unsigned N = 0; N < FT->getNumParams(); N++) {
3156 const unsigned ArgNo = ShadowArgStart + N;
3157 if (CustomCI->getArgOperand(ArgNo)->getType() ==
3158 DFSF.DFS.PrimitiveShadowTy)
3159 CustomCI->addParamAttr(ArgNo, Attribute::ZExt);
3160 if (ShouldTrackOrigins) {
3161 const unsigned OriginArgNo = OriginArgStart + N;
3162 if (CustomCI->getArgOperand(OriginArgNo)->getType() ==
3163 DFSF.DFS.OriginTy)
3164 CustomCI->addParamAttr(OriginArgNo, Attribute::ZExt);
3165 }
3166 }
3167
3168 // Loads the return value shadow and origin.
3169 if (!FT->getReturnType()->isVoidTy()) {
3170 LoadInst *LabelLoad =
3171 IRB.CreateLoad(DFSF.DFS.PrimitiveShadowTy, DFSF.LabelReturnAlloca);
3172 DFSF.setShadow(CustomCI,
3173 DFSF.expandFromPrimitiveShadow(
3174 FT->getReturnType(), LabelLoad, CB.getIterator()));
3175 if (ShouldTrackOrigins) {
3176 LoadInst *OriginLoad =
3177 IRB.CreateLoad(DFSF.DFS.OriginTy, DFSF.OriginReturnAlloca);
3178 DFSF.setOrigin(CustomCI, OriginLoad);
3179 }
3180 }
3181
 // The wrapper call fully replaces the original call instruction.
3182 CI->replaceAllUsesWith(CustomCI);
3183 CI->eraseFromParent();
3184 return true;
3185 }
3186 return false;
3187}
3188
3189Value *DFSanVisitor::makeAddAcquireOrderingTable(IRBuilder<> &IRB) {
3190 constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
3191 uint32_t OrderingTable[NumOrderings] = {};
3192
3193 OrderingTable[(int)AtomicOrderingCABI::relaxed] =
3194 OrderingTable[(int)AtomicOrderingCABI::acquire] =
3195 OrderingTable[(int)AtomicOrderingCABI::consume] =
3196 (int)AtomicOrderingCABI::acquire;
3197 OrderingTable[(int)AtomicOrderingCABI::release] =
3198 OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
3199 (int)AtomicOrderingCABI::acq_rel;
3200 OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
3201 (int)AtomicOrderingCABI::seq_cst;
3202
3203 return ConstantDataVector::get(IRB.getContext(), OrderingTable);
3204}
3205
3206void DFSanVisitor::visitLibAtomicLoad(CallBase &CB) {
3207 // Since we use getNextNode here, we can't have CB terminate the BB.
3208 assert(isa<CallInst>(CB));
3209
3210 IRBuilder<> IRB(&CB);
3211 Value *Size = CB.getArgOperand(0);
3212 Value *SrcPtr = CB.getArgOperand(1);
3213 Value *DstPtr = CB.getArgOperand(2);
3214 Value *Ordering = CB.getArgOperand(3);
3215 // Convert the call to have at least Acquire ordering to make sure
3216 // the shadow operations aren't reordered before it.
3217 Value *NewOrdering =
3218 IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
3219 CB.setArgOperand(3, NewOrdering);
3220
3221 IRBuilder<> NextIRB(CB.getNextNode());
3222 NextIRB.SetCurrentDebugLocation(CB.getDebugLoc());
3223
3224 // TODO: Support ClCombinePointerLabelsOnLoad
3225 // TODO: Support ClEventCallbacks
3226
3227 NextIRB.CreateCall(
3228 DFSF.DFS.DFSanMemShadowOriginTransferFn,
3229 {DstPtr, SrcPtr, NextIRB.CreateIntCast(Size, DFSF.DFS.IntptrTy, false)});
3230}
3231
3232Value *DFSanVisitor::makeAddReleaseOrderingTable(IRBuilder<> &IRB) {
3233 constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
3234 uint32_t OrderingTable[NumOrderings] = {};
3235
3236 OrderingTable[(int)AtomicOrderingCABI::relaxed] =
3237 OrderingTable[(int)AtomicOrderingCABI::release] =
3238 (int)AtomicOrderingCABI::release;
3239 OrderingTable[(int)AtomicOrderingCABI::consume] =
3240 OrderingTable[(int)AtomicOrderingCABI::acquire] =
3241 OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
3242 (int)AtomicOrderingCABI::acq_rel;
3243 OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
3244 (int)AtomicOrderingCABI::seq_cst;
3245
3246 return ConstantDataVector::get(IRB.getContext(), OrderingTable);
3247}
3248
3249void DFSanVisitor::visitLibAtomicStore(CallBase &CB) {
3250 IRBuilder<> IRB(&CB);
3251 Value *Size = CB.getArgOperand(0);
3252 Value *SrcPtr = CB.getArgOperand(1);
3253 Value *DstPtr = CB.getArgOperand(2);
3254 Value *Ordering = CB.getArgOperand(3);
3255 // Convert the call to have at least Release ordering to make sure
3256 // the shadow operations aren't reordered after it.
3257 Value *NewOrdering =
3258 IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
3259 CB.setArgOperand(3, NewOrdering);
3260
3261 // TODO: Support ClCombinePointerLabelsOnStore
3262 // TODO: Support ClEventCallbacks
3263
3264 IRB.CreateCall(
3265 DFSF.DFS.DFSanMemShadowOriginTransferFn,
3266 {DstPtr, SrcPtr, IRB.CreateIntCast(Size, DFSF.DFS.IntptrTy, false)});
3267}
3268
3269void DFSanVisitor::visitLibAtomicExchange(CallBase &CB) {
3270 // void __atomic_exchange(size_t size, void *ptr, void *val, void *ret, int
3271 // ordering)
3272 IRBuilder<> IRB(&CB);
3273 Value *Size = CB.getArgOperand(0);
3274 Value *TargetPtr = CB.getArgOperand(1);
3275 Value *SrcPtr = CB.getArgOperand(2);
3276 Value *DstPtr = CB.getArgOperand(3);
3277
3278 // This operation is not atomic for the shadow and origin memory.
3279 // This could result in DFSan false positives or false negatives.
3280 // For now we will assume these operations are rare, and
3281 // the additional complexity to address this is not warrented.
3282
3283 // Current Target to Dest
3284 IRB.CreateCall(
3285 DFSF.DFS.DFSanMemShadowOriginTransferFn,
3286 {DstPtr, TargetPtr, IRB.CreateIntCast(Size, DFSF.DFS.IntptrTy, false)});
3287
3288 // Current Src to Target (overriding)
3289 IRB.CreateCall(
3290 DFSF.DFS.DFSanMemShadowOriginTransferFn,
3291 {TargetPtr, SrcPtr, IRB.CreateIntCast(Size, DFSF.DFS.IntptrTy, false)});
3292}
3293
3294void DFSanVisitor::visitLibAtomicCompareExchange(CallBase &CB) {
3295 // bool __atomic_compare_exchange(size_t size, void *ptr, void *expected, void
3296 // *desired, int success_order, int failure_order)
3297 Value *Size = CB.getArgOperand(0);
3298 Value *TargetPtr = CB.getArgOperand(1);
3299 Value *ExpectedPtr = CB.getArgOperand(2);
3300 Value *DesiredPtr = CB.getArgOperand(3);
3301
3302 // This operation is not atomic for the shadow and origin memory.
3303 // This could result in DFSan false positives or false negatives.
3304 // For now we will assume these operations are rare, and
3305 // the additional complexity to address this is not warrented.
3306
3307 IRBuilder<> NextIRB(CB.getNextNode());
3308 NextIRB.SetCurrentDebugLocation(CB.getDebugLoc());
3309
3310 DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
3311
3312 // If original call returned true, copy Desired to Target.
3313 // If original call returned false, copy Target to Expected.
3314 NextIRB.CreateCall(DFSF.DFS.DFSanMemShadowOriginConditionalExchangeFn,
3315 {NextIRB.CreateIntCast(&CB, NextIRB.getInt8Ty(), false),
3316 TargetPtr, ExpectedPtr, DesiredPtr,
3317 NextIRB.CreateIntCast(Size, DFSF.DFS.IntptrTy, false)});
3318}
3319
// Central call-site instrumentation: dispatch intrinsics/inline asm to
// operand combining, special-case libatomic entry points, route ABI-listed
// functions to visitWrappedCallBase, and otherwise pass argument shadows
// (and origins) through the __dfsan_arg TLS slots and read the return
// shadow/origin back after the call.
3320void DFSanVisitor::visitCallBase(CallBase &CB) {
 // NOTE(review): the line declaring `F` (presumably
 // `Function *F = CB.getCalledFunction();`) appears to be missing from
 // this excerpt — confirm against upstream.
3322 if ((F && F->isIntrinsic()) || CB.isInlineAsm()) {
3323 visitInstOperands(CB);
3324 return;
3325 }
3326
3327 // Calls to this function are synthesized in wrappers, and we shouldn't
3328 // instrument them.
3329 if (F == DFSF.DFS.DFSanVarargWrapperFn.getCallee()->stripPointerCasts())
3330 return;
3331
3332 LibFunc LF;
3333 if (DFSF.TLI.getLibFunc(CB, LF)) {
3334 // libatomic.a functions need to have special handling because there isn't
3335 // a good way to intercept them or compile the library with
3336 // instrumentation.
3337 switch (LF) {
3338 case LibFunc_atomic_load:
3339 if (!isa<CallInst>(CB)) {
3340 llvm::errs() << "DFSAN -- cannot instrument invoke of libatomic load. "
3341 "Ignoring!\n";
3342 break;
3343 }
3344 visitLibAtomicLoad(CB);
3345 return;
3346 case LibFunc_atomic_store:
3347 visitLibAtomicStore(CB);
3348 return;
3349 default:
3350 break;
3351 }
3352 }
3353
3354 // TODO: These are not supported by TLI? They are not in the enum.
3355 if (F && F->hasName() && !F->isVarArg()) {
3356 if (F->getName() == "__atomic_exchange") {
3357 visitLibAtomicExchange(CB);
3358 return;
3359 }
3360 if (F->getName() == "__atomic_compare_exchange") {
3361 visitLibAtomicCompareExchange(CB);
3362 return;
3363 }
3364 }
3365
 // NOTE(review): the declaration of `UnwrappedFnIt` (the UnwrappedFnMap
 // iterator type) appears to be missing from this excerpt — confirm
 // against upstream.
3367 DFSF.DFS.UnwrappedFnMap.find(CB.getCalledOperand());
3368 if (UnwrappedFnIt != DFSF.DFS.UnwrappedFnMap.end())
3369 if (visitWrappedCallBase(*UnwrappedFnIt->second, CB))
3370 return;
3371
3372 IRBuilder<> IRB(&CB);
3373
3374 const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
3375 FunctionType *FT = CB.getFunctionType();
3376 const DataLayout &DL = getDataLayout();
3377
3378 // Stores argument shadows.
3379 unsigned ArgOffset = 0;
3380 for (unsigned I = 0, N = FT->getNumParams(); I != N; ++I) {
3381 if (ShouldTrackOrigins) {
3382 // Ignore overflowed origins
3383 Value *ArgShadow = DFSF.getShadow(CB.getArgOperand(I));
3384 if (I < DFSF.DFS.NumOfElementsInArgOrgTLS &&
3385 !DFSF.DFS.isZeroShadow(ArgShadow))
3386 IRB.CreateStore(DFSF.getOrigin(CB.getArgOperand(I)),
3387 DFSF.getArgOriginTLS(I, IRB));
3388 }
3389
3390 unsigned Size =
3391 DL.getTypeAllocSize(DFSF.DFS.getShadowTy(FT->getParamType(I)));
3392 // Stop storing if arguments' size overflows. Inside a function, arguments
3393 // after overflow have zero shadow values.
3394 if (ArgOffset + Size > ArgTLSSize)
3395 break;
3396 IRB.CreateAlignedStore(DFSF.getShadow(CB.getArgOperand(I)),
3397 DFSF.getArgTLS(FT->getParamType(I), ArgOffset, IRB),
 // NOTE(review): the alignment argument line (presumably
 // `ShadowTLSAlignment);`) appears to be missing from this excerpt —
 // confirm against upstream.
3399 ArgOffset += alignTo(Size, ShadowTLSAlignment);
3400 }
3401
 // Find the point right after the call (splitting an invoke's normal edge
 // if needed) so the return shadow/origin can be loaded there.
3402 Instruction *Next = nullptr;
3403 if (!CB.getType()->isVoidTy()) {
3404 if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
3405 if (II->getNormalDest()->getSinglePredecessor()) {
3406 Next = &II->getNormalDest()->front();
3407 } else {
3408 BasicBlock *NewBB =
3409 SplitEdge(II->getParent(), II->getNormalDest(), &DFSF.DT);
3410 Next = &NewBB->front();
3411 }
3412 } else {
3413 assert(CB.getIterator() != CB.getParent()->end());
3414 Next = CB.getNextNode();
3415 }
3416
3417 // Don't emit the epilogue for musttail call returns.
3418 if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
3419 return;
3420
3421 // Loads the return value shadow.
3422 IRBuilder<> NextIRB(Next);
3423 unsigned Size = DL.getTypeAllocSize(DFSF.DFS.getShadowTy(&CB));
3424 if (Size > RetvalTLSSize) {
3425 // Set overflowed return shadow to be zero.
3426 DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
3427 } else {
3428 LoadInst *LI = NextIRB.CreateAlignedLoad(
3429 DFSF.DFS.getShadowTy(&CB), DFSF.getRetvalTLS(CB.getType(), NextIRB),
3430 ShadowTLSAlignment, "_dfsret");
3431 DFSF.SkipInsts.insert(LI);
3432 DFSF.setShadow(&CB, LI);
3433 DFSF.NonZeroChecks.push_back(LI);
3434 }
3435
3436 if (ShouldTrackOrigins) {
3437 LoadInst *LI = NextIRB.CreateLoad(DFSF.DFS.OriginTy,
3438 DFSF.getRetvalOriginTLS(), "_dfsret_o");
3439 DFSF.SkipInsts.insert(LI);
3440 DFSF.setOrigin(&CB, LI);
3441 }
3442
3443 DFSF.addReachesFunctionCallbacksIfEnabled(NextIRB, CB, &CB);
3444 }
3445}
3446
3447void DFSanVisitor::visitPHINode(PHINode &PN) {
3448 Type *ShadowTy = DFSF.DFS.getShadowTy(&PN);
3449 PHINode *ShadowPN = PHINode::Create(ShadowTy, PN.getNumIncomingValues(), "",
3450 PN.getIterator());
3451
3452 // Give the shadow phi node valid predecessors to fool SplitEdge into working.
3453 Value *UndefShadow = UndefValue::get(ShadowTy);
3454 for (BasicBlock *BB : PN.blocks())
3455 ShadowPN->addIncoming(UndefShadow, BB);
3456
3457 DFSF.setShadow(&PN, ShadowPN);
3458
3459 PHINode *OriginPN = nullptr;
3460 if (DFSF.DFS.shouldTrackOrigins()) {
3461 OriginPN = PHINode::Create(DFSF.DFS.OriginTy, PN.getNumIncomingValues(), "",
3462 PN.getIterator());
3463 Value *UndefOrigin = UndefValue::get(DFSF.DFS.OriginTy);
3464 for (BasicBlock *BB : PN.blocks())
3465 OriginPN->addIncoming(UndefOrigin, BB);
3466 DFSF.setOrigin(&PN, OriginPN);
3467 }
3468
3469 DFSF.PHIFixups.push_back({&PN, ShadowPN, OriginPN});
3470}
3471
 // NOTE(review): the function signature (presumably
 // `PreservedAnalyses DataFlowSanitizerPass::run(Module &M,
 //  ModuleAnalysisManager &AM) {`) is missing from this excerpt — confirm
 // against upstream.
3474 // Return early if nosanitize_dataflow module flag is present for the module.
3475 if (checkIfAlreadyInstrumented(M, "nosanitize_dataflow"))
3476 return PreservedAnalyses::all();
 // Thunk letting per-function instrumentation query TargetLibraryInfo.
3477 auto GetTLI = [&](Function &F) -> TargetLibraryInfo & {
3478 auto &FAM =
 // NOTE(review): the lambda body lines fetching the FunctionAnalysisManager
 // proxy and returning TargetLibraryAnalysis results are missing from this
 // excerpt — confirm against upstream.
3481 };
3482 if (!DataFlowSanitizer(ABIListFiles).runImpl(M, GetTLI))
3483 return PreservedAnalyses::all();
3484
 // NOTE(review): the declaration of `PA` (presumably
 // `PreservedAnalyses PA = PreservedAnalyses::none();`) is missing from
 // this excerpt — confirm against upstream.
3486 // GlobalsAA is considered stateless and does not get invalidated unless
3487 // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
3488 // make changes that require GlobalsAA to be invalidated.
3489 PA.abandon<GlobalsAA>();
3490 return PA;
3491}
static bool isConstant(const MachineInstr &MI)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
const MemoryMapParams Linux_LoongArch64_MemoryMapParams
const MemoryMapParams Linux_X86_64_MemoryMapParams
static cl::opt< bool > ClTrackSelectControlFlow("dfsan-track-select-control-flow", cl::desc("Propagate labels from condition values of select instructions " "to results."), cl::Hidden, cl::init(true))
static cl::list< std::string > ClCombineTaintLookupTables("dfsan-combine-taint-lookup-table", cl::desc("When dfsan-combine-offset-labels-on-gep and/or " "dfsan-combine-pointer-labels-on-load are false, this flag can " "be used to re-enable combining offset and/or pointer taint when " "loading specific constant global variables (i.e. lookup tables)."), cl::Hidden)
static const Align MinOriginAlignment
static cl::opt< int > ClTrackOrigins("dfsan-track-origins", cl::desc("Track origins of labels"), cl::Hidden, cl::init(0))
static cl::list< std::string > ClABIListFiles("dfsan-abilist", cl::desc("File listing native ABI functions and how the pass treats them"), cl::Hidden)
static cl::opt< bool > ClReachesFunctionCallbacks("dfsan-reaches-function-callbacks", cl::desc("Insert calls to callback functions on data reaching a function."), cl::Hidden, cl::init(false))
static Value * expandFromPrimitiveShadowRecursive(Value *Shadow, SmallVector< unsigned, 4 > &Indices, Type *SubShadowTy, Value *PrimitiveShadow, IRBuilder<> &IRB)
static cl::opt< int > ClInstrumentWithCallThreshold("dfsan-instrument-with-call-threshold", cl::desc("If the function being instrumented requires more than " "this number of origin stores, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(3500))
static cl::opt< bool > ClPreserveAlignment("dfsan-preserve-alignment", cl::desc("respect alignment requirements provided by input IR"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClDebugNonzeroLabels("dfsan-debug-nonzero-labels", cl::desc("Insert calls to __dfsan_nonzero_label on observing a parameter, " "load or return with a nonzero label"), cl::Hidden)
static cl::opt< bool > ClCombineOffsetLabelsOnGEP("dfsan-combine-offset-labels-on-gep", cl::desc("Combine the label of the offset with the label of the pointer when " "doing pointer arithmetic."), cl::Hidden, cl::init(true))
static cl::opt< bool > ClIgnorePersonalityRoutine("dfsan-ignore-personality-routine", cl::desc("If a personality routine is marked uninstrumented from the ABI " "list, do not create a wrapper for it."), cl::Hidden, cl::init(false))
static const Align ShadowTLSAlignment
static AtomicOrdering addReleaseOrdering(AtomicOrdering AO)
static AtomicOrdering addAcquireOrdering(AtomicOrdering AO)
Value * StripPointerGEPsAndCasts(Value *V)
const MemoryMapParams Linux_AArch64_MemoryMapParams
static cl::opt< bool > ClConditionalCallbacks("dfsan-conditional-callbacks", cl::desc("Insert calls to callback functions on conditionals."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClCombinePointerLabelsOnLoad("dfsan-combine-pointer-labels-on-load", cl::desc("Combine the label of the pointer with the label of the data when " "loading from memory."), cl::Hidden, cl::init(true))
static StringRef getGlobalTypeString(const GlobalValue &G)
static cl::opt< bool > ClCombinePointerLabelsOnStore("dfsan-combine-pointer-labels-on-store", cl::desc("Combine the label of the pointer with the label of the data when " "storing in memory."), cl::Hidden, cl::init(false))
static const unsigned ArgTLSSize
static const unsigned RetvalTLSSize
static bool isAMustTailRetVal(Value *RetVal)
static cl::opt< bool > ClEventCallbacks("dfsan-event-callbacks", cl::desc("Insert calls to __dfsan_*_callback functions on data events."), cl::Hidden, cl::init(false))
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
uint64_t Addr
std::string Name
uint64_t Size
static bool runImpl(Function &F, const TargetLowering &TLI)
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
nvptx lower args
uint64_t IntrinsicInst * II
#define P(N)
if(auto Err=PB.parsePassPipeline(MPM, Passes)) return wrap(std MPM run * Mod
FunctionAnalysisManager FAM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
StringSet - A set-like wrapper for the StringMap.
Defines the virtual file system interface vfs::FileSystem.
Class for arbitrary precision integers.
Definition: APInt.h:78
an instruction to allocate memory on the stack
Definition: Instructions.h:63
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:410
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:501
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:704
AttributeSet getFnAttrs() const
The function attributes are returned.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
unsigned getNumAttrSets() const
AttributeSet getParamAttrs(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
AttributeMask & addAttribute(Attribute::AttrKind Val)
Add an attribute to the mask.
Definition: AttributeMask.h:44
static Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
Definition: Attributes.cpp:281
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
const Instruction & front() const
Definition: BasicBlock.h:471
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:212
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:177
This class represents a no-op cast from one type to another.
Conditional or Unconditional Branch instruction.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1120
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1416
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1411
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1349
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1407
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1269
Value * getCalledOperand() const
Definition: InstrTypes.h:1342
void setAttributes(AttributeList A)
Set the attributes for this call.
Definition: InstrTypes.h:1428
void addRetAttr(Attribute::AttrKind Kind)
Adds the attribute to the return value.
Definition: InstrTypes.h:1492
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1294
void setArgOperand(unsigned i, Value *v)
Definition: InstrTypes.h:1299
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1275
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1207
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1285
unsigned arg_size() const
Definition: InstrTypes.h:1292
AttributeList getAttributes() const
Return the attributes for this call.
Definition: InstrTypes.h:1425
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
Definition: InstrTypes.h:1502
void setCalledFunction(Function *Fn)
Sets the function called, including updating the function type.
Definition: InstrTypes.h:1388
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
bool isMustTailCall() const
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:444
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:661
static ConstantAggregateZero * get(Type *Ty)
Definition: Constants.cpp:1672
static Constant * get(LLVMContext &Context, ArrayRef< uint8_t > Elts)
get() constructors - Return a constant with vector type with an element count and element type matchi...
Definition: Constants.cpp:3006
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:1108
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
Definition: Constants.h:126
This is an important base class in LLVM.
Definition: Constant.h:42
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition: Constants.cpp:90
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
A debug info location.
Definition: DebugLoc.h:33
unsigned getLine() const
Definition: DebugLoc.cpp:24
DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:20
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:156
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:152
iterator end()
Definition: DenseMap.h:84
Implements a dense probed hash-table based set.
Definition: DenseSet.h:278
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
This instruction extracts a single (scalar) element from a VectorType value.
This instruction extracts a struct member or array element value from an aggregate value.
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition: Type.cpp:791
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:170
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition: Function.h:173
void removeFnAttrs(const AttributeMask &Attrs)
Definition: Function.cpp:697
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:353
void removeFnAttr(Attribute::AttrKind Kind)
Remove function attributes from this function.
Definition: Function.cpp:689
arg_iterator arg_begin()
Definition: Function.h:868
void removeRetAttrs(const AttributeMask &Attrs)
removes the attributes from the return value list of attributes.
Definition: Function.cpp:709
void copyAttributesFrom(const Function *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a Function) from the ...
Definition: Function.cpp:860
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:933
void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition: Globals.cpp:587
const GlobalObject * getAliaseeObject() const
Definition: Globals.cpp:595
static bool isExternalWeakLinkage(LinkageTypes Linkage)
Definition: GlobalValue.h:412
LinkageTypes getLinkage() const
Definition: GlobalValue.h:546
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:656
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition: GlobalValue.h:51
@ WeakODRLinkage
Same, but only replaced by something equivalent.
Definition: GlobalValue.h:57
@ LinkOnceODRLinkage
Same, but only replaced by something equivalent.
Definition: GlobalValue.h:55
Type * getValueType() const
Definition: GlobalValue.h:296
Analysis pass providing a never-invalidated alias analysis result.
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Definition: IRBuilder.h:2503
Value * CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
Definition: IRBuilder.h:1902
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition: IRBuilder.h:1796
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition: IRBuilder.h:2554
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
Definition: IRBuilder.h:2491
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition: IRBuilder.h:1830
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2201
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition: IRBuilder.h:2547
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.cpp:1048
BasicBlock::iterator GetInsertPoint() const
Definition: IRBuilder.h:172
Value * CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx, const Twine &Name="")
Definition: IRBuilder.h:1995
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2150
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1460
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition: IRBuilder.h:528
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2277
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1889
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1813
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1439
LLVMContext & getContext() const
Definition: IRBuilder.h:173
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1498
Value * CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1, const Twine &Name="")
Definition: IRBuilder.h:1982
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition: IRBuilder.h:1826
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1350
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2444
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition: IRBuilder.h:2034
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1520
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2227
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition: IRBuilder.h:1849
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1542
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1384
GlobalVariable * CreateGlobalString(StringRef Str, const Twine &Name="", unsigned AddressSpace=0, Module *M=nullptr, bool AddNull=true)
Make a new global variable with initializer type i8*.
Definition: IRBuilder.cpp:44
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2697
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
Definition: PassManager.h:567
This instruction inserts a single (scalar) element into a VectorType value.
This instruction inserts a struct field or array element value into an aggregate value.
Base class for instruction visitors.
Definition: InstVisitor.h:78
RetTy visitCmpInst(CmpInst &I)
Definition: InstVisitor.h:265
RetTy visitExtractElementInst(ExtractElementInst &I)
Definition: InstVisitor.h:191
RetTy visitCallBase(CallBase &I)
Definition: InstVisitor.h:270
RetTy visitInsertValueInst(InsertValueInst &I)
Definition: InstVisitor.h:195
RetTy visitShuffleVectorInst(ShuffleVectorInst &I)
Definition: InstVisitor.h:193
RetTy visitLandingPadInst(LandingPadInst &I)
Definition: InstVisitor.h:196
RetTy visitAtomicCmpXchgInst(AtomicCmpXchgInst &I)
Definition: InstVisitor.h:171
RetTy visitBitCastInst(BitCastInst &I)
Definition: InstVisitor.h:187
RetTy visitSwitchInst(SwitchInst &I)
Definition: InstVisitor.h:235
RetTy visitPHINode(PHINode &I)
Definition: InstVisitor.h:175
RetTy visitReturnInst(ReturnInst &I)
Definition: InstVisitor.h:229
RetTy visitExtractValueInst(ExtractValueInst &I)
Definition: InstVisitor.h:194
RetTy visitUnaryOperator(UnaryOperator &I)
Definition: InstVisitor.h:263
RetTy visitStoreInst(StoreInst &I)
Definition: InstVisitor.h:170
RetTy visitInsertElementInst(InsertElementInst &I)
Definition: InstVisitor.h:192
RetTy visitAtomicRMWInst(AtomicRMWInst &I)
Definition: InstVisitor.h:172
RetTy visitAllocaInst(AllocaInst &I)
Definition: InstVisitor.h:168
RetTy visitBinaryOperator(BinaryOperator &I)
Definition: InstVisitor.h:264
RetTy visitMemTransferInst(MemTransferInst &I)
Definition: InstVisitor.h:217
RetTy visitMemSetInst(MemSetInst &I)
Definition: InstVisitor.h:209
RetTy visitCastInst(CastInst &I)
Definition: InstVisitor.h:262
RetTy visitBranchInst(BranchInst &I)
Definition: InstVisitor.h:232
RetTy visitSelectInst(SelectInst &I)
Definition: InstVisitor.h:189
RetTy visitGetElementPtrInst(GetElementPtrInst &I)
Definition: InstVisitor.h:174
RetTy visitLoadInst(LoadInst &I)
Definition: InstVisitor.h:169
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:471
bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
bool isTerminator() const
Definition: Instruction.h:277
bool comesBefore(const Instruction *Other) const
Given an instruction Other in the same basic block as this instruction, return true if this instructi...
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:468
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Definition: Instruction.cpp:74
Class to represent integer types.
Definition: DerivedTypes.h:42
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:311
Invoke instruction.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
The landingpad instruction holds all of the information necessary to generate correct exception handl...
An instruction for reading from memory.
Definition: Instructions.h:176
void setAlignment(Align Align)
Definition: Instructions.h:215
Value * getPointerOperand()
Definition: Instructions.h:255
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this load instruction.
Definition: Instructions.h:225
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:220
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:211
MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
Definition: MDBuilder.cpp:47
Metadata node.
Definition: Metadata.h:1069
This class wraps the llvm.memset and llvm.memset.inline intrinsics.
This class wraps the llvm.memcpy/memmove intrinsics.
static MemoryEffectsBase readOnly()
Create MemoryEffectsBase that can read any memory.
Definition: ModRef.h:122
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
const std::string & getModuleInlineAsm() const
Get any module-scope inline assembly blocks.
Definition: Module.h:306
void setModuleInlineAsm(StringRef Asm)
Set the module-scope inline assembly blocks.
Definition: Module.h:345
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Definition: Operator.h:42
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
iterator_range< const_block_iterator > blocks() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1878
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: Analysis.h:114
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
void abandon()
Mark an analysis as abandoned.
Definition: Analysis.h:164
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
This instruction constructs a fixed permutation of two input vectors.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:452
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:384
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:458
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:519
size_t size() const
Definition: SmallVector.h:78
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
static std::unique_ptr< SpecialCaseList > createOrDie(const std::vector< std::string > &Paths, llvm::vfs::FileSystem &FS)
Parses the special case list entries from files.
An instruction for storing to memory.
Definition: Instructions.h:292
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
StringSet - A wrapper for StringMap that provides set-like functionality.
Definition: StringSet.h:23
std::pair< typename Base::iterator, bool > insert(StringRef key)
Definition: StringSet.h:38
Class to represent struct types.
Definition: DerivedTypes.h:218
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition: Type.cpp:406
Multiway switch.
Value * getCondition() const
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
@ loongarch64
Definition: Triple.h:62
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
unsigned getIntegerBitWidth() const
static Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:310
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:237
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1859
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
Value * getOperand(unsigned i) const
Definition: User.h:228
unsigned getNumOperands() const
Definition: User.h:250
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:694
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
bool hasName() const
Definition: Value.h:261
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:353
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Key
PAL metadata keys.
AttributeMask typeIncompatible(Type *Ty, AttributeSet AS, AttributeSafetyKind ASK=ASK_ALL)
Which attributes cannot be applied to a type.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:125
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ BR
Control flow instructions. These all have token chains.
Definition: ISDOpcodes.h:1118
@ CE
Windows NT (Windows on ARM)
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
NodeAddr< PhiNode * > Phi
Definition: RDFGraph.h:390
NodeAddr< BlockNode * > Block
Definition: RDFGraph.h:392
IntrusiveRefCntPtr< FileSystem > getRealFileSystem()
Gets a vfs::FileSystem for the 'real' file system, as seen by the operating system.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:480
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2115
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:657
void erase(Container &C, ValueType V)
Wrapper function to remove a value from a container:
Definition: STLExtras.h:2107
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition: Alignment.h:111
iterator_range< df_iterator< T > > depth_first(const T &G)
Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
BasicBlock * SplitEdge(BasicBlock *From, BasicBlock *To, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the edge connecting the specified blocks, and return the newly created basic block between From...
bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Remove all blocks that can not be reached from the function's entry.
Definition: Local.cpp:3271
bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if module has flag attached, if not add the flag.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85