Bug Summary

File: llvm/include/llvm/IR/Instructions.h
Warning: line 940, column 7
Called C++ object pointer is null

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name DataFlowSanitizer.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20201202100633+b276bf5a572/build-llvm/lib/Transforms/Instrumentation -I /build/llvm-toolchain-snapshot-12~++20201202100633+b276bf5a572/llvm/lib/Transforms/Instrumentation -I /build/llvm-toolchain-snapshot-12~++20201202100633+b276bf5a572/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20201202100633+b276bf5a572/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20201202100633+b276bf5a572/build-llvm/lib/Transforms/Instrumentation -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20201202100633+b276bf5a572=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-12-03-051126-41451-1 -x c++ /build/llvm-toolchain-snapshot-12~++20201202100633+b276bf5a572/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp

/build/llvm-toolchain-snapshot-12~++20201202100633+b276bf5a572/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp

1//===- DataFlowSanitizer.cpp - dynamic data flow analysis -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file is a part of DataFlowSanitizer, a generalised dynamic data flow
11/// analysis.
12///
13/// Unlike other Sanitizer tools, this tool is not designed to detect a specific
14/// class of bugs on its own. Instead, it provides a generic dynamic data flow
15/// analysis framework to be used by clients to help detect application-specific
16/// issues within their own code.
17///
18/// The analysis is based on automatic propagation of data flow labels (also
19/// known as taint labels) through a program as it performs computation. Each
20/// byte of application memory is backed by two bytes of shadow memory which
21/// hold the label. On Linux/x86_64, memory is laid out as follows:
22///
23/// +--------------------+ 0x800000000000 (top of memory)
24/// | application memory |
25/// +--------------------+ 0x700000008000 (kAppAddr)
26/// | |
27/// | unused |
28/// | |
29/// +--------------------+ 0x200200000000 (kUnusedAddr)
30/// | union table |
31/// +--------------------+ 0x200000000000 (kUnionTableAddr)
32/// | shadow memory |
33/// +--------------------+ 0x000000010000 (kShadowAddr)
34/// | reserved by kernel |
35/// +--------------------+ 0x000000000000
36///
37/// To derive a shadow memory address from an application memory address,
38/// bits 44-46 are cleared to bring the address into the range
39/// [0x000000008000,0x100000000000). Then the address is shifted left by 1 to
40/// account for the double byte representation of shadow labels and move the
41/// address into the shadow memory range. See the function
42/// DataFlowSanitizer::getShadowAddress below.
43///
44/// For more information, please refer to the design document:
45/// http://clang.llvm.org/docs/DataFlowSanitizerDesign.html
46//
47//===----------------------------------------------------------------------===//
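As a standalone illustration of the mapping just described, a minimal sketch (assuming the x86_64 constants from the diagram above; the helper name appToShadow is hypothetical and not part of this file):

#include <cstdint>

// Clearing bits 44-46 (the ~0x700000000000 mask) brings an application
// address into [0x000000008000, 0x100000000000); shifting left by 1 then
// accounts for the two shadow bytes that back each application byte.
static uint64_t appToShadow(uint64_t AppAddr) {
  return (AppAddr & ~0x700000000000ULL) << 1;
}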
48
49#include "llvm/Transforms/Instrumentation/DataFlowSanitizer.h"
50#include "llvm/ADT/DenseMap.h"
51#include "llvm/ADT/DenseSet.h"
52#include "llvm/ADT/DepthFirstIterator.h"
53#include "llvm/ADT/None.h"
54#include "llvm/ADT/SmallPtrSet.h"
55#include "llvm/ADT/SmallVector.h"
56#include "llvm/ADT/StringExtras.h"
57#include "llvm/ADT/StringRef.h"
58#include "llvm/ADT/Triple.h"
59#include "llvm/Analysis/ValueTracking.h"
60#include "llvm/IR/Argument.h"
61#include "llvm/IR/Attributes.h"
62#include "llvm/IR/BasicBlock.h"
63#include "llvm/IR/Constant.h"
64#include "llvm/IR/Constants.h"
65#include "llvm/IR/DataLayout.h"
66#include "llvm/IR/DerivedTypes.h"
67#include "llvm/IR/Dominators.h"
68#include "llvm/IR/Function.h"
69#include "llvm/IR/GlobalAlias.h"
70#include "llvm/IR/GlobalValue.h"
71#include "llvm/IR/GlobalVariable.h"
72#include "llvm/IR/IRBuilder.h"
73#include "llvm/IR/InlineAsm.h"
74#include "llvm/IR/InstVisitor.h"
75#include "llvm/IR/InstrTypes.h"
76#include "llvm/IR/Instruction.h"
77#include "llvm/IR/Instructions.h"
78#include "llvm/IR/IntrinsicInst.h"
79#include "llvm/IR/LLVMContext.h"
80#include "llvm/IR/MDBuilder.h"
81#include "llvm/IR/Module.h"
82#include "llvm/IR/PassManager.h"
83#include "llvm/IR/Type.h"
84#include "llvm/IR/User.h"
85#include "llvm/IR/Value.h"
86#include "llvm/InitializePasses.h"
87#include "llvm/Pass.h"
88#include "llvm/Support/Casting.h"
89#include "llvm/Support/CommandLine.h"
90#include "llvm/Support/ErrorHandling.h"
91#include "llvm/Support/SpecialCaseList.h"
92#include "llvm/Support/VirtualFileSystem.h"
93#include "llvm/Transforms/Instrumentation.h"
94#include "llvm/Transforms/Utils/BasicBlockUtils.h"
95#include "llvm/Transforms/Utils/Local.h"
96#include <algorithm>
97#include <cassert>
98#include <cstddef>
99#include <cstdint>
100#include <iterator>
101#include <memory>
102#include <set>
103#include <string>
104#include <utility>
105#include <vector>
106
107using namespace llvm;
108
109// External symbol to be used when generating the shadow address for
110// architectures with multiple VMAs. Instead of using a constant integer
111// the runtime will set the external mask based on the VMA range.
112const char kDFSanExternShadowPtrMask[] = "__dfsan_shadow_ptr_mask";
113
114// The -dfsan-preserve-alignment flag controls whether this pass assumes that
115// alignment requirements provided by the input IR are correct. For example,
116// if the input IR contains a load with alignment 8, this flag will cause
117// the shadow load to have alignment 16. This flag is disabled by default as
118// we have unfortunately encountered too much code (including Clang itself;
119// see PR14291) which performs misaligned access.
120static cl::opt<bool> ClPreserveAlignment(
121 "dfsan-preserve-alignment",
122 cl::desc("respect alignment requirements provided by input IR"), cl::Hidden,
123 cl::init(false));
124
125// The ABI list files control how shadow parameters are passed. The pass treats
126// every function labelled "uninstrumented" in the ABI list file as conforming
127// to the "native" (i.e. unsanitized) ABI. Unless the ABI list contains
128// additional annotations for those functions, a call to one of those functions
129// will produce a warning message, as the labelling behaviour of the function is
130// unknown. The other supported annotations are "functional" and "discard",
131// which are described below under DataFlowSanitizer::WrapperKind.
132static cl::list<std::string> ClABIListFiles(
133 "dfsan-abilist",
134 cl::desc("File listing native ABI functions and how the pass treats them"),
135 cl::Hidden);
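A hypothetical ABI list file, in the SpecialCaseList syntax matched by the "dataflow" sections above (the entries are illustrative, not from the source):

# Hypothetical -dfsan-abilist input.
fun:main=uninstrumented
fun:main=discard
fun:memcmp=uninstrumented
fun:memcmp=custom        # calls are rewritten to call __dfsw_memcmp
fun:crc32=uninstrumented
fun:crc32=functional     # return label = union of the argument labels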
136
137// Controls whether the pass uses IA_Args or IA_TLS as the ABI for instrumented
138// functions (see DataFlowSanitizer::InstrumentedABI below).
139static cl::opt<bool> ClArgsABI(
140 "dfsan-args-abi",
141 cl::desc("Use the argument ABI rather than the TLS ABI"),
142 cl::Hidden);
143
144// Controls whether the pass includes or ignores the labels of pointers in load
145// instructions.
146static cl::opt<bool> ClCombinePointerLabelsOnLoad(
147 "dfsan-combine-pointer-labels-on-load",
148 cl::desc("Combine the label of the pointer with the label of the data when "
149 "loading from memory."),
150 cl::Hidden, cl::init(true));
151
152// Controls whether the pass includes or ignores the labels of pointers in
153// stores instructions.
154static cl::opt<bool> ClCombinePointerLabelsOnStore(
155 "dfsan-combine-pointer-labels-on-store",
156 cl::desc("Combine the label of the pointer with the label of the data when "
157 "storing in memory."),
158 cl::Hidden, cl::init(false));
159
160static cl::opt<bool> ClDebugNonzeroLabels(
161 "dfsan-debug-nonzero-labels",
162 cl::desc("Insert calls to __dfsan_nonzero_label on observing a parameter, "
163 "load or return with a nonzero label"),
164 cl::Hidden);
165
166// Experimental feature that inserts callbacks for certain data events.
167// Currently callbacks are only inserted for loads, stores, memory transfers
168// (i.e. memcpy and memmove), and comparisons.
169//
170// If this flag is set to true, the user must provide definitions for the
171// following callback functions:
172// void __dfsan_load_callback(dfsan_label Label, void* addr);
173// void __dfsan_store_callback(dfsan_label Label, void* addr);
174// void __dfsan_mem_transfer_callback(dfsan_label *Start, size_t Len);
175// void __dfsan_cmp_callback(dfsan_label CombinedLabel);
176static cl::opt<bool> ClEventCallbacks(
177 "dfsan-event-callbacks",
178 cl::desc("Insert calls to __dfsan_*_callback functions on data events."),
179 cl::Hidden, cl::init(false));
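A minimal sketch of the definitions a user might supply when this flag is enabled (the signatures are those listed in the comment above; the dfsan_label typedef and the bodies are illustrative assumptions):

#include <cstddef>
#include <cstdint>
#include <cstdio>

extern "C" {
typedef uint16_t dfsan_label; // assumption: matches ShadowWidthBits == 16

void __dfsan_load_callback(dfsan_label Label, void *Addr) {
  if (Label != 0)
    fprintf(stderr, "tainted load at %p (label %u)\n", Addr, (unsigned)Label);
}
void __dfsan_store_callback(dfsan_label Label, void *Addr) { /* no-op */ }
void __dfsan_mem_transfer_callback(dfsan_label *Start, size_t Len) { /* no-op */ }
void __dfsan_cmp_callback(dfsan_label CombinedLabel) { /* no-op */ }
}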
180
181// Use a distinct bit for each base label, enabling faster unions with less
182// instrumentation. Limits the max number of base labels to 16.
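// For example, base labels 0x0001 and 0x0004 union to 0x0005 with a single
// OR instruction, with no call into the runtime's union table.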
183static cl::opt<bool> ClFast16Labels(
184 "dfsan-fast-16-labels",
185 cl::desc("Use more efficient instrumentation, limiting the number of "
186 "labels to 16."),
187 cl::Hidden, cl::init(false));
188
189// Controls whether the pass tracks the control flow of select instructions.
190static cl::opt<bool> ClTrackSelectControlFlow(
191 "dfsan-track-select-control-flow",
192 cl::desc("Propagate labels from condition values of select instructions "
193 "to results."),
194 cl::Hidden, cl::init(true));
195
196static StringRef GetGlobalTypeString(const GlobalValue &G) {
197 // Types of GlobalVariables are always pointer types.
198 Type *GType = G.getValueType();
199 // For now we support excluding struct types only.
200 if (StructType *SGType = dyn_cast<StructType>(GType)) {
201 if (!SGType->isLiteral())
202 return SGType->getName();
203 }
204 return "<unknown type>";
205}
206
207namespace {
208
209class DFSanABIList {
210 std::unique_ptr<SpecialCaseList> SCL;
211
212 public:
213 DFSanABIList() = default;
214
215 void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); }
216
217 /// Returns whether either this function or its source file are listed in the
218 /// given category.
219 bool isIn(const Function &F, StringRef Category) const {
220 return isIn(*F.getParent(), Category) ||
221 SCL->inSection("dataflow", "fun", F.getName(), Category);
222 }
223
224 /// Returns whether this global alias is listed in the given category.
225 ///
226 /// If GA aliases a function, the alias's name is matched as a function name
227 /// would be. Similarly, aliases of globals are matched like globals.
228 bool isIn(const GlobalAlias &GA, StringRef Category) const {
229 if (isIn(*GA.getParent(), Category))
230 return true;
231
232 if (isa<FunctionType>(GA.getValueType()))
233 return SCL->inSection("dataflow", "fun", GA.getName(), Category);
234
235 return SCL->inSection("dataflow", "global", GA.getName(), Category) ||
236 SCL->inSection("dataflow", "type", GetGlobalTypeString(GA),
237 Category);
238 }
239
240 /// Returns whether this module is listed in the given category.
241 bool isIn(const Module &M, StringRef Category) const {
242 return SCL->inSection("dataflow", "src", M.getModuleIdentifier(), Category);
243 }
244};
245
246/// TransformedFunction is used to express the result of transforming one
247/// function type into another. This struct is immutable. It holds metadata
248/// useful for updating calls of the old function to the new type.
249struct TransformedFunction {
250 TransformedFunction(FunctionType* OriginalType,
251 FunctionType* TransformedType,
252 std::vector<unsigned> ArgumentIndexMapping)
253 : OriginalType(OriginalType),
254 TransformedType(TransformedType),
255 ArgumentIndexMapping(ArgumentIndexMapping) {}
256
257 // Disallow copies.
258 TransformedFunction(const TransformedFunction&) = delete;
259 TransformedFunction& operator=(const TransformedFunction&) = delete;
260
261 // Allow moves.
262 TransformedFunction(TransformedFunction&&) = default;
263 TransformedFunction& operator=(TransformedFunction&&) = default;
264
265 /// Type of the function before the transformation.
266 FunctionType *OriginalType;
267
268 /// Type of the function after the transformation.
269 FunctionType *TransformedType;
270
271 /// Transforming a function may change the position of arguments. This
272 /// member records the mapping from each argument's old position to its new
273 /// position. Argument positions are zero-indexed. If the transformation
274 /// from F to F' made the first argument of F into the third argument of F',
275 /// then ArgumentIndexMapping[0] will equal 2.
276 std::vector<unsigned> ArgumentIndexMapping;
277};
278
279/// Given function attributes from a call site for the original function,
280/// return function attributes appropriate for a call to the transformed
281/// function.
282AttributeList TransformFunctionAttributes(
283 const TransformedFunction& TransformedFunction,
284 LLVMContext& Ctx, AttributeList CallSiteAttrs) {
285
286 // Construct a vector of AttributeSet for each function argument.
287 std::vector<llvm::AttributeSet> ArgumentAttributes(
288 TransformedFunction.TransformedType->getNumParams());
289
290 // Copy attributes from the parameter of the original function to the
291 // transformed version. 'ArgumentIndexMapping' holds the mapping from
292 // old argument position to new.
293 for (unsigned i=0, ie = TransformedFunction.ArgumentIndexMapping.size();
294 i < ie; ++i) {
295 unsigned TransformedIndex = TransformedFunction.ArgumentIndexMapping[i];
296 ArgumentAttributes[TransformedIndex] = CallSiteAttrs.getParamAttributes(i);
297 }
298
299 // Copy annotations on varargs arguments.
300 for (unsigned i = TransformedFunction.OriginalType->getNumParams(),
301 ie = CallSiteAttrs.getNumAttrSets(); i<ie; ++i) {
302 ArgumentAttributes.push_back(CallSiteAttrs.getParamAttributes(i));
303 }
304
305 return AttributeList::get(
306 Ctx,
307 CallSiteAttrs.getFnAttributes(),
308 CallSiteAttrs.getRetAttributes(),
309 llvm::makeArrayRef(ArgumentAttributes));
310}
311
312class DataFlowSanitizer {
313 friend struct DFSanFunction;
314 friend class DFSanVisitor;
315
316 enum { ShadowWidthBits = 16, ShadowWidthBytes = ShadowWidthBits / 8 };
317
318 /// Which ABI should be used for instrumented functions?
319 enum InstrumentedABI {
320 /// Argument and return value labels are passed through additional
321 /// arguments and by modifying the return type.
322 IA_Args,
323
324 /// Argument and return value labels are passed through TLS variables
325 /// __dfsan_arg_tls and __dfsan_retval_tls.
326 IA_TLS
327 };
328
329 /// How should calls to uninstrumented functions be handled?
330 enum WrapperKind {
331 /// This function is present in an uninstrumented form but we don't know
332 /// how it should be handled. Print a warning and call the function anyway.
333 /// Don't label the return value.
334 WK_Warning,
335
336 /// This function does not write to (user-accessible) memory, and its return
337 /// value is unlabelled.
338 WK_Discard,
339
340 /// This function does not write to (user-accessible) memory, and the label
341 /// of its return value is the union of the label of its arguments.
342 WK_Functional,
343
344 /// Instead of calling the function, a custom wrapper __dfsw_F is called,
345 /// where F is the name of the function. This function may wrap the
346 /// original function or provide its own implementation. This is similar to
347 /// the IA_Args ABI, except that IA_Args uses a struct return type to
348 /// pass the return value shadow in a register, while WK_Custom uses an
349 /// extra pointer argument to return the shadow. This allows the wrapped
350 /// form of the function type to be expressed in C.
351 WK_Custom
352 };
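For a function such as int f(int), the WK_Custom wrapper described above can be written directly in C; a hypothetical sketch (f, __dfsw_f, and the body are illustrative):

#include <cstdint>

extern "C" {
typedef uint16_t dfsan_label; // assumption: the pass's 16-bit label type

// Original arguments come first, then one label per argument, then a
// pointer through which the wrapper reports the label of its return value.
int __dfsw_f(int A, dfsan_label ALabel, dfsan_label *RetLabel) {
  *RetLabel = ALabel; // e.g. propagate the argument's label to the result
  return A + 1;       // placeholder behaviour
}
}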
353
354 Module *Mod;
355 LLVMContext *Ctx;
356 Type *Int8Ptr;
357 IntegerType *ShadowTy;
358 PointerType *ShadowPtrTy;
359 IntegerType *IntptrTy;
360 ConstantInt *ZeroShadow;
361 ConstantInt *ShadowPtrMask;
362 ConstantInt *ShadowPtrMul;
363 Constant *ArgTLS;
364 Constant *RetvalTLS;
365 Constant *ExternalShadowMask;
366 FunctionType *DFSanUnionFnTy;
367 FunctionType *DFSanUnionLoadFnTy;
368 FunctionType *DFSanUnimplementedFnTy;
369 FunctionType *DFSanSetLabelFnTy;
370 FunctionType *DFSanNonzeroLabelFnTy;
371 FunctionType *DFSanVarargWrapperFnTy;
372 FunctionType *DFSanCmpCallbackFnTy;
373 FunctionType *DFSanLoadStoreCallbackFnTy;
374 FunctionType *DFSanMemTransferCallbackFnTy;
375 FunctionCallee DFSanUnionFn;
376 FunctionCallee DFSanCheckedUnionFn;
377 FunctionCallee DFSanUnionLoadFn;
378 FunctionCallee DFSanUnionLoadFast16LabelsFn;
379 FunctionCallee DFSanUnimplementedFn;
380 FunctionCallee DFSanSetLabelFn;
381 FunctionCallee DFSanNonzeroLabelFn;
382 FunctionCallee DFSanVarargWrapperFn;
383 FunctionCallee DFSanLoadCallbackFn;
384 FunctionCallee DFSanStoreCallbackFn;
385 FunctionCallee DFSanMemTransferCallbackFn;
386 FunctionCallee DFSanCmpCallbackFn;
387 MDNode *ColdCallWeights;
388 DFSanABIList ABIList;
389 DenseMap<Value *, Function *> UnwrappedFnMap;
390 AttrBuilder ReadOnlyNoneAttrs;
391 bool DFSanRuntimeShadowMask = false;
392
393 Value *getShadowAddress(Value *Addr, Instruction *Pos);
394 bool isInstrumented(const Function *F);
395 bool isInstrumented(const GlobalAlias *GA);
396 FunctionType *getArgsFunctionType(FunctionType *T);
397 FunctionType *getTrampolineFunctionType(FunctionType *T);
398 TransformedFunction getCustomFunctionType(FunctionType *T);
399 InstrumentedABI getInstrumentedABI();
400 WrapperKind getWrapperKind(Function *F);
401 void addGlobalNamePrefix(GlobalValue *GV);
402 Function *buildWrapperFunction(Function *F, StringRef NewFName,
403 GlobalValue::LinkageTypes NewFLink,
404 FunctionType *NewFT);
405 Constant *getOrBuildTrampolineFunction(FunctionType *FT, StringRef FName);
406 void initializeCallbackFunctions(Module &M);
407 void initializeRuntimeFunctions(Module &M);
408
409 bool init(Module &M);
410
411public:
412 DataFlowSanitizer(const std::vector<std::string> &ABIListFiles);
413
414 bool runImpl(Module &M);
415};
416
417struct DFSanFunction {
418 DataFlowSanitizer &DFS;
419 Function *F;
420 DominatorTree DT;
421 DataFlowSanitizer::InstrumentedABI IA;
422 bool IsNativeABI;
423 AllocaInst *LabelReturnAlloca = nullptr;
424 DenseMap<Value *, Value *> ValShadowMap;
425 DenseMap<AllocaInst *, AllocaInst *> AllocaShadowMap;
426 std::vector<std::pair<PHINode *, PHINode *>> PHIFixups;
427 DenseSet<Instruction *> SkipInsts;
428 std::vector<Value *> NonZeroChecks;
429 bool AvoidNewBlocks;
430
431 struct CachedCombinedShadow {
432 BasicBlock *Block;
433 Value *Shadow;
434 };
435 DenseMap<std::pair<Value *, Value *>, CachedCombinedShadow>
436 CachedCombinedShadows;
437 DenseMap<Value *, std::set<Value *>> ShadowElements;
438
439 DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI)
440 : DFS(DFS), F(F), IA(DFS.getInstrumentedABI()), IsNativeABI(IsNativeABI) {
441 DT.recalculate(*F);
442 // FIXME: Need to track down the register allocator issue which causes poor
443 // performance in pathological cases with large numbers of basic blocks.
444 AvoidNewBlocks = F->size() > 1000;
445 }
446
447 Value *getArgTLS(unsigned Index, Instruction *Pos);
448 Value *getShadow(Value *V);
449 void setShadow(Instruction *I, Value *Shadow);
450 Value *combineShadows(Value *V1, Value *V2, Instruction *Pos);
451 Value *combineOperandShadows(Instruction *Inst);
452 Value *loadShadow(Value *ShadowAddr, uint64_t Size, uint64_t Align,
453 Instruction *Pos);
454 void storeShadow(Value *Addr, uint64_t Size, Align Alignment, Value *Shadow,
455 Instruction *Pos);
456};
457
458class DFSanVisitor : public InstVisitor<DFSanVisitor> {
459public:
460 DFSanFunction &DFSF;
461
462 DFSanVisitor(DFSanFunction &DFSF) : DFSF(DFSF) {}
463
464 const DataLayout &getDataLayout() const {
465 return DFSF.F->getParent()->getDataLayout();
466 }
467
468 // Combines shadow values for all of I's operands. Returns the combined shadow
469 // value.
470 Value *visitOperandShadowInst(Instruction &I);
471
472 void visitUnaryOperator(UnaryOperator &UO);
473 void visitBinaryOperator(BinaryOperator &BO);
474 void visitCastInst(CastInst &CI);
475 void visitCmpInst(CmpInst &CI);
476 void visitGetElementPtrInst(GetElementPtrInst &GEPI);
477 void visitLoadInst(LoadInst &LI);
478 void visitStoreInst(StoreInst &SI);
479 void visitReturnInst(ReturnInst &RI);
480 void visitCallBase(CallBase &CB);
481 void visitPHINode(PHINode &PN);
482 void visitExtractElementInst(ExtractElementInst &I);
483 void visitInsertElementInst(InsertElementInst &I);
484 void visitShuffleVectorInst(ShuffleVectorInst &I);
485 void visitExtractValueInst(ExtractValueInst &I);
486 void visitInsertValueInst(InsertValueInst &I);
487 void visitAllocaInst(AllocaInst &I);
488 void visitSelectInst(SelectInst &I);
489 void visitMemSetInst(MemSetInst &I);
490 void visitMemTransferInst(MemTransferInst &I);
491};
492
493} // end anonymous namespace
494
495DataFlowSanitizer::DataFlowSanitizer(
496 const std::vector<std::string> &ABIListFiles) {
497 std::vector<std::string> AllABIListFiles(std::move(ABIListFiles));
498 AllABIListFiles.insert(AllABIListFiles.end(), ClABIListFiles.begin(),
499 ClABIListFiles.end());
500 // FIXME: should we propagate vfs::FileSystem to this constructor?
501 ABIList.set(
502 SpecialCaseList::createOrDie(AllABIListFiles, *vfs::getRealFileSystem()));
503}
504
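// For illustration: with the 16-bit shadow type, getArgsFunctionType below
// rewrites a function type such as "i32 (i32, float)" into
// "{ i32, i16 } (i32, float, i16, i16)": one shadow argument is appended
// per parameter, and a non-void return type is paired with its shadow in a
// struct.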
505FunctionType *DataFlowSanitizer::getArgsFunctionType(FunctionType *T) {
506 SmallVector<Type *, 4> ArgTypes(T->param_begin(), T->param_end());
507 ArgTypes.append(T->getNumParams(), ShadowTy);
508 if (T->isVarArg())
509 ArgTypes.push_back(ShadowPtrTy);
510 Type *RetType = T->getReturnType();
511 if (!RetType->isVoidTy())
512 RetType = StructType::get(RetType, ShadowTy);
513 return FunctionType::get(RetType, ArgTypes, T->isVarArg());
514}
515
516FunctionType *DataFlowSanitizer::getTrampolineFunctionType(FunctionType *T) {
517  assert(!T->isVarArg());
518 SmallVector<Type *, 4> ArgTypes;
519 ArgTypes.push_back(T->getPointerTo());
520 ArgTypes.append(T->param_begin(), T->param_end());
521 ArgTypes.append(T->getNumParams(), ShadowTy);
522 Type *RetType = T->getReturnType();
523 if (!RetType->isVoidTy())
524 ArgTypes.push_back(ShadowPtrTy);
525 return FunctionType::get(T->getReturnType(), ArgTypes, false);
526}
527
528TransformedFunction DataFlowSanitizer::getCustomFunctionType(FunctionType *T) {
529 SmallVector<Type *, 4> ArgTypes;
530
531 // Some parameters of the custom function being constructed are
532 // parameters of T. Record the mapping from parameters of T to
533 // parameters of the custom function, so that parameter attributes
534 // at call sites can be updated.
535 std::vector<unsigned> ArgumentIndexMapping;
536 for (unsigned i = 0, ie = T->getNumParams(); i != ie; ++i) {
537 Type* param_type = T->getParamType(i);
538 FunctionType *FT;
539 if (isa<PointerType>(param_type) && (FT = dyn_cast<FunctionType>(
540 cast<PointerType>(param_type)->getElementType()))) {
541 ArgumentIndexMapping.push_back(ArgTypes.size());
542 ArgTypes.push_back(getTrampolineFunctionType(FT)->getPointerTo());
543 ArgTypes.push_back(Type::getInt8PtrTy(*Ctx));
544 } else {
545 ArgumentIndexMapping.push_back(ArgTypes.size());
546 ArgTypes.push_back(param_type);
547 }
548 }
549 for (unsigned i = 0, e = T->getNumParams(); i != e; ++i)
550 ArgTypes.push_back(ShadowTy);
551 if (T->isVarArg())
552 ArgTypes.push_back(ShadowPtrTy);
553 Type *RetType = T->getReturnType();
554 if (!RetType->isVoidTy())
555 ArgTypes.push_back(ShadowPtrTy);
556 return TransformedFunction(
557 T, FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg()),
558 ArgumentIndexMapping);
559}
560
561bool DataFlowSanitizer::init(Module &M) {
562 Triple TargetTriple(M.getTargetTriple());
563 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
564 bool IsMIPS64 = TargetTriple.isMIPS64();
565 bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
566 TargetTriple.getArch() == Triple::aarch64_be;
567
568 const DataLayout &DL = M.getDataLayout();
569
570 Mod = &M;
571 Ctx = &M.getContext();
572 Int8Ptr = Type::getInt8PtrTy(*Ctx);
573 ShadowTy = IntegerType::get(*Ctx, ShadowWidthBits);
574 ShadowPtrTy = PointerType::getUnqual(ShadowTy);
575 IntptrTy = DL.getIntPtrType(*Ctx);
576 ZeroShadow = ConstantInt::getSigned(ShadowTy, 0);
577 ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidthBytes);
578 if (IsX86_64)
579 ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x700000000000LL);
580 else if (IsMIPS64)
581 ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0xF000000000LL);
582 // AArch64 supports multiple VMAs and the shadow mask is set at runtime.
583 else if (IsAArch64)
584 DFSanRuntimeShadowMask = true;
585 else
586 report_fatal_error("unsupported triple");
587
588 Type *DFSanUnionArgs[2] = { ShadowTy, ShadowTy };
589 DFSanUnionFnTy =
590 FunctionType::get(ShadowTy, DFSanUnionArgs, /*isVarArg=*/ false);
591 Type *DFSanUnionLoadArgs[2] = { ShadowPtrTy, IntptrTy };
592 DFSanUnionLoadFnTy =
593 FunctionType::get(ShadowTy, DFSanUnionLoadArgs, /*isVarArg=*/ false);
594 DFSanUnimplementedFnTy = FunctionType::get(
595 Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
596 Type *DFSanSetLabelArgs[3] = { ShadowTy, Type::getInt8PtrTy(*Ctx), IntptrTy };
597 DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx),
598 DFSanSetLabelArgs, /*isVarArg=*/false);
599 DFSanNonzeroLabelFnTy = FunctionType::get(
600 Type::getVoidTy(*Ctx), None, /*isVarArg=*/false);
601 DFSanVarargWrapperFnTy = FunctionType::get(
602 Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
603 DFSanCmpCallbackFnTy = FunctionType::get(Type::getVoidTy(*Ctx), ShadowTy,
604 /*isVarArg=*/false);
605 Type *DFSanLoadStoreCallbackArgs[2] = {ShadowTy, Int8Ptr};
606 DFSanLoadStoreCallbackFnTy =
607 FunctionType::get(Type::getVoidTy(*Ctx), DFSanLoadStoreCallbackArgs,
608 /*isVarArg=*/false);
609 Type *DFSanMemTransferCallbackArgs[2] = {ShadowPtrTy, IntptrTy};
610 DFSanMemTransferCallbackFnTy =
611 FunctionType::get(Type::getVoidTy(*Ctx), DFSanMemTransferCallbackArgs,
612 /*isVarArg=*/false);
613
614 ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
615 return true;
616}
617
618bool DataFlowSanitizer::isInstrumented(const Function *F) {
619 return !ABIList.isIn(*F, "uninstrumented");
620}
621
622bool DataFlowSanitizer::isInstrumented(const GlobalAlias *GA) {
623 return !ABIList.isIn(*GA, "uninstrumented");
624}
625
626DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() {
627 return ClArgsABI ? IA_Args : IA_TLS;
628}
629
630DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) {
631 if (ABIList.isIn(*F, "functional"))
632 return WK_Functional;
633 if (ABIList.isIn(*F, "discard"))
634 return WK_Discard;
635 if (ABIList.isIn(*F, "custom"))
636 return WK_Custom;
637
638 return WK_Warning;
639}
640
641void DataFlowSanitizer::addGlobalNamePrefix(GlobalValue *GV) {
642 std::string GVName = std::string(GV->getName()), Prefix = "dfs$";
643 GV->setName(Prefix + GVName);
644
645 // Try to change the name of the function in module inline asm. We only do
646 // this for specific asm directives, currently only ".symver", to try to avoid
647 // corrupting asm which happens to contain the symbol name as a substring.
648 // Note that the substitution for .symver assumes that the versioned symbol
649 // also has an instrumented name.
650 std::string Asm = GV->getParent()->getModuleInlineAsm();
651 std::string SearchStr = ".symver " + GVName + ",";
652 size_t Pos = Asm.find(SearchStr);
653 if (Pos != std::string::npos) {
654 Asm.replace(Pos, SearchStr.size(),
655 ".symver " + Prefix + GVName + "," + Prefix);
656 GV->getParent()->setModuleInlineAsm(Asm);
657 }
658}
659
660Function *
661DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName,
662 GlobalValue::LinkageTypes NewFLink,
663 FunctionType *NewFT) {
664 FunctionType *FT = F->getFunctionType();
665 Function *NewF = Function::Create(NewFT, NewFLink, F->getAddressSpace(),
666 NewFName, F->getParent());
667 NewF->copyAttributesFrom(F);
668 NewF->removeAttributes(
669 AttributeList::ReturnIndex,
670 AttributeFuncs::typeIncompatible(NewFT->getReturnType()));
671
672 BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", NewF);
673 if (F->isVarArg()) {
674 NewF->removeAttributes(AttributeList::FunctionIndex,
675 AttrBuilder().addAttribute("split-stack"));
676 CallInst::Create(DFSanVarargWrapperFn,
677 IRBuilder<>(BB).CreateGlobalStringPtr(F->getName()), "",
678 BB);
679 new UnreachableInst(*Ctx, BB);
680 } else {
681 std::vector<Value *> Args;
682 unsigned n = FT->getNumParams();
683 for (Function::arg_iterator ai = NewF->arg_begin(); n != 0; ++ai, --n)
684 Args.push_back(&*ai);
685 CallInst *CI = CallInst::Create(F, Args, "", BB);
686 if (FT->getReturnType()->isVoidTy())
687 ReturnInst::Create(*Ctx, BB);
688 else
689 ReturnInst::Create(*Ctx, CI, BB);
690 }
691
692 return NewF;
693}
694
695Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
696 StringRef FName) {
697 FunctionType *FTT = getTrampolineFunctionType(FT);
698 FunctionCallee C = Mod->getOrInsertFunction(FName, FTT);
699 Function *F = dyn_cast<Function>(C.getCallee());
700 if (F && F->isDeclaration()) {
701 F->setLinkage(GlobalValue::LinkOnceODRLinkage);
702 BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F);
703 std::vector<Value *> Args;
704 Function::arg_iterator AI = F->arg_begin(); ++AI;
705 for (unsigned N = FT->getNumParams(); N != 0; ++AI, --N)
706 Args.push_back(&*AI);
707 CallInst *CI = CallInst::Create(FT, &*F->arg_begin(), Args, "", BB);
708 ReturnInst *RI;
709 if (FT->getReturnType()->isVoidTy())
710 RI = ReturnInst::Create(*Ctx, BB);
711 else
712 RI = ReturnInst::Create(*Ctx, CI, BB);
713
714 DFSanFunction DFSF(*this, F, /*IsNativeABI=*/true);
715 Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI; ++ValAI;
716 for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++ShadowAI, --N)
717 DFSF.ValShadowMap[&*ValAI] = &*ShadowAI;
718 DFSanVisitor(DFSF).visitCallInst(*CI);
719 if (!FT->getReturnType()->isVoidTy())
720 new StoreInst(DFSF.getShadow(RI->getReturnValue()),
721 &*std::prev(F->arg_end()), RI);
722 }
723
724 return cast<Constant>(C.getCallee());
725}
726
727// Initialize DataFlowSanitizer runtime functions and declare them in the module
728void DataFlowSanitizer::initializeRuntimeFunctions(Module &M) {
729 {
730 AttributeList AL;
731 AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
732 Attribute::NoUnwind);
733 AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
734 Attribute::ReadNone);
735 AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
736 Attribute::ZExt);
737 AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
738 AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
739 DFSanUnionFn =
740 Mod->getOrInsertFunction("__dfsan_union", DFSanUnionFnTy, AL);
741 }
742 {
743 AttributeList AL;
744 AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
745 Attribute::NoUnwind);
746 AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
747 Attribute::ReadNone);
748 AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
749 Attribute::ZExt);
750 AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
751 AL = AL.addParamAttribute(M.getContext(), 1, Attribute::ZExt);
752 DFSanCheckedUnionFn =
753 Mod->getOrInsertFunction("dfsan_union", DFSanUnionFnTy, AL);
754 }
755 {
756 AttributeList AL;
757 AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
758 Attribute::NoUnwind);
759 AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
760 Attribute::ReadOnly);
761 AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
762 Attribute::ZExt);
763 DFSanUnionLoadFn =
764 Mod->getOrInsertFunction("__dfsan_union_load", DFSanUnionLoadFnTy, AL);
765 }
766 {
767 AttributeList AL;
768 AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
769 Attribute::NoUnwind);
770 AL = AL.addAttribute(M.getContext(), AttributeList::FunctionIndex,
771 Attribute::ReadOnly);
772 AL = AL.addAttribute(M.getContext(), AttributeList::ReturnIndex,
773 Attribute::ZExt);
774 DFSanUnionLoadFast16LabelsFn = Mod->getOrInsertFunction(
775 "__dfsan_union_load_fast16labels", DFSanUnionLoadFnTy, AL);
776 }
777 DFSanUnimplementedFn =
778 Mod->getOrInsertFunction("__dfsan_unimplemented", DFSanUnimplementedFnTy);
779 {
780 AttributeList AL;
781 AL = AL.addParamAttribute(M.getContext(), 0, Attribute::ZExt);
782 DFSanSetLabelFn =
783 Mod->getOrInsertFunction("__dfsan_set_label", DFSanSetLabelFnTy, AL);
784 }
785 DFSanNonzeroLabelFn =
786 Mod->getOrInsertFunction("__dfsan_nonzero_label", DFSanNonzeroLabelFnTy);
787 DFSanVarargWrapperFn = Mod->getOrInsertFunction("__dfsan_vararg_wrapper",
788 DFSanVarargWrapperFnTy);
789}
790
791// Initializes event callback functions and declares them in the module
792void DataFlowSanitizer::initializeCallbackFunctions(Module &M) {
793 DFSanLoadCallbackFn = Mod->getOrInsertFunction("__dfsan_load_callback",
794 DFSanLoadStoreCallbackFnTy);
795 DFSanStoreCallbackFn = Mod->getOrInsertFunction("__dfsan_store_callback",
796 DFSanLoadStoreCallbackFnTy);
797 DFSanMemTransferCallbackFn = Mod->getOrInsertFunction(
798 "__dfsan_mem_transfer_callback", DFSanMemTransferCallbackFnTy);
799 DFSanCmpCallbackFn =
800 Mod->getOrInsertFunction("__dfsan_cmp_callback", DFSanCmpCallbackFnTy);
801}
802
803bool DataFlowSanitizer::runImpl(Module &M) {
804 init(M);
805
806 if (ABIList.isIn(M, "skip"))
1. Assuming the condition is false
2. Taking false branch
807 return false;
808
809 const unsigned InitialGlobalSize = M.global_size();
810 const unsigned InitialModuleSize = M.size();
811
812 bool Changed = false;
813
814 Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
815 ArgTLS = Mod->getOrInsertGlobal("__dfsan_arg_tls", ArgTLSTy);
816  if (GlobalVariable *G = dyn_cast<GlobalVariable>(ArgTLS)) {
3. Assuming field 'ArgTLS' is not a 'GlobalVariable'
3.1. 'G' is null
4. Taking false branch
817 Changed |= G->getThreadLocalMode() != GlobalVariable::InitialExecTLSModel;
818 G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
819 }
820 RetvalTLS = Mod->getOrInsertGlobal("__dfsan_retval_tls", ShadowTy);
821  if (GlobalVariable *G = dyn_cast<GlobalVariable>(RetvalTLS)) {
5. Assuming field 'RetvalTLS' is not a 'GlobalVariable'
5.1. 'G' is null
6. Taking false branch
822 Changed |= G->getThreadLocalMode() != GlobalVariable::InitialExecTLSModel;
823 G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
824 }
825
826 ExternalShadowMask =
827 Mod->getOrInsertGlobal(kDFSanExternShadowPtrMask, IntptrTy);
828
829 initializeCallbackFunctions(M);
830 initializeRuntimeFunctions(M);
831
832 std::vector<Function *> FnsToInstrument;
833 SmallPtrSet<Function *, 2> FnsWithNativeABI;
834 for (Function &i : M) {
835 if (!i.isIntrinsic() &&
836 &i != DFSanUnionFn.getCallee()->stripPointerCasts() &&
837 &i != DFSanCheckedUnionFn.getCallee()->stripPointerCasts() &&
838 &i != DFSanUnionLoadFn.getCallee()->stripPointerCasts() &&
839 &i != DFSanUnionLoadFast16LabelsFn.getCallee()->stripPointerCasts() &&
840 &i != DFSanUnimplementedFn.getCallee()->stripPointerCasts() &&
841 &i != DFSanSetLabelFn.getCallee()->stripPointerCasts() &&
842 &i != DFSanNonzeroLabelFn.getCallee()->stripPointerCasts() &&
843 &i != DFSanVarargWrapperFn.getCallee()->stripPointerCasts() &&
844 &i != DFSanLoadCallbackFn.getCallee()->stripPointerCasts() &&
845 &i != DFSanStoreCallbackFn.getCallee()->stripPointerCasts() &&
846 &i != DFSanMemTransferCallbackFn.getCallee()->stripPointerCasts() &&
847 &i != DFSanCmpCallbackFn.getCallee()->stripPointerCasts())
848 FnsToInstrument.push_back(&i);
849 }
850
851 // Give function aliases prefixes when necessary, and build wrappers where the
852 // instrumentedness is inconsistent.
853 for (Module::alias_iterator i = M.alias_begin(), e = M.alias_end(); i != e;) {
7. Loop condition is false. Execution continues on line 876
854 GlobalAlias *GA = &*i;
855 ++i;
856 // Don't stop on weak. We assume people aren't playing games with the
857 // instrumentedness of overridden weak aliases.
858 if (auto F = dyn_cast<Function>(GA->getBaseObject())) {
859 bool GAInst = isInstrumented(GA), FInst = isInstrumented(F);
860 if (GAInst && FInst) {
861 addGlobalNamePrefix(GA);
862 } else if (GAInst != FInst) {
863 // Non-instrumented alias of an instrumented function, or vice versa.
864 // Replace the alias with a native-ABI wrapper of the aliasee. The pass
865 // below will take care of instrumenting it.
866 Function *NewF =
867 buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType());
868 GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType()));
869 NewF->takeName(GA);
870 GA->eraseFromParent();
871 FnsToInstrument.push_back(NewF);
872 }
873 }
874 }
875
876 ReadOnlyNoneAttrs.addAttribute(Attribute::ReadOnly)
877 .addAttribute(Attribute::ReadNone);
878
879 // First, change the ABI of every function in the module. ABI-listed
880 // functions keep their original ABI and get a wrapper function.
881 for (std::vector<Function *>::iterator i = FnsToInstrument.begin(),
8. Loop condition is false. Execution continues on line 980
882 e = FnsToInstrument.end();
883 i != e; ++i) {
884 Function &F = **i;
885 FunctionType *FT = F.getFunctionType();
886
887 bool IsZeroArgsVoidRet = (FT->getNumParams() == 0 && !FT->isVarArg() &&
888 FT->getReturnType()->isVoidTy());
889
890 if (isInstrumented(&F)) {
891 // Instrumented functions get a 'dfs$' prefix. This allows us to more
892 // easily identify cases of mismatching ABIs.
893 if (getInstrumentedABI() == IA_Args && !IsZeroArgsVoidRet) {
894 FunctionType *NewFT = getArgsFunctionType(FT);
895 Function *NewF = Function::Create(NewFT, F.getLinkage(),
896 F.getAddressSpace(), "", &M);
897 NewF->copyAttributesFrom(&F);
898 NewF->removeAttributes(
899 AttributeList::ReturnIndex,
900 AttributeFuncs::typeIncompatible(NewFT->getReturnType()));
901 for (Function::arg_iterator FArg = F.arg_begin(),
902 NewFArg = NewF->arg_begin(),
903 FArgEnd = F.arg_end();
904 FArg != FArgEnd; ++FArg, ++NewFArg) {
905 FArg->replaceAllUsesWith(&*NewFArg);
906 }
907 NewF->getBasicBlockList().splice(NewF->begin(), F.getBasicBlockList());
908
909 for (Function::user_iterator UI = F.user_begin(), UE = F.user_end();
910 UI != UE;) {
911 BlockAddress *BA = dyn_cast<BlockAddress>(*UI);
912 ++UI;
913 if (BA) {
914 BA->replaceAllUsesWith(
915 BlockAddress::get(NewF, BA->getBasicBlock()));
916 delete BA;
917 }
918 }
919 F.replaceAllUsesWith(
920 ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT)));
921 NewF->takeName(&F);
922 F.eraseFromParent();
923 *i = NewF;
924 addGlobalNamePrefix(NewF);
925 } else {
926 addGlobalNamePrefix(&F);
927 }
928 } else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) {
929 // Build a wrapper function for F. The wrapper simply calls F, and is
930 // added to FnsToInstrument so that any instrumentation according to its
931 // WrapperKind is done in the second pass below.
932 FunctionType *NewFT = getInstrumentedABI() == IA_Args
933 ? getArgsFunctionType(FT)
934 : FT;
935
936 // If the function being wrapped has local linkage, then preserve the
937 // function's linkage in the wrapper function.
938 GlobalValue::LinkageTypes wrapperLinkage =
939 F.hasLocalLinkage()
940 ? F.getLinkage()
941 : GlobalValue::LinkOnceODRLinkage;
942
943 Function *NewF = buildWrapperFunction(
944 &F, std::string("dfsw$") + std::string(F.getName()),
945 wrapperLinkage, NewFT);
946 if (getInstrumentedABI() == IA_TLS)
947 NewF->removeAttributes(AttributeList::FunctionIndex, ReadOnlyNoneAttrs);
948
949 Value *WrappedFnCst =
950 ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT));
951 F.replaceAllUsesWith(WrappedFnCst);
952
953 UnwrappedFnMap[WrappedFnCst] = &F;
954 *i = NewF;
955
956 if (!F.isDeclaration()) {
957 // This function is probably defining an interposition of an
958 // uninstrumented function and hence needs to keep the original ABI.
959 // But any functions it may call need to use the instrumented ABI, so
960 // we instrument it in a mode which preserves the original ABI.
961 FnsWithNativeABI.insert(&F);
962
963 // This code needs to rebuild the iterators, as they may be invalidated
964 // by the push_back, taking care that the new range does not include
965 // any functions added by this code.
966 size_t N = i - FnsToInstrument.begin(),
967 Count = e - FnsToInstrument.begin();
968 FnsToInstrument.push_back(&F);
969 i = FnsToInstrument.begin() + N;
970 e = FnsToInstrument.begin() + Count;
971 }
972 // Hopefully, nobody will try to indirectly call a vararg
973 // function... yet.
974 } else if (FT->isVarArg()) {
975 UnwrappedFnMap[&F] = &F;
976 *i = nullptr;
977 }
978 }
979
980 for (Function *i : FnsToInstrument) {
981 if (!i || i->isDeclaration())
9. Assuming 'i' is non-null
10. Assuming the condition is false
11. Taking false branch
982 continue;
983
984 removeUnreachableBlocks(*i);
985
986 DFSanFunction DFSF(*this, i, FnsWithNativeABI.count(i));
987
988 // DFSanVisitor may create new basic blocks, which confuses df_iterator.
989 // Build a copy of the list before iterating over it.
990 SmallVector<BasicBlock *, 4> BBList(depth_first(&i->getEntryBlock()));
991
992 for (BasicBlock *i : BBList) {
12. Assuming '__begin2' is equal to '__end2'
993 Instruction *Inst = &i->front();
994 while (true) {
995 // DFSanVisitor may split the current basic block, changing the current
996 // instruction's next pointer and moving the next instruction to the
997 // tail block from which we should continue.
998 Instruction *Next = Inst->getNextNode();
999 // DFSanVisitor may delete Inst, so keep track of whether it was a
1000 // terminator.
1001 bool IsTerminator = Inst->isTerminator();
1002 if (!DFSF.SkipInsts.count(Inst))
1003 DFSanVisitor(DFSF).visit(Inst);
1004 if (IsTerminator)
1005 break;
1006 Inst = Next;
1007 }
1008 }
1009
1010 // We will not necessarily be able to compute the shadow for every phi node
1011 // until we have visited every block. Therefore, the code that handles phi
1012 // nodes adds them to the PHIFixups list so that they can be properly
1013 // handled here.
1014 for (std::vector<std::pair<PHINode *, PHINode *>>::iterator
13. Loop condition is true. Entering loop body
1015 i = DFSF.PHIFixups.begin(),
1016 e = DFSF.PHIFixups.end();
1017 i != e; ++i) {
1018 for (unsigned val = 0, n = i->first->getNumIncomingValues(); val != n;
14. Assuming 'val' is not equal to 'n'
15. Loop condition is true. Entering loop body
1019 ++val) {
1020 i->second->setIncomingValue(
1021 val, DFSF.getShadow(i->first->getIncomingValue(val)));
16. Calling 'DFSanFunction::getShadow'
1022 }
1023 }
1024
1025 // -dfsan-debug-nonzero-labels will split the CFG in all kinds of crazy
1026 // places (i.e. instructions in basic blocks we haven't even begun visiting
1027 // yet). To make our life easier, do this work in a pass after the main
1028 // instrumentation.
1029 if (ClDebugNonzeroLabels) {
1030 for (Value *V : DFSF.NonZeroChecks) {
1031 Instruction *Pos;
1032 if (Instruction *I = dyn_cast<Instruction>(V))
1033 Pos = I->getNextNode();
1034 else
1035 Pos = &DFSF.F->getEntryBlock().front();
1036 while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos))
1037 Pos = Pos->getNextNode();
1038 IRBuilder<> IRB(Pos);
1039 Value *Ne = IRB.CreateICmpNE(V, DFSF.DFS.ZeroShadow);
1040 BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
1041 Ne, Pos, /*Unreachable=*/false, ColdCallWeights));
1042 IRBuilder<> ThenIRB(BI);
1043 ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn, {});
1044 }
1045 }
1046 }
1047
1048 return Changed || !FnsToInstrument.empty() ||
1049 M.global_size() != InitialGlobalSize || M.size() != InitialModuleSize;
1050}
1051
1052Value *DFSanFunction::getArgTLS(unsigned Idx, Instruction *Pos) {
1053 IRBuilder<> IRB(Pos);
1054 return IRB.CreateConstGEP2_64(ArrayType::get(DFS.ShadowTy, 64), DFS.ArgTLS, 0,
30. Passing null pointer value via 2nd parameter 'Ptr'
31. Calling 'IRBuilderBase::CreateConstGEP2_64'
1055 Idx);
1056}
1057
1058Value *DFSanFunction::getShadow(Value *V) {
1059 if (!isa<Argument>(V) && !isa<Instruction>(V))
17. Assuming 'V' is not a 'Argument'
18. Assuming 'V' is a 'Instruction'
19. Taking false branch
1060 return DFS.ZeroShadow;
1061 Value *&Shadow = ValShadowMap[V];
1062 if (!Shadow) {
20. Assuming 'Shadow' is null
21. Taking true branch
1063    if (Argument *A = dyn_cast<Argument>(V)) {
22. Assuming 'V' is a 'Argument'
22.1. 'A' is non-null
23. Taking true branch
1064 if (IsNativeABI)
24. Assuming field 'IsNativeABI' is false
25. Taking false branch
1065 return DFS.ZeroShadow;
1066 switch (IA) {
26. Control jumps to 'case IA_TLS:' at line 1067
1067 case DataFlowSanitizer::IA_TLS: {
1068 Value *ArgTLSPtr = DFS.ArgTLS;
1069 Instruction *ArgTLSPos =
1070 DFS.ArgTLS ? &*F->getEntryBlock().begin()
27. Assuming field 'ArgTLS' is null
28. '?' condition is false
1071 : cast<Instruction>(ArgTLSPtr)->getNextNode();
1072 IRBuilder<> IRB(ArgTLSPos);
1073 Shadow =
1074 IRB.CreateLoad(DFS.ShadowTy, getArgTLS(A->getArgNo(), ArgTLSPos));
29. Calling 'DFSanFunction::getArgTLS'
1075 break;
1076 }
1077 case DataFlowSanitizer::IA_Args: {
1078 unsigned ArgIdx = A->getArgNo() + F->arg_size() / 2;
1079 Function::arg_iterator i = F->arg_begin();
1080 while (ArgIdx--)
1081 ++i;
1082 Shadow = &*i;
1083      assert(Shadow->getType() == DFS.ShadowTy);
1084 break;
1085 }
1086 }
1087 NonZeroChecks.push_back(Shadow);
1088 } else {
1089 Shadow = DFS.ZeroShadow;
1090 }
1091 }
1092 return Shadow;
1093}
1094
1095void DFSanFunction::setShadow(Instruction *I, Value *Shadow) {
1096  assert(!ValShadowMap.count(I));
1097  assert(Shadow->getType() == DFS.ShadowTy);
1098 ValShadowMap[I] = Shadow;
1099}
1100
1101Value *DataFlowSanitizer::getShadowAddress(Value *Addr, Instruction *Pos) {
1102  assert(Addr != RetvalTLS && "Reinstrumenting?");
1103 IRBuilder<> IRB(Pos);
1104 Value *ShadowPtrMaskValue;
1105 if (DFSanRuntimeShadowMask)
1106 ShadowPtrMaskValue = IRB.CreateLoad(IntptrTy, ExternalShadowMask);
1107 else
1108 ShadowPtrMaskValue = ShadowPtrMask;
1109 return IRB.CreateIntToPtr(
1110 IRB.CreateMul(
1111 IRB.CreateAnd(IRB.CreatePtrToInt(Addr, IntptrTy),
1112 IRB.CreatePtrToInt(ShadowPtrMaskValue, IntptrTy)),
1113 ShadowPtrMul),
1114 ShadowPtrTy);
1115}
1116
1117// Generates IR to compute the union of the two given shadows, inserting it
1118// before Pos. Returns the computed union Value.
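// For example, if V1 already represents the union {V2, X} in ShadowElements,
// V1 is returned directly, and a previously computed union is reused when its
// defining block dominates Pos (see CachedCombinedShadows).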
1119Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
1120 if (V1 == DFS.ZeroShadow)
1121 return V2;
1122 if (V2 == DFS.ZeroShadow)
1123 return V1;
1124 if (V1 == V2)
1125 return V1;
1126
1127 auto V1Elems = ShadowElements.find(V1);
1128 auto V2Elems = ShadowElements.find(V2);
1129 if (V1Elems != ShadowElements.end() && V2Elems != ShadowElements.end()) {
1130 if (std::includes(V1Elems->second.begin(), V1Elems->second.end(),
1131 V2Elems->second.begin(), V2Elems->second.end())) {
1132 return V1;
1133 } else if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
1134 V1Elems->second.begin(), V1Elems->second.end())) {
1135 return V2;
1136 }
1137 } else if (V1Elems != ShadowElements.end()) {
1138 if (V1Elems->second.count(V2))
1139 return V1;
1140 } else if (V2Elems != ShadowElements.end()) {
1141 if (V2Elems->second.count(V1))
1142 return V2;
1143 }
1144
1145 auto Key = std::make_pair(V1, V2);
1146 if (V1 > V2)
1147 std::swap(Key.first, Key.second);
1148 CachedCombinedShadow &CCS = CachedCombinedShadows[Key];
1149 if (CCS.Block && DT.dominates(CCS.Block, Pos->getParent()))
1150 return CCS.Shadow;
1151
1152 IRBuilder<> IRB(Pos);
1153 if (ClFast16Labels) {
1154 CCS.Block = Pos->getParent();
1155 CCS.Shadow = IRB.CreateOr(V1, V2);
1156 } else if (AvoidNewBlocks) {
1157 CallInst *Call = IRB.CreateCall(DFS.DFSanCheckedUnionFn, {V1, V2});
1158 Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
1159 Call->addParamAttr(0, Attribute::ZExt);
1160 Call->addParamAttr(1, Attribute::ZExt);
1161
1162 CCS.Block = Pos->getParent();
1163 CCS.Shadow = Call;
1164 } else {
1165 BasicBlock *Head = Pos->getParent();
1166 Value *Ne = IRB.CreateICmpNE(V1, V2);
1167 BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
1168 Ne, Pos, /*Unreachable=*/false, DFS.ColdCallWeights, &DT));
1169 IRBuilder<> ThenIRB(BI);
1170 CallInst *Call = ThenIRB.CreateCall(DFS.DFSanUnionFn, {V1, V2});
1171 Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
1172 Call->addParamAttr(0, Attribute::ZExt);
1173 Call->addParamAttr(1, Attribute::ZExt);
1174
1175 BasicBlock *Tail = BI->getSuccessor(0);
1176 PHINode *Phi = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
1177 Phi->addIncoming(Call, Call->getParent());
1178 Phi->addIncoming(V1, Head);
1179
1180 CCS.Block = Tail;
1181 CCS.Shadow = Phi;
1182 }
1183
1184 std::set<Value *> UnionElems;
1185 if (V1Elems != ShadowElements.end()) {
1186 UnionElems = V1Elems->second;
1187 } else {
1188 UnionElems.insert(V1);
1189 }
1190 if (V2Elems != ShadowElements.end()) {
1191 UnionElems.insert(V2Elems->second.begin(), V2Elems->second.end());
1192 } else {
1193 UnionElems.insert(V2);
1194 }
1195 ShadowElements[CCS.Shadow] = std::move(UnionElems);
1196
1197 return CCS.Shadow;
1198}
1199
1200// A convenience function which folds the shadows of each of the operands
1201// of the provided instruction Inst, inserting the IR before Inst. Returns
1202// the computed union Value.
1203Value *DFSanFunction::combineOperandShadows(Instruction *Inst) {
1204 if (Inst->getNumOperands() == 0)
1205 return DFS.ZeroShadow;
1206
1207 Value *Shadow = getShadow(Inst->getOperand(0));
1208 for (unsigned i = 1, n = Inst->getNumOperands(); i != n; ++i) {
1209 Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(i)), Inst);
1210 }
1211 return Shadow;
1212}
1213
1214Value *DFSanVisitor::visitOperandShadowInst(Instruction &I) {
1215 Value *CombinedShadow = DFSF.combineOperandShadows(&I);
1216 DFSF.setShadow(&I, CombinedShadow);
1217 return CombinedShadow;
1218}
1219
1220// Generates IR to load shadow corresponding to bytes [Addr, Addr+Size), where
1221// Addr has alignment Align, and take the union of each of those shadows.
1222Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
1223 Instruction *Pos) {
1224 if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
1225 const auto i = AllocaShadowMap.find(AI);
1226 if (i != AllocaShadowMap.end()) {
1227 IRBuilder<> IRB(Pos);
1228 return IRB.CreateLoad(DFS.ShadowTy, i->second);
1229 }
1230 }
1231
1232 const llvm::Align ShadowAlign(Align * DFS.ShadowWidthBytes);
1233 SmallVector<const Value *, 2> Objs;
1234 getUnderlyingObjects(Addr, Objs);
1235 bool AllConstants = true;
1236 for (const Value *Obj : Objs) {
1237 if (isa<Function>(Obj) || isa<BlockAddress>(Obj))
1238 continue;
1239 if (isa<GlobalVariable>(Obj) && cast<GlobalVariable>(Obj)->isConstant())
1240 continue;
1241
1242 AllConstants = false;
1243 break;
1244 }
1245 if (AllConstants)
1246 return DFS.ZeroShadow;
1247
1248 Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
1249 switch (Size) {
1250 case 0:
1251 return DFS.ZeroShadow;
1252 case 1: {
1253 LoadInst *LI = new LoadInst(DFS.ShadowTy, ShadowAddr, "", Pos);
1254 LI->setAlignment(ShadowAlign);
1255 return LI;
1256 }
1257 case 2: {
1258 IRBuilder<> IRB(Pos);
1259 Value *ShadowAddr1 = IRB.CreateGEP(DFS.ShadowTy, ShadowAddr,
1260 ConstantInt::get(DFS.IntptrTy, 1));
1261 return combineShadows(
1262 IRB.CreateAlignedLoad(DFS.ShadowTy, ShadowAddr, ShadowAlign),
1263 IRB.CreateAlignedLoad(DFS.ShadowTy, ShadowAddr1, ShadowAlign), Pos);
1264 }
1265 }
1266
1267 if (ClFast16Labels && Size % (64 / DFS.ShadowWidthBits) == 0) {
1268 // First OR all the WideShadows, then OR individual shadows within the
1269 // combined WideShadow. This is fewer instructions than ORing shadows
1270 // individually.
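    // For example, with 16-bit shadows a 64-bit WideShadow holds four
    // labels; OR-ing it with itself shifted right by 32 and then by 16
    // leaves the union of all four labels in the low 16 bits, which the
    // final CreateTrunc extracts.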
1271 IRBuilder<> IRB(Pos);
1272 Value *WideAddr =
1273 IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
1274 Value *CombinedWideShadow =
1275 IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
1276 for (uint64_t Ofs = 64 / DFS.ShadowWidthBits; Ofs != Size;
1277 Ofs += 64 / DFS.ShadowWidthBits) {
1278 WideAddr = IRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
1279 ConstantInt::get(DFS.IntptrTy, 1));
1280 Value *NextWideShadow =
1281 IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
1282 CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, NextWideShadow);
1283 }
1284 for (unsigned Width = 32; Width >= DFS.ShadowWidthBits; Width >>= 1) {
1285 Value *ShrShadow = IRB.CreateLShr(CombinedWideShadow, Width);
1286 CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, ShrShadow);
1287 }
1288 return IRB.CreateTrunc(CombinedWideShadow, DFS.ShadowTy);
1289 }
1290 if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidthBits) == 0) {
1291 // Fast path for the common case where each byte has identical shadow: load
1292 // shadow 64 bits at a time, fall out to a __dfsan_union_load call if any
1293 // shadow is non-equal.
1294 BasicBlock *FallbackBB = BasicBlock::Create(*DFS.Ctx, "", F);
1295 IRBuilder<> FallbackIRB(FallbackBB);
1296 CallInst *FallbackCall = FallbackIRB.CreateCall(
1297 DFS.DFSanUnionLoadFn,
1298 {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
1299 FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
1300
1301 // Compare each of the shadows stored in the loaded 64 bits to each other,
1302 // by computing (WideShadow rotl ShadowWidthBits) == WideShadow.
1303 IRBuilder<> IRB(Pos);
1304 Value *WideAddr =
1305 IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
1306 Value *WideShadow =
1307 IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
1308 Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.ShadowTy);
1309 Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidthBits);
1310 Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidthBits);
1311 Value *RotShadow = IRB.CreateOr(ShlShadow, ShrShadow);
1312 Value *ShadowsEq = IRB.CreateICmpEQ(WideShadow, RotShadow);
1313
1314 BasicBlock *Head = Pos->getParent();
1315 BasicBlock *Tail = Head->splitBasicBlock(Pos->getIterator());
1316
1317 if (DomTreeNode *OldNode = DT.getNode(Head)) {
1318 std::vector<DomTreeNode *> Children(OldNode->begin(), OldNode->end());
1319
1320 DomTreeNode *NewNode = DT.addNewBlock(Tail, Head);
1321 for (auto Child : Children)
1322 DT.changeImmediateDominator(Child, NewNode);
1323 }
1324
1325 // In the following code LastBr will refer to the previous basic block's
1326 // conditional branch instruction, whose true successor is fixed up to point
1327 // to the next block during the loop below or to the tail after the final
1328 // iteration.
1329 BranchInst *LastBr = BranchInst::Create(FallbackBB, FallbackBB, ShadowsEq);
1330 ReplaceInstWithInst(Head->getTerminator(), LastBr);
1331 DT.addNewBlock(FallbackBB, Head);
1332
1333 for (uint64_t Ofs = 64 / DFS.ShadowWidthBits; Ofs != Size;
1334 Ofs += 64 / DFS.ShadowWidthBits) {
1335 BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
1336 DT.addNewBlock(NextBB, LastBr->getParent());
1337 IRBuilder<> NextIRB(NextBB);
1338 WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
1339 ConstantInt::get(DFS.IntptrTy, 1));
1340 Value *NextWideShadow = NextIRB.CreateAlignedLoad(NextIRB.getInt64Ty(),
1341 WideAddr, ShadowAlign);
1342 ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow);
1343 LastBr->setSuccessor(0, NextBB);
1344 LastBr = NextIRB.CreateCondBr(ShadowsEq, FallbackBB, FallbackBB);
1345 }
1346
1347 LastBr->setSuccessor(0, Tail);
1348 FallbackIRB.CreateBr(Tail);
1349 PHINode *Shadow = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
1350 Shadow->addIncoming(FallbackCall, FallbackBB);
1351 Shadow->addIncoming(TruncShadow, LastBr->getParent());
1352 return Shadow;
1353 }
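// [Editor's note] Standalone sketch of the rotate-compare trick used in the
// fast path above: a 64-bit word whose 16-bit lanes are all equal is
// invariant under rotation by one lane width, so rotl(W, 16) == W holds if
// and only if every lane carries the same shadow. Assumes 16-bit shadows;
// illustrative names only.
#include <cstdint>
static inline bool allShadowLanesEqual16(uint64_t W) {
  uint64_t Rot = (W << 16) | (W >> 48); // rotate left by one 16-bit lane
  return Rot == W;                      // equal iff all four lanes match
}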
1354
1355 IRBuilder<> IRB(Pos);
1356 FunctionCallee &UnionLoadFn =
1357 ClFast16Labels ? DFS.DFSanUnionLoadFast16LabelsFn : DFS.DFSanUnionLoadFn;
1358 CallInst *FallbackCall = IRB.CreateCall(
1359 UnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
1360 FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
1361 return FallbackCall;
1362}
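// [Editor's note] Hedged model (not the real runtime, which lives in
// compiler-rt) of the contract of the __dfsan_union_load fallback called
// above: return the union of N consecutive shadow values. Shown for the
// fast16 case, where union is bitwise OR; illustrative names only.
#include <cstddef>
#include <cstdint>
static uint16_t modelUnionLoadFast16(const uint16_t *Shadow, size_t N) {
  uint16_t Acc = 0;
  for (size_t I = 0; I != N; ++I)
    Acc |= Shadow[I];
  return Acc;
}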
1363
1364void DFSanVisitor::visitLoadInst(LoadInst &LI) {
1365 auto &DL = LI.getModule()->getDataLayout();
1366 uint64_t Size = DL.getTypeStoreSize(LI.getType());
1367 if (Size == 0) {
1368 DFSF.setShadow(&LI, DFSF.DFS.ZeroShadow);
1369 return;
1370 }
1371
1372 Align Alignment = ClPreserveAlignment ? LI.getAlign() : Align(1);
1373 Value *Shadow =
1374 DFSF.loadShadow(LI.getPointerOperand(), Size, Alignment.value(), &LI);
1375 if (ClCombinePointerLabelsOnLoad) {
1376 Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
1377 Shadow = DFSF.combineShadows(Shadow, PtrShadow, &LI);
1378 }
1379 if (Shadow != DFSF.DFS.ZeroShadow)
1380 DFSF.NonZeroChecks.push_back(Shadow);
1381
1382 DFSF.setShadow(&LI, Shadow);
1383 if (ClEventCallbacks) {
1384 IRBuilder<> IRB(&LI);
1385 Value *Addr8 = IRB.CreateBitCast(LI.getPointerOperand(), DFSF.DFS.Int8Ptr);
1386 IRB.CreateCall(DFSF.DFS.DFSanLoadCallbackFn, {Shadow, Addr8});
1387 }
1388}
1389
1390void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, Align Alignment,
1391 Value *Shadow, Instruction *Pos) {
1392 if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
1393 const auto i = AllocaShadowMap.find(AI);
1394 if (i != AllocaShadowMap.end()) {
1395 IRBuilder<> IRB(Pos);
1396 IRB.CreateStore(Shadow, i->second);
1397 return;
1398 }
1399 }
1400
1401 const Align ShadowAlign(Alignment.value() * DFS.ShadowWidthBytes);
1402 IRBuilder<> IRB(Pos);
1403 Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
1404 if (Shadow == DFS.ZeroShadow) {
1405 IntegerType *ShadowTy =
1406 IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits);
1407 Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
1408 Value *ExtShadowAddr =
1409 IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
1410 IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign);
1411 return;
1412 }
1413
1414 const unsigned ShadowVecSize = 128 / DFS.ShadowWidthBits;
1415 uint64_t Offset = 0;
1416 if (Size >= ShadowVecSize) {
1417 auto *ShadowVecTy = FixedVectorType::get(DFS.ShadowTy, ShadowVecSize);
1418 Value *ShadowVec = UndefValue::get(ShadowVecTy);
1419 for (unsigned i = 0; i != ShadowVecSize; ++i) {
1420 ShadowVec = IRB.CreateInsertElement(
1421 ShadowVec, Shadow, ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), i));
1422 }
1423 Value *ShadowVecAddr =
1424 IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowVecTy));
1425 do {
1426 Value *CurShadowVecAddr =
1427 IRB.CreateConstGEP1_32(ShadowVecTy, ShadowVecAddr, Offset);
1428 IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign);
1429 Size -= ShadowVecSize;
1430 ++Offset;
1431 } while (Size >= ShadowVecSize);
1432 Offset *= ShadowVecSize;
1433 }
1434 while (Size > 0) {
1435 Value *CurShadowAddr =
1436 IRB.CreateConstGEP1_32(DFS.ShadowTy, ShadowAddr, Offset);
1437 IRB.CreateAlignedStore(Shadow, CurShadowAddr, ShadowAlign);
1438 --Size;
1439 ++Offset;
1440 }
1441}
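// [Editor's note] Standalone model of the bookkeeping in storeShadow above:
// with 16-bit shadows a 128-bit vector holds 8 labels, so Size shadows are
// written as Size / 8 splat vector stores followed by Size % 8 scalar
// stores. Illustrative names only.
#include <cstdint>
struct ShadowStorePlan { uint64_t VectorStores, ScalarStores; };
static ShadowStorePlan planShadowStores(uint64_t Size, unsigned ShadowWidthBits) {
  const unsigned VecSize = 128 / ShadowWidthBits; // shadows per vector store
  return {Size / VecSize, Size % VecSize};
}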
1442
1443void DFSanVisitor::visitStoreInst(StoreInst &SI) {
1444 auto &DL = SI.getModule()->getDataLayout();
1445 uint64_t Size = DL.getTypeStoreSize(SI.getValueOperand()->getType());
1446 if (Size == 0)
1447 return;
1448
1449 const Align Alignment = ClPreserveAlignment ? SI.getAlign() : Align(1);
1450
1451 Value* Shadow = DFSF.getShadow(SI.getValueOperand());
1452 if (ClCombinePointerLabelsOnStore) {
1453 Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
1454 Shadow = DFSF.combineShadows(Shadow, PtrShadow, &SI);
1455 }
1456 DFSF.storeShadow(SI.getPointerOperand(), Size, Alignment, Shadow, &SI);
1457 if (ClEventCallbacks) {
1458 IRBuilder<> IRB(&SI);
1459 Value *Addr8 = IRB.CreateBitCast(SI.getPointerOperand(), DFSF.DFS.Int8Ptr);
1460 IRB.CreateCall(DFSF.DFS.DFSanStoreCallbackFn, {Shadow, Addr8});
1461 }
1462}
1463
1464void DFSanVisitor::visitUnaryOperator(UnaryOperator &UO) {
1465 visitOperandShadowInst(UO);
1466}
1467
1468void DFSanVisitor::visitBinaryOperator(BinaryOperator &BO) {
1469 visitOperandShadowInst(BO);
1470}
1471
1472void DFSanVisitor::visitCastInst(CastInst &CI) { visitOperandShadowInst(CI); }
1473
1474void DFSanVisitor::visitCmpInst(CmpInst &CI) {
1475 Value *CombinedShadow = visitOperandShadowInst(CI);
1476 if (ClEventCallbacks) {
1477 IRBuilder<> IRB(&CI);
1478 IRB.CreateCall(DFSF.DFS.DFSanCmpCallbackFn, CombinedShadow);
1479 }
1480}
1481
1482void DFSanVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
1483 visitOperandShadowInst(GEPI);
1484}
1485
1486void DFSanVisitor::visitExtractElementInst(ExtractElementInst &I) {
1487 visitOperandShadowInst(I);
1488}
1489
1490void DFSanVisitor::visitInsertElementInst(InsertElementInst &I) {
1491 visitOperandShadowInst(I);
1492}
1493
1494void DFSanVisitor::visitShuffleVectorInst(ShuffleVectorInst &I) {
1495 visitOperandShadowInst(I);
1496}
1497
1498void DFSanVisitor::visitExtractValueInst(ExtractValueInst &I) {
1499 visitOperandShadowInst(I);
1500}
1501
1502void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) {
1503 visitOperandShadowInst(I);
1504}
1505
1506void DFSanVisitor::visitAllocaInst(AllocaInst &I) {
1507 bool AllLoadsStores = true;
1508 for (User *U : I.users()) {
1509 if (isa<LoadInst>(U))
1510 continue;
1511
1512 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1513 if (SI->getPointerOperand() == &I)
1514 continue;
1515 }
1516
1517 AllLoadsStores = false;
1518 break;
1519 }
1520 if (AllLoadsStores) {
1521 IRBuilder<> IRB(&I);
1522 DFSF.AllocaShadowMap[&I] = IRB.CreateAlloca(DFSF.DFS.ShadowTy);
1523 }
1524 DFSF.setShadow(&I, DFSF.DFS.ZeroShadow);
1525}
1526
1527void DFSanVisitor::visitSelectInst(SelectInst &I) {
1528 Value *CondShadow = DFSF.getShadow(I.getCondition());
1529 Value *TrueShadow = DFSF.getShadow(I.getTrueValue());
1530 Value *FalseShadow = DFSF.getShadow(I.getFalseValue());
1531 Value *ShadowSel = nullptr;
1532
1533 if (isa<VectorType>(I.getCondition()->getType())) {
1534 ShadowSel = DFSF.combineShadows(TrueShadow, FalseShadow, &I);
1535 } else {
1536 if (TrueShadow == FalseShadow) {
1537 ShadowSel = TrueShadow;
1538 } else {
1539 ShadowSel =
1540 SelectInst::Create(I.getCondition(), TrueShadow, FalseShadow, "", &I);
1541 }
1542 }
1543 DFSF.setShadow(&I, ClTrackSelectControlFlow
1544 ? DFSF.combineShadows(CondShadow, ShadowSel, &I)
1545 : ShadowSel);
1546}
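// [Editor's note] Standalone model of the scalar-condition select logic
// above, with ClTrackSelectControlFlow folded in and the fast16 union
// modeled as OR. Illustrative names only.
#include <cstdint>
static uint16_t modelSelectShadow(bool Cond, uint16_t CondShadow,
                                  uint16_t TrueShadow, uint16_t FalseShadow,
                                  bool TrackSelectControlFlow) {
  uint16_t Sel = Cond ? TrueShadow : FalseShadow; // mirrors the shadow select
  return TrackSelectControlFlow ? uint16_t(Sel | CondShadow) : Sel;
}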
1547
1548void DFSanVisitor::visitMemSetInst(MemSetInst &I) {
1549 IRBuilder<> IRB(&I);
1550 Value *ValShadow = DFSF.getShadow(I.getValue());
1551 IRB.CreateCall(DFSF.DFS.DFSanSetLabelFn,
1552 {ValShadow, IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy(
1553 *DFSF.DFS.Ctx)),
1554 IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
1555}
1556
1557void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
1558 IRBuilder<> IRB(&I);
1559 Value *RawDestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I);
1560 Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I);
1561 Value *LenShadow =
1562 IRB.CreateMul(I.getLength(), ConstantInt::get(I.getLength()->getType(),
1563 DFSF.DFS.ShadowWidthBytes));
1564 Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
1565 Value *DestShadow = IRB.CreateBitCast(RawDestShadow, Int8Ptr);
1566 SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
1567 auto *MTI = cast<MemTransferInst>(
1568 IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
1569 {DestShadow, SrcShadow, LenShadow, I.getVolatileCst()}));
1570 if (ClPreserveAlignment) {
1571 MTI->setDestAlignment(I.getDestAlign() * DFSF.DFS.ShadowWidthBytes);
1572 MTI->setSourceAlignment(I.getSourceAlign() * DFSF.DFS.ShadowWidthBytes);
1573 } else {
1574 MTI->setDestAlignment(Align(DFSF.DFS.ShadowWidthBytes));
1575 MTI->setSourceAlignment(Align(DFSF.DFS.ShadowWidthBytes));
1576 }
1577 if (ClEventCallbacks) {
1578 IRB.CreateCall(DFSF.DFS.DFSanMemTransferCallbackFn,
1579 {RawDestShadow, I.getLength()});
1580 }
1581}
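// [Editor's note] Illustrative-only model of the length scaling above: every
// application byte carries ShadowWidthBytes bytes of shadow, so a transfer
// of AppLen application bytes must copy AppLen * ShadowWidthBytes shadow
// bytes (the LenShadow computation).
#include <cstdint>
static inline uint64_t shadowTransferLen(uint64_t AppLen,
                                         unsigned ShadowWidthBytes) {
  return AppLen * ShadowWidthBytes;
}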
1582
1583void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
1584 if (!DFSF.IsNativeABI && RI.getReturnValue()) {
1585 switch (DFSF.IA) {
1586 case DataFlowSanitizer::IA_TLS: {
1587 Value *S = DFSF.getShadow(RI.getReturnValue());
1588 IRBuilder<> IRB(&RI);
1589 IRB.CreateStore(S, DFSF.DFS.RetvalTLS);
1590 break;
1591 }
1592 case DataFlowSanitizer::IA_Args: {
1593 IRBuilder<> IRB(&RI);
1594 Type *RT = DFSF.F->getFunctionType()->getReturnType();
1595 Value *InsVal =
1596 IRB.CreateInsertValue(UndefValue::get(RT), RI.getReturnValue(), 0);
1597 Value *InsShadow =
1598 IRB.CreateInsertValue(InsVal, DFSF.getShadow(RI.getReturnValue()), 1);
1599 RI.setOperand(0, InsShadow);
1600 break;
1601 }
1602 }
1603 }
1604}
1605
1606void DFSanVisitor::visitCallBase(CallBase &CB) {
1607 Function *F = CB.getCalledFunction();
1608 if ((F && F->isIntrinsic()) || CB.isInlineAsm()) {
1609 visitOperandShadowInst(CB);
1610 return;
1611 }
1612
1613 // Calls to this function are synthesized in wrappers, and we shouldn't
1614 // instrument them.
1615 if (F == DFSF.DFS.DFSanVarargWrapperFn.getCallee()->stripPointerCasts())
1616 return;
1617
1618 IRBuilder<> IRB(&CB);
1619
1620 DenseMap<Value *, Function *>::iterator i =
1621 DFSF.DFS.UnwrappedFnMap.find(CB.getCalledOperand());
1622 if (i != DFSF.DFS.UnwrappedFnMap.end()) {
1623 Function *F = i->second;
1624 switch (DFSF.DFS.getWrapperKind(F)) {
1625 case DataFlowSanitizer::WK_Warning:
1626 CB.setCalledFunction(F);
1627 IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn,
1628 IRB.CreateGlobalStringPtr(F->getName()));
1629 DFSF.setShadow(&CB, DFSF.DFS.ZeroShadow);
1630 return;
1631 case DataFlowSanitizer::WK_Discard:
1632 CB.setCalledFunction(F);
1633 DFSF.setShadow(&CB, DFSF.DFS.ZeroShadow);
1634 return;
1635 case DataFlowSanitizer::WK_Functional:
1636 CB.setCalledFunction(F);
1637 visitOperandShadowInst(CB);
1638 return;
1639 case DataFlowSanitizer::WK_Custom:
1640 // Don't try to handle invokes of custom functions, it's too complicated.
1641 // Instead, invoke the dfsw$ wrapper, which will in turn call the __dfsw_
1642 // wrapper.
1643 if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
1644 FunctionType *FT = F->getFunctionType();
1645 TransformedFunction CustomFn = DFSF.DFS.getCustomFunctionType(FT);
1646 std::string CustomFName = "__dfsw_";
1647 CustomFName += F->getName();
1648 FunctionCallee CustomF = DFSF.DFS.Mod->getOrInsertFunction(
1649 CustomFName, CustomFn.TransformedType);
1650 if (Function *CustomFn = dyn_cast<Function>(CustomF.getCallee())) {
1651 CustomFn->copyAttributesFrom(F);
1652
1653 // Custom functions returning non-void will write to the return label.
1654 if (!FT->getReturnType()->isVoidTy()) {
1655 CustomFn->removeAttributes(AttributeList::FunctionIndex,
1656 DFSF.DFS.ReadOnlyNoneAttrs);
1657 }
1658 }
1659
1660 std::vector<Value *> Args;
1661
1662 auto i = CB.arg_begin();
1663 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n) {
1664 Type *T = (*i)->getType();
1665 FunctionType *ParamFT;
1666 if (isa<PointerType>(T) &&
1667 (ParamFT = dyn_cast<FunctionType>(
1668 cast<PointerType>(T)->getElementType()))) {
1669 std::string TName = "dfst";
1670 TName += utostr(FT->getNumParams() - n);
1671 TName += "$";
1672 TName += F->getName();
1673 Constant *T = DFSF.DFS.getOrBuildTrampolineFunction(ParamFT, TName);
1674 Args.push_back(T);
1675 Args.push_back(
1676 IRB.CreateBitCast(*i, Type::getInt8PtrTy(*DFSF.DFS.Ctx)));
1677 } else {
1678 Args.push_back(*i);
1679 }
1680 }
1681
1682 i = CB.arg_begin();
1683 const unsigned ShadowArgStart = Args.size();
1684 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
1685 Args.push_back(DFSF.getShadow(*i));
1686
1687 if (FT->isVarArg()) {
1688 auto *LabelVATy = ArrayType::get(DFSF.DFS.ShadowTy,
1689 CB.arg_size() - FT->getNumParams());
1690 auto *LabelVAAlloca = new AllocaInst(
1691 LabelVATy, getDataLayout().getAllocaAddrSpace(),
1692 "labelva", &DFSF.F->getEntryBlock().front());
1693
1694 for (unsigned n = 0; i != CB.arg_end(); ++i, ++n) {
1695 auto LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, n);
1696 IRB.CreateStore(DFSF.getShadow(*i), LabelVAPtr);
1697 }
1698
1699 Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0));
1700 }
1701
1702 if (!FT->getReturnType()->isVoidTy()) {
1703 if (!DFSF.LabelReturnAlloca) {
1704 DFSF.LabelReturnAlloca =
1705 new AllocaInst(DFSF.DFS.ShadowTy,
1706 getDataLayout().getAllocaAddrSpace(),
1707 "labelreturn", &DFSF.F->getEntryBlock().front());
1708 }
1709 Args.push_back(DFSF.LabelReturnAlloca);
1710 }
1711
1712 for (i = CB.arg_begin() + FT->getNumParams(); i != CB.arg_end(); ++i)
1713 Args.push_back(*i);
1714
1715 CallInst *CustomCI = IRB.CreateCall(CustomF, Args);
1716 CustomCI->setCallingConv(CI->getCallingConv());
1717 CustomCI->setAttributes(TransformFunctionAttributes(CustomFn,
1718 CI->getContext(), CI->getAttributes()));
1719
1720 // Update the parameter attributes of the custom call instruction to
1721 // zero extend the shadow parameters. This is required for targets
1722 // which consider ShadowTy an illegal type.
1723 for (unsigned n = 0; n < FT->getNumParams(); n++) {
1724 const unsigned ArgNo = ShadowArgStart + n;
1725 if (CustomCI->getArgOperand(ArgNo)->getType() == DFSF.DFS.ShadowTy)
1726 CustomCI->addParamAttr(ArgNo, Attribute::ZExt);
1727 }
1728
1729 if (!FT->getReturnType()->isVoidTy()) {
1730 LoadInst *LabelLoad =
1731 IRB.CreateLoad(DFSF.DFS.ShadowTy, DFSF.LabelReturnAlloca);
1732 DFSF.setShadow(CustomCI, LabelLoad);
1733 }
1734
1735 CI->replaceAllUsesWith(CustomCI);
1736 CI->eraseFromParent();
1737 return;
1738 }
1739 break;
1740 }
1741 }
1742
1743 FunctionType *FT = CB.getFunctionType();
1744 if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
1745 for (unsigned i = 0, n = FT->getNumParams(); i != n; ++i) {
1746 IRB.CreateStore(DFSF.getShadow(CB.getArgOperand(i)),
1747 DFSF.getArgTLS(i, &CB));
1748 }
1749 }
1750
1751 Instruction *Next = nullptr;
1752 if (!CB.getType()->isVoidTy()) {
1753 if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
1754 if (II->getNormalDest()->getSinglePredecessor()) {
1755 Next = &II->getNormalDest()->front();
1756 } else {
1757 BasicBlock *NewBB =
1758 SplitEdge(II->getParent(), II->getNormalDest(), &DFSF.DT);
1759 Next = &NewBB->front();
1760 }
1761 } else {
1762 assert(CB.getIterator() != CB.getParent()->end());
1763 Next = CB.getNextNode();
1764 }
1765
1766 if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
1767 IRBuilder<> NextIRB(Next);
1768 LoadInst *LI = NextIRB.CreateLoad(DFSF.DFS.ShadowTy, DFSF.DFS.RetvalTLS);
1769 DFSF.SkipInsts.insert(LI);
1770 DFSF.setShadow(&CB, LI);
1771 DFSF.NonZeroChecks.push_back(LI);
1772 }
1773 }
1774
1775 // Do all instrumentation for IA_Args down here to defer tampering with the
1776 // CFG in a way that SplitEdge may be able to detect.
1777 if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) {
1778 FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT);
1779 Value *Func =
1780 IRB.CreateBitCast(CB.getCalledOperand(), PointerType::getUnqual(NewFT));
1781 std::vector<Value *> Args;
1782
1783 auto i = CB.arg_begin(), E = CB.arg_end();
1784 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
1785 Args.push_back(*i);
1786
1787 i = CB.arg_begin();
1788 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
1789 Args.push_back(DFSF.getShadow(*i));
1790
1791 if (FT->isVarArg()) {
1792 unsigned VarArgSize = CB.arg_size() - FT->getNumParams();
1793 ArrayType *VarArgArrayTy = ArrayType::get(DFSF.DFS.ShadowTy, VarArgSize);
1794 AllocaInst *VarArgShadow =
1795 new AllocaInst(VarArgArrayTy, getDataLayout().getAllocaAddrSpace(),
1796 "", &DFSF.F->getEntryBlock().front());
1797 Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0));
1798 for (unsigned n = 0; i != E; ++i, ++n) {
1799 IRB.CreateStore(
1800 DFSF.getShadow(*i),
1801 IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, n));
1802 Args.push_back(*i);
1803 }
1804 }
1805
1806 CallBase *NewCB;
1807 if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
1808 NewCB = IRB.CreateInvoke(NewFT, Func, II->getNormalDest(),
1809 II->getUnwindDest(), Args);
1810 } else {
1811 NewCB = IRB.CreateCall(NewFT, Func, Args);
1812 }
1813 NewCB->setCallingConv(CB.getCallingConv());
1814 NewCB->setAttributes(CB.getAttributes().removeAttributes(
1815 *DFSF.DFS.Ctx, AttributeList::ReturnIndex,
1816 AttributeFuncs::typeIncompatible(NewCB->getType())));
1817
1818 if (Next) {
1819 ExtractValueInst *ExVal = ExtractValueInst::Create(NewCB, 0, "", Next);
1820 DFSF.SkipInsts.insert(ExVal);
1821 ExtractValueInst *ExShadow = ExtractValueInst::Create(NewCB, 1, "", Next);
1822 DFSF.SkipInsts.insert(ExShadow);
1823 DFSF.setShadow(ExVal, ExShadow);
1824 DFSF.NonZeroChecks.push_back(ExShadow);
1825
1826 CB.replaceAllUsesWith(ExVal);
1827 }
1828
1829 CB.eraseFromParent();
1830 }
1831}
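// [Editor's note] Hedged sketch of the IA_Args return convention handled
// above: an instrumented function returns an aggregate of the original value
// and its shadow, which callers unpack with extractvalue (ExVal/ExShadow).
// Plain C++ analogue with illustrative names:
#include <cstdint>
template <typename T> struct ValueWithShadow {
  T Value;         // corresponds to extractvalue ..., 0
  uint16_t Shadow; // corresponds to extractvalue ..., 1
};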
1832
1833void DFSanVisitor::visitPHINode(PHINode &PN) {
1834 PHINode *ShadowPN =
1835 PHINode::Create(DFSF.DFS.ShadowTy, PN.getNumIncomingValues(), "", &PN);
1836
1837 // Give the shadow phi node valid predecessors to fool SplitEdge into working.
1838 Value *UndefShadow = UndefValue::get(DFSF.DFS.ShadowTy);
1839 for (PHINode::block_iterator i = PN.block_begin(), e = PN.block_end(); i != e;
1840 ++i) {
1841 ShadowPN->addIncoming(UndefShadow, *i);
1842 }
1843
1844 DFSF.PHIFixups.push_back(std::make_pair(&PN, ShadowPN));
1845 DFSF.setShadow(&PN, ShadowPN);
1846}
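// [Editor's note] Standalone model of the two-phase PHI handling above:
// shadow phis are created with undef incomings, the (PN, ShadowPN) pairs are
// queued in PHIFixups, and a later pass over the function patches each
// incoming once every value's shadow is known. Illustrative types only.
#include <cstdint>
#include <utility>
#include <vector>
struct ModelPhi { std::vector<uint16_t> Incoming; };
static void modelPhiFixup(std::vector<std::pair<ModelPhi *, ModelPhi *>> &Fixups,
                          uint16_t (*ShadowOf)(const ModelPhi &, unsigned)) {
  for (auto &P : Fixups)
    for (unsigned I = 0, N = P.second->Incoming.size(); I != N; ++I)
      P.second->Incoming[I] = ShadowOf(*P.first, I); // replace undef placeholder
}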
1847
1848namespace {
1849class DataFlowSanitizerLegacyPass : public ModulePass {
1850private:
1851 std::vector<std::string> ABIListFiles;
1852
1853public:
1854 static char ID;
1855
1856 DataFlowSanitizerLegacyPass(
1857 const std::vector<std::string> &ABIListFiles = std::vector<std::string>())
1858 : ModulePass(ID), ABIListFiles(ABIListFiles) {}
1859
1860 bool runOnModule(Module &M) override {
1861 return DataFlowSanitizer(ABIListFiles).runImpl(M);
1862 }
1863};
1864} // namespace
1865
1866char DataFlowSanitizerLegacyPass::ID;
1867
1868INITIALIZE_PASS(DataFlowSanitizerLegacyPass, "dfsan",
1869                "DataFlowSanitizer: dynamic data flow analysis.", false, false)
1870
1871ModulePass *llvm::createDataFlowSanitizerLegacyPassPass(
1872 const std::vector<std::string> &ABIListFiles) {
1873 return new DataFlowSanitizerLegacyPass(ABIListFiles);
1874}
1875
1876PreservedAnalyses DataFlowSanitizerPass::run(Module &M,
1877 ModuleAnalysisManager &AM) {
1878 if (DataFlowSanitizer(ABIListFiles).runImpl(M)) {
1879 return PreservedAnalyses::none();
1880 }
1881 return PreservedAnalyses::all();
1882}
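// [Editor's note] Hedged usage sketch, not part of this file: the new-PM
// pass above is registered under the name "dfsan" (so `opt -passes=dfsan`
// schedules it); from C++ it can be run with the standard PassBuilder
// boilerplate. Assumes the LLVM 12 new-pass-manager API.
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Transforms/Instrumentation/DataFlowSanitizer.h"
static void runDFSan(llvm::Module &M) {
  llvm::PassBuilder PB;
  llvm::LoopAnalysisManager LAM;
  llvm::FunctionAnalysisManager FAM;
  llvm::CGSCCAnalysisManager CGAM;
  llvm::ModuleAnalysisManager MAM;
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
  llvm::ModulePassManager MPM;
  MPM.addPass(llvm::DataFlowSanitizerPass()); // default (empty) ABI list
  MPM.run(M, MAM);
}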

/build/llvm-toolchain-snapshot-12~++20201202100633+b276bf5a572/llvm/include/llvm/IR/IRBuilder.h

1//===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the IRBuilder class, which is used as a convenient way
10// to create LLVM instructions with a consistent and simplified interface.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_IRBUILDER_H
15#define LLVM_IR_IRBUILDER_H
16
17#include "llvm-c/Types.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/StringRef.h"
21#include "llvm/ADT/Twine.h"
22#include "llvm/IR/BasicBlock.h"
23#include "llvm/IR/Constant.h"
24#include "llvm/IR/ConstantFolder.h"
25#include "llvm/IR/Constants.h"
26#include "llvm/IR/DataLayout.h"
27#include "llvm/IR/DebugLoc.h"
28#include "llvm/IR/DerivedTypes.h"
29#include "llvm/IR/Function.h"
30#include "llvm/IR/GlobalVariable.h"
31#include "llvm/IR/InstrTypes.h"
32#include "llvm/IR/Instruction.h"
33#include "llvm/IR/Instructions.h"
34#include "llvm/IR/IntrinsicInst.h"
35#include "llvm/IR/LLVMContext.h"
36#include "llvm/IR/Module.h"
37#include "llvm/IR/Operator.h"
38#include "llvm/IR/Type.h"
39#include "llvm/IR/Value.h"
40#include "llvm/IR/ValueHandle.h"
41#include "llvm/Support/AtomicOrdering.h"
42#include "llvm/Support/CBindingWrapping.h"
43#include "llvm/Support/Casting.h"
44#include <cassert>
45#include <cstddef>
46#include <cstdint>
47#include <functional>
48#include <utility>
49
50namespace llvm {
51
52class APInt;
53class MDNode;
54class Use;
55
56/// This provides the default implementation of the IRBuilder
57/// 'InsertHelper' method that is called whenever an instruction is created by
58/// IRBuilder and needs to be inserted.
59///
60/// By default, this inserts the instruction at the insertion point.
61class IRBuilderDefaultInserter {
62public:
63 virtual ~IRBuilderDefaultInserter();
64
65 virtual void InsertHelper(Instruction *I, const Twine &Name,
66 BasicBlock *BB,
67 BasicBlock::iterator InsertPt) const {
68 if (BB) BB->getInstList().insert(InsertPt, I);
69 I->setName(Name);
70 }
71};
72
73/// Provides an 'InsertHelper' that calls a user-provided callback after
74/// performing the default insertion.
75class IRBuilderCallbackInserter : public IRBuilderDefaultInserter {
76 std::function<void(Instruction *)> Callback;
77
78public:
79 virtual ~IRBuilderCallbackInserter();
80
81 IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
82 : Callback(std::move(Callback)) {}
83
84 void InsertHelper(Instruction *I, const Twine &Name,
85 BasicBlock *BB,
86 BasicBlock::iterator InsertPt) const override {
87 IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
88 Callback(I);
89 }
90};
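// [Editor's note] Hedged usage sketch of IRBuilderCallbackInserter: an
// IRBuilder parameterized on it invokes the callback on each instruction it
// inserts, which passes use to post-process newly created instructions.
// Ctx and Worklist are assumed to exist; the builder type follows the
// pattern used by InstCombine.
//   IRBuilder<ConstantFolder, IRBuilderCallbackInserter> IRB(
//       Ctx, ConstantFolder(),
//       IRBuilderCallbackInserter([&](Instruction *I) {
//         Worklist.push_back(I); // e.g. queue every new instruction
//       }));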
91
92/// Common base class shared among various IRBuilders.
93class IRBuilderBase {
94 DebugLoc CurDbgLocation;
95
96protected:
97 BasicBlock *BB;
98 BasicBlock::iterator InsertPt;
99 LLVMContext &Context;
100 const IRBuilderFolder &Folder;
101 const IRBuilderDefaultInserter &Inserter;
102
103 MDNode *DefaultFPMathTag;
104 FastMathFlags FMF;
105
106 bool IsFPConstrained;
107 fp::ExceptionBehavior DefaultConstrainedExcept;
108 RoundingMode DefaultConstrainedRounding;
109
110 ArrayRef<OperandBundleDef> DefaultOperandBundles;
111
112public:
113 IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder,
114 const IRBuilderDefaultInserter &Inserter,
115 MDNode *FPMathTag, ArrayRef<OperandBundleDef> OpBundles)
116 : Context(context), Folder(Folder), Inserter(Inserter),
117 DefaultFPMathTag(FPMathTag), IsFPConstrained(false),
118 DefaultConstrainedExcept(fp::ebStrict),
119 DefaultConstrainedRounding(RoundingMode::Dynamic),
120 DefaultOperandBundles(OpBundles) {
121 ClearInsertionPoint();
122 }
123
124 /// Insert and return the specified instruction.
125 template<typename InstTy>
126 InstTy *Insert(InstTy *I, const Twine &Name = "") const {
127 Inserter.InsertHelper(I, Name, BB, InsertPt);
128 SetInstDebugLocation(I);
129 return I;
130 }
131
132 /// No-op overload to handle constants.
133 Constant *Insert(Constant *C, const Twine& = "") const {
134 return C;
135 }
136
137 Value *Insert(Value *V, const Twine &Name = "") const {
138 if (Instruction *I = dyn_cast<Instruction>(V))
139 return Insert(I, Name);
140 assert(isa<Constant>(V));
141 return V;
142 }
143
144 //===--------------------------------------------------------------------===//
145 // Builder configuration methods
146 //===--------------------------------------------------------------------===//
147
148 /// Clear the insertion point: created instructions will not be
149 /// inserted into a block.
150 void ClearInsertionPoint() {
151 BB = nullptr;
152 InsertPt = BasicBlock::iterator();
153 }
154
155 BasicBlock *GetInsertBlock() const { return BB; }
156 BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
157 LLVMContext &getContext() const { return Context; }
158
159 /// This specifies that created instructions should be appended to the
160 /// end of the specified block.
161 void SetInsertPoint(BasicBlock *TheBB) {
162 BB = TheBB;
163 InsertPt = BB->end();
164 }
165
166 /// This specifies that created instructions should be inserted before
167 /// the specified instruction.
168 void SetInsertPoint(Instruction *I) {
169 BB = I->getParent();
170 InsertPt = I->getIterator();
171 assert(InsertPt != BB->end() && "Can't read debug loc from end()");
172 SetCurrentDebugLocation(I->getDebugLoc());
173 }
174
175 /// This specifies that created instructions should be inserted at the
176 /// specified point.
177 void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
178 BB = TheBB;
179 InsertPt = IP;
180 if (IP != TheBB->end())
181 SetCurrentDebugLocation(IP->getDebugLoc());
182 }
183
184 /// Set location information used by debugging information.
185 void SetCurrentDebugLocation(DebugLoc L) { CurDbgLocation = std::move(L); }
186
187 /// Get location information used by debugging information.
188 const DebugLoc &getCurrentDebugLocation() const { return CurDbgLocation; }
189
190 /// If this builder has a current debug location, set it on the
191 /// specified instruction.
192 void SetInstDebugLocation(Instruction *I) const {
193 if (CurDbgLocation)
194 I->setDebugLoc(CurDbgLocation);
195 }
196
197 /// Get the return type of the current function that we're emitting
198 /// into.
199 Type *getCurrentFunctionReturnType() const;
200
201 /// InsertPoint - A saved insertion point.
202 class InsertPoint {
203 BasicBlock *Block = nullptr;
204 BasicBlock::iterator Point;
205
206 public:
207 /// Creates a new insertion point which doesn't point to anything.
208 InsertPoint() = default;
209
210 /// Creates a new insertion point at the given location.
211 InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
212 : Block(InsertBlock), Point(InsertPoint) {}
213
214 /// Returns true if this insert point is set.
215 bool isSet() const { return (Block != nullptr); }
216
217 BasicBlock *getBlock() const { return Block; }
218 BasicBlock::iterator getPoint() const { return Point; }
219 };
220
221 /// Returns the current insert point.
222 InsertPoint saveIP() const {
223 return InsertPoint(GetInsertBlock(), GetInsertPoint());
224 }
225
226 /// Returns the current insert point, clearing it in the process.
227 InsertPoint saveAndClearIP() {
228 InsertPoint IP(GetInsertBlock(), GetInsertPoint());
229 ClearInsertionPoint();
230 return IP;
231 }
232
233 /// Sets the current insert point to a previously-saved location.
234 void restoreIP(InsertPoint IP) {
235 if (IP.isSet())
236 SetInsertPoint(IP.getBlock(), IP.getPoint());
237 else
238 ClearInsertionPoint();
239 }
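// [Editor's note] Hedged usage sketch of saveIP/restoreIP: emit code at a
// different point, then return to the original one. Builder and OtherBB are
// assumed to exist.
//   IRBuilderBase::InsertPoint IP = Builder.saveIP();
//   Builder.SetInsertPoint(OtherBB);
//   /* ...create instructions at the end of OtherBB... */
//   Builder.restoreIP(IP);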
240
241 /// Get the floating point math metadata being used.
242 MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }
243
244 /// Get the flags to be applied to created floating point ops
245 FastMathFlags getFastMathFlags() const { return FMF; }
246
247 FastMathFlags &getFastMathFlags() { return FMF; }
248
249 /// Clear the fast-math flags.
250 void clearFastMathFlags() { FMF.clear(); }
251
252 /// Set the floating point math metadata to be used.
253 void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }
254
255 /// Set the fast-math flags to be used with generated fp-math operators
256 void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }
257
258 /// Enable/Disable use of constrained floating point math. When
259 /// enabled, the CreateF<op>() calls instead create constrained
260 /// floating point intrinsic calls. Fast math flags are unaffected
261 /// by this setting.
262 void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; }
263
264 /// Query for the use of constrained floating point math
265 bool getIsFPConstrained() { return IsFPConstrained; }
266
267 /// Set the exception handling to be used with constrained floating point
268 void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
269#ifndef NDEBUG
270 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(NewExcept);
271 assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
272#endif
273 DefaultConstrainedExcept = NewExcept;
274 }
275
276 /// Set the rounding mode handling to be used with constrained floating point
277 void setDefaultConstrainedRounding(RoundingMode NewRounding) {
278#ifndef NDEBUG
279 Optional<StringRef> RoundingStr = RoundingModeToStr(NewRounding);
280 assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
281#endif
282 DefaultConstrainedRounding = NewRounding;
283 }
284
285 /// Get the exception handling used with constrained floating point
286 fp::ExceptionBehavior getDefaultConstrainedExcept() {
287 return DefaultConstrainedExcept;
288 }
289
290 /// Get the rounding mode handling used with constrained floating point
291 RoundingMode getDefaultConstrainedRounding() {
292 return DefaultConstrainedRounding;
293 }
294
295 void setConstrainedFPFunctionAttr() {
296 assert(BB && "Must have a basic block to set any function attributes!");
297
298 Function *F = BB->getParent();
299 if (!F->hasFnAttribute(Attribute::StrictFP)) {
300 F->addFnAttr(Attribute::StrictFP);
301 }
302 }
303
304 void setConstrainedFPCallAttr(CallInst *I) {
305 I->addAttribute(AttributeList::FunctionIndex, Attribute::StrictFP);
306 }
307
308 void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
309 DefaultOperandBundles = OpBundles;
310 }
311
312 //===--------------------------------------------------------------------===//
313 // RAII helpers.
314 //===--------------------------------------------------------------------===//
315
316 // RAII object that stores the current insertion point and restores it
317 // when the object is destroyed. This includes the debug location.
318 class InsertPointGuard {
319 IRBuilderBase &Builder;
320 AssertingVH<BasicBlock> Block;
321 BasicBlock::iterator Point;
322 DebugLoc DbgLoc;
323
324 public:
325 InsertPointGuard(IRBuilderBase &B)
326 : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
327 DbgLoc(B.getCurrentDebugLocation()) {}
328
329 InsertPointGuard(const InsertPointGuard &) = delete;
330 InsertPointGuard &operator=(const InsertPointGuard &) = delete;
331
332 ~InsertPointGuard() {
333 Builder.restoreIP(InsertPoint(Block, Point));
334 Builder.SetCurrentDebugLocation(DbgLoc);
335 }
336 };
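// [Editor's note] Hedged usage sketch of InsertPointGuard: the insertion
// point (and debug location) set inside the scope reverts automatically when
// the guard is destroyed. Builder and Preheader are assumed to exist.
//   {
//     IRBuilderBase::InsertPointGuard Guard(Builder);
//     Builder.SetInsertPoint(Preheader->getTerminator());
//     /* ...emit hoisted code before the terminator... */
//   } // insertion point and debug location restored here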
337
338 // RAII object that stores the current fast math settings and restores
339 // them when the object is destroyed.
340 class FastMathFlagGuard {
341 IRBuilderBase &Builder;
342 FastMathFlags FMF;
343 MDNode *FPMathTag;
344 bool IsFPConstrained;
345 fp::ExceptionBehavior DefaultConstrainedExcept;
346 RoundingMode DefaultConstrainedRounding;
347
348 public:
349 FastMathFlagGuard(IRBuilderBase &B)
350 : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag),
351 IsFPConstrained(B.IsFPConstrained),
352 DefaultConstrainedExcept(B.DefaultConstrainedExcept),
353 DefaultConstrainedRounding(B.DefaultConstrainedRounding) {}
354
355 FastMathFlagGuard(const FastMathFlagGuard &) = delete;
356 FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;
357
358 ~FastMathFlagGuard() {
359 Builder.FMF = FMF;
360 Builder.DefaultFPMathTag = FPMathTag;
361 Builder.IsFPConstrained = IsFPConstrained;
362 Builder.DefaultConstrainedExcept = DefaultConstrainedExcept;
363 Builder.DefaultConstrainedRounding = DefaultConstrainedRounding;
364 }
365 };
366
367 // RAII object that stores the current default operand bundles and restores
368 // them when the object is destroyed.
369 class OperandBundlesGuard {
370 IRBuilderBase &Builder;
371 ArrayRef<OperandBundleDef> DefaultOperandBundles;
372
373 public:
374 OperandBundlesGuard(IRBuilderBase &B)
375 : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {}
376
377 OperandBundlesGuard(const OperandBundlesGuard &) = delete;
378 OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete;
379
380 ~OperandBundlesGuard() {
381 Builder.DefaultOperandBundles = DefaultOperandBundles;
382 }
383 };
384
385
386 //===--------------------------------------------------------------------===//
387 // Miscellaneous creation methods.
388 //===--------------------------------------------------------------------===//
389
390 /// Make a new global variable with initializer type i8*
391 ///
392 /// Make a new global variable with an initializer of array-of-i8 type,
393 /// filled in with the specified null-terminated string value. The new global
394 /// variable will be marked mergeable with any others with the same contents. If
395 /// Name is specified, it is the name of the global variable created.
396 ///
397 /// If no module is given via \p M, it is taken from the insertion point basic
398 /// block.
399 GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
400 unsigned AddressSpace = 0,
401 Module *M = nullptr);
402
403 /// Get a constant value representing either true or false.
404 ConstantInt *getInt1(bool V) {
405 return ConstantInt::get(getInt1Ty(), V);
406 }
407
408 /// Get the constant value for i1 true.
409 ConstantInt *getTrue() {
410 return ConstantInt::getTrue(Context);
411 }
412
413 /// Get the constant value for i1 false.
414 ConstantInt *getFalse() {
415 return ConstantInt::getFalse(Context);
416 }
417
418 /// Get a constant 8-bit value.
419 ConstantInt *getInt8(uint8_t C) {
420 return ConstantInt::get(getInt8Ty(), C);
421 }
422
423 /// Get a constant 16-bit value.
424 ConstantInt *getInt16(uint16_t C) {
425 return ConstantInt::get(getInt16Ty(), C);
426 }
427
428 /// Get a constant 32-bit value.
429 ConstantInt *getInt32(uint32_t C) {
430 return ConstantInt::get(getInt32Ty(), C);
431 }
432
433 /// Get a constant 64-bit value.
434 ConstantInt *getInt64(uint64_t C) {
435 return ConstantInt::get(getInt64Ty(), C);
436 }
437
438 /// Get a constant N-bit value, zero extended or truncated from
439 /// a 64-bit value.
440 ConstantInt *getIntN(unsigned N, uint64_t C) {
441 return ConstantInt::get(getIntNTy(N), C);
442 }
443
444 /// Get a constant integer value.
445 ConstantInt *getInt(const APInt &AI) {
446 return ConstantInt::get(Context, AI);
447 }
448
449 //===--------------------------------------------------------------------===//
450 // Type creation methods
451 //===--------------------------------------------------------------------===//
452
453 /// Fetch the type representing a single bit
454 IntegerType *getInt1Ty() {
455 return Type::getInt1Ty(Context);
456 }
457
458 /// Fetch the type representing an 8-bit integer.
459 IntegerType *getInt8Ty() {
460 return Type::getInt8Ty(Context);
461 }
462
463 /// Fetch the type representing a 16-bit integer.
464 IntegerType *getInt16Ty() {
465 return Type::getInt16Ty(Context);
466 }
467
468 /// Fetch the type representing a 32-bit integer.
469 IntegerType *getInt32Ty() {
470 return Type::getInt32Ty(Context);
471 }
472
473 /// Fetch the type representing a 64-bit integer.
474 IntegerType *getInt64Ty() {
475 return Type::getInt64Ty(Context);
476 }
477
478 /// Fetch the type representing a 128-bit integer.
479 IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }
480
481 /// Fetch the type representing an N-bit integer.
482 IntegerType *getIntNTy(unsigned N) {
483 return Type::getIntNTy(Context, N);
484 }
485
486 /// Fetch the type representing a 16-bit floating point value.
487 Type *getHalfTy() {
488 return Type::getHalfTy(Context);
489 }
490
491 /// Fetch the type representing a 16-bit brain floating point value.
492 Type *getBFloatTy() {
493 return Type::getBFloatTy(Context);
494 }
495
496 /// Fetch the type representing a 32-bit floating point value.
497 Type *getFloatTy() {
498 return Type::getFloatTy(Context);
499 }
500
501 /// Fetch the type representing a 64-bit floating point value.
502 Type *getDoubleTy() {
503 return Type::getDoubleTy(Context);
504 }
505
506 /// Fetch the type representing void.
507 Type *getVoidTy() {
508 return Type::getVoidTy(Context);
509 }
510
511 /// Fetch the type representing a pointer to an 8-bit integer value.
512 PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
513 return Type::getInt8PtrTy(Context, AddrSpace);
514 }
515
516 /// Fetch the type representing a pointer to an integer value.
517 IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
518 return DL.getIntPtrType(Context, AddrSpace);
519 }
520
521 //===--------------------------------------------------------------------===//
522 // Intrinsic creation methods
523 //===--------------------------------------------------------------------===//
524
525 /// Create and insert a memset to the specified pointer and the
526 /// specified value.
527 ///
528 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
529 /// specified, it will be added to the instruction. Likewise with alias.scope
530 /// and noalias tags.
531 CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size,
532 MaybeAlign Align, bool isVolatile = false,
533 MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr,
534 MDNode *NoAliasTag = nullptr) {
535 return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
536 TBAATag, ScopeTag, NoAliasTag);
537 }
538
539 CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, MaybeAlign Align,
540 bool isVolatile = false, MDNode *TBAATag = nullptr,
541 MDNode *ScopeTag = nullptr,
542 MDNode *NoAliasTag = nullptr);
543
544 /// Create and insert an element unordered-atomic memset of the region of
545 /// memory starting at the given pointer to the given value.
546 ///
547 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
548 /// specified, it will be added to the instruction. Likewise with alias.scope
549 /// and noalias tags.
550 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
551 uint64_t Size, Align Alignment,
552 uint32_t ElementSize,
553 MDNode *TBAATag = nullptr,
554 MDNode *ScopeTag = nullptr,
555 MDNode *NoAliasTag = nullptr) {
556 return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size),
557 Align(Alignment), ElementSize,
558 TBAATag, ScopeTag, NoAliasTag);
559 }
560
561 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
562 Value *Size, Align Alignment,
563 uint32_t ElementSize,
564 MDNode *TBAATag = nullptr,
565 MDNode *ScopeTag = nullptr,
566 MDNode *NoAliasTag = nullptr);
567
568 /// Create and insert a memcpy between the specified pointers.
569 ///
570 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
571 /// specified, it will be added to the instruction. Likewise with alias.scope
572 /// and noalias tags.
573 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
574 MaybeAlign SrcAlign, uint64_t Size,
575 bool isVolatile = false, MDNode *TBAATag = nullptr,
576 MDNode *TBAAStructTag = nullptr,
577 MDNode *ScopeTag = nullptr,
578 MDNode *NoAliasTag = nullptr) {
579 return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
580 isVolatile, TBAATag, TBAAStructTag, ScopeTag,
581 NoAliasTag);
582 }
583
584 CallInst *CreateMemTransferInst(
585 Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
586 MaybeAlign SrcAlign, Value *Size, bool isVolatile = false,
587 MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
588 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
589
590 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
591 MaybeAlign SrcAlign, Value *Size,
592 bool isVolatile = false, MDNode *TBAATag = nullptr,
593 MDNode *TBAAStructTag = nullptr,
594 MDNode *ScopeTag = nullptr,
595 MDNode *NoAliasTag = nullptr) {
596 return CreateMemTransferInst(Intrinsic::memcpy, Dst, DstAlign, Src,
597 SrcAlign, Size, isVolatile, TBAATag,
598 TBAAStructTag, ScopeTag, NoAliasTag);
599 }
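// [Editor's note] Hedged usage sketch of the constant-size CreateMemCpy
// convenience overload above; Builder, Dst and Src are assumed to exist.
//   Builder.CreateMemCpy(Dst, MaybeAlign(8), Src, MaybeAlign(8),
//                        /*Size=*/64); // emits llvm.memcpy with 8-byte aligns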
600
601 CallInst *CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src,
602 MaybeAlign SrcAlign, Value *Size);
603
604 /// Create and insert an element unordered-atomic memcpy between the
605 /// specified pointers.
606 ///
607 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively.
608 ///
609 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
610 /// specified, it will be added to the instruction. Likewise with alias.scope
611 /// and noalias tags.
612 CallInst *CreateElementUnorderedAtomicMemCpy(
613 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
614 uint32_t ElementSize, MDNode *TBAATag = nullptr,
615 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
616 MDNode *NoAliasTag = nullptr);
617
618  LLVM_ATTRIBUTE_DEPRECATED(CallInst *CreateElementUnorderedAtomicMemCpy(
619                                Value *Dst, unsigned DstAlign, Value *Src,
620                                unsigned SrcAlign, uint64_t Size,
621                                uint32_t ElementSize, MDNode *TBAATag = nullptr,
622                                MDNode *TBAAStructTag = nullptr,
623                                MDNode *ScopeTag = nullptr,
624                                MDNode *NoAliasTag = nullptr),
625                            "Use the version that takes Align instead") {
626 return CreateElementUnorderedAtomicMemCpy(
627 Dst, Align(DstAlign), Src, Align(SrcAlign), getInt64(Size), ElementSize,
628 TBAATag, TBAAStructTag, ScopeTag, NoAliasTag);
629 }
630
631  LLVM_ATTRIBUTE_DEPRECATED(CallInst *CreateElementUnorderedAtomicMemCpy(
632                                Value *Dst, unsigned DstAlign, Value *Src,
633                                unsigned SrcAlign, Value *Size,
634                                uint32_t ElementSize, MDNode *TBAATag = nullptr,
635                                MDNode *TBAAStructTag = nullptr,
636                                MDNode *ScopeTag = nullptr,
637                                MDNode *NoAliasTag = nullptr),
638                            "Use the version that takes Align instead") {
639 return CreateElementUnorderedAtomicMemCpy(
640 Dst, Align(DstAlign), Src, Align(SrcAlign), Size, ElementSize, TBAATag,
641 TBAAStructTag, ScopeTag, NoAliasTag);
642 }
643
644 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
645 MaybeAlign SrcAlign, uint64_t Size,
646 bool isVolatile = false, MDNode *TBAATag = nullptr,
647 MDNode *ScopeTag = nullptr,
648 MDNode *NoAliasTag = nullptr) {
649 return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
650 isVolatile, TBAATag, ScopeTag, NoAliasTag);
651 }
652
653 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
654 MaybeAlign SrcAlign, Value *Size,
655 bool isVolatile = false, MDNode *TBAATag = nullptr,
656 MDNode *ScopeTag = nullptr,
657 MDNode *NoAliasTag = nullptr);
658
659 /// \brief Create and insert an element unordered-atomic memmove between the
660 /// specified pointers.
661 ///
662 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers,
663 /// respectively.
664 ///
665 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
666 /// specified, it will be added to the instruction. Likewise with alias.scope
667 /// and noalias tags.
668 CallInst *CreateElementUnorderedAtomicMemMove(
669 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
670 uint32_t ElementSize, MDNode *TBAATag = nullptr,
671 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
672 MDNode *NoAliasTag = nullptr);
673
674  LLVM_ATTRIBUTE_DEPRECATED(CallInst *CreateElementUnorderedAtomicMemMove(
675                                Value *Dst, unsigned DstAlign, Value *Src,
676                                unsigned SrcAlign, uint64_t Size,
677                                uint32_t ElementSize, MDNode *TBAATag = nullptr,
678                                MDNode *TBAAStructTag = nullptr,
679                                MDNode *ScopeTag = nullptr,
680                                MDNode *NoAliasTag = nullptr),
681                            "Use the version that takes Align instead") {
682 return CreateElementUnorderedAtomicMemMove(
683 Dst, Align(DstAlign), Src, Align(SrcAlign), getInt64(Size), ElementSize,
684 TBAATag, TBAAStructTag, ScopeTag, NoAliasTag);
685 }
686
687  LLVM_ATTRIBUTE_DEPRECATED(CallInst *CreateElementUnorderedAtomicMemMove(
688                                Value *Dst, unsigned DstAlign, Value *Src,
689                                unsigned SrcAlign, Value *Size,
690                                uint32_t ElementSize, MDNode *TBAATag = nullptr,
691                                MDNode *TBAAStructTag = nullptr,
692                                MDNode *ScopeTag = nullptr,
693                                MDNode *NoAliasTag = nullptr),
694                            "Use the version that takes Align instead") {
695 return CreateElementUnorderedAtomicMemMove(
696 Dst, Align(DstAlign), Src, Align(SrcAlign), Size, ElementSize, TBAATag,
697 TBAAStructTag, ScopeTag, NoAliasTag);
698 }
699
700 /// Create a vector fadd reduction intrinsic of the source vector.
701 /// The first parameter is a scalar accumulator value for ordered reductions.
702 CallInst *CreateFAddReduce(Value *Acc, Value *Src);
703
704 /// Create a vector fmul reduction intrinsic of the source vector.
705 /// The first parameter is a scalar accumulator value for ordered reductions.
706 CallInst *CreateFMulReduce(Value *Acc, Value *Src);
707
708 /// Create a vector int add reduction intrinsic of the source vector.
709 CallInst *CreateAddReduce(Value *Src);
710
711 /// Create a vector int mul reduction intrinsic of the source vector.
712 CallInst *CreateMulReduce(Value *Src);
713
714 /// Create a vector int AND reduction intrinsic of the source vector.
715 CallInst *CreateAndReduce(Value *Src);
716
717 /// Create a vector int OR reduction intrinsic of the source vector.
718 CallInst *CreateOrReduce(Value *Src);
719
720 /// Create a vector int XOR reduction intrinsic of the source vector.
721 CallInst *CreateXorReduce(Value *Src);
722
723 /// Create a vector integer max reduction intrinsic of the source
724 /// vector.
725 CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false);
726
727 /// Create a vector integer min reduction intrinsic of the source
728 /// vector.
729 CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false);
730
731 /// Create a vector float max reduction intrinsic of the source
732 /// vector.
733 CallInst *CreateFPMaxReduce(Value *Src, bool NoNaN = false);
734
735 /// Create a vector float min reduction intrinsic of the source
736 /// vector.
737 CallInst *CreateFPMinReduce(Value *Src, bool NoNaN = false);
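[Editorial sketch] A minimal usage sketch for the reduction builders above; the helper names are hypothetical and not part of this header:

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Sum the lanes of an integer vector (emits the add-reduction intrinsic).
Value *sumIntLanes(IRBuilder<> &B, Value *Vec) {
  return B.CreateAddReduce(Vec);
}

// Ordered float sum: Acc is the scalar start value for the fadd reduction.
Value *orderedFSum(IRBuilder<> &B, Value *Acc, Value *Vec) {
  return B.CreateFAddReduce(Acc, Vec);
}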
738
739 /// Create a lifetime.start intrinsic.
740 ///
741 /// If the pointer isn't i8* it will be converted.
742 CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr);
743
744 /// Create a lifetime.end intrinsic.
745 ///
746 /// If the pointer isn't i8* it will be converted.
747 CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);
748
749 /// Create a call to invariant.start intrinsic.
750 ///
751 /// If the pointer isn't i8* it will be converted.
752 CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
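[Editorial sketch] A hedged example of the lifetime markers (helper name hypothetical); per the comments above, the builder converts the pointer to i8* as needed:

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Bracket the live range of a Bytes-sized stack slot.
void markLiveRange(IRBuilder<> &B, Value *Slot, uint64_t Bytes) {
  ConstantInt *Size = B.getInt64(Bytes);
  B.CreateLifetimeStart(Slot, Size);
  // ... emit the code that actually uses Slot here ...
  B.CreateLifetimeEnd(Slot, Size);
}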
753
754 /// Create a call to Masked Load intrinsic
755  LLVM_ATTRIBUTE_DEPRECATED(
756      CallInst *CreateMaskedLoad(Value *Ptr, unsigned Alignment, Value *Mask,
757                                 Value *PassThru = nullptr,
758                                 const Twine &Name = ""),
759      "Use the version that takes Align instead") {
760 return CreateMaskedLoad(Ptr, assumeAligned(Alignment), Mask, PassThru,
761 Name);
762 }
763 CallInst *CreateMaskedLoad(Value *Ptr, Align Alignment, Value *Mask,
764 Value *PassThru = nullptr, const Twine &Name = "");
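[Editorial sketch] A short example against the non-deprecated Align overload; the helper and the chosen alignment are illustrative assumptions:

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Load only the lanes whose mask bit is set; masked-off lanes get PassThru.
Value *guardedLoad(IRBuilder<> &B, Value *Ptr, Value *Mask, Value *PassThru) {
  return B.CreateMaskedLoad(Ptr, Align(16), Mask, PassThru, "mload");
}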
765
766 /// Create a call to Masked Store intrinsic
767  LLVM_ATTRIBUTE_DEPRECATED(CallInst *CreateMaskedStore(Value *Val, Value *Ptr,
768                                                        unsigned Alignment,
769                                                        Value *Mask),
770                            "Use the version that takes Align instead") {
771 return CreateMaskedStore(Val, Ptr, assumeAligned(Alignment), Mask);
772 }
773
774 CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment,
775 Value *Mask);
776
777 /// Create a call to Masked Gather intrinsic
778  LLVM_ATTRIBUTE_DEPRECATED(
779      CallInst *CreateMaskedGather(Value *Ptrs, unsigned Alignment,
780                                   Value *Mask = nullptr,
781                                   Value *PassThru = nullptr,
782                                   const Twine &Name = ""),
783      "Use the version that takes Align instead") {
784 return CreateMaskedGather(Ptrs, Align(Alignment), Mask, PassThru, Name);
785 }
786
787 /// Create a call to Masked Gather intrinsic
788 CallInst *CreateMaskedGather(Value *Ptrs, Align Alignment,
789 Value *Mask = nullptr, Value *PassThru = nullptr,
790 const Twine &Name = "");
791
792 /// Create a call to Masked Scatter intrinsic
793  LLVM_ATTRIBUTE_DEPRECATED(
794      CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, unsigned Alignment,
795                                    Value *Mask = nullptr),
796      "Use the version that takes Align instead") {
797 return CreateMaskedScatter(Val, Ptrs, Align(Alignment), Mask);
798 }
799
800 /// Create a call to Masked Scatter intrinsic
801 CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment,
802 Value *Mask = nullptr);
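[Editorial sketch] Pairing the Align-based gather and scatter; helper name, alignment, and value names are assumptions:

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Gather from a vector of pointers, then scatter the same lanes back.
void gatherScatter(IRBuilder<> &B, Value *Ptrs, Value *Mask) {
  Value *Loaded = B.CreateMaskedGather(Ptrs, Align(8), Mask,
                                       /*PassThru=*/nullptr, "gathered");
  B.CreateMaskedScatter(Loaded, Ptrs, Align(8), Mask);
}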
803
804 /// Create an assume intrinsic call that allows the optimizer to
805 /// assume that the provided condition will be true.
806 ///
807 /// The optional argument \p OpBundles specifies operand bundles that are
808 /// added to the call instruction.
809 CallInst *CreateAssumption(Value *Cond,
810 ArrayRef<OperandBundleDef> OpBundles = llvm::None);
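[Editorial sketch] One common use of CreateAssumption (the helper is hypothetical):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Record the fact that Ptr is non-null on this path for later passes.
void assumeNonNull(IRBuilder<> &B, Value *Ptr) {
  B.CreateAssumption(B.CreateIsNotNull(Ptr, "nonnull"));
}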
811
812 /// Create a call to the experimental.gc.statepoint intrinsic to
813 /// start a new statepoint sequence.
814 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
815 Value *ActualCallee,
816 ArrayRef<Value *> CallArgs,
817 Optional<ArrayRef<Value *>> DeoptArgs,
818 ArrayRef<Value *> GCArgs,
819 const Twine &Name = "");
820
821 /// Create a call to the experimental.gc.statepoint intrinsic to
822 /// start a new statepoint sequence.
823 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
824 Value *ActualCallee, uint32_t Flags,
825 ArrayRef<Value *> CallArgs,
826 Optional<ArrayRef<Use>> TransitionArgs,
827 Optional<ArrayRef<Use>> DeoptArgs,
828 ArrayRef<Value *> GCArgs,
829 const Twine &Name = "");
830
831  /// Convenience function for the common case when CallArgs are filled
832 /// in using makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be
833 /// .get()'ed to get the Value pointer.
834 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
835 Value *ActualCallee, ArrayRef<Use> CallArgs,
836 Optional<ArrayRef<Value *>> DeoptArgs,
837 ArrayRef<Value *> GCArgs,
838 const Twine &Name = "");
839
840 /// Create an invoke to the experimental.gc.statepoint intrinsic to
841 /// start a new statepoint sequence.
842 InvokeInst *
843 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
844 Value *ActualInvokee, BasicBlock *NormalDest,
845 BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
846 Optional<ArrayRef<Value *>> DeoptArgs,
847 ArrayRef<Value *> GCArgs, const Twine &Name = "");
848
849 /// Create an invoke to the experimental.gc.statepoint intrinsic to
850 /// start a new statepoint sequence.
851 InvokeInst *CreateGCStatepointInvoke(
852 uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
853 BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
854 ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
855 Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
856 const Twine &Name = "");
857
858 // Convenience function for the common case when CallArgs are filled in using
859 // makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to
860 // get the Value *.
861 InvokeInst *
862 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
863 Value *ActualInvokee, BasicBlock *NormalDest,
864 BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
865 Optional<ArrayRef<Value *>> DeoptArgs,
866 ArrayRef<Value *> GCArgs, const Twine &Name = "");
867
868 /// Create a call to the experimental.gc.result intrinsic to extract
869 /// the result from a call wrapped in a statepoint.
870 CallInst *CreateGCResult(Instruction *Statepoint,
871 Type *ResultType,
872 const Twine &Name = "");
873
874 /// Create a call to the experimental.gc.relocate intrinsics to
875 /// project the relocated value of one pointer from the statepoint.
876 CallInst *CreateGCRelocate(Instruction *Statepoint,
877 int BaseOffset,
878 int DerivedOffset,
879 Type *ResultType,
880 const Twine &Name = "");
881
882 /// Create a call to intrinsic \p ID with 1 operand which is mangled on its
883 /// type.
884 CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
885 Instruction *FMFSource = nullptr,
886 const Twine &Name = "");
887
888 /// Create a call to intrinsic \p ID with 2 operands which is mangled on the
889 /// first type.
890 CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS,
891 Instruction *FMFSource = nullptr,
892 const Twine &Name = "");
893
894 /// Create a call to intrinsic \p ID with \p args, mangled using \p Types. If
895 /// \p FMFSource is provided, copy fast-math-flags from that instruction to
896 /// the intrinsic.
897 CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
898 ArrayRef<Value *> Args,
899 Instruction *FMFSource = nullptr,
900 const Twine &Name = "");
901
902 /// Create call to the minnum intrinsic.
903 CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
904 return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, nullptr, Name);
905 }
906
907 /// Create call to the maxnum intrinsic.
908 CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
909 return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, nullptr, Name);
910 }
911
912 /// Create call to the minimum intrinsic.
913 CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
914 return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name);
915 }
916
917 /// Create call to the maximum intrinsic.
918 CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
919 return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name);
920 }
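[Editorial sketch] Composing the min/max intrinsic wrappers above (helper name hypothetical):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Clamp X into [Lo, Hi]; minnum/maxnum return the non-NaN operand when
// exactly one operand is NaN.
Value *clamp(IRBuilder<> &B, Value *X, Value *Lo, Value *Hi) {
  return B.CreateMinNum(B.CreateMaxNum(X, Lo, "lo"), Hi, "clamped");
}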
921
922private:
923 /// Create a call to a masked intrinsic with given Id.
924 CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
925 ArrayRef<Type *> OverloadedTypes,
926 const Twine &Name = "");
927
928 Value *getCastedInt8PtrValue(Value *Ptr);
929
930 //===--------------------------------------------------------------------===//
931 // Instruction creation methods: Terminators
932 //===--------------------------------------------------------------------===//
933
934private:
935 /// Helper to add branch weight and unpredictable metadata onto an
936 /// instruction.
937 /// \returns The annotated instruction.
938 template <typename InstTy>
939 InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) {
940 if (Weights)
941 I->setMetadata(LLVMContext::MD_prof, Weights);
942 if (Unpredictable)
943 I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable);
944 return I;
945 }
946
947public:
948 /// Create a 'ret void' instruction.
949 ReturnInst *CreateRetVoid() {
950 return Insert(ReturnInst::Create(Context));
951 }
952
953 /// Create a 'ret <val>' instruction.
954 ReturnInst *CreateRet(Value *V) {
955 return Insert(ReturnInst::Create(Context, V));
956 }
957
958 /// Create a sequence of N insertvalue instructions,
959  /// with one Value from the retVals array each, that build an aggregate
960 /// return value one value at a time, and a ret instruction to return
961 /// the resulting aggregate value.
962 ///
963 /// This is a convenience function for code that uses aggregate return values
964 /// as a vehicle for having multiple return values.
965 ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) {
966 Value *V = UndefValue::get(getCurrentFunctionReturnType());
967 for (unsigned i = 0; i != N; ++i)
968 V = CreateInsertValue(V, retVals[i], i, "mrv");
969 return Insert(ReturnInst::Create(Context, V));
970 }
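[Editorial sketch] Using the aggregate-return convenience above (helper name hypothetical):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Return {Quot, Rem} from a function whose return type is a two-element
// struct, via the insertvalue-then-ret sequence built above.
ReturnInst *returnPair(IRBuilder<> &B, Value *Quot, Value *Rem) {
  Value *Vals[] = {Quot, Rem};
  return B.CreateAggregateRet(Vals, 2);
}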
971
972 /// Create an unconditional 'br label X' instruction.
973 BranchInst *CreateBr(BasicBlock *Dest) {
974 return Insert(BranchInst::Create(Dest));
975 }
976
977 /// Create a conditional 'br Cond, TrueDest, FalseDest'
978 /// instruction.
979 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
980 MDNode *BranchWeights = nullptr,
981 MDNode *Unpredictable = nullptr) {
982 return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
983 BranchWeights, Unpredictable));
984 }
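[Editorial sketch] Attaching branch weights via the BranchWeights parameter; the helper and the 90/10 split are illustrative:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/MDBuilder.h"
using namespace llvm;

// Emit a conditional branch annotated as roughly 90% likely taken.
BranchInst *emitLikelyBranch(IRBuilder<> &B, Value *Cond, BasicBlock *Hot,
                             BasicBlock *Cold) {
  MDNode *Weights = MDBuilder(B.getContext()).createBranchWeights(90, 10);
  return B.CreateCondBr(Cond, Hot, Cold, Weights);
}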
985
986 /// Create a conditional 'br Cond, TrueDest, FalseDest'
987  /// instruction. Copy branch metadata if available.
988 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
989 Instruction *MDSrc) {
990 BranchInst *Br = BranchInst::Create(True, False, Cond);
991 if (MDSrc) {
992 unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
993 LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
994 Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4));
995 }
996 return Insert(Br);
997 }
998
999 /// Create a switch instruction with the specified value, default dest,
1000 /// and with a hint for the number of cases that will be added (for efficient
1001 /// allocation).
1002 SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
1003 MDNode *BranchWeights = nullptr,
1004 MDNode *Unpredictable = nullptr) {
1005 return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
1006 BranchWeights, Unpredictable));
1007 }
1008
1009 /// Create an indirect branch instruction with the specified address
1010 /// operand, with an optional hint for the number of destinations that will be
1011 /// added (for efficient allocation).
1012 IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
1013 return Insert(IndirectBrInst::Create(Addr, NumDests));
1014 }
1015
1016 /// Create an invoke instruction.
1017 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1018 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1019 ArrayRef<Value *> Args,
1020 ArrayRef<OperandBundleDef> OpBundles,
1021 const Twine &Name = "") {
1022 return Insert(
1023 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles),
1024 Name);
1025 }
1026 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1027 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1028 ArrayRef<Value *> Args = None,
1029 const Twine &Name = "") {
1030 return Insert(InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args),
1031 Name);
1032 }
1033
1034 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1035 BasicBlock *UnwindDest, ArrayRef<Value *> Args,
1036 ArrayRef<OperandBundleDef> OpBundles,
1037 const Twine &Name = "") {
1038 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1039 NormalDest, UnwindDest, Args, OpBundles, Name);
1040 }
1041
1042 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1043 BasicBlock *UnwindDest,
1044 ArrayRef<Value *> Args = None,
1045 const Twine &Name = "") {
1046 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1047 NormalDest, UnwindDest, Args, Name);
1048 }
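[Editorial sketch] A minimal invoke built with the FunctionCallee overload (helper name hypothetical):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Call a potentially-throwing function: control resumes at Normal on an
// ordinary return and at Unwind (a landing-pad block) on an exception.
InvokeInst *emitInvoke(IRBuilder<> &B, FunctionCallee Callee, Value *Arg,
                       BasicBlock *Normal, BasicBlock *Unwind) {
  return B.CreateInvoke(Callee, Normal, Unwind, {Arg});
}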
1049
1050 /// \brief Create a callbr instruction.
1051 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1052 BasicBlock *DefaultDest,
1053 ArrayRef<BasicBlock *> IndirectDests,
1054 ArrayRef<Value *> Args = None,
1055 const Twine &Name = "") {
1056 return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests,
1057 Args), Name);
1058 }
1059 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1060 BasicBlock *DefaultDest,
1061 ArrayRef<BasicBlock *> IndirectDests,
1062 ArrayRef<Value *> Args,
1063 ArrayRef<OperandBundleDef> OpBundles,
1064 const Twine &Name = "") {
1065 return Insert(
1066 CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args,
1067 OpBundles), Name);
1068 }
1069
1070 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1071 ArrayRef<BasicBlock *> IndirectDests,
1072 ArrayRef<Value *> Args = None,
1073 const Twine &Name = "") {
1074 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1075 DefaultDest, IndirectDests, Args, Name);
1076 }
1077 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1078 ArrayRef<BasicBlock *> IndirectDests,
1079 ArrayRef<Value *> Args,
1080 ArrayRef<OperandBundleDef> OpBundles,
1081 const Twine &Name = "") {
1082    return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1083                        DefaultDest, IndirectDests, Args, OpBundles, Name);
1084 }
1085
1086 ResumeInst *CreateResume(Value *Exn) {
1087 return Insert(ResumeInst::Create(Exn));
1088 }
1089
1090 CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
1091 BasicBlock *UnwindBB = nullptr) {
1092 return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
1093 }
1094
1095 CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
1096 unsigned NumHandlers,
1097 const Twine &Name = "") {
1098 return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
1099 Name);
1100 }
1101
1102 CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
1103 const Twine &Name = "") {
1104 return Insert(CatchPadInst::Create(ParentPad, Args), Name);
1105 }
1106
1107 CleanupPadInst *CreateCleanupPad(Value *ParentPad,
1108 ArrayRef<Value *> Args = None,
1109 const Twine &Name = "") {
1110 return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
1111 }
1112
1113 CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
1114 return Insert(CatchReturnInst::Create(CatchPad, BB));
1115 }
1116
1117 UnreachableInst *CreateUnreachable() {
1118 return Insert(new UnreachableInst(Context));
1119 }
1120
1121 //===--------------------------------------------------------------------===//
1122 // Instruction creation methods: Binary Operators
1123 //===--------------------------------------------------------------------===//
1124private:
1125 BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
1126 Value *LHS, Value *RHS,
1127 const Twine &Name,
1128 bool HasNUW, bool HasNSW) {
1129 BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
1130 if (HasNUW) BO->setHasNoUnsignedWrap();
1131 if (HasNSW) BO->setHasNoSignedWrap();
1132 return BO;
1133 }
1134
1135 Instruction *setFPAttrs(Instruction *I, MDNode *FPMD,
1136 FastMathFlags FMF) const {
1137 if (!FPMD)
1138 FPMD = DefaultFPMathTag;
1139 if (FPMD)
1140 I->setMetadata(LLVMContext::MD_fpmath, FPMD);
1141 I->setFastMathFlags(FMF);
1142 return I;
1143 }
1144
1145 Value *foldConstant(Instruction::BinaryOps Opc, Value *L,
1146 Value *R, const Twine &Name) const {
1147 auto *LC = dyn_cast<Constant>(L);
1148 auto *RC = dyn_cast<Constant>(R);
1149 return (LC && RC) ? Insert(Folder.CreateBinOp(Opc, LC, RC), Name) : nullptr;
1150 }
1151
1152 Value *getConstrainedFPRounding(Optional<RoundingMode> Rounding) {
1153 RoundingMode UseRounding = DefaultConstrainedRounding;
1154
1155 if (Rounding.hasValue())
1156 UseRounding = Rounding.getValue();
1157
1158 Optional<StringRef> RoundingStr = RoundingModeToStr(UseRounding);
1159    assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
1160 auto *RoundingMDS = MDString::get(Context, RoundingStr.getValue());
1161
1162 return MetadataAsValue::get(Context, RoundingMDS);
1163 }
1164
1165 Value *getConstrainedFPExcept(Optional<fp::ExceptionBehavior> Except) {
1166 fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept;
1167
1168 if (Except.hasValue())
1169 UseExcept = Except.getValue();
1170
1171 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(UseExcept);
1172    assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
1173 auto *ExceptMDS = MDString::get(Context, ExceptStr.getValue());
1174
1175 return MetadataAsValue::get(Context, ExceptMDS);
1176 }
1177
1178 Value *getConstrainedFPPredicate(CmpInst::Predicate Predicate) {
1179    assert(CmpInst::isFPPredicate(Predicate) &&
1180           Predicate != CmpInst::FCMP_FALSE &&
1181           Predicate != CmpInst::FCMP_TRUE &&
1182           "Invalid constrained FP comparison predicate!");
1183
1184 StringRef PredicateStr = CmpInst::getPredicateName(Predicate);
1185 auto *PredicateMDS = MDString::get(Context, PredicateStr);
1186
1187 return MetadataAsValue::get(Context, PredicateMDS);
1188 }
1189
1190public:
1191 Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
1192 bool HasNUW = false, bool HasNSW = false) {
1193 if (auto *LC = dyn_cast<Constant>(LHS))
1194 if (auto *RC = dyn_cast<Constant>(RHS))
1195 return Insert(Folder.CreateAdd(LC, RC, HasNUW, HasNSW), Name);
1196 return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
1197 HasNUW, HasNSW);
1198 }
1199
1200 Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1201 return CreateAdd(LHS, RHS, Name, false, true);
1202 }
1203
1204 Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1205 return CreateAdd(LHS, RHS, Name, true, false);
1206 }
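[Editorial sketch] One use of the no-signed-wrap add wrapper (helper name hypothetical):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// i + 1 with the nsw flag: sound when signed overflow would be UB in the
// source language, and it lets later passes reason about the increment.
Value *incrementNSW(IRBuilder<> &B, Value *I) {
  return B.CreateNSWAdd(I, ConstantInt::get(I->getType(), 1), "inc");
}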
1207
1208 Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
1209 bool HasNUW = false, bool HasNSW = false) {
1210 if (auto *LC = dyn_cast<Constant>(LHS))
1211 if (auto *RC = dyn_cast<Constant>(RHS))
1212 return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name);
1213 return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
1214 HasNUW, HasNSW);
1215 }
1216
1217 Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1218 return CreateSub(LHS, RHS, Name, false, true);
1219 }
1220
1221 Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1222 return CreateSub(LHS, RHS, Name, true, false);
1223 }
1224
1225 Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
1226 bool HasNUW = false, bool HasNSW = false) {
1227 if (auto *LC = dyn_cast<Constant>(LHS))
1228 if (auto *RC = dyn_cast<Constant>(RHS))
1229 return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name);
1230 return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
1231 HasNUW, HasNSW);
1232 }
1233
1234 Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1235 return CreateMul(LHS, RHS, Name, false, true);
1236 }
1237
1238 Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1239 return CreateMul(LHS, RHS, Name, true, false);
1240 }
1241
1242 Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1243 bool isExact = false) {
1244 if (auto *LC = dyn_cast<Constant>(LHS))
1245 if (auto *RC = dyn_cast<Constant>(RHS))
1246 return Insert(Folder.CreateUDiv(LC, RC, isExact), Name);
1247 if (!isExact)
1248 return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
1249 return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name);
1250 }
1251
1252 Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1253 return CreateUDiv(LHS, RHS, Name, true);
1254 }
1255
1256 Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1257 bool isExact = false) {
1258 if (auto *LC = dyn_cast<Constant>(LHS))
1259 if (auto *RC = dyn_cast<Constant>(RHS))
1260 return Insert(Folder.CreateSDiv(LC, RC, isExact), Name);
1261 if (!isExact)
1262 return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
1263 return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
1264 }
1265
1266 Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1267 return CreateSDiv(LHS, RHS, Name, true);
1268 }
1269
1270 Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
1271 if (Value *V = foldConstant(Instruction::URem, LHS, RHS, Name)) return V;
1272 return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
1273 }
1274
1275 Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
1276 if (Value *V = foldConstant(Instruction::SRem, LHS, RHS, Name)) return V;
1277 return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
1278 }
1279
1280 Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
1281 bool HasNUW = false, bool HasNSW = false) {
1282 if (auto *LC = dyn_cast<Constant>(LHS))
1283 if (auto *RC = dyn_cast<Constant>(RHS))
1284 return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name);
1285 return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
1286 HasNUW, HasNSW);
1287 }
1288
1289 Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
1290 bool HasNUW = false, bool HasNSW = false) {
1291 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1292 HasNUW, HasNSW);
1293 }
1294
1295 Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
1296 bool HasNUW = false, bool HasNSW = false) {
1297 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1298 HasNUW, HasNSW);
1299 }
1300
1301 Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
1302 bool isExact = false) {
1303 if (auto *LC = dyn_cast<Constant>(LHS))
1304 if (auto *RC = dyn_cast<Constant>(RHS))
1305 return Insert(Folder.CreateLShr(LC, RC, isExact), Name);
1306 if (!isExact)
1307 return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
1308 return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
1309 }
1310
1311 Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1312 bool isExact = false) {
1313    return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1314 }
1315
1316 Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1317 bool isExact = false) {
1318    return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1319 }
1320
1321 Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
1322 bool isExact = false) {
1323 if (auto *LC = dyn_cast<Constant>(LHS))
1324 if (auto *RC = dyn_cast<Constant>(RHS))
1325 return Insert(Folder.CreateAShr(LC, RC, isExact), Name);
1326 if (!isExact)
1327 return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
1328 return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
1329 }
1330
1331 Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1332 bool isExact = false) {
1333    return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1334 }
1335
1336 Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1337 bool isExact = false) {
1338    return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1339 }
1340
1341 Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
1342 if (auto *RC = dyn_cast<Constant>(RHS)) {
1343 if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isMinusOne())
1344 return LHS; // LHS & -1 -> LHS
1345 if (auto *LC = dyn_cast<Constant>(LHS))
1346 return Insert(Folder.CreateAnd(LC, RC), Name);
1347 }
1348 return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
1349 }
1350
1351 Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1352 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1353 }
1354
1355 Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1356 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1357 }
1358
1359 Value *CreateAnd(ArrayRef<Value*> Ops) {
1360    assert(!Ops.empty());
1361 Value *Accum = Ops[0];
1362 for (unsigned i = 1; i < Ops.size(); i++)
1363 Accum = CreateAnd(Accum, Ops[i]);
1364 return Accum;
1365 }
1366
1367 Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
1368 if (auto *RC = dyn_cast<Constant>(RHS)) {
1369 if (RC->isNullValue())
1370 return LHS; // LHS | 0 -> LHS
1371 if (auto *LC = dyn_cast<Constant>(LHS))
1372 return Insert(Folder.CreateOr(LC, RC), Name);
1373 }
1374 return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
1375 }
1376
1377 Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1378 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1379 }
1380
1381 Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1382 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1383 }
1384
1385 Value *CreateOr(ArrayRef<Value*> Ops) {
1386    assert(!Ops.empty());
1387 Value *Accum = Ops[0];
1388 for (unsigned i = 1; i < Ops.size(); i++)
1389 Accum = CreateOr(Accum, Ops[i]);
1390 return Accum;
1391 }
1392
1393 Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
1394 if (Value *V = foldConstant(Instruction::Xor, LHS, RHS, Name)) return V;
1395 return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
1396 }
1397
1398 Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1399 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1400 }
1401
1402 Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1403 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1404 }
1405
1406 Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "",
1407 MDNode *FPMD = nullptr) {
1408 if (IsFPConstrained)
1409 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1410 L, R, nullptr, Name, FPMD);
1411
1412 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1413 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF);
1414 return Insert(I, Name);
1415 }
1416
1417 /// Copy fast-math-flags from an instruction rather than using the builder's
1418 /// default FMF.
1419 Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource,
1420 const Twine &Name = "") {
1421 if (IsFPConstrained)
1422 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1423 L, R, FMFSource, Name);
1424
1425 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1426 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr,
1427 FMFSource->getFastMathFlags());
1428 return Insert(I, Name);
1429 }
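[Editorial sketch] Why the FMF-copying variant exists, in use (helper name hypothetical):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// When replacing an existing fadd, inherit its fast-math flags rather
// than whatever defaults this builder currently carries.
Value *rebuildFAdd(IRBuilder<> &B, Instruction *OldFAdd, Value *L, Value *R) {
  return B.CreateFAddFMF(L, R, OldFAdd, "refadd");
}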
1430
1431 Value *CreateFSub(Value *L, Value *R, const Twine &Name = "",
1432 MDNode *FPMD = nullptr) {
1433 if (IsFPConstrained)
1434 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1435 L, R, nullptr, Name, FPMD);
1436
1437 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1438 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF);
1439 return Insert(I, Name);
1440 }
1441
1442 /// Copy fast-math-flags from an instruction rather than using the builder's
1443 /// default FMF.
1444 Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource,
1445 const Twine &Name = "") {
1446 if (IsFPConstrained)
1447 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1448 L, R, FMFSource, Name);
1449
1450 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1451 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr,
1452 FMFSource->getFastMathFlags());
1453 return Insert(I, Name);
1454 }
1455
1456 Value *CreateFMul(Value *L, Value *R, const Twine &Name = "",
1457 MDNode *FPMD = nullptr) {
1458 if (IsFPConstrained)
1459 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1460 L, R, nullptr, Name, FPMD);
1461
1462 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1463 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF);
1464 return Insert(I, Name);
1465 }
1466
1467 /// Copy fast-math-flags from an instruction rather than using the builder's
1468 /// default FMF.
1469 Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource,
1470 const Twine &Name = "") {
1471 if (IsFPConstrained)
1472 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1473 L, R, FMFSource, Name);
1474
1475 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1476 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr,
1477 FMFSource->getFastMathFlags());
1478 return Insert(I, Name);
1479 }
1480
1481 Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "",
1482 MDNode *FPMD = nullptr) {
1483 if (IsFPConstrained)
1484 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1485 L, R, nullptr, Name, FPMD);
1486
1487 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1488 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF);
1489 return Insert(I, Name);
1490 }
1491
1492 /// Copy fast-math-flags from an instruction rather than using the builder's
1493 /// default FMF.
1494 Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource,
1495 const Twine &Name = "") {
1496 if (IsFPConstrained)
1497 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1498 L, R, FMFSource, Name);
1499
1500 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1501 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr,
1502 FMFSource->getFastMathFlags());
1503 return Insert(I, Name);
1504 }
1505
1506 Value *CreateFRem(Value *L, Value *R, const Twine &Name = "",
1507 MDNode *FPMD = nullptr) {
1508 if (IsFPConstrained)
1509 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1510 L, R, nullptr, Name, FPMD);
1511
1512 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1513 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF);
1514 return Insert(I, Name);
1515 }
1516
1517 /// Copy fast-math-flags from an instruction rather than using the builder's
1518 /// default FMF.
1519 Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource,
1520 const Twine &Name = "") {
1521 if (IsFPConstrained)
1522 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1523 L, R, FMFSource, Name);
1524
1525 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1526 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr,
1527 FMFSource->getFastMathFlags());
1528 return Insert(I, Name);
1529 }
1530
1531 Value *CreateBinOp(Instruction::BinaryOps Opc,
1532 Value *LHS, Value *RHS, const Twine &Name = "",
1533 MDNode *FPMathTag = nullptr) {
1534 if (Value *V = foldConstant(Opc, LHS, RHS, Name)) return V;
1535 Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
1536 if (isa<FPMathOperator>(BinOp))
1537 setFPAttrs(BinOp, FPMathTag, FMF);
1538 return Insert(BinOp, Name);
1539 }
1540
1541 CallInst *CreateConstrainedFPBinOp(
1542 Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
1543 const Twine &Name = "", MDNode *FPMathTag = nullptr,
1544 Optional<RoundingMode> Rounding = None,
1545 Optional<fp::ExceptionBehavior> Except = None);
1546
1547 Value *CreateNeg(Value *V, const Twine &Name = "",
1548 bool HasNUW = false, bool HasNSW = false) {
1549 if (auto *VC = dyn_cast<Constant>(V))
1550 return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name);
1551 BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name);
1552 if (HasNUW) BO->setHasNoUnsignedWrap();
1553 if (HasNSW) BO->setHasNoSignedWrap();
1554 return BO;
1555 }
1556
1557 Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
1558 return CreateNeg(V, Name, false, true);
1559 }
1560
1561 Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
1562 return CreateNeg(V, Name, true, false);
1563 }
1564
1565 Value *CreateFNeg(Value *V, const Twine &Name = "",
1566 MDNode *FPMathTag = nullptr) {
1567 if (auto *VC = dyn_cast<Constant>(V))
1568 return Insert(Folder.CreateFNeg(VC), Name);
1569 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
1570 Name);
1571 }
1572
1573 /// Copy fast-math-flags from an instruction rather than using the builder's
1574 /// default FMF.
1575 Value *CreateFNegFMF(Value *V, Instruction *FMFSource,
1576 const Twine &Name = "") {
1577 if (auto *VC = dyn_cast<Constant>(V))
1578 return Insert(Folder.CreateFNeg(VC), Name);
1579 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr,
1580 FMFSource->getFastMathFlags()),
1581 Name);
1582 }
1583
1584 Value *CreateNot(Value *V, const Twine &Name = "") {
1585 if (auto *VC = dyn_cast<Constant>(V))
1586 return Insert(Folder.CreateNot(VC), Name);
1587 return Insert(BinaryOperator::CreateNot(V), Name);
1588 }
1589
1590 Value *CreateUnOp(Instruction::UnaryOps Opc,
1591 Value *V, const Twine &Name = "",
1592 MDNode *FPMathTag = nullptr) {
1593 if (auto *VC = dyn_cast<Constant>(V))
1594 return Insert(Folder.CreateUnOp(Opc, VC), Name);
1595 Instruction *UnOp = UnaryOperator::Create(Opc, V);
1596 if (isa<FPMathOperator>(UnOp))
1597 setFPAttrs(UnOp, FPMathTag, FMF);
1598 return Insert(UnOp, Name);
1599 }
1600
1601  /// Create either a UnaryOperator or BinaryOperator depending on \p Opc.
1602  /// The correct number of operands must be passed accordingly.
1603 Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
1604 const Twine &Name = "", MDNode *FPMathTag = nullptr);
1605
1606 //===--------------------------------------------------------------------===//
1607 // Instruction creation methods: Memory Instructions
1608 //===--------------------------------------------------------------------===//
1609
1610 AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace,
1611 Value *ArraySize = nullptr, const Twine &Name = "") {
1612 const DataLayout &DL = BB->getModule()->getDataLayout();
1613 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1614 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1615 }
1616
1617 AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
1618 const Twine &Name = "") {
1619 const DataLayout &DL = BB->getModule()->getDataLayout();
1620 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1621 unsigned AddrSpace = DL.getAllocaAddrSpace();
1622 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1623 }
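[Editorial sketch] Allocating and initializing a stack slot with the builders above (helper name hypothetical):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Reserve a preferred-aligned i32 stack slot in the module's alloca
// address space and zero-initialize it.
AllocaInst *makeZeroedSlot(IRBuilder<> &B) {
  AllocaInst *Slot = B.CreateAlloca(B.getInt32Ty(), nullptr, "slot");
  B.CreateStore(B.getInt32(0), Slot);
  return Slot;
}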
1624
1625 /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
1626 /// converting the string to 'bool' for the isVolatile parameter.
1627 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
1628 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
1629 }
1630
1631 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
1632 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
1633 }
1634
1635 LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
1636 const Twine &Name = "") {
1637 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name);
1638 }
1639
1640 // Deprecated [opaque pointer types]
1641 LoadInst *CreateLoad(Value *Ptr, const char *Name) {
1642 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1643 }
1644
1645 // Deprecated [opaque pointer types]
1646 LoadInst *CreateLoad(Value *Ptr, const Twine &Name = "") {
1647 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1648 }
1649
1650 // Deprecated [opaque pointer types]
1651 LoadInst *CreateLoad(Value *Ptr, bool isVolatile, const Twine &Name = "") {
1652 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, isVolatile,
1653 Name);
1654 }
1655
1656 StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
1657 return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile);
1658 }
1659
1660  LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
1661                                                         unsigned Align,
1662                                                         const char *Name),
1663                            "Use the version that takes MaybeAlign instead") {
1664 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), Name);
1665 }
1666 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1667 const char *Name) {
1668 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
1669 }
1670
1671  LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
1672                                                         unsigned Align,
1673                                                         const Twine &Name = ""),
1674                            "Use the version that takes MaybeAlign instead") {
1675 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), Name);
1676 }
1677 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1678 const Twine &Name = "") {
1679 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
1680 }
1681
1682  LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
1683                                                         unsigned Align,
1684                                                         bool isVolatile,
1685                                                         const Twine &Name = ""),
1686                            "Use the version that takes MaybeAlign instead") {
1687 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), isVolatile, Name);
1688 }
1689 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1690 bool isVolatile, const Twine &Name = "") {
1691 if (!Align) {
1692 const DataLayout &DL = BB->getModule()->getDataLayout();
1693 Align = DL.getABITypeAlign(Ty);
1694 }
1695 return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name);
1696 }
1697
1698 // Deprecated [opaque pointer types]
1699  LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1700                                                         unsigned Align,
1701                                                         const char *Name),
1702                            "Use the version that takes MaybeAlign instead") {
1703 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1704 MaybeAlign(Align), Name);
1705 }
1706 // Deprecated [opaque pointer types]
1707  LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1708                                                         unsigned Align,
1709                                                         const Twine &Name = ""),
1710                            "Use the version that takes MaybeAlign instead") {
1711 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1712 MaybeAlign(Align), Name);
1713 }
1714 // Deprecated [opaque pointer types]
1715  LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1716                                                         unsigned Align,
1717                                                         bool isVolatile,
1718                                                         const Twine &Name = ""),
1719                            "Use the version that takes MaybeAlign instead") {
1720 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1721 MaybeAlign(Align), isVolatile, Name);
1722 }
1723 // Deprecated [opaque pointer types]
1724 LoadInst *CreateAlignedLoad(Value *Ptr, MaybeAlign Align, const char *Name) {
1725 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1726 Align, Name);
1727 }
1728 // Deprecated [opaque pointer types]
1729 LoadInst *CreateAlignedLoad(Value *Ptr, MaybeAlign Align,
1730 const Twine &Name = "") {
1731 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1732 Align, Name);
1733 }
1734 // Deprecated [opaque pointer types]
1735 LoadInst *CreateAlignedLoad(Value *Ptr, MaybeAlign Align, bool isVolatile,
1736 const Twine &Name = "") {
1737 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1738 Align, isVolatile, Name);
1739 }
1740
1741  LLVM_ATTRIBUTE_DEPRECATED(
1742      StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align,
1743                                    bool isVolatile = false),
1744      "Use the version that takes MaybeAlign instead") {
1745 return CreateAlignedStore(Val, Ptr, MaybeAlign(Align), isVolatile);
1746 }
1747 StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align,
1748 bool isVolatile = false) {
1749 if (!Align) {
1750 const DataLayout &DL = BB->getModule()->getDataLayout();
1751 Align = DL.getABITypeAlign(Val->getType());
1752 }
1753 return Insert(new StoreInst(Val, Ptr, isVolatile, *Align));
1754 }
1755 FenceInst *CreateFence(AtomicOrdering Ordering,
1756 SyncScope::ID SSID = SyncScope::System,
1757 const Twine &Name = "") {
1758 return Insert(new FenceInst(Context, Ordering, SSID), Name);
1759 }
1760
1761 AtomicCmpXchgInst *CreateAtomicCmpXchg(
1762 Value *Ptr, Value *Cmp, Value *New, AtomicOrdering SuccessOrdering,
1763 AtomicOrdering FailureOrdering, SyncScope::ID SSID = SyncScope::System) {
1764 const DataLayout &DL = BB->getModule()->getDataLayout();
1765 Align Alignment(DL.getTypeStoreSize(New->getType()));
1766 return Insert(new AtomicCmpXchgInst(
1767 Ptr, Cmp, New, Alignment, SuccessOrdering, FailureOrdering, SSID));
1768 }
1769
1770 AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val,
1771 AtomicOrdering Ordering,
1772 SyncScope::ID SSID = SyncScope::System) {
1773 const DataLayout &DL = BB->getModule()->getDataLayout();
1774 Align Alignment(DL.getTypeStoreSize(Val->getType()));
1775 return Insert(new AtomicRMWInst(Op, Ptr, Val, Alignment, Ordering, SSID));
1776 }
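Note that both atomic helpers above derive the access alignment from the operand's store size in the DataLayout rather than taking it as a parameter. A hedged sketch, with B, Ptr, and an i64 Val assumed:

  // getTypeStoreSize(i64) == 8, so the created atomicrmw gets Align(8):
  B.CreateAtomicRMW(AtomicRMWInst::Add, Ptr, Val,
                    AtomicOrdering::SequentiallyConsistent);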
1777
1778 Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1779 const Twine &Name = "") {
1780 return CreateGEP(nullptr, Ptr, IdxList, Name);
1781 }
1782
1783 Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1784 const Twine &Name = "") {
1785 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1786 // Every index must be constant.
1787 size_t i, e;
1788 for (i = 0, e = IdxList.size(); i != e; ++i)
1789 if (!isa<Constant>(IdxList[i]))
1790 break;
1791 if (i == e)
1792 return Insert(Folder.CreateGetElementPtr(Ty, PC, IdxList), Name);
1793 }
1794 return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name);
1795 }
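The helper above folds to a constant only when the base pointer and every index are Constants; any non-constant operand produces a real getelementptr instruction. A sketch under assumed names (B is an IRBuilder<>, ConstantBase a Constant of pointer type, Base and N runtime values):

  // All-constant operands: folded by the ConstantFolder, nothing inserted.
  Value *Folded = B.CreateGEP(B.getInt32Ty(), ConstantBase, {B.getInt32(1)});
  // Runtime index N: a getelementptr instruction is inserted instead.
  Value *Dyn = B.CreateGEP(B.getInt32Ty(), Base, {N});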
1796
1797 Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1798 const Twine &Name = "") {
1799 return CreateInBoundsGEP(nullptr, Ptr, IdxList, Name);
1800 }
1801
1802 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1803 const Twine &Name = "") {
1804 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1805 // Every index must be constant.
1806 size_t i, e;
1807 for (i = 0, e = IdxList.size(); i != e; ++i)
1808 if (!isa<Constant>(IdxList[i]))
1809 break;
1810 if (i == e)
1811 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IdxList),
1812 Name);
1813 }
1814 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name);
1815 }
1816
1817 Value *CreateGEP(Value *Ptr, Value *Idx, const Twine &Name = "") {
1818 return CreateGEP(nullptr, Ptr, Idx, Name);
1819 }
1820
1821 Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") {
1822 if (auto *PC = dyn_cast<Constant>(Ptr))
1823 if (auto *IC = dyn_cast<Constant>(Idx))
1824 return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name);
1825 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1826 }
1827
1828 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
1829 const Twine &Name = "") {
1830 if (auto *PC = dyn_cast<Constant>(Ptr))
1831 if (auto *IC = dyn_cast<Constant>(Idx))
1832 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name);
1833 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1834 }
1835
1836 Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name = "") {
1837 return CreateConstGEP1_32(nullptr, Ptr, Idx0, Name);
1838 }
1839
1840 Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1841 const Twine &Name = "") {
1842 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1843
1844 if (auto *PC = dyn_cast<Constant>(Ptr))
1845 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1846
1847 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1848 }
1849
1850 Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1851 const Twine &Name = "") {
1852 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1853
1854 if (auto *PC = dyn_cast<Constant>(Ptr))
1855 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1856
1857 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1858 }
1859
1860 Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
1861 const Twine &Name = "") {
1862 Value *Idxs[] = {
1863 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1864 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1865 };
1866
1867 if (auto *PC = dyn_cast<Constant>(Ptr))
1868 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1869
1870 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1871 }
1872
1873 Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
1874 unsigned Idx1, const Twine &Name = "") {
1875 Value *Idxs[] = {
1876 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1877 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1878 };
1879
1880 if (auto *PC = dyn_cast<Constant>(Ptr))
1881 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1882
1883 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1884 }
1885
1886 Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1887 const Twine &Name = "") {
1888 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1889
1890 if (auto *PC = dyn_cast<Constant>(Ptr))
1891 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1892
1893 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1894 }
1895
1896 Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const Twine &Name = "") {
1897 return CreateConstGEP1_64(nullptr, Ptr, Idx0, Name);
1898 }
1899
1900 Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1901 const Twine &Name = "") {
1902 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1903
1904 if (auto *PC = dyn_cast<Constant>(Ptr))
1905 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1906
1907 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1908 }
1909
1910 Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,
1911 const Twine &Name = "") {
1912 return CreateConstInBoundsGEP1_64(nullptr, Ptr, Idx0, Name);
1913 }
1914
1915 Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1916 const Twine &Name = "") {
1917 Value *Idxs[] = {
1918 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1919 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1920 };
1921
1922 if (auto *PC = dyn_cast<Constant>(Ptr))
32. Assuming 'PC' is null
33. Taking false branch
1923 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1924
1925 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
34. Passing null pointer value via 2nd parameter 'Ptr'
35. Calling 'GetElementPtrInst::Create'
1926 }
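Steps 32-35 above trace the reported bug path: the analyzer assumes the dyn_cast result PC is null, skips the folding branch, and then passes a null Ptr into GetElementPtrInst::Create. Callers reaching this overload must therefore guarantee a non-null base pointer; a hypothetical guard (names Base, Int8Ty, and Offset are assumed):

  // Hypothetical caller-side guard before building the GEP:
  if (Base)
    B.CreateConstGEP2_64(Int8Ty, Base, 0, Offset);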
1927
1928 Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1929 const Twine &Name = "") {
1930 return CreateConstGEP2_64(nullptr, Ptr, Idx0, Idx1, Name);
1931 }
1932
1933 Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1934 uint64_t Idx1, const Twine &Name = "") {
1935 Value *Idxs[] = {
1936 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1937 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1938 };
1939
1940 if (auto *PC = dyn_cast<Constant>(Ptr))
1941 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1942
1943 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1944 }
1945
1946 Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1947 const Twine &Name = "") {
1948 return CreateConstInBoundsGEP2_64(nullptr, Ptr, Idx0, Idx1, Name);
1949 }
1950
1951 Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
1952 const Twine &Name = "") {
1953 return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
1954 }
1955
1956 Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine &Name = "") {
1957 return CreateConstInBoundsGEP2_32(nullptr, Ptr, 0, Idx, Name);
1958 }
1959
1960 /// Same as CreateGlobalString, but return a pointer with "i8*" type
1961 /// instead of a pointer to array of i8.
1962 ///
1963   /// If no module is given via \p M, it is taken from the insertion point basic
1964 /// block.
1965 Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
1966 unsigned AddressSpace = 0,
1967 Module *M = nullptr) {
1968 GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace, M);
1969 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
1970 Constant *Indices[] = {Zero, Zero};
1971 return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV,
1972 Indices);
1973 }
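A usage sketch of the helper above, assuming an IRBuilder<> B whose insert block lives in a module:

  // Creates a private global [6 x i8] holding "hello\0" and decays it to an
  // i8* via a constant inbounds GEP with indices {0, 0}:
  Constant *Msg = B.CreateGlobalStringPtr("hello", "msg");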
1974
1975 //===--------------------------------------------------------------------===//
1976 // Instruction creation methods: Cast/Conversion Operators
1977 //===--------------------------------------------------------------------===//
1978
1979 Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
1980 return CreateCast(Instruction::Trunc, V, DestTy, Name);
1981 }
1982
1983 Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
1984 return CreateCast(Instruction::ZExt, V, DestTy, Name);
1985 }
1986
1987 Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
1988 return CreateCast(Instruction::SExt, V, DestTy, Name);
1989 }
1990
1991 /// Create a ZExt or Trunc from the integer value V to DestTy. Return
1992 /// the value untouched if the type of V is already DestTy.
1993 Value *CreateZExtOrTrunc(Value *V, Type *DestTy,
1994 const Twine &Name = "") {
1995     assert(V->getType()->isIntOrIntVectorTy() &&
1996            DestTy->isIntOrIntVectorTy() &&
1997            "Can only zero extend/truncate integers!");
1998 Type *VTy = V->getType();
1999 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2000 return CreateZExt(V, DestTy, Name);
2001 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2002 return CreateTrunc(V, DestTy, Name);
2003 return V;
2004 }
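For example, with the helper above (B and an i8 value V8 assumed):

  Value *W = B.CreateZExtOrTrunc(V8, B.getInt32Ty()); // i8 -> i32: emits zext
  // i64 -> i32 would emit trunc; i32 -> i32 returns the value unchanged.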
2005
2006 /// Create a SExt or Trunc from the integer value V to DestTy. Return
2007 /// the value untouched if the type of V is already DestTy.
2008 Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
2009 const Twine &Name = "") {
2010     assert(V->getType()->isIntOrIntVectorTy() &&
2011            DestTy->isIntOrIntVectorTy() &&
2012            "Can only sign extend/truncate integers!");
2013 Type *VTy = V->getType();
2014 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2015 return CreateSExt(V, DestTy, Name);
2016 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2017 return CreateTrunc(V, DestTy, Name);
2018 return V;
2019 }
2020
2021 Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") {
2022 if (IsFPConstrained)
2023 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui,
2024 V, DestTy, nullptr, Name);
2025 return CreateCast(Instruction::FPToUI, V, DestTy, Name);
2026 }
2027
2028 Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") {
2029 if (IsFPConstrained)
2030 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi,
2031 V, DestTy, nullptr, Name);
2032 return CreateCast(Instruction::FPToSI, V, DestTy, Name);
2033 }
2034
2035 Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
2036 if (IsFPConstrained)
2037 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_uitofp,
2038 V, DestTy, nullptr, Name);
2039 return CreateCast(Instruction::UIToFP, V, DestTy, Name);
2040 }
2041
2042 Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
2043 if (IsFPConstrained)
2044 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_sitofp,
2045 V, DestTy, nullptr, Name);
2046 return CreateCast(Instruction::SIToFP, V, DestTy, Name);
2047 }
2048
2049 Value *CreateFPTrunc(Value *V, Type *DestTy,
2050 const Twine &Name = "") {
2051 if (IsFPConstrained)
2052 return CreateConstrainedFPCast(
2053 Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
2054 Name);
2055 return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
2056 }
2057
2058 Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
2059 if (IsFPConstrained)
2060 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext,
2061 V, DestTy, nullptr, Name);
2062 return CreateCast(Instruction::FPExt, V, DestTy, Name);
2063 }
2064
2065 Value *CreatePtrToInt(Value *V, Type *DestTy,
2066 const Twine &Name = "") {
2067 return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
2068 }
2069
2070 Value *CreateIntToPtr(Value *V, Type *DestTy,
2071 const Twine &Name = "") {
2072 return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
2073 }
2074
2075 Value *CreateBitCast(Value *V, Type *DestTy,
2076 const Twine &Name = "") {
2077 return CreateCast(Instruction::BitCast, V, DestTy, Name);
2078 }
2079
2080 Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
2081 const Twine &Name = "") {
2082 return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
2083 }
2084
2085 Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
2086 const Twine &Name = "") {
2087 if (V->getType() == DestTy)
2088 return V;
2089 if (auto *VC = dyn_cast<Constant>(V))
2090 return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
2091 return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
2092 }
2093
2094 Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
2095 const Twine &Name = "") {
2096 if (V->getType() == DestTy)
2097 return V;
2098 if (auto *VC = dyn_cast<Constant>(V))
2099 return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
2100 return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
2101 }
2102
2103 Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
2104 const Twine &Name = "") {
2105 if (V->getType() == DestTy)
2106 return V;
2107 if (auto *VC = dyn_cast<Constant>(V))
2108 return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
2109 return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
2110 }
2111
2112 Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
2113 const Twine &Name = "") {
2114 if (V->getType() == DestTy)
2115 return V;
2116 if (auto *VC = dyn_cast<Constant>(V))
2117 return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
2118 return Insert(CastInst::Create(Op, V, DestTy), Name);
2119 }
2120
2121 Value *CreatePointerCast(Value *V, Type *DestTy,
2122 const Twine &Name = "") {
2123 if (V->getType() == DestTy)
2124 return V;
2125 if (auto *VC = dyn_cast<Constant>(V))
2126 return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
2127 return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
2128 }
2129
2130 Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
2131 const Twine &Name = "") {
2132 if (V->getType() == DestTy)
2133 return V;
2134
2135 if (auto *VC = dyn_cast<Constant>(V)) {
2136 return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
2137 Name);
2138 }
2139
2140 return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
2141 Name);
2142 }
2143
2144 Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
2145 const Twine &Name = "") {
2146 if (V->getType() == DestTy)
2147 return V;
2148 if (auto *VC = dyn_cast<Constant>(V))
2149 return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
2150 return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
2151 }
2152
2153 Value *CreateBitOrPointerCast(Value *V, Type *DestTy,
2154 const Twine &Name = "") {
2155 if (V->getType() == DestTy)
2156 return V;
2157 if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy())
2158 return CreatePtrToInt(V, DestTy, Name);
2159 if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy())
2160 return CreateIntToPtr(V, DestTy, Name);
2161
2162 return CreateBitCast(V, DestTy, Name);
2163 }
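A short dispatch sketch for the helper above (B and a pointer-typed value P are assumed):

  Value *I = B.CreateBitOrPointerCast(P, B.getInt64Ty()); // pointer -> int: ptrtoint
  Value *Q = B.CreateBitOrPointerCast(I, P->getType());   // int -> pointer: inttoptr
  // Other same-size casts (e.g. i32 <-> float) fall through to a plain bitcast.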
2164
2165 Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
2166 if (V->getType() == DestTy)
2167 return V;
2168 if (auto *VC = dyn_cast<Constant>(V))
2169 return Insert(Folder.CreateFPCast(VC, DestTy), Name);
2170 return Insert(CastInst::CreateFPCast(V, DestTy), Name);
2171 }
2172
2173 CallInst *CreateConstrainedFPCast(
2174 Intrinsic::ID ID, Value *V, Type *DestTy,
2175 Instruction *FMFSource = nullptr, const Twine &Name = "",
2176 MDNode *FPMathTag = nullptr,
2177 Optional<RoundingMode> Rounding = None,
2178 Optional<fp::ExceptionBehavior> Except = None);
2179
2180   // Provided so that 'CreateIntCast(Ptr, Ptr, "...")' is a compile-time
2181   // error instead of silently converting the string literal to bool for the
2182   // isSigned parameter.
2183 Value *CreateIntCast(Value *, Type *, const char *) = delete;
2184
2185 //===--------------------------------------------------------------------===//
2186 // Instruction creation methods: Compare Instructions
2187 //===--------------------------------------------------------------------===//
2188
2189 Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
2190 return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
2191 }
2192
2193 Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
2194 return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
2195 }
2196
2197 Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2198 return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
2199 }
2200
2201 Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2202 return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
2203 }
2204
2205 Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
2206 return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
2207 }
2208
2209 Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
2210 return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
2211 }
2212
2213 Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2214 return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
2215 }
2216
2217 Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2218 return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
2219 }
2220
2221 Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
2222 return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
2223 }
2224
2225 Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
2226 return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
2227 }
2228
2229 Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2230 MDNode *FPMathTag = nullptr) {
2231 return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
2232 }
2233
2234 Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
2235 MDNode *FPMathTag = nullptr) {
2236 return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
2237 }
2238
2239 Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
2240 MDNode *FPMathTag = nullptr) {
2241 return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
2242 }
2243
2244 Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
2245 MDNode *FPMathTag = nullptr) {
2246 return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
2247 }
2248
2249 Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
2250 MDNode *FPMathTag = nullptr) {
2251 return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
2252 }
2253
2254 Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
2255 MDNode *FPMathTag = nullptr) {
2256 return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
2257 }
2258
2259 Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
2260 MDNode *FPMathTag = nullptr) {
2261 return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
2262 }
2263
2264 Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
2265 MDNode *FPMathTag = nullptr) {
2266 return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
2267 }
2268
2269 Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2270 MDNode *FPMathTag = nullptr) {
2271 return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
2272 }
2273
2274 Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
2275 MDNode *FPMathTag = nullptr) {
2276 return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
2277 }
2278
2279 Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
2280 MDNode *FPMathTag = nullptr) {
2281 return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
2282 }
2283
2284 Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
2285 MDNode *FPMathTag = nullptr) {
2286 return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
2287 }
2288
2289 Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
2290 MDNode *FPMathTag = nullptr) {
2291 return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
2292 }
2293
2294 Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
2295 MDNode *FPMathTag = nullptr) {
2296 return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
2297 }
2298
2299 Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2300 const Twine &Name = "") {
2301 if (auto *LC = dyn_cast<Constant>(LHS))
2302 if (auto *RC = dyn_cast<Constant>(RHS))
2303 return Insert(Folder.CreateICmp(P, LC, RC), Name);
2304 return Insert(new ICmpInst(P, LHS, RHS), Name);
2305 }
2306
2307 // Create a quiet floating-point comparison (i.e. one that raises an FP
2308 // exception only in the case where an input is a signaling NaN).
2309 // Note that this differs from CreateFCmpS only if IsFPConstrained is true.
2310 Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2311 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2312 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, false);
2313 }
2314
2315 Value *CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
2316 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2317 return CmpInst::isFPPredicate(Pred)
2318 ? CreateFCmp(Pred, LHS, RHS, Name, FPMathTag)
2319 : CreateICmp(Pred, LHS, RHS, Name);
2320 }
2321
2322 // Create a signaling floating-point comparison (i.e. one that raises an FP
2323 // exception whenever an input is any NaN, signaling or quiet).
2324 // Note that this differs from CreateFCmp only if IsFPConstrained is true.
2325 Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS,
2326 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2327 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, true);
2328 }
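The quiet/signaling distinction above only changes the emitted IR under constrained FP; a hedged sketch (B, X, and Y assumed):

  Value *Quiet = B.CreateFCmp(FCmpInst::FCMP_OLT, X, Y);  // constrained: quiet compare
  Value *Sig   = B.CreateFCmpS(FCmpInst::FCMP_OLT, X, Y); // constrained: signaling compare
  // With IsFPConstrained false, both produce an ordinary fcmp olt.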
2329
2330private:
2331 // Helper routine to create either a signaling or a quiet FP comparison.
2332 Value *CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, Value *RHS,
2333 const Twine &Name, MDNode *FPMathTag,
2334 bool IsSignaling);
2335
2336public:
2337 CallInst *CreateConstrainedFPCmp(
2338 Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
2339 const Twine &Name = "", Optional<fp::ExceptionBehavior> Except = None);
2340
2341 //===--------------------------------------------------------------------===//
2342 // Instruction creation methods: Other Instructions
2343 //===--------------------------------------------------------------------===//
2344
2345 PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
2346 const Twine &Name = "") {
2347 PHINode *Phi = PHINode::Create(Ty, NumReservedValues);
2348 if (isa<FPMathOperator>(Phi))
2349 setFPAttrs(Phi, nullptr /* MDNode* */, FMF);
2350 return Insert(Phi, Name);
2351 }
2352
2353 CallInst *CreateCall(FunctionType *FTy, Value *Callee,
2354 ArrayRef<Value *> Args = None, const Twine &Name = "",
2355 MDNode *FPMathTag = nullptr) {
2356 CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
2357 if (IsFPConstrained)
2358 setConstrainedFPCallAttr(CI);
2359 if (isa<FPMathOperator>(CI))
2360 setFPAttrs(CI, FPMathTag, FMF);
2361 return Insert(CI, Name);
2362 }
2363
2364 CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
2365 ArrayRef<OperandBundleDef> OpBundles,
2366 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2367 CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
2368 if (IsFPConstrained)
2369 setConstrainedFPCallAttr(CI);
2370 if (isa<FPMathOperator>(CI))
2371 setFPAttrs(CI, FPMathTag, FMF);
2372 return Insert(CI, Name);
2373 }
2374
2375 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = None,
2376 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2377 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name,
2378 FPMathTag);
2379 }
2380
2381 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args,
2382 ArrayRef<OperandBundleDef> OpBundles,
2383 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2384 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args,
2385 OpBundles, Name, FPMathTag);
2386 }
2387
2388 CallInst *CreateConstrainedFPCall(
2389 Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
2390 Optional<RoundingMode> Rounding = None,
2391 Optional<fp::ExceptionBehavior> Except = None);
2392
2393 Value *CreateSelect(Value *C, Value *True, Value *False,
2394 const Twine &Name = "", Instruction *MDFrom = nullptr);
2395
2396 VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
2397 return Insert(new VAArgInst(List, Ty), Name);
2398 }
2399
2400 Value *CreateExtractElement(Value *Vec, Value *Idx,
2401 const Twine &Name = "") {
2402 if (auto *VC = dyn_cast<Constant>(Vec))
2403 if (auto *IC = dyn_cast<Constant>(Idx))
2404 return Insert(Folder.CreateExtractElement(VC, IC), Name);
2405 return Insert(ExtractElementInst::Create(Vec, Idx), Name);
2406 }
2407
2408 Value *CreateExtractElement(Value *Vec, uint64_t Idx,
2409 const Twine &Name = "") {
2410 return CreateExtractElement(Vec, getInt64(Idx), Name);
2411 }
2412
2413 Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
2414 const Twine &Name = "") {
2415 if (auto *VC = dyn_cast<Constant>(Vec))
2416 if (auto *NC = dyn_cast<Constant>(NewElt))
2417 if (auto *IC = dyn_cast<Constant>(Idx))
2418 return Insert(Folder.CreateInsertElement(VC, NC, IC), Name);
2419 return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
2420 }
2421
2422 Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
2423 const Twine &Name = "") {
2424 return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
2425 }
2426
2427 Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
2428 const Twine &Name = "") {
2429 SmallVector<int, 16> IntMask;
2430 ShuffleVectorInst::getShuffleMask(cast<Constant>(Mask), IntMask);
2431 return CreateShuffleVector(V1, V2, IntMask, Name);
2432 }
2433
2434   LLVM_ATTRIBUTE_DEPRECATED(Value *CreateShuffleVector(Value *V1, Value *V2,
2435                                                        ArrayRef<uint32_t> Mask,
2436                                                        const Twine &Name = ""),
2437                             "Pass indices as 'int' instead") {
2438 SmallVector<int, 16> IntMask;
2439 IntMask.assign(Mask.begin(), Mask.end());
2440 return CreateShuffleVector(V1, V2, IntMask, Name);
2441 }
2442
2443 /// See class ShuffleVectorInst for a description of the mask representation.
2444 Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask,
2445 const Twine &Name = "") {
2446 if (auto *V1C = dyn_cast<Constant>(V1))
2447 if (auto *V2C = dyn_cast<Constant>(V2))
2448 return Insert(Folder.CreateShuffleVector(V1C, V2C, Mask), Name);
2449 return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
2450 }
2451
2452 /// Create a unary shuffle. The second vector operand of the IR instruction
2453 /// is undefined.
2454 Value *CreateShuffleVector(Value *V, ArrayRef<int> Mask,
2455 const Twine &Name = "") {
2456 return CreateShuffleVector(V, UndefValue::get(V->getType()), Mask, Name);
2457 }
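An illustration of the unary form above, assuming a 4-element vector value V:

  // Reverses the lanes; the second shuffle operand is undef:
  Value *Rev = B.CreateShuffleVector(V, {3, 2, 1, 0}, "rev");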
2458
2459 Value *CreateExtractValue(Value *Agg,
2460 ArrayRef<unsigned> Idxs,
2461 const Twine &Name = "") {
2462 if (auto *AggC = dyn_cast<Constant>(Agg))
2463 return Insert(Folder.CreateExtractValue(AggC, Idxs), Name);
2464 return Insert(ExtractValueInst::Create(Agg, Idxs), Name);
2465 }
2466
2467 Value *CreateInsertValue(Value *Agg, Value *Val,
2468 ArrayRef<unsigned> Idxs,
2469 const Twine &Name = "") {
2470 if (auto *AggC = dyn_cast<Constant>(Agg))
2471 if (auto *ValC = dyn_cast<Constant>(Val))
2472 return Insert(Folder.CreateInsertValue(AggC, ValC, Idxs), Name);
2473 return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name);
2474 }
2475
2476 LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses,
2477 const Twine &Name = "") {
2478 return Insert(LandingPadInst::Create(Ty, NumClauses), Name);
2479 }
2480
2481 Value *CreateFreeze(Value *V, const Twine &Name = "") {
2482 return Insert(new FreezeInst(V), Name);
2483 }
2484
2485 //===--------------------------------------------------------------------===//
2486 // Utility creation methods
2487 //===--------------------------------------------------------------------===//
2488
2489 /// Return an i1 value testing if \p Arg is null.
2490 Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
2491 return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()),
2492 Name);
2493 }
2494
2495 /// Return an i1 value testing if \p Arg is not null.
2496 Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
2497 return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()),
2498 Name);
2499 }
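Both predicates above compare against the type's null value; a usage sketch (P, UseBB, and SkipBB are assumed):

  Value *NonNull = B.CreateIsNotNull(P);   // icmp ne P, null
  B.CreateCondBr(NonNull, UseBB, SkipBB);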
2500
2501 /// Return the i64 difference between two pointer values, dividing out
2502 /// the size of the pointed-to objects.
2503 ///
2504   /// This is intended to implement C-style pointer subtraction. As such, the
2505   /// pointers must be appropriately aligned for their element types and must
2506   /// point into the same object.
2507 Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = "");
2508
2509   /// Create a launder.invariant.group intrinsic call. If the type of \p Ptr
2510   /// differs from a pointer to i8, it is cast to a pointer to i8 in the same
2511   /// address space before the call and cast back to the original type after.
2512 Value *CreateLaunderInvariantGroup(Value *Ptr);
2513
2514   /// Create a strip.invariant.group intrinsic call. If the type of \p Ptr
2515   /// differs from a pointer to i8, it is cast to a pointer to i8 in the same
2516   /// address space before the call and cast back to the original type after.
2517 Value *CreateStripInvariantGroup(Value *Ptr);
2518
2519   /// Return a vector value that contains \p V broadcast to \p NumElts
2520   /// elements.
2521 Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");
2522
2523   /// Return a vector value that contains \p V broadcast to \p EC
2524   /// elements.
2525 Value *CreateVectorSplat(ElementCount EC, Value *V, const Twine &Name = "");
2526
2527 /// Return a value that has been extracted from a larger integer type.
2528 Value *CreateExtractInteger(const DataLayout &DL, Value *From,
2529 IntegerType *ExtractedTy, uint64_t Offset,
2530 const Twine &Name);
2531
2532 Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
2533 unsigned Dimension, unsigned LastIndex,
2534 MDNode *DbgInfo);
2535
2536 Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex,
2537 MDNode *DbgInfo);
2538
2539 Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base,
2540 unsigned Index, unsigned FieldIndex,
2541 MDNode *DbgInfo);
2542
2543private:
2544 /// Helper function that creates an assume intrinsic call that
2545 /// represents an alignment assumption on the provided pointer \p PtrValue
2546 /// with offset \p OffsetValue and alignment value \p AlignValue.
2547 CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
2548 Value *PtrValue, Value *AlignValue,
2549 Value *OffsetValue);
2550
2551public:
2552 /// Create an assume intrinsic call that represents an alignment
2553 /// assumption on the provided pointer.
2554 ///
2555 /// An optional offset can be provided, and if it is provided, the offset
2556 /// must be subtracted from the provided pointer to get the pointer with the
2557 /// specified alignment.
2558 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2559 unsigned Alignment,
2560 Value *OffsetValue = nullptr);
2561
2562 /// Create an assume intrinsic call that represents an alignment
2563 /// assumption on the provided pointer.
2564 ///
2565 /// An optional offset can be provided, and if it is provided, the offset
2566 /// must be subtracted from the provided pointer to get the pointer with the
2567 /// specified alignment.
2568 ///
2569 /// This overload handles the condition where the Alignment is dependent
2570 /// on an existing value rather than a static value.
2571 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2572 Value *Alignment,
2573 Value *OffsetValue = nullptr);
2574};
2575
2576/// This provides a uniform API for creating instructions and inserting
2577/// them into a basic block: either at the end of a BasicBlock, or at a specific
2578/// iterator location in a block.
2579///
2580/// Note that the builder does not expose the full generality of LLVM
2581/// instructions. For access to extra instruction properties, use the mutators
2582/// (e.g. setVolatile) on the instructions after they have been
2583/// created. Convenience state exists to specify fast-math flags and fp-math
2584/// tags.
2585///
2586/// The first template argument specifies a class to use for creating constants.
2587/// This defaults to creating minimally folded constants. The second template
2588/// argument allows clients to specify custom insertion hooks that are called on
2589/// every newly created instruction.
2590template <typename FolderTy = ConstantFolder,
2591 typename InserterTy = IRBuilderDefaultInserter>
2592class IRBuilder : public IRBuilderBase {
2593private:
2594 FolderTy Folder;
2595 InserterTy Inserter;
2596
2597public:
2598 IRBuilder(LLVMContext &C, FolderTy Folder, InserterTy Inserter = InserterTy(),
2599 MDNode *FPMathTag = nullptr,
2600 ArrayRef<OperandBundleDef> OpBundles = None)
2601 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles),
2602 Folder(Folder), Inserter(Inserter) {}
2603
2604 explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
2605 ArrayRef<OperandBundleDef> OpBundles = None)
2606 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}
2607
2608 explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
2609 MDNode *FPMathTag = nullptr,
2610 ArrayRef<OperandBundleDef> OpBundles = None)
2611 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2612 FPMathTag, OpBundles), Folder(Folder) {
2613 SetInsertPoint(TheBB);
2614 }
2615
2616 explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
2617 ArrayRef<OperandBundleDef> OpBundles = None)
2618 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2619 FPMathTag, OpBundles) {
2620 SetInsertPoint(TheBB);
2621 }
2622
2623 explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
2624 ArrayRef<OperandBundleDef> OpBundles = None)
2625 : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter,
2626 FPMathTag, OpBundles) {
2627 SetInsertPoint(IP);
2628 }
2629
2630 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
2631 MDNode *FPMathTag = nullptr,
2632 ArrayRef<OperandBundleDef> OpBundles = None)
2633 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2634 FPMathTag, OpBundles), Folder(Folder) {
2635 SetInsertPoint(TheBB, IP);
2636 }
2637
2638 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
2639 MDNode *FPMathTag = nullptr,
2640 ArrayRef<OperandBundleDef> OpBundles = None)
2641 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2642 FPMathTag, OpBundles) {
2643 SetInsertPoint(TheBB, IP);
2644 }
2645
2646 /// Avoid copying the full IRBuilder. Prefer using InsertPointGuard
2647 /// or FastMathFlagGuard instead.
2648 IRBuilder(const IRBuilder &) = delete;
2649
2650 InserterTy &getInserter() { return Inserter; }
2651};
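A minimal construction sketch for the class above (all names are assumed; a real translation unit also needs the usual LLVM headers):

  LLVMContext Ctx;
  Module M("m", Ctx);
  FunctionType *FT = FunctionType::get(Type::getVoidTy(Ctx), /*isVarArg=*/false);
  Function *F = Function::Create(FT, Function::ExternalLinkage, "f", &M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> B(BB);   // inserts at the end of "entry"
  B.CreateRetVoid();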
2652
2653// Create wrappers for C Binding types (see CBindingWrapping.h).
2654DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)
2655
2656} // end namespace llvm
2657
2658#endif // LLVM_IR_IRBUILDER_H

/build/llvm-toolchain-snapshot-12~++20201202100633+b276bf5a572/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CallingConv.h"
30#include "llvm/IR/CFG.h"
31#include "llvm/IR/Constant.h"
32#include "llvm/IR/DerivedTypes.h"
33#include "llvm/IR/Function.h"
34#include "llvm/IR/InstrTypes.h"
35#include "llvm/IR/Instruction.h"
36#include "llvm/IR/OperandTraits.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Use.h"
39#include "llvm/IR/User.h"
40#include "llvm/IR/Value.h"
41#include "llvm/Support/AtomicOrdering.h"
42#include "llvm/Support/Casting.h"
43#include "llvm/Support/ErrorHandling.h"
44#include <cassert>
45#include <cstddef>
46#include <cstdint>
47#include <iterator>
48
49namespace llvm {
50
51class APInt;
52class ConstantInt;
53class DataLayout;
54class LLVMContext;
55
56//===----------------------------------------------------------------------===//
57// AllocaInst Class
58//===----------------------------------------------------------------------===//
59
60/// An instruction to allocate memory on the stack.
61class AllocaInst : public UnaryInstruction {
62 Type *AllocatedType;
63
64 using AlignmentField = AlignmentBitfieldElementT<0>;
65 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
66 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
67 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
68 SwiftErrorField>(),
69 "Bitfields must be contiguous");
70
71protected:
72 // Note: Instruction needs to be a friend here to call cloneImpl.
73 friend class Instruction;
74
75 AllocaInst *cloneImpl() const;
76
77public:
78 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
79 const Twine &Name, Instruction *InsertBefore);
80 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
81 const Twine &Name, BasicBlock *InsertAtEnd);
82
83 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
84 Instruction *InsertBefore);
85 AllocaInst(Type *Ty, unsigned AddrSpace,
86 const Twine &Name, BasicBlock *InsertAtEnd);
87
88 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
89 const Twine &Name = "", Instruction *InsertBefore = nullptr);
90 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
91 const Twine &Name, BasicBlock *InsertAtEnd);
92
93 /// Return true if there is an allocation size parameter to the allocation
94 /// instruction that is not 1.
95 bool isArrayAllocation() const;
96
97 /// Get the number of elements allocated. For a simple allocation of a single
98 /// element, this will return a constant 1 value.
99 const Value *getArraySize() const { return getOperand(0); }
100 Value *getArraySize() { return getOperand(0); }
101
102  /// Overload to return the most specific pointer type.
103 PointerType *getType() const {
104 return cast<PointerType>(Instruction::getType());
105 }
106
107 /// Get allocation size in bits. Returns None if size can't be determined,
108 /// e.g. in case of a VLA.
109 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
110
111 /// Return the type that is being allocated by the instruction.
112 Type *getAllocatedType() const { return AllocatedType; }
113  /// For use only in special circumstances that need to generically
114  /// transform a whole instruction (e.g., IR linking and vectorization).
115 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
116
117 /// Return the alignment of the memory that is being allocated by the
118 /// instruction.
119 Align getAlign() const {
120 return Align(1ULL << getSubclassData<AlignmentField>());
121 }
122
123 void setAlignment(Align Align) {
124 setSubclassData<AlignmentField>(Log2(Align));
125 }
126
127  // FIXME: Remove this once the transition to Align is over.
128 unsigned getAlignment() const { return getAlign().value(); }
129
130 /// Return true if this alloca is in the entry block of the function and is a
131 /// constant size. If so, the code generator will fold it into the
132 /// prolog/epilog code, so it is basically free.
133 bool isStaticAlloca() const;
134
135 /// Return true if this alloca is used as an inalloca argument to a call. Such
136 /// allocas are never considered static even if they are in the entry block.
137 bool isUsedWithInAlloca() const {
138 return getSubclassData<UsedWithInAllocaField>();
139 }
140
141 /// Specify whether this alloca is used to represent the arguments to a call.
142 void setUsedWithInAlloca(bool V) {
143 setSubclassData<UsedWithInAllocaField>(V);
144 }
145
146 /// Return true if this alloca is used as a swifterror argument to a call.
147 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
148 /// Specify whether this alloca is used to represent a swifterror.
149 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
150
151 // Methods for support type inquiry through isa, cast, and dyn_cast:
152 static bool classof(const Instruction *I) {
153 return (I->getOpcode() == Instruction::Alloca);
154 }
155 static bool classof(const Value *V) {
156 return isa<Instruction>(V) && classof(cast<Instruction>(V));
157 }
158
159private:
160 // Shadow Instruction::setInstructionSubclassData with a private forwarding
161 // method so that subclasses cannot accidentally use it.
162 template <typename Bitfield>
163 void setSubclassData(typename Bitfield::Type Value) {
164 Instruction::setSubclassData<Bitfield>(Value);
165 }
166};
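The alignment accessors above store the log2 of the alignment in a bitfield, so only power-of-two alignments round-trip; a sketch (AI is assumed to be an AllocaInst*):

  AI->setAlignment(Align(16)); // stores Log2(Align(16)) == 4 in AlignmentField
  Align A = AI->getAlign();    // rebuilds Align(1ULL << 4), i.e. 16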
167
168//===----------------------------------------------------------------------===//
169// LoadInst Class
170//===----------------------------------------------------------------------===//
171
172/// An instruction for reading from memory. This uses the SubclassData field in
173/// Value to store whether or not the load is volatile.
174class LoadInst : public UnaryInstruction {
175 using VolatileField = BoolBitfieldElementT<0>;
176 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
177 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
178 static_assert(
179 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
180 "Bitfields must be contiguous");
181
182 void AssertOK();
183
184protected:
185 // Note: Instruction needs to be a friend here to call cloneImpl.
186 friend class Instruction;
187
188 LoadInst *cloneImpl() const;
189
190public:
191 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
192 Instruction *InsertBefore);
193 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
194 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
195 Instruction *InsertBefore);
196 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
197 BasicBlock *InsertAtEnd);
198 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
199 Align Align, Instruction *InsertBefore = nullptr);
200 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
201 Align Align, BasicBlock *InsertAtEnd);
202 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
203 Align Align, AtomicOrdering Order,
204 SyncScope::ID SSID = SyncScope::System,
205 Instruction *InsertBefore = nullptr);
206 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
207 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
208 BasicBlock *InsertAtEnd);
209
210 /// Return true if this is a load from a volatile memory location.
211 bool isVolatile() const { return getSubclassData<VolatileField>(); }
212
213 /// Specify whether this is a volatile load or not.
214 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
215
216 /// Return the alignment of the access that is being performed.
217 /// FIXME: Remove this function once transition to Align is over.
218 /// Use getAlign() instead.
219 unsigned getAlignment() const { return getAlign().value(); }
220
221 /// Return the alignment of the access that is being performed.
222 Align getAlign() const {
223 return Align(1ULL << (getSubclassData<AlignmentField>()));
224 }
225
226 void setAlignment(Align Align) {
227 setSubclassData<AlignmentField>(Log2(Align));
228 }
229
230 /// Returns the ordering constraint of this load instruction.
231 AtomicOrdering getOrdering() const {
232 return getSubclassData<OrderingField>();
233 }
234 /// Sets the ordering constraint of this load instruction. May not be Release
235 /// or AcquireRelease.
236 void setOrdering(AtomicOrdering Ordering) {
237 setSubclassData<OrderingField>(Ordering);
238 }
239
240 /// Returns the synchronization scope ID of this load instruction.
241 SyncScope::ID getSyncScopeID() const {
242 return SSID;
243 }
244
245 /// Sets the synchronization scope ID of this load instruction.
246 void setSyncScopeID(SyncScope::ID SSID) {
247 this->SSID = SSID;
248 }
249
250 /// Sets the ordering constraint and the synchronization scope ID of this load
251 /// instruction.
252 void setAtomic(AtomicOrdering Ordering,
253 SyncScope::ID SSID = SyncScope::System) {
254 setOrdering(Ordering);
255 setSyncScopeID(SSID);
256 }
257
258 bool isSimple() const { return !isAtomic() && !isVolatile(); }
259
260 bool isUnordered() const {
261 return (getOrdering() == AtomicOrdering::NotAtomic ||
262 getOrdering() == AtomicOrdering::Unordered) &&
263 !isVolatile();
264 }
265
266 Value *getPointerOperand() { return getOperand(0); }
267 const Value *getPointerOperand() const { return getOperand(0); }
268 static unsigned getPointerOperandIndex() { return 0U; }
269 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
270
271 /// Returns the address space of the pointer operand.
272 unsigned getPointerAddressSpace() const {
273 return getPointerOperandType()->getPointerAddressSpace();
274 }
275
276 // Methods for support type inquiry through isa, cast, and dyn_cast:
277 static bool classof(const Instruction *I) {
278 return I->getOpcode() == Instruction::Load;
279 }
280 static bool classof(const Value *V) {
281 return isa<Instruction>(V) && classof(cast<Instruction>(V));
282 }
283
284private:
285 // Shadow Instruction::setInstructionSubclassData with a private forwarding
286 // method so that subclasses cannot accidentally use it.
287 template <typename Bitfield>
288 void setSubclassData(typename Bitfield::Type Value) {
289 Instruction::setSubclassData<Bitfield>(Value);
290 }
291
292 /// The synchronization scope ID of this load instruction. Not quite enough
293 /// room in SubClassData for everything, so synchronization scope ID gets its
294 /// own field.
295 SyncScope::ID SSID;
296};
297
298//===----------------------------------------------------------------------===//
299// StoreInst Class
300//===----------------------------------------------------------------------===//
301
302/// An instruction for storing to memory.
303class StoreInst : public Instruction {
304 using VolatileField = BoolBitfieldElementT<0>;
305 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
306 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
307 static_assert(
308 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
309 "Bitfields must be contiguous");
310
311 void AssertOK();
312
313protected:
314 // Note: Instruction needs to be a friend here to call cloneImpl.
315 friend class Instruction;
316
317 StoreInst *cloneImpl() const;
318
319public:
320 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
321 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
322 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
323 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
324 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
325 Instruction *InsertBefore = nullptr);
326 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
327 BasicBlock *InsertAtEnd);
328 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
329 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
330 Instruction *InsertBefore = nullptr);
331 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
332 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
333
334 // allocate space for exactly two operands
335 void *operator new(size_t s) {
336 return User::operator new(s, 2);
337 }
338
339 /// Return true if this is a store to a volatile memory location.
340 bool isVolatile() const { return getSubclassData<VolatileField>(); }
341
342 /// Specify whether this is a volatile store or not.
343 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
344
345 /// Transparently provide more efficient getOperand methods.
346   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
347
348 /// Return the alignment of the access that is being performed
349 /// FIXME: Remove this function once transition to Align is over.
350 /// Use getAlign() instead.
351 unsigned getAlignment() const { return getAlign().value(); }
352
353 Align getAlign() const {
354 return Align(1ULL << (getSubclassData<AlignmentField>()));
355 }
356
357 void setAlignment(Align Align) {
358 setSubclassData<AlignmentField>(Log2(Align));
359 }
360
361 /// Returns the ordering constraint of this store instruction.
362 AtomicOrdering getOrdering() const {
363 return getSubclassData<OrderingField>();
364 }
365
366 /// Sets the ordering constraint of this store instruction. May not be
367 /// Acquire or AcquireRelease.
368 void setOrdering(AtomicOrdering Ordering) {
369 setSubclassData<OrderingField>(Ordering);
370 }
371
372 /// Returns the synchronization scope ID of this store instruction.
373 SyncScope::ID getSyncScopeID() const {
374 return SSID;
375 }
376
377 /// Sets the synchronization scope ID of this store instruction.
378 void setSyncScopeID(SyncScope::ID SSID) {
379 this->SSID = SSID;
380 }
381
382 /// Sets the ordering constraint and the synchronization scope ID of this
383 /// store instruction.
384 void setAtomic(AtomicOrdering Ordering,
385 SyncScope::ID SSID = SyncScope::System) {
386 setOrdering(Ordering);
387 setSyncScopeID(SSID);
388 }
389
390 bool isSimple() const { return !isAtomic() && !isVolatile(); }
391
392 bool isUnordered() const {
393 return (getOrdering() == AtomicOrdering::NotAtomic ||
394 getOrdering() == AtomicOrdering::Unordered) &&
395 !isVolatile();
396 }
397
398 Value *getValueOperand() { return getOperand(0); }
399 const Value *getValueOperand() const { return getOperand(0); }
400
401 Value *getPointerOperand() { return getOperand(1); }
402 const Value *getPointerOperand() const { return getOperand(1); }
403 static unsigned getPointerOperandIndex() { return 1U; }
404 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
405
406 /// Returns the address space of the pointer operand.
407 unsigned getPointerAddressSpace() const {
408 return getPointerOperandType()->getPointerAddressSpace();
409 }
410
411 // Methods for support type inquiry through isa, cast, and dyn_cast:
412 static bool classof(const Instruction *I) {
413 return I->getOpcode() == Instruction::Store;
414 }
415 static bool classof(const Value *V) {
416 return isa<Instruction>(V) && classof(cast<Instruction>(V));
417 }
418
419private:
420 // Shadow Instruction::setInstructionSubclassData with a private forwarding
421 // method so that subclasses cannot accidentally use it.
422 template <typename Bitfield>
423 void setSubclassData(typename Bitfield::Type Value) {
424 Instruction::setSubclassData<Bitfield>(Value);
425 }
426
427 /// The synchronization scope ID of this store instruction. Not quite enough
428 /// room in SubClassData for everything, so synchronization scope ID gets its
429 /// own field.
430 SyncScope::ID SSID;
431};
432
433template <>
434struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
435};
436
437DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
438
439//===----------------------------------------------------------------------===//
440// FenceInst Class
441//===----------------------------------------------------------------------===//
442
443/// An instruction for ordering other memory operations.
444class FenceInst : public Instruction {
445 using OrderingField = AtomicOrderingBitfieldElementT<0>;
446
447 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
448
449protected:
450 // Note: Instruction needs to be a friend here to call cloneImpl.
451 friend class Instruction;
452
453 FenceInst *cloneImpl() const;
454
455public:
456 // Ordering may only be Acquire, Release, AcquireRelease, or
457 // SequentiallyConsistent.
458 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
459 SyncScope::ID SSID = SyncScope::System,
460 Instruction *InsertBefore = nullptr);
461 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
462 BasicBlock *InsertAtEnd);
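// Editor's sketch (assumptions: Ctx is an existing LLVMContext& and InsertPt
// an existing Instruction*): a seq_cst fence in the default scope.
//
//   auto *F = new FenceInst(Ctx, AtomicOrdering::SequentiallyConsistent,
//                           SyncScope::System, InsertPt);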
463
464 // allocate space for exactly zero operands
465 void *operator new(size_t s) {
466 return User::operator new(s, 0);
467 }
468
469 /// Returns the ordering constraint of this fence instruction.
470 AtomicOrdering getOrdering() const {
471 return getSubclassData<OrderingField>();
472 }
473
474 /// Sets the ordering constraint of this fence instruction. May only be
475 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
476 void setOrdering(AtomicOrdering Ordering) {
477 setSubclassData<OrderingField>(Ordering);
478 }
479
480 /// Returns the synchronization scope ID of this fence instruction.
481 SyncScope::ID getSyncScopeID() const {
482 return SSID;
483 }
484
485 /// Sets the synchronization scope ID of this fence instruction.
486 void setSyncScopeID(SyncScope::ID SSID) {
487 this->SSID = SSID;
488 }
489
490 // Methods for support type inquiry through isa, cast, and dyn_cast:
491 static bool classof(const Instruction *I) {
492 return I->getOpcode() == Instruction::Fence;
493 }
494 static bool classof(const Value *V) {
495 return isa<Instruction>(V) && classof(cast<Instruction>(V));
496 }
497
498private:
499 // Shadow Instruction::setInstructionSubclassData with a private forwarding
500 // method so that subclasses cannot accidentally use it.
501 template <typename Bitfield>
502 void setSubclassData(typename Bitfield::Type Value) {
503 Instruction::setSubclassData<Bitfield>(Value);
504 }
505
506 /// The synchronization scope ID of this fence instruction. Not quite enough
507 /// room in SubClassData for everything, so synchronization scope ID gets its
508 /// own field.
509 SyncScope::ID SSID;
510};
511
512//===----------------------------------------------------------------------===//
513// AtomicCmpXchgInst Class
514//===----------------------------------------------------------------------===//
515
516/// An instruction that atomically checks whether a
517/// specified value is in a memory location, and, if it is, stores a new value
518/// there. The value returned by this instruction is a pair containing the
519/// original value as first element, and an i1 indicating success (true) or
520/// failure (false) as second element.
521///
522class AtomicCmpXchgInst : public Instruction {
523 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
524 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
525 SyncScope::ID SSID);
526
527 template <unsigned Offset>
528 using AtomicOrderingBitfieldElement =
529 typename Bitfield::Element<AtomicOrdering, Offset, 3,
530 AtomicOrdering::LAST>;
531
532protected:
533 // Note: Instruction needs to be a friend here to call cloneImpl.
534 friend class Instruction;
535
536 AtomicCmpXchgInst *cloneImpl() const;
537
538public:
539 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
540 AtomicOrdering SuccessOrdering,
541 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
542 Instruction *InsertBefore = nullptr);
543 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
544 AtomicOrdering SuccessOrdering,
545 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
546 BasicBlock *InsertAtEnd);
547
548 // allocate space for exactly three operands
549 void *operator new(size_t s) {
550 return User::operator new(s, 3);
551 }
552
553 using VolatileField = BoolBitfieldElementT<0>;
554 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
555 using SuccessOrderingField =
556 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
557 using FailureOrderingField =
558 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
559 using AlignmentField =
560 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
561 static_assert(
562 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
563 FailureOrderingField, AlignmentField>(),
564 "Bitfields must be contiguous");
565
566 /// Return the alignment of the memory location operated on by this
567 /// cmpxchg instruction.
568 Align getAlign() const {
569 return Align(1ULL << getSubclassData<AlignmentField>());
570 }
571
572 void setAlignment(Align Align) {
573 setSubclassData<AlignmentField>(Log2(Align));
574 }
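// Editor's note: as elsewhere in this header, the alignment is stored as its
// base-2 logarithm, so only powers of two round-trip. Sketch (CXI assumed to
// be an existing AtomicCmpXchgInst*):
//
//   CXI->setAlignment(Align(8));          // AlignmentField holds Log2(8) == 3
//   assert(CXI->getAlign() == Align(8));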
575
576 /// Return true if this is a cmpxchg from a volatile memory
577 /// location.
578 ///
579 bool isVolatile() const { return getSubclassData<VolatileField>(); }
580
581 /// Specify whether this is a volatile cmpxchg.
582 ///
583 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
584
585 /// Return true if this cmpxchg may spuriously fail.
586 bool isWeak() const { return getSubclassData<WeakField>(); }
587
588 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
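// Editor's sketch (assumptions: CXI is an existing AtomicCmpXchgInst* and
// InsertPt an insertion point): a weak cmpxchg may fail spuriously, so the
// i1 success element of its result pair is typically tested in a retry loop.
//
//   CXI->setWeak(true);
//   Value *Ok = ExtractValueInst::Create(CXI, {1}, "success", InsertPt);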
589
590 /// Transparently provide more efficient getOperand methods.
591 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
592
593 /// Returns the success ordering constraint of this cmpxchg instruction.
594 AtomicOrdering getSuccessOrdering() const {
595 return getSubclassData<SuccessOrderingField>();
596 }
597
598 /// Sets the success ordering constraint of this cmpxchg instruction.
599 void setSuccessOrdering(AtomicOrdering Ordering) {
600 assert(Ordering != AtomicOrdering::NotAtomic &&
601        "CmpXchg instructions can only be atomic.");
602 setSubclassData<SuccessOrderingField>(Ordering);
603 }
604
605 /// Returns the failure ordering constraint of this cmpxchg instruction.
606 AtomicOrdering getFailureOrdering() const {
607 return getSubclassData<FailureOrderingField>();
608 }
609
610 /// Sets the failure ordering constraint of this cmpxchg instruction.
611 void setFailureOrdering(AtomicOrdering Ordering) {
612 assert(Ordering != AtomicOrdering::NotAtomic &&
613        "CmpXchg instructions can only be atomic.");
614 setSubclassData<FailureOrderingField>(Ordering);
615 }
616
617 /// Returns the synchronization scope ID of this cmpxchg instruction.
618 SyncScope::ID getSyncScopeID() const {
619 return SSID;
620 }
621
622 /// Sets the synchronization scope ID of this cmpxchg instruction.
623 void setSyncScopeID(SyncScope::ID SSID) {
624 this->SSID = SSID;
625 }
626
627 Value *getPointerOperand() { return getOperand(0); }
628 const Value *getPointerOperand() const { return getOperand(0); }
629 static unsigned getPointerOperandIndex() { return 0U; }
630
631 Value *getCompareOperand() { return getOperand(1); }
632 const Value *getCompareOperand() const { return getOperand(1); }
633
634 Value *getNewValOperand() { return getOperand(2); }
635 const Value *getNewValOperand() const { return getOperand(2); }
636
637 /// Returns the address space of the pointer operand.
638 unsigned getPointerAddressSpace() const {
639 return getPointerOperand()->getType()->getPointerAddressSpace();
640 }
641
642 /// Returns the strongest permitted ordering on failure, given the
643 /// desired ordering on success.
644 ///
645 /// If the comparison in a cmpxchg operation fails, there is no atomic store
646 /// so release semantics cannot be provided. So this function drops explicit
647 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
648 /// operation would remain SequentiallyConsistent.
649 static AtomicOrdering
650 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
651 switch (SuccessOrdering) {
652 default:
653 llvm_unreachable("invalid cmpxchg success ordering")::llvm::llvm_unreachable_internal("invalid cmpxchg success ordering"
, "/build/llvm-toolchain-snapshot-12~++20201202100633+b276bf5a572/llvm/include/llvm/IR/Instructions.h"
, 653)
;
654 case AtomicOrdering::Release:
655 case AtomicOrdering::Monotonic:
656 return AtomicOrdering::Monotonic;
657 case AtomicOrdering::AcquireRelease:
658 case AtomicOrdering::Acquire:
659 return AtomicOrdering::Acquire;
660 case AtomicOrdering::SequentiallyConsistent:
661 return AtomicOrdering::SequentiallyConsistent;
662 }
663 }
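// Editor's note: worked instances of the mapping above (illustrative):
//   Release                -> Monotonic   (release half is dropped on failure)
//   AcquireRelease         -> Acquire
//   SequentiallyConsistent -> SequentiallyConsistent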
664
665 // Methods for support type inquiry through isa, cast, and dyn_cast:
666 static bool classof(const Instruction *I) {
667 return I->getOpcode() == Instruction::AtomicCmpXchg;
668 }
669 static bool classof(const Value *V) {
670 return isa<Instruction>(V) && classof(cast<Instruction>(V));
671 }
672
673private:
674 // Shadow Instruction::setInstructionSubclassData with a private forwarding
675 // method so that subclasses cannot accidentally use it.
676 template <typename Bitfield>
677 void setSubclassData(typename Bitfield::Type Value) {
678 Instruction::setSubclassData<Bitfield>(Value);
679 }
680
681 /// The synchronization scope ID of this cmpxchg instruction. Not quite
682 /// enough room in SubClassData for everything, so synchronization scope ID
683 /// gets its own field.
684 SyncScope::ID SSID;
685};
686
687template <>
688struct OperandTraits<AtomicCmpXchgInst> :
689 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
690};
691
692DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
693
694//===----------------------------------------------------------------------===//
695// AtomicRMWInst Class
696//===----------------------------------------------------------------------===//
697
698/// An instruction that atomically reads a memory location,
699/// combines it with another value, and then stores the result back. Returns
700/// the old value.
701///
702class AtomicRMWInst : public Instruction {
703protected:
704 // Note: Instruction needs to be a friend here to call cloneImpl.
705 friend class Instruction;
706
707 AtomicRMWInst *cloneImpl() const;
708
709public:
710 /// This enumeration lists the possible modifications atomicrmw can make. In
711 /// the descriptions, 'p' is the pointer to the instruction's memory location,
712 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
713 /// instruction. These instructions always return 'old'.
714 enum BinOp : unsigned {
715 /// *p = v
716 Xchg,
717 /// *p = old + v
718 Add,
719 /// *p = old - v
720 Sub,
721 /// *p = old & v
722 And,
723 /// *p = ~(old & v)
724 Nand,
725 /// *p = old | v
726 Or,
727 /// *p = old ^ v
728 Xor,
729 /// *p = old >signed v ? old : v
730 Max,
731 /// *p = old <signed v ? old : v
732 Min,
733 /// *p = old >unsigned v ? old : v
734 UMax,
735 /// *p = old <unsigned v ? old : v
736 UMin,
737
738 /// *p = old + v (floating-point addition)
739 FAdd,
740
741 /// *p = old - v (floating-point subtraction)
742 FSub,
743
744 FIRST_BINOP = Xchg,
745 LAST_BINOP = FSub,
746 BAD_BINOP
747 };
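// Editor's sketch (assumptions: Ptr and Val are existing Value*s of
// compatible types, InsertPt an existing Instruction*): an atomic
// fetch-and-add that yields the prior value of *Ptr.
//
//   auto *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr, Val, Align(4),
//                                 AtomicOrdering::SequentiallyConsistent,
//                                 SyncScope::System, InsertPt);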
748
749private:
750 template <unsigned Offset>
751 using AtomicOrderingBitfieldElement =
752 typename Bitfield::Element<AtomicOrdering, Offset, 3,
753 AtomicOrdering::LAST>;
754
755 template <unsigned Offset>
756 using BinOpBitfieldElement =
757 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
758
759public:
760 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
761 AtomicOrdering Ordering, SyncScope::ID SSID,
762 Instruction *InsertBefore = nullptr);
763 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
764 AtomicOrdering Ordering, SyncScope::ID SSID,
765 BasicBlock *InsertAtEnd);
766
767 // allocate space for exactly two operands
768 void *operator new(size_t s) {
769 return User::operator new(s, 2);
770 }
771
772 using VolatileField = BoolBitfieldElementT<0>;
773 using AtomicOrderingField =
774 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
775 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
776 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
777 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
778 OperationField, AlignmentField>(),
779 "Bitfields must be contiguous");
780
781 BinOp getOperation() const { return getSubclassData<OperationField>(); }
782
783 static StringRef getOperationName(BinOp Op);
784
785 static bool isFPOperation(BinOp Op) {
786 switch (Op) {
787 case AtomicRMWInst::FAdd:
788 case AtomicRMWInst::FSub:
789 return true;
790 default:
791 return false;
792 }
793 }
794
795 void setOperation(BinOp Operation) {
796 setSubclassData<OperationField>(Operation);
797 }
798
799 /// Return the alignment of the memory location operated on by this
800 /// rmw instruction.
801 Align getAlign() const {
802 return Align(1ULL << getSubclassData<AlignmentField>());
803 }
804
805 void setAlignment(Align Align) {
806 setSubclassData<AlignmentField>(Log2(Align));
807 }
808
809 /// Return true if this is a RMW on a volatile memory location.
810 ///
811 bool isVolatile() const { return getSubclassData<VolatileField>(); }
812
813 /// Specify whether this is a volatile RMW or not.
814 ///
815 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
816
817 /// Transparently provide more efficient getOperand methods.
818 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
819
820 /// Returns the ordering constraint of this rmw instruction.
821 AtomicOrdering getOrdering() const {
822 return getSubclassData<AtomicOrderingField>();
823 }
824
825 /// Sets the ordering constraint of this rmw instruction.
826 void setOrdering(AtomicOrdering Ordering) {
827 assert(Ordering != AtomicOrdering::NotAtomic &&
828        "atomicrmw instructions can only be atomic.");
829 setSubclassData<AtomicOrderingField>(Ordering);
830 }
831
832 /// Returns the synchronization scope ID of this rmw instruction.
833 SyncScope::ID getSyncScopeID() const {
834 return SSID;
835 }
836
837 /// Sets the synchronization scope ID of this rmw instruction.
838 void setSyncScopeID(SyncScope::ID SSID) {
839 this->SSID = SSID;
840 }
841
842 Value *getPointerOperand() { return getOperand(0); }
843 const Value *getPointerOperand() const { return getOperand(0); }
844 static unsigned getPointerOperandIndex() { return 0U; }
845
846 Value *getValOperand() { return getOperand(1); }
847 const Value *getValOperand() const { return getOperand(1); }
848
849 /// Returns the address space of the pointer operand.
850 unsigned getPointerAddressSpace() const {
851 return getPointerOperand()->getType()->getPointerAddressSpace();
852 }
853
854 bool isFloatingPointOperation() const {
855 return isFPOperation(getOperation());
856 }
857
858 // Methods for support type inquiry through isa, cast, and dyn_cast:
859 static bool classof(const Instruction *I) {
860 return I->getOpcode() == Instruction::AtomicRMW;
861 }
862 static bool classof(const Value *V) {
863 return isa<Instruction>(V) && classof(cast<Instruction>(V));
864 }
865
866private:
867 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
868 AtomicOrdering Ordering, SyncScope::ID SSID);
869
870 // Shadow Instruction::setInstructionSubclassData with a private forwarding
871 // method so that subclasses cannot accidentally use it.
872 template <typename Bitfield>
873 void setSubclassData(typename Bitfield::Type Value) {
874 Instruction::setSubclassData<Bitfield>(Value);
875 }
876
877 /// The synchronization scope ID of this rmw instruction. Not quite enough
878 /// room in SubClassData for everything, so synchronization scope ID gets its
879 /// own field.
880 SyncScope::ID SSID;
881};
882
883template <>
884struct OperandTraits<AtomicRMWInst>
885 : public FixedNumOperandTraits<AtomicRMWInst,2> {
886};
887
888DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
889
890//===----------------------------------------------------------------------===//
891// GetElementPtrInst Class
892//===----------------------------------------------------------------------===//
893
894// checkGEPType - Simple wrapper function to give a better assertion failure
895// message on bad indexes for a gep instruction.
896//
897inline Type *checkGEPType(Type *Ty) {
898 assert(Ty && "Invalid GetElementPtrInst indices for type!")((Ty && "Invalid GetElementPtrInst indices for type!"
) ? static_cast<void> (0) : __assert_fail ("Ty && \"Invalid GetElementPtrInst indices for type!\""
, "/build/llvm-toolchain-snapshot-12~++20201202100633+b276bf5a572/llvm/include/llvm/IR/Instructions.h"
, 898, __PRETTY_FUNCTION__))
;
899 return Ty;
900}
901
902/// An instruction for type-safe pointer arithmetic to
903/// access elements of arrays and structs.
904///
905class GetElementPtrInst : public Instruction {
906 Type *SourceElementType;
907 Type *ResultElementType;
908
909 GetElementPtrInst(const GetElementPtrInst &GEPI);
910
911 /// Constructors - Create a getelementptr instruction with a base pointer and
912 /// a list of indices. The first ctor can optionally insert before an existing
913 /// instruction, the second appends the new instruction to the specified
914 /// BasicBlock.
915 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
916 ArrayRef<Value *> IdxList, unsigned Values,
917 const Twine &NameStr, Instruction *InsertBefore);
918 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
919 ArrayRef<Value *> IdxList, unsigned Values,
920 const Twine &NameStr, BasicBlock *InsertAtEnd);
921
922 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
923
924protected:
925 // Note: Instruction needs to be a friend here to call cloneImpl.
926 friend class Instruction;
927
928 GetElementPtrInst *cloneImpl() const;
929
930public:
931 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
932 ArrayRef<Value *> IdxList,
933 const Twine &NameStr = "",
934 Instruction *InsertBefore = nullptr) {
935 unsigned Values = 1 + unsigned(IdxList.size());
936 if (!PointeeType)
36
Assuming 'PointeeType' is non-null
37
Taking false branch
937 PointeeType =
938 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
939 else
940 assert(
38
Called C++ object pointer is null
941     PointeeType ==
942         cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
943 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
944 NameStr, InsertBefore);
945 }
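// Editor's note on the analyzer path above: when PointeeType is null it is
// recomputed from Ptr, so the line-940 report reduces to Ptr (or a type in
// the Ptr->getType() chain) being null when the assert executes. A hedged,
// caller-side guard (names illustrative):
//
//   assert(Ptr && "GEP base pointer must be non-null");
//   auto *GEP = GetElementPtrInst::Create(nullptr, Ptr, IdxList, "gep");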
946
947 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
948 ArrayRef<Value *> IdxList,
949 const Twine &NameStr,
950 BasicBlock *InsertAtEnd) {
951 unsigned Values = 1 + unsigned(IdxList.size());
952 if (!PointeeType)
953 PointeeType =
954 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
955 else
956 assert(
957     PointeeType ==
958         cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
959 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
960 NameStr, InsertAtEnd);
961 }
962
963 /// Create an "inbounds" getelementptr. See the documentation for the
964 /// "inbounds" flag in LangRef.html for details.
965 static GetElementPtrInst *CreateInBounds(Value *Ptr,
966 ArrayRef<Value *> IdxList,
967 const Twine &NameStr = "",
968 Instruction *InsertBefore = nullptr){
969 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
970 }
971
972 static GetElementPtrInst *
973 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
974 const Twine &NameStr = "",
975 Instruction *InsertBefore = nullptr) {
976 GetElementPtrInst *GEP =
977 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
978 GEP->setIsInBounds(true);
979 return GEP;
980 }
981
982 static GetElementPtrInst *CreateInBounds(Value *Ptr,
983 ArrayRef<Value *> IdxList,
984 const Twine &NameStr,
985 BasicBlock *InsertAtEnd) {
986 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
987 }
988
989 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
990 ArrayRef<Value *> IdxList,
991 const Twine &NameStr,
992 BasicBlock *InsertAtEnd) {
993 GetElementPtrInst *GEP =
994 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
995 GEP->setIsInBounds(true);
996 return GEP;
997 }
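// Editor's sketch of the PointeeType-less overload (assumptions: Ctx is the
// LLVMContext and BasePtr an existing Value* of pointer type):
//
//   Value *Idx[] = {ConstantInt::get(Type::getInt64Ty(Ctx), 0)};
//   auto *GEP = GetElementPtrInst::CreateInBounds(BasePtr, Idx, "elt");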
998
999 /// Transparently provide more efficient getOperand methods.
1000 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1001
1002 Type *getSourceElementType() const { return SourceElementType; }
1003
1004 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1005 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1006
1007 Type *getResultElementType() const {
1008 assert(ResultElementType ==
1009        cast<PointerType>(getType()->getScalarType())->getElementType());
1010 return ResultElementType;
1011 }
1012
1013 /// Returns the address space of this instruction's pointer type.
1014 unsigned getAddressSpace() const {
1015 // Note that this is always the same as the pointer operand's address space
1016 // and that is cheaper to compute, so cheat here.
1017 return getPointerAddressSpace();
1018 }
1019
1020 /// Returns the result type of a getelementptr with the given source
1021 /// element type and indexes.
1022 ///
1023 /// Null is returned if the indices are invalid for the specified
1024 /// source element type.
1025 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1026 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1027 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1028
1029 /// Return the type of the element at the given index of an indexable
1030 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1031 ///
1032 /// Returns null if the type can't be indexed, or the given index is not
1033 /// legal for the given type.
1034 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1035 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
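// Editor's note (illustrative, not from the header): for a struct type
// { i32, [4 x float] }, getTypeAtIndex(STy, (uint64_t)1) yields [4 x float],
// and getTypeAtIndex on that array type yields float; an out-of-range struct
// index yields null.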
1036
1037 inline op_iterator idx_begin() { return op_begin()+1; }
1038 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1039 inline op_iterator idx_end() { return op_end(); }
1040 inline const_op_iterator idx_end() const { return op_end(); }
1041
1042 inline iterator_range<op_iterator> indices() {
1043 return make_range(idx_begin(), idx_end());
1044 }
1045
1046 inline iterator_range<const_op_iterator> indices() const {
1047 return make_range(idx_begin(), idx_end());
1048 }
1049
1050 Value *getPointerOperand() {
1051 return getOperand(0);
1052 }
1053 const Value *getPointerOperand() const {
1054 return getOperand(0);
1055 }
1056 static unsigned getPointerOperandIndex() {
1057 return 0U; // index of the pointer operand, for use with setOperand.
1058 }
1059
1060 /// Method to return the pointer operand as a
1061 /// PointerType.
1062 Type *getPointerOperandType() const {
1063 return getPointerOperand()->getType();
1064 }
1065
1066 /// Returns the address space of the pointer operand.
1067 unsigned getPointerAddressSpace() const {
1068 return getPointerOperandType()->getPointerAddressSpace();
1069 }
1070
1071 /// Returns the pointer type returned by the GEP
1072 /// instruction, which may be a vector of pointers.
1073 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1074 ArrayRef<Value *> IdxList) {
1075 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
1076 Ptr->getType()->getPointerAddressSpace());
1077 // Vector GEP
1078 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1079 ElementCount EltCount = PtrVTy->getElementCount();
1080 return VectorType::get(PtrTy, EltCount);
1081 }
1082 for (Value *Index : IdxList)
1083 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1084 ElementCount EltCount = IndexVTy->getElementCount();
1085 return VectorType::get(PtrTy, EltCount);
1086 }
1087 // Scalar GEP
1088 return PtrTy;
1089 }
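// Editor's note: worked examples of the rule above (illustrative). With a
// scalar i32* base and scalar indices the result is i32*; with a <4 x i32*>
// base, or with any <4 x i64> index, the result is <4 x i32*> (one pointer
// per lane).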
1090
1091 unsigned getNumIndices() const { // Note: always non-negative
1092 return getNumOperands() - 1;
1093 }
1094
1095 bool hasIndices() const {
1096 return getNumOperands() > 1;
1097 }
1098
1099 /// Return true if all of the indices of this GEP are
1100 /// zeros. If so, the result pointer and the first operand have the same
1101 /// value, just potentially different types.
1102 bool hasAllZeroIndices() const;
1103
1104 /// Return true if all of the indices of this GEP are
1105 /// constant integers. If so, the result pointer and the first operand have
1106 /// a constant offset between them.
1107 bool hasAllConstantIndices() const;
1108
1109 /// Set or clear the inbounds flag on this GEP instruction.
1110 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1111 void setIsInBounds(bool b = true);
1112
1113 /// Determine whether the GEP has the inbounds flag.
1114 bool isInBounds() const;
1115
1116 /// Accumulate the constant address offset of this GEP if possible.
1117 ///
1118 /// This routine accepts an APInt into which it will accumulate the constant
1119 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1120 /// all-constant, it returns false and the value of the offset APInt is
1121 /// undefined (it is *not* preserved!). The APInt passed into this routine
1122 /// must be at least as wide as the IntPtr type for the address space of
1123 /// the base GEP pointer.
1124 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
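// Editor's sketch of the width contract above (assumptions: GEP is an
// existing GetElementPtrInst* and DL the module's DataLayout):
//
//   APInt Offset(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
//   if (GEP->accumulateConstantOffset(DL, Offset))
//     ; // Offset now holds the constant byte offset from the base pointer.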
1125
1126 // Methods for support type inquiry through isa, cast, and dyn_cast:
1127 static bool classof(const Instruction *I) {
1128 return (I->getOpcode() == Instruction::GetElementPtr);
1129 }
1130 static bool classof(const Value *V) {
1131 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1132 }
1133};
1134
1135template <>
1136struct OperandTraits<GetElementPtrInst> :
1137 public VariadicOperandTraits<GetElementPtrInst, 1> {
1138};
1139
1140GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1141 ArrayRef<Value *> IdxList, unsigned Values,
1142 const Twine &NameStr,
1143 Instruction *InsertBefore)
1144 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1145 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1146 Values, InsertBefore),
1147 SourceElementType(PointeeType),
1148 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1149 assert(ResultElementType ==
1150        cast<PointerType>(getType()->getScalarType())->getElementType());
1151 init(Ptr, IdxList, NameStr);
1152}
1153
1154GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1155 ArrayRef<Value *> IdxList, unsigned Values,
1156 const Twine &NameStr,
1157 BasicBlock *InsertAtEnd)
1158 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1159 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1160 Values, InsertAtEnd),
1161 SourceElementType(PointeeType),
1162 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1163 assert(ResultElementType ==
1164        cast<PointerType>(getType()->getScalarType())->getElementType());
1165 init(Ptr, IdxList, NameStr);
1166}
1167
1168DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1169
1170//===----------------------------------------------------------------------===//
1171// ICmpInst Class
1172//===----------------------------------------------------------------------===//
1173
1174/// This instruction compares its operands according to the predicate given
1175/// to the constructor. It only operates on integers or pointers. The operands
1176/// must be identical types.
1177/// Represent an integer comparison operator.
1178class ICmpInst: public CmpInst {
1179 void AssertOK() {
1180 assert(isIntPredicate() &&
1181        "Invalid ICmp predicate value");
1182 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1183        "Both operands to ICmp instruction are not of the same type!");