1 //===- InferAddressSpaces.cpp - -------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // CUDA C/C++ includes memory space designation as variable type qualifiers (such
10 // as __global__ and __shared__). Knowing the space of a memory access allows
11 // CUDA compilers to emit faster PTX loads and stores. For example, a load from
12 // shared memory can be translated to `ld.shared` which is roughly 10% faster
13 // than a generic `ld` on an NVIDIA Tesla K40c.
14 //
15 // Unfortunately, type qualifiers only apply to variable declarations, so CUDA
16 // compilers must infer the memory space of an address expression from
17 // type-qualified variables.
18 //
19 // LLVM IR uses non-zero (so-called specific) address spaces to represent memory
20 // spaces (e.g. addrspace(3) means shared memory). The Clang frontend
21 // places only type-qualified variables in specific address spaces, and then
22 // conservatively `addrspacecast`s each type-qualified variable to addrspace(0)
23 // (the so-called generic address space) for other instructions to use.
24 //
25 // For example, Clang translates the following CUDA code
26 // __shared__ float a[10];
27 // float v = a[i];
28 // to
29 // %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
30 // %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
31 // %v = load float, float* %1 ; emits ld.f32
32 // @a is in addrspace(3) since it's type-qualified, but its use from %1 is
33 // redirected to %0 (the generic version of @a).
34 //
35 // The optimization implemented in this file propagates specific address spaces
36 // from type-qualified variable declarations to their users. For example, it
37 // optimizes the above IR to
38 // %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
39 // %v = load float addrspace(3)* %1 ; emits ld.shared.f32
40 // propagating the addrspace(3) from @a to %1. As a result, the NVPTX
41 // codegen is able to emit ld.shared.f32 for %v.
42 //
43 // Address space inference works in two steps. First, it uses a data-flow
44 // analysis to prove that as many generic pointers as possible point to only one
45 // specific address space. In the above example, it can prove that %1 only
46 // points to addrspace(3). This algorithm was published in
47 // CUDA: Compiling and optimizing for a GPU platform
48 // Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang
49 // ICCS 2012
50 //
51 // Then, address space inference replaces all refinable generic pointers with
52 // equivalent specific pointers.
53 //
54 // The major challenge of implementing this optimization is handling PHINodes,
55 // which may create loops in the data flow graph. This brings two complications.
56 //
57 // First, the data flow analysis in Step 1 needs to be circular. For example,
58 // %generic.input = addrspacecast float addrspace(3)* %input to float*
59 // loop:
60 // %y = phi [ %generic.input, %y2 ]
61 // %y2 = getelementptr %y, 1
62 // %v = load %y2
63 // br ..., label %loop, ...
64 // proving %y specific requires proving both %generic.input and %y2 specific,
65 // but proving %y2 specific circles back to %y. To address this complication,
66 // the data flow analysis operates on a lattice:
67 // uninitialized > specific address spaces > generic.
68 // All address expressions (our implementation only considers phi, bitcast,
69 // addrspacecast, and getelementptr) start with the uninitialized address space.
70 // The monotone transfer function moves the address space of a pointer down a
71 // lattice path from uninitialized to specific and then to generic. A join
72 // operation of two different specific address spaces pushes the expression down
73 // to the generic address space. The analysis completes once it reaches a fixed
74 // point.
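//
// As an illustrative aside (not part of the original comment), assuming
// shared memory is addrspace(3) and constant memory is addrspace(4), the
// join behaves like:
//   join(uninitialized, 3) = 3
//   join(3, 3)             = 3
//   join(3, 4)             = generic
//   join(generic, x)       = generic for any x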
75 //
76 // Second, IR rewriting in Step 2 also needs to be circular. For example,
77 // converting %y to addrspace(3) requires the compiler to know the converted
78 // %y2, but converting %y2 needs the converted %y. To address this complication,
79 // we break these cycles using "undef" placeholders. When converting an
80 // instruction `I` to a new address space, if its operand `Op` is not converted
81 // yet, we let `I` temporarily use `undef` and fix all the uses of undef later.
82 // For instance, our algorithm first converts %y to
83 // %y' = phi float addrspace(3)* [ %input, undef ]
84 // Then, it converts %y2 to
85 // %y2' = getelementptr %y', 1
86 // Finally, it fixes the undef in %y' so that
87 // %y' = phi float addrspace(3)* [ %input, %y2' ]
88 //
89 //===----------------------------------------------------------------------===//
90 
91 #include "llvm/Transforms/Scalar/InferAddressSpaces.h"
92 #include "llvm/ADT/ArrayRef.h"
93 #include "llvm/ADT/DenseMap.h"
94 #include "llvm/ADT/DenseSet.h"
95 #include "llvm/ADT/None.h"
96 #include "llvm/ADT/Optional.h"
97 #include "llvm/ADT/SetVector.h"
98 #include "llvm/ADT/SmallVector.h"
99 #include "llvm/Analysis/AssumptionCache.h"
100 #include "llvm/Analysis/TargetTransformInfo.h"
101 #include "llvm/Analysis/ValueTracking.h"
102 #include "llvm/IR/BasicBlock.h"
103 #include "llvm/IR/Constant.h"
104 #include "llvm/IR/Constants.h"
105 #include "llvm/IR/Dominators.h"
106 #include "llvm/IR/Function.h"
107 #include "llvm/IR/IRBuilder.h"
108 #include "llvm/IR/InstIterator.h"
109 #include "llvm/IR/Instruction.h"
110 #include "llvm/IR/Instructions.h"
111 #include "llvm/IR/IntrinsicInst.h"
112 #include "llvm/IR/Intrinsics.h"
113 #include "llvm/IR/LLVMContext.h"
114 #include "llvm/IR/Operator.h"
115 #include "llvm/IR/PassManager.h"
116 #include "llvm/IR/Type.h"
117 #include "llvm/IR/Use.h"
118 #include "llvm/IR/User.h"
119 #include "llvm/IR/Value.h"
120 #include "llvm/IR/ValueHandle.h"
121 #include "llvm/InitializePasses.h"
122 #include "llvm/Pass.h"
123 #include "llvm/Support/Casting.h"
124 #include "llvm/Support/CommandLine.h"
125 #include "llvm/Support/Compiler.h"
126 #include "llvm/Support/Debug.h"
127 #include "llvm/Support/ErrorHandling.h"
128 #include "llvm/Support/raw_ostream.h"
129 #include "llvm/Transforms/Scalar.h"
130 #include "llvm/Transforms/Utils/Local.h"
131 #include "llvm/Transforms/Utils/ValueMapper.h"
132 #include <cassert>
133 #include <iterator>
134 #include <limits>
135 #include <utility>
136 #include <vector>
137 
138 #define DEBUG_TYPE "infer-address-spaces"
139 
140 using namespace llvm;
141 
143  "assume-default-is-flat-addrspace", cl::init(false), cl::ReallyHidden,
144  cl::desc("The default address space is assumed as the flat address space. "
145  "This is mainly for test purpose."));
146 
147 static const unsigned UninitializedAddressSpace =
148     std::numeric_limits<unsigned>::max();
149 
150 namespace {
151 
152 using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;
153 // Different from ValueToAddrSpaceMapTy, where a new addrspace is inferred on
154 // the *def* of a value, PredicatedAddrSpaceMapTy is a map where a new
155 // addrspace is inferred on the *use* of a pointer. This map is introduced to
156 // infer addrspace from the addrspace predicate assumption built from the
157 // assume intrinsic. In that scenario, only specific uses (under a valid
158 // assumption context) can be inferred with a new addrspace.
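// A minimal IR sketch of that scenario (assuming an AMDGPU-style target where
// the llvm.amdgcn.is.shared predicate identifies addrspace(3) pointers):
//   %is.shared = call i1 @llvm.amdgcn.is.shared(i8* %p)
//   call void @llvm.assume(i1 %is.shared)
//   ; uses of %p dominated by the assume may be rewritten as addrspace(3)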
159 using PredicatedAddrSpaceMapTy =
160     DenseMap<std::pair<const Value *, const Value *>, unsigned>;
161 using PostorderStackTy = llvm::SmallVector<PointerIntPair<Value *, 1, bool>, 4>;
162 
163 class InferAddressSpaces : public FunctionPass {
164  unsigned FlatAddrSpace = 0;
165 
166 public:
167  static char ID;
168 
169  InferAddressSpaces() :
170  FunctionPass(ID), FlatAddrSpace(UninitializedAddressSpace) {}
171  InferAddressSpaces(unsigned AS) : FunctionPass(ID), FlatAddrSpace(AS) {}
172 
173  void getAnalysisUsage(AnalysisUsage &AU) const override {
174  AU.setPreservesCFG();
175     AU.addPreserved<DominatorTreeWrapperPass>();
176     AU.addRequired<AssumptionCacheTracker>();
177     AU.addRequired<TargetTransformInfoWrapperPass>();
178   }
179 
180  bool runOnFunction(Function &F) override;
181 };
182 
183 class InferAddressSpacesImpl {
184  AssumptionCache &AC;
185  DominatorTree *DT = nullptr;
186  const TargetTransformInfo *TTI = nullptr;
187  const DataLayout *DL = nullptr;
188 
189   /// Target-specific address space whose uses should be replaced if
190  /// possible.
191  unsigned FlatAddrSpace = 0;
192 
193   // Try to update the address space of V. Returns true if V is updated and
194  // false otherwise.
195  bool updateAddressSpace(const Value &V,
196  ValueToAddrSpaceMapTy &InferredAddrSpace,
197  PredicatedAddrSpaceMapTy &PredicatedAS) const;
198 
199  // Tries to infer the specific address space of each address expression in
200  // Postorder.
201  void inferAddressSpaces(ArrayRef<WeakTrackingVH> Postorder,
202  ValueToAddrSpaceMapTy &InferredAddrSpace,
203  PredicatedAddrSpaceMapTy &PredicatedAS) const;
204 
205  bool isSafeToCastConstAddrSpace(Constant *C, unsigned NewAS) const;
206 
207  Value *cloneInstructionWithNewAddressSpace(
208  Instruction *I, unsigned NewAddrSpace,
209  const ValueToValueMapTy &ValueWithNewAddrSpace,
210  const PredicatedAddrSpaceMapTy &PredicatedAS,
211  SmallVectorImpl<const Use *> *UndefUsesToFix) const;
212 
213  // Changes the flat address expressions in function F to point to specific
214  // address spaces if InferredAddrSpace says so. Postorder is the postorder of
215  // all flat expressions in the use-def graph of function F.
216  bool rewriteWithNewAddressSpaces(
217       const TargetTransformInfo &TTI, ArrayRef<WeakTrackingVH> Postorder,
218       const ValueToAddrSpaceMapTy &InferredAddrSpace,
219  const PredicatedAddrSpaceMapTy &PredicatedAS, Function *F) const;
220 
221  void appendsFlatAddressExpressionToPostorderStack(
222  Value *V, PostorderStackTy &PostorderStack,
223  DenseSet<Value *> &Visited) const;
224 
225  bool rewriteIntrinsicOperands(IntrinsicInst *II,
226  Value *OldV, Value *NewV) const;
227  void collectRewritableIntrinsicOperands(IntrinsicInst *II,
228  PostorderStackTy &PostorderStack,
229  DenseSet<Value *> &Visited) const;
230 
231  std::vector<WeakTrackingVH> collectFlatAddressExpressions(Function &F) const;
232 
233  Value *cloneValueWithNewAddressSpace(
234  Value *V, unsigned NewAddrSpace,
235  const ValueToValueMapTy &ValueWithNewAddrSpace,
236  const PredicatedAddrSpaceMapTy &PredicatedAS,
237  SmallVectorImpl<const Use *> *UndefUsesToFix) const;
238  unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) const;
239 
240  unsigned getPredicatedAddrSpace(const Value &V, Value *Opnd) const;
241 
242 public:
243  InferAddressSpacesImpl(AssumptionCache &AC, DominatorTree *DT,
244  const TargetTransformInfo *TTI, unsigned FlatAddrSpace)
245  : AC(AC), DT(DT), TTI(TTI), FlatAddrSpace(FlatAddrSpace) {}
246  bool run(Function &F);
247 };
248 
249 } // end anonymous namespace
250 
251 char InferAddressSpaces::ID = 0;
252 
253 INITIALIZE_PASS_BEGIN(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
254  false, false)
255 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
256 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
257 INITIALIZE_PASS_END(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
258                     false, false)
259 
260 // Check whether that's a no-op pointer bitcast using a pair of
261 // `ptrtoint`/`inttoptr` due to the missing no-op pointer bitcast over
262 // different address spaces.
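// A minimal sketch of the recognized pattern (assuming the target reports the
// cast as a no-op and the pointer and integer sizes match):
//   %i = ptrtoint i8 addrspace(3)* %p to i64
//   %q = inttoptr i64 %i to i8*
// Here %q can be treated as if it were `addrspacecast %p to i8*`.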
263 static bool isNoopPtrIntCastPair(const Operator *I2P, const DataLayout &DL,
264                                  const TargetTransformInfo *TTI) {
265   assert(I2P->getOpcode() == Instruction::IntToPtr);
266  auto *P2I = dyn_cast<Operator>(I2P->getOperand(0));
267  if (!P2I || P2I->getOpcode() != Instruction::PtrToInt)
268  return false;
269  // Check it's really safe to treat that pair of `ptrtoint`/`inttoptr` as a
270  // no-op cast. Besides checking both of them are no-op casts, as the
271  // reinterpreted pointer may be used in other pointer arithmetic, we also
272  // need to double-check that through the target-specific hook. That ensures
273  // the underlying target also agrees that's a no-op address space cast and
274  // pointer bits are preserved.
275  // The current IR spec doesn't have clear rules on address space casts,
276  // especially a clear definition for pointer bits in non-default address
277 // spaces. It would be undefined behavior if that pointer is dereferenced after
278 // an invalid reinterpret cast. Also, because the current spec leaves the
279 // meaning of bits in non-default address spaces unclear, pointer arithmetic
280 // may also be undefined after an invalid pointer reinterpret cast.
281  // However, as we confirm through the target hooks that it's a no-op
282  // addrspacecast, it doesn't matter since the bits should be the same.
283  return CastInst::isNoopCast(Instruction::CastOps(I2P->getOpcode()),
284  I2P->getOperand(0)->getType(), I2P->getType(),
285  DL) &&
286  CastInst::isNoopCast(Instruction::CastOps(P2I->getOpcode()),
287  P2I->getOperand(0)->getType(), P2I->getType(),
288  DL) &&
289          TTI->isNoopAddrSpaceCast(
290              P2I->getOperand(0)->getType()->getPointerAddressSpace(),
291  I2P->getType()->getPointerAddressSpace());
292 }
293 
294 // Returns true if V is an address expression.
295 // TODO: Currently, we consider only phi, bitcast, addrspacecast, and
296 // getelementptr operators.
297 static bool isAddressExpression(const Value &V, const DataLayout &DL,
298  const TargetTransformInfo *TTI) {
299  const Operator *Op = dyn_cast<Operator>(&V);
300  if (!Op)
301  return false;
302 
303  switch (Op->getOpcode()) {
304  case Instruction::PHI:
305  assert(Op->getType()->isPointerTy());
306  return true;
307  case Instruction::BitCast:
308  case Instruction::AddrSpaceCast:
309  case Instruction::GetElementPtr:
310  return true;
311  case Instruction::Select:
312  return Op->getType()->isPointerTy();
313  case Instruction::Call: {
314  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(&V);
315  return II && II->getIntrinsicID() == Intrinsic::ptrmask;
316  }
317  case Instruction::IntToPtr:
318  return isNoopPtrIntCastPair(Op, DL, TTI);
319  default:
320  // That value is an address expression if it has an assumed address space.
321     return TTI->getAssumedAddrSpace(&V) != UninitializedAddressSpace;
322   }
323 }
324 
325 // Returns the pointer operands of V.
326 //
327 // Precondition: V is an address expression.
328 static SmallVector<Value *, 2> getPointerOperands(const Value &V,
329                                                   const DataLayout &DL,
330                                                   const TargetTransformInfo *TTI) {
331  const Operator &Op = cast<Operator>(V);
332  switch (Op.getOpcode()) {
333  case Instruction::PHI: {
334  auto IncomingValues = cast<PHINode>(Op).incoming_values();
335  return SmallVector<Value *, 2>(IncomingValues.begin(),
336  IncomingValues.end());
337  }
338  case Instruction::BitCast:
339  case Instruction::AddrSpaceCast:
340  case Instruction::GetElementPtr:
341  return {Op.getOperand(0)};
342  case Instruction::Select:
343  return {Op.getOperand(1), Op.getOperand(2)};
344  case Instruction::Call: {
345  const IntrinsicInst &II = cast<IntrinsicInst>(Op);
346  assert(II.getIntrinsicID() == Intrinsic::ptrmask &&
347  "unexpected intrinsic call");
348  return {II.getArgOperand(0)};
349  }
350  case Instruction::IntToPtr: {
351     assert(isNoopPtrIntCastPair(cast<Operator>(&Op), DL, TTI));
352     auto *P2I = cast<Operator>(Op.getOperand(0));
353  return {P2I->getOperand(0)};
354  }
355  default:
356  llvm_unreachable("Unexpected instruction type.");
357  }
358 }
359 
360 bool InferAddressSpacesImpl::rewriteIntrinsicOperands(IntrinsicInst *II,
361  Value *OldV,
362  Value *NewV) const {
363  Module *M = II->getParent()->getParent()->getParent();
364 
365  switch (II->getIntrinsicID()) {
366  case Intrinsic::objectsize: {
367  Type *DestTy = II->getType();
368  Type *SrcTy = NewV->getType();
369  Function *NewDecl =
370  Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
371  II->setArgOperand(0, NewV);
372  II->setCalledFunction(NewDecl);
373  return true;
374  }
375  case Intrinsic::ptrmask:
376  // This is handled as an address expression, not as a use memory operation.
377  return false;
378  default: {
379  Value *Rewrite = TTI->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
380  if (!Rewrite)
381  return false;
382  if (Rewrite != II)
383  II->replaceAllUsesWith(Rewrite);
384  return true;
385  }
386  }
387 }
388 
389 void InferAddressSpacesImpl::collectRewritableIntrinsicOperands(
390  IntrinsicInst *II, PostorderStackTy &PostorderStack,
391  DenseSet<Value *> &Visited) const {
392  auto IID = II->getIntrinsicID();
393  switch (IID) {
394  case Intrinsic::ptrmask:
395  case Intrinsic::objectsize:
396  appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(0),
397  PostorderStack, Visited);
398  break;
399  default:
400  SmallVector<int, 2> OpIndexes;
401  if (TTI->collectFlatAddressOperands(OpIndexes, IID)) {
402  for (int Idx : OpIndexes) {
403  appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(Idx),
404  PostorderStack, Visited);
405  }
406  }
407  break;
408  }
409 }
410 
411 // Helper for collectFlatAddressExpressions (see below).
412 // If V is an unvisited flat address expression, appends V to PostorderStack
413 // and marks it as visited.
414 void InferAddressSpacesImpl::appendsFlatAddressExpressionToPostorderStack(
415  Value *V, PostorderStackTy &PostorderStack,
416  DenseSet<Value *> &Visited) const {
417  assert(V->getType()->isPointerTy());
418 
419  // Generic addressing expressions may be hidden in nested constant
420  // expressions.
421  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
422  // TODO: Look in non-address parts, like icmp operands.
423  if (isAddressExpression(*CE, *DL, TTI) && Visited.insert(CE).second)
424  PostorderStack.emplace_back(CE, false);
425 
426  return;
427  }
428 
429  if (V->getType()->getPointerAddressSpace() == FlatAddrSpace &&
430  isAddressExpression(*V, *DL, TTI)) {
431  if (Visited.insert(V).second) {
432  PostorderStack.emplace_back(V, false);
433 
434  Operator *Op = cast<Operator>(V);
435  for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I) {
436  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op->getOperand(I))) {
437  if (isAddressExpression(*CE, *DL, TTI) && Visited.insert(CE).second)
438  PostorderStack.emplace_back(CE, false);
439  }
440  }
441  }
442  }
443 }
444 
445 // Returns all flat address expressions in function F. The elements are ordered
446 // in postorder.
447 std::vector<WeakTrackingVH>
448 InferAddressSpacesImpl::collectFlatAddressExpressions(Function &F) const {
449  // This function implements a non-recursive postorder traversal of a partial
450  // use-def graph of function F.
451  PostorderStackTy PostorderStack;
452  // The set of visited expressions.
453  DenseSet<Value *> Visited;
454 
455  auto PushPtrOperand = [&](Value *Ptr) {
456  appendsFlatAddressExpressionToPostorderStack(Ptr, PostorderStack,
457  Visited);
458  };
459 
460   // Look at operations that may be interesting to accelerate by moving to a
461   // known address space. We aim at generating faster loads and stores, but
462   // pure addressing calculations may also be faster.
463  for (Instruction &I : instructions(F)) {
464  if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
465  if (!GEP->getType()->isVectorTy())
466  PushPtrOperand(GEP->getPointerOperand());
467  } else if (auto *LI = dyn_cast<LoadInst>(&I))
468  PushPtrOperand(LI->getPointerOperand());
469  else if (auto *SI = dyn_cast<StoreInst>(&I))
470  PushPtrOperand(SI->getPointerOperand());
471  else if (auto *RMW = dyn_cast<AtomicRMWInst>(&I))
472  PushPtrOperand(RMW->getPointerOperand());
473  else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I))
474  PushPtrOperand(CmpX->getPointerOperand());
475  else if (auto *MI = dyn_cast<MemIntrinsic>(&I)) {
476  // For memset/memcpy/memmove, any pointer operand can be replaced.
477  PushPtrOperand(MI->getRawDest());
478 
479  // Handle 2nd operand for memcpy/memmove.
480  if (auto *MTI = dyn_cast<MemTransferInst>(MI))
481  PushPtrOperand(MTI->getRawSource());
482  } else if (auto *II = dyn_cast<IntrinsicInst>(&I))
483  collectRewritableIntrinsicOperands(II, PostorderStack, Visited);
484  else if (ICmpInst *Cmp = dyn_cast<ICmpInst>(&I)) {
485  // FIXME: Handle vectors of pointers
486  if (Cmp->getOperand(0)->getType()->isPointerTy()) {
487  PushPtrOperand(Cmp->getOperand(0));
488  PushPtrOperand(Cmp->getOperand(1));
489  }
490  } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
491  if (!ASC->getType()->isVectorTy())
492  PushPtrOperand(ASC->getPointerOperand());
493  } else if (auto *I2P = dyn_cast<IntToPtrInst>(&I)) {
494  if (isNoopPtrIntCastPair(cast<Operator>(I2P), *DL, TTI))
495  PushPtrOperand(
496  cast<Operator>(I2P->getOperand(0))->getOperand(0));
497  }
498  }
499 
500  std::vector<WeakTrackingVH> Postorder; // The resultant postorder.
501  while (!PostorderStack.empty()) {
502  Value *TopVal = PostorderStack.back().getPointer();
503  // If the operands of the expression on the top are already explored,
504  // adds that expression to the resultant postorder.
505  if (PostorderStack.back().getInt()) {
506  if (TopVal->getType()->getPointerAddressSpace() == FlatAddrSpace)
507  Postorder.push_back(TopVal);
508  PostorderStack.pop_back();
509  continue;
510  }
511  // Otherwise, adds its operands to the stack and explores them.
512  PostorderStack.back().setInt(true);
513  // Skip values with an assumed address space.
514     if (TTI->getAssumedAddrSpace(TopVal) == UninitializedAddressSpace) {
515       for (Value *PtrOperand : getPointerOperands(*TopVal, *DL, TTI)) {
516  appendsFlatAddressExpressionToPostorderStack(PtrOperand, PostorderStack,
517  Visited);
518  }
519  }
520  }
521  return Postorder;
522 }
523 
524 // A helper function for cloneInstructionWithNewAddressSpace. Returns the clone
525 // of OperandUse.get() in the new address space. If the clone is not ready yet,
526 // returns an undef in the new address space as a placeholder.
527 static Value *operandWithNewAddressSpaceOrCreateUndef(
528     const Use &OperandUse, unsigned NewAddrSpace,
529  const ValueToValueMapTy &ValueWithNewAddrSpace,
530  const PredicatedAddrSpaceMapTy &PredicatedAS,
531  SmallVectorImpl<const Use *> *UndefUsesToFix) {
532  Value *Operand = OperandUse.get();
533 
534   Type *NewPtrTy = PointerType::getWithSamePointeeType(
535       cast<PointerType>(Operand->getType()), NewAddrSpace);
536 
537  if (Constant *C = dyn_cast<Constant>(Operand))
538  return ConstantExpr::getAddrSpaceCast(C, NewPtrTy);
539 
540  if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
541  return NewOperand;
542 
543  Instruction *Inst = cast<Instruction>(OperandUse.getUser());
544  auto I = PredicatedAS.find(std::make_pair(Inst, Operand));
545  if (I != PredicatedAS.end()) {
546  // Insert an addrspacecast on that operand before the user.
547  unsigned NewAS = I->second;
548     Type *NewPtrTy = PointerType::getWithSamePointeeType(
549         cast<PointerType>(Operand->getType()), NewAS);
550  auto *NewI = new AddrSpaceCastInst(Operand, NewPtrTy);
551  NewI->insertBefore(Inst);
552  return NewI;
553  }
554 
555  UndefUsesToFix->push_back(&OperandUse);
556  return UndefValue::get(NewPtrTy);
557 }
558 
559 // Returns a clone of `I` with its operands converted to those specified in
560 // ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
561 // operand whose address space needs to be modified might not exist in
562 // ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and
563 // adds that operand use to UndefUsesToFix so that caller can fix them later.
564 //
565 // Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
566 // from a pointer whose type already matches. Therefore, this function returns a
567 // Value* instead of an Instruction*.
568 //
569 // This may also return nullptr in the case the instruction could not be
570 // rewritten.
571 Value *InferAddressSpacesImpl::cloneInstructionWithNewAddressSpace(
572  Instruction *I, unsigned NewAddrSpace,
573  const ValueToValueMapTy &ValueWithNewAddrSpace,
574  const PredicatedAddrSpaceMapTy &PredicatedAS,
575  SmallVectorImpl<const Use *> *UndefUsesToFix) const {
576   Type *NewPtrType = PointerType::getWithSamePointeeType(
577       cast<PointerType>(I->getType()), NewAddrSpace);
578 
579  if (I->getOpcode() == Instruction::AddrSpaceCast) {
580  Value *Src = I->getOperand(0);
581  // Because `I` is flat, the source address space must be specific.
582  // Therefore, the inferred address space must be the source space, according
583  // to our algorithm.
584  assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
585  if (Src->getType() != NewPtrType)
586  return new BitCastInst(Src, NewPtrType);
587  return Src;
588  }
589 
590  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
591  // Technically the intrinsic ID is a pointer typed argument, so specially
592  // handle calls early.
593  assert(II->getIntrinsicID() == Intrinsic::ptrmask);
595  II->getArgOperandUse(0), NewAddrSpace, ValueWithNewAddrSpace,
596  PredicatedAS, UndefUsesToFix);
597  Value *Rewrite =
598         TTI->rewriteIntrinsicWithAddressSpace(II, II->getArgOperand(0), NewPtr);
599     if (Rewrite) {
600  assert(Rewrite != II && "cannot modify this pointer operation in place");
601  return Rewrite;
602  }
603 
604  return nullptr;
605  }
606 
607  unsigned AS = TTI->getAssumedAddrSpace(I);
608  if (AS != UninitializedAddressSpace) {
609  // For the assumed address space, insert an `addrspacecast` to make that
610  // explicit.
611     Type *NewPtrTy = PointerType::getWithSamePointeeType(
612         cast<PointerType>(I->getType()), AS);
613  auto *NewI = new AddrSpaceCastInst(I, NewPtrTy);
614  NewI->insertAfter(I);
615  return NewI;
616  }
617 
618  // Computes the converted pointer operands.
619  SmallVector<Value *, 4> NewPointerOperands;
620  for (const Use &OperandUse : I->operands()) {
621  if (!OperandUse.get()->getType()->isPointerTy())
622  NewPointerOperands.push_back(nullptr);
623  else
624  NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
625  OperandUse, NewAddrSpace, ValueWithNewAddrSpace, PredicatedAS,
626  UndefUsesToFix));
627  }
628 
629  switch (I->getOpcode()) {
630  case Instruction::BitCast:
631  return new BitCastInst(NewPointerOperands[0], NewPtrType);
632  case Instruction::PHI: {
633  assert(I->getType()->isPointerTy());
634  PHINode *PHI = cast<PHINode>(I);
635  PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
636  for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
637  unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
638  NewPHI->addIncoming(NewPointerOperands[OperandNo],
639  PHI->getIncomingBlock(Index));
640  }
641  return NewPHI;
642  }
643  case Instruction::GetElementPtr: {
644  GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
645     GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
646         GEP->getSourceElementType(), NewPointerOperands[0],
647  SmallVector<Value *, 4>(GEP->indices()));
648  NewGEP->setIsInBounds(GEP->isInBounds());
649  return NewGEP;
650  }
651  case Instruction::Select:
652  assert(I->getType()->isPointerTy());
653  return SelectInst::Create(I->getOperand(0), NewPointerOperands[1],
654  NewPointerOperands[2], "", nullptr, I);
655  case Instruction::IntToPtr: {
656  assert(isNoopPtrIntCastPair(cast<Operator>(I), *DL, TTI));
657  Value *Src = cast<Operator>(I->getOperand(0))->getOperand(0);
658  assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
659  if (Src->getType() != NewPtrType)
660  return new BitCastInst(Src, NewPtrType);
661  return Src;
662  }
663  default:
664  llvm_unreachable("Unexpected opcode");
665  }
666 }
667 
668 // Similar to cloneInstructionWithNewAddressSpace, returns a clone of the
669 // constant expression `CE` with its operands replaced as specified in
670 // ValueWithNewAddrSpace.
671 static Value *cloneConstantExprWithNewAddressSpace(
672     ConstantExpr *CE, unsigned NewAddrSpace,
673  const ValueToValueMapTy &ValueWithNewAddrSpace, const DataLayout *DL,
674  const TargetTransformInfo *TTI) {
675  Type *TargetType = CE->getType()->isPointerTy()
676                          ? PointerType::getWithSamePointeeType(
677                                cast<PointerType>(CE->getType()), NewAddrSpace)
678  : CE->getType();
679 
680  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
681  // Because CE is flat, the source address space must be specific.
682  // Therefore, the inferred address space must be the source space according
683  // to our algorithm.
684  assert(CE->getOperand(0)->getType()->getPointerAddressSpace() ==
685  NewAddrSpace);
686  return ConstantExpr::getBitCast(CE->getOperand(0), TargetType);
687  }
688 
689  if (CE->getOpcode() == Instruction::BitCast) {
690  if (Value *NewOperand = ValueWithNewAddrSpace.lookup(CE->getOperand(0)))
691  return ConstantExpr::getBitCast(cast<Constant>(NewOperand), TargetType);
692  return ConstantExpr::getAddrSpaceCast(CE, TargetType);
693  }
694 
695  if (CE->getOpcode() == Instruction::Select) {
696  Constant *Src0 = CE->getOperand(1);
697  Constant *Src1 = CE->getOperand(2);
698  if (Src0->getType()->getPointerAddressSpace() ==
699  Src1->getType()->getPointerAddressSpace()) {
700 
702  CE->getOperand(0), ConstantExpr::getAddrSpaceCast(Src0, TargetType),
703  ConstantExpr::getAddrSpaceCast(Src1, TargetType));
704  }
705  }
706 
707  if (CE->getOpcode() == Instruction::IntToPtr) {
708  assert(isNoopPtrIntCastPair(cast<Operator>(CE), *DL, TTI));
709  Constant *Src = cast<ConstantExpr>(CE->getOperand(0))->getOperand(0);
710  assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
711  return ConstantExpr::getBitCast(Src, TargetType);
712  }
713 
714  // Computes the operands of the new constant expression.
715  bool IsNew = false;
716  SmallVector<Constant *, 4> NewOperands;
717  for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) {
718  Constant *Operand = CE->getOperand(Index);
719  // If the address space of `Operand` needs to be modified, the new operand
720  // with the new address space should already be in ValueWithNewAddrSpace
721  // because (1) the constant expressions we consider (i.e. addrspacecast,
722  // bitcast, and getelementptr) do not incur cycles in the data flow graph
723  // and (2) this function is called on constant expressions in postorder.
724  if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) {
725  IsNew = true;
726  NewOperands.push_back(cast<Constant>(NewOperand));
727  continue;
728  }
729  if (auto CExpr = dyn_cast<ConstantExpr>(Operand))
730       if (Value *NewOperand = cloneConstantExprWithNewAddressSpace(
731               CExpr, NewAddrSpace, ValueWithNewAddrSpace, DL, TTI)) {
732  IsNew = true;
733  NewOperands.push_back(cast<Constant>(NewOperand));
734  continue;
735  }
736  // Otherwise, reuses the old operand.
737  NewOperands.push_back(Operand);
738  }
739 
740  // If !IsNew, we will replace the Value with itself. However, replaced values
741   // are assumed to be wrapped in an addrspacecast later, so drop it now.
742  if (!IsNew)
743  return nullptr;
744 
745  if (CE->getOpcode() == Instruction::GetElementPtr) {
746  // Needs to specify the source type while constructing a getelementptr
747  // constant expression.
748  return CE->getWithOperands(NewOperands, TargetType, /*OnlyIfReduced=*/false,
749  cast<GEPOperator>(CE)->getSourceElementType());
750  }
751 
752  return CE->getWithOperands(NewOperands, TargetType);
753 }
754 
755 // Returns a clone of the value `V`, with its operands replaced as specified in
756 // ValueWithNewAddrSpace. This function is called on every flat address
757 // expression whose address space needs to be modified, in postorder.
758 //
759 // See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix.
760 Value *InferAddressSpacesImpl::cloneValueWithNewAddressSpace(
761  Value *V, unsigned NewAddrSpace,
762  const ValueToValueMapTy &ValueWithNewAddrSpace,
763  const PredicatedAddrSpaceMapTy &PredicatedAS,
764  SmallVectorImpl<const Use *> *UndefUsesToFix) const {
765  // All values in Postorder are flat address expressions.
766  assert(V->getType()->getPointerAddressSpace() == FlatAddrSpace &&
767  isAddressExpression(*V, *DL, TTI));
768 
769  if (Instruction *I = dyn_cast<Instruction>(V)) {
770  Value *NewV = cloneInstructionWithNewAddressSpace(
771  I, NewAddrSpace, ValueWithNewAddrSpace, PredicatedAS, UndefUsesToFix);
772  if (Instruction *NewI = dyn_cast_or_null<Instruction>(NewV)) {
773  if (NewI->getParent() == nullptr) {
774  NewI->insertBefore(I);
775  NewI->takeName(I);
776  }
777  }
778  return NewV;
779  }
780 
781   return cloneConstantExprWithNewAddressSpace(
782       cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace, DL, TTI);
783 }
784 
785 // Defines the join operation on the address space lattice (see the file header
786 // comments).
787 unsigned InferAddressSpacesImpl::joinAddressSpaces(unsigned AS1,
788  unsigned AS2) const {
789  if (AS1 == FlatAddrSpace || AS2 == FlatAddrSpace)
790  return FlatAddrSpace;
791 
792  if (AS1 == UninitializedAddressSpace)
793  return AS2;
794  if (AS2 == UninitializedAddressSpace)
795  return AS1;
796 
797  // The join of two different specific address spaces is flat.
798  return (AS1 == AS2) ? AS1 : FlatAddrSpace;
799 }
800 
801 bool InferAddressSpacesImpl::run(Function &F) {
802  DL = &F.getParent()->getDataLayout();
803 
804   if (AssumeDefaultIsFlatAddressSpace)
805     FlatAddrSpace = 0;
806 
807  if (FlatAddrSpace == UninitializedAddressSpace) {
808  FlatAddrSpace = TTI->getFlatAddressSpace();
809  if (FlatAddrSpace == UninitializedAddressSpace)
810  return false;
811  }
812 
813  // Collects all flat address expressions in postorder.
814  std::vector<WeakTrackingVH> Postorder = collectFlatAddressExpressions(F);
815 
816  // Runs a data-flow analysis to refine the address spaces of every expression
817  // in Postorder.
818  ValueToAddrSpaceMapTy InferredAddrSpace;
819  PredicatedAddrSpaceMapTy PredicatedAS;
820  inferAddressSpaces(Postorder, InferredAddrSpace, PredicatedAS);
821 
822   // Changes the address spaces of the flat address expressions that are inferred
823  // to point to a specific address space.
824  return rewriteWithNewAddressSpaces(*TTI, Postorder, InferredAddrSpace,
825  PredicatedAS, &F);
826 }
827 
828 // Constants need to be tracked through RAUW to handle cases with nested
829 // constant expressions, so wrap values in WeakTrackingVH.
830 void InferAddressSpacesImpl::inferAddressSpaces(
831  ArrayRef<WeakTrackingVH> Postorder,
832  ValueToAddrSpaceMapTy &InferredAddrSpace,
833  PredicatedAddrSpaceMapTy &PredicatedAS) const {
834  SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
835  // Initially, all expressions are in the uninitialized address space.
836  for (Value *V : Postorder)
837  InferredAddrSpace[V] = UninitializedAddressSpace;
838 
839  while (!Worklist.empty()) {
840  Value *V = Worklist.pop_back_val();
841 
842  // Try to update the address space of the stack top according to the
843  // address spaces of its operands.
844  if (!updateAddressSpace(*V, InferredAddrSpace, PredicatedAS))
845  continue;
846 
847  for (Value *User : V->users()) {
848  // Skip if User is already in the worklist.
849  if (Worklist.count(User))
850  continue;
851 
852  auto Pos = InferredAddrSpace.find(User);
853  // Our algorithm only updates the address spaces of flat address
854  // expressions, which are those in InferredAddrSpace.
855  if (Pos == InferredAddrSpace.end())
856  continue;
857 
858  // Function updateAddressSpace moves the address space down a lattice
859  // path. Therefore, nothing to do if User is already inferred as flat (the
860  // bottom element in the lattice).
861  if (Pos->second == FlatAddrSpace)
862  continue;
863 
864  Worklist.insert(User);
865  }
866  }
867 }
868 
869 unsigned InferAddressSpacesImpl::getPredicatedAddrSpace(const Value &V,
870  Value *Opnd) const {
871  const Instruction *I = dyn_cast<Instruction>(&V);
872  if (!I)
873     return UninitializedAddressSpace;
874 
875  Opnd = Opnd->stripInBoundsOffsets();
876  for (auto &AssumeVH : AC.assumptionsFor(Opnd)) {
877  if (!AssumeVH)
878  continue;
879  CallInst *CI = cast<CallInst>(AssumeVH);
880  if (!isValidAssumeForContext(CI, I, DT))
881  continue;
882 
883  const Value *Ptr;
884  unsigned AS;
885  std::tie(Ptr, AS) = TTI->getPredicatedAddrSpace(CI->getArgOperand(0));
886  if (Ptr)
887  return AS;
888  }
889 
890   return UninitializedAddressSpace;
891 }
892 
893 bool InferAddressSpacesImpl::updateAddressSpace(
894  const Value &V, ValueToAddrSpaceMapTy &InferredAddrSpace,
895  PredicatedAddrSpaceMapTy &PredicatedAS) const {
896  assert(InferredAddrSpace.count(&V));
897 
898  LLVM_DEBUG(dbgs() << "Updating the address space of\n " << V << '\n');
899 
900  // The new inferred address space equals the join of the address spaces
901  // of all its pointer operands.
902  unsigned NewAS = UninitializedAddressSpace;
903 
904  const Operator &Op = cast<Operator>(V);
905  if (Op.getOpcode() == Instruction::Select) {
906  Value *Src0 = Op.getOperand(1);
907  Value *Src1 = Op.getOperand(2);
908 
909  auto I = InferredAddrSpace.find(Src0);
910  unsigned Src0AS = (I != InferredAddrSpace.end()) ?
911  I->second : Src0->getType()->getPointerAddressSpace();
912 
913  auto J = InferredAddrSpace.find(Src1);
914  unsigned Src1AS = (J != InferredAddrSpace.end()) ?
915  J->second : Src1->getType()->getPointerAddressSpace();
916 
917  auto *C0 = dyn_cast<Constant>(Src0);
918  auto *C1 = dyn_cast<Constant>(Src1);
919 
920  // If one of the inputs is a constant, we may be able to do a constant
921  // addrspacecast of it. Defer inferring the address space until the input
922  // address space is known.
923  if ((C1 && Src0AS == UninitializedAddressSpace) ||
924  (C0 && Src1AS == UninitializedAddressSpace))
925  return false;
926 
927  if (C0 && isSafeToCastConstAddrSpace(C0, Src1AS))
928  NewAS = Src1AS;
929  else if (C1 && isSafeToCastConstAddrSpace(C1, Src0AS))
930  NewAS = Src0AS;
931  else
932  NewAS = joinAddressSpaces(Src0AS, Src1AS);
933  } else {
934  unsigned AS = TTI->getAssumedAddrSpace(&V);
935  if (AS != UninitializedAddressSpace) {
936  // Use the assumed address space directly.
937  NewAS = AS;
938  } else {
939  // Otherwise, infer the address space from its pointer operands.
940  for (Value *PtrOperand : getPointerOperands(V, *DL, TTI)) {
941  auto I = InferredAddrSpace.find(PtrOperand);
942  unsigned OperandAS;
943  if (I == InferredAddrSpace.end()) {
944  OperandAS = PtrOperand->getType()->getPointerAddressSpace();
945  if (OperandAS == FlatAddrSpace) {
946  // Check AC for assumption dominating V.
947  unsigned AS = getPredicatedAddrSpace(V, PtrOperand);
948  if (AS != UninitializedAddressSpace) {
949  LLVM_DEBUG(dbgs()
950  << " deduce operand AS from the predicate addrspace "
951  << AS << '\n');
952  OperandAS = AS;
953  // Record this use with the predicated AS.
954  PredicatedAS[std::make_pair(&V, PtrOperand)] = OperandAS;
955  }
956  }
957  } else
958  OperandAS = I->second;
959 
960  // join(flat, *) = flat. So we can break if NewAS is already flat.
961  NewAS = joinAddressSpaces(NewAS, OperandAS);
962  if (NewAS == FlatAddrSpace)
963  break;
964  }
965  }
966  }
967 
968  unsigned OldAS = InferredAddrSpace.lookup(&V);
969  assert(OldAS != FlatAddrSpace);
970  if (OldAS == NewAS)
971  return false;
972 
973   // If any update is made, the caller adds V's users to the worklist because
974   // their address spaces may also need to be updated.
975  LLVM_DEBUG(dbgs() << " to " << NewAS << '\n');
976  InferredAddrSpace[&V] = NewAS;
977  return true;
978 }
979 
980 /// Returns true if \p U is the pointer operand of a memory instruction with
981 /// a single pointer operand that can have its address space changed by simply
982 /// mutating the use to a new value. If the memory instruction is volatile,
983 /// return true only if the target allows the memory instruction to be volatile
984 /// in the new address space.
985 static bool isSimplePointerUseValidToReplace(const TargetTransformInfo &TTI,
986                                              Use &U, unsigned AddrSpace) {
987  User *Inst = U.getUser();
988  unsigned OpNo = U.getOperandNo();
989  bool VolatileIsAllowed = false;
990  if (auto *I = dyn_cast<Instruction>(Inst))
991  VolatileIsAllowed = TTI.hasVolatileVariant(I, AddrSpace);
992 
993  if (auto *LI = dyn_cast<LoadInst>(Inst))
994  return OpNo == LoadInst::getPointerOperandIndex() &&
995  (VolatileIsAllowed || !LI->isVolatile());
996 
997  if (auto *SI = dyn_cast<StoreInst>(Inst))
998  return OpNo == StoreInst::getPointerOperandIndex() &&
999  (VolatileIsAllowed || !SI->isVolatile());
1000 
1001  if (auto *RMW = dyn_cast<AtomicRMWInst>(Inst))
1002  return OpNo == AtomicRMWInst::getPointerOperandIndex() &&
1003  (VolatileIsAllowed || !RMW->isVolatile());
1004 
1005  if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst))
1006  return OpNo == AtomicCmpXchgInst::getPointerOperandIndex() &&
1007  (VolatileIsAllowed || !CmpX->isVolatile());
1008 
1009  return false;
1010 }
1011 
1012 /// Update memory intrinsic uses that require more complex processing than
1013 /// simple memory instructions. These require re-mangling and may have multiple
1014 /// pointer operands.
1015 static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV,
1016                                      Value *NewV) {
1017  IRBuilder<> B(MI);
1018  MDNode *TBAA = MI->getMetadata(LLVMContext::MD_tbaa);
1019  MDNode *ScopeMD = MI->getMetadata(LLVMContext::MD_alias_scope);
1020  MDNode *NoAliasMD = MI->getMetadata(LLVMContext::MD_noalias);
1021 
1022  if (auto *MSI = dyn_cast<MemSetInst>(MI)) {
1023  B.CreateMemSet(NewV, MSI->getValue(), MSI->getLength(),
1024  MaybeAlign(MSI->getDestAlignment()),
1025  false, // isVolatile
1026  TBAA, ScopeMD, NoAliasMD);
1027  } else if (auto *MTI = dyn_cast<MemTransferInst>(MI)) {
1028  Value *Src = MTI->getRawSource();
1029  Value *Dest = MTI->getRawDest();
1030 
1031  // Be careful in case this is a self-to-self copy.
1032  if (Src == OldV)
1033  Src = NewV;
1034 
1035  if (Dest == OldV)
1036  Dest = NewV;
1037 
1038  if (isa<MemCpyInlineInst>(MTI)) {
1039  MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
1040  B.CreateMemCpyInline(Dest, MTI->getDestAlign(), Src,
1041  MTI->getSourceAlign(), MTI->getLength(),
1042  false, // isVolatile
1043  TBAA, TBAAStruct, ScopeMD, NoAliasMD);
1044  } else if (isa<MemCpyInst>(MTI)) {
1045  MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
1046  B.CreateMemCpy(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
1047  MTI->getLength(),
1048  false, // isVolatile
1049  TBAA, TBAAStruct, ScopeMD, NoAliasMD);
1050  } else {
1051  assert(isa<MemMoveInst>(MTI));
1052  B.CreateMemMove(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
1053  MTI->getLength(),
1054  false, // isVolatile
1055  TBAA, ScopeMD, NoAliasMD);
1056  }
1057  } else
1058  llvm_unreachable("unhandled MemIntrinsic");
1059 
1060  MI->eraseFromParent();
1061  return true;
1062 }
1063 
1064 // Returns true if it is OK to change the address space of constant \p C with
1065 // a ConstantExpr addrspacecast.
1066 bool InferAddressSpacesImpl::isSafeToCastConstAddrSpace(Constant *C,
1067  unsigned NewAS) const {
1068   assert(NewAS != UninitializedAddressSpace);
1069 
1070  unsigned SrcAS = C->getType()->getPointerAddressSpace();
1071  if (SrcAS == NewAS || isa<UndefValue>(C))
1072  return true;
1073 
1074  // Prevent illegal casts between different non-flat address spaces.
1075  if (SrcAS != FlatAddrSpace && NewAS != FlatAddrSpace)
1076  return false;
1077 
1078  if (isa<ConstantPointerNull>(C))
1079  return true;
1080 
1081  if (auto *Op = dyn_cast<Operator>(C)) {
1082  // If we already have a constant addrspacecast, it should be safe to cast it
1083  // off.
1084  if (Op->getOpcode() == Instruction::AddrSpaceCast)
1085  return isSafeToCastConstAddrSpace(cast<Constant>(Op->getOperand(0)), NewAS);
1086 
1087  if (Op->getOpcode() == Instruction::IntToPtr &&
1088  Op->getType()->getPointerAddressSpace() == FlatAddrSpace)
1089  return true;
1090  }
1091 
1092  return false;
1093 }
1094 
1095 static Value::use_iterator skipToNextUser(Value::use_iterator I,
1096                                           Value::use_iterator End) {
1097  User *CurUser = I->getUser();
1098  ++I;
1099 
1100  while (I != End && I->getUser() == CurUser)
1101  ++I;
1102 
1103  return I;
1104 }
1105 
1106 bool InferAddressSpacesImpl::rewriteWithNewAddressSpaces(
1107     const TargetTransformInfo &TTI, ArrayRef<WeakTrackingVH> Postorder,
1108     const ValueToAddrSpaceMapTy &InferredAddrSpace,
1109  const PredicatedAddrSpaceMapTy &PredicatedAS, Function *F) const {
1110  // For each address expression to be modified, creates a clone of it with its
1111  // pointer operands converted to the new address space. Since the pointer
1112  // operands are converted, the clone is naturally in the new address space by
1113  // construction.
1114  ValueToValueMapTy ValueWithNewAddrSpace;
1115  SmallVector<const Use *, 32> UndefUsesToFix;
1116  for (Value* V : Postorder) {
1117  unsigned NewAddrSpace = InferredAddrSpace.lookup(V);
1118 
1119  // In some degenerate cases (e.g. invalid IR in unreachable code), we may
1120  // not even infer the value to have its original address space.
1121  if (NewAddrSpace == UninitializedAddressSpace)
1122  continue;
1123 
1124  if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
1125  Value *New =
1126  cloneValueWithNewAddressSpace(V, NewAddrSpace, ValueWithNewAddrSpace,
1127  PredicatedAS, &UndefUsesToFix);
1128  if (New)
1129  ValueWithNewAddrSpace[V] = New;
1130  }
1131  }
1132 
1133  if (ValueWithNewAddrSpace.empty())
1134  return false;
1135 
1136  // Fixes all the undef uses generated by cloneInstructionWithNewAddressSpace.
1137  for (const Use *UndefUse : UndefUsesToFix) {
1138  User *V = UndefUse->getUser();
1139  User *NewV = cast_or_null<User>(ValueWithNewAddrSpace.lookup(V));
1140  if (!NewV)
1141  continue;
1142 
1143  unsigned OperandNo = UndefUse->getOperandNo();
1144  assert(isa<UndefValue>(NewV->getOperand(OperandNo)));
1145  NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(UndefUse->get()));
1146  }
1147 
1148  SmallVector<Instruction *, 16> DeadInstructions;
1149 
1150  // Replaces the uses of the old address expressions with the new ones.
1151  for (const WeakTrackingVH &WVH : Postorder) {
1152  assert(WVH && "value was unexpectedly deleted");
1153  Value *V = WVH;
1154  Value *NewV = ValueWithNewAddrSpace.lookup(V);
1155  if (NewV == nullptr)
1156  continue;
1157 
1158  LLVM_DEBUG(dbgs() << "Replacing the uses of " << *V << "\n with\n "
1159  << *NewV << '\n');
1160 
1161  if (Constant *C = dyn_cast<Constant>(V)) {
1162  Constant *Replace = ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
1163  C->getType());
1164  if (C != Replace) {
1165  LLVM_DEBUG(dbgs() << "Inserting replacement const cast: " << Replace
1166  << ": " << *Replace << '\n');
1167  C->replaceAllUsesWith(Replace);
1168  V = Replace;
1169  }
1170  }
1171 
1172  Value::use_iterator I, E, Next;
1173  for (I = V->use_begin(), E = V->use_end(); I != E; ) {
1174  Use &U = *I;
1175 
1176  // Some users may see the same pointer operand in multiple operands. Skip
1177  // to the next instruction.
1178  I = skipToNextUser(I, E);
1179 
1180       if (isSimplePointerUseValidToReplace(
1181               TTI, U, V->getType()->getPointerAddressSpace())) {
1182  // If V is used as the pointer operand of a compatible memory operation,
1183  // sets the pointer operand to NewV. This replacement does not change
1184  // the element type, so the resultant load/store is still valid.
1185  U.set(NewV);
1186  continue;
1187  }
1188 
1189  User *CurUser = U.getUser();
1190  // Skip if the current user is the new value itself.
1191  if (CurUser == NewV)
1192  continue;
1193       // Handle more complex cases like intrinsics that need to be remangled.
1194  if (auto *MI = dyn_cast<MemIntrinsic>(CurUser)) {
1195  if (!MI->isVolatile() && handleMemIntrinsicPtrUse(MI, V, NewV))
1196  continue;
1197  }
1198 
1199  if (auto *II = dyn_cast<IntrinsicInst>(CurUser)) {
1200  if (rewriteIntrinsicOperands(II, V, NewV))
1201  continue;
1202  }
1203 
1204  if (isa<Instruction>(CurUser)) {
1205  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(CurUser)) {
1206  // If we can infer that both pointers are in the same addrspace,
1207  // transform e.g.
1208  // %cmp = icmp eq float* %p, %q
1209  // into
1210  // %cmp = icmp eq float addrspace(3)* %new_p, %new_q
1211 
1212  unsigned NewAS = NewV->getType()->getPointerAddressSpace();
1213  int SrcIdx = U.getOperandNo();
1214  int OtherIdx = (SrcIdx == 0) ? 1 : 0;
1215  Value *OtherSrc = Cmp->getOperand(OtherIdx);
1216 
1217  if (Value *OtherNewV = ValueWithNewAddrSpace.lookup(OtherSrc)) {
1218  if (OtherNewV->getType()->getPointerAddressSpace() == NewAS) {
1219  Cmp->setOperand(OtherIdx, OtherNewV);
1220  Cmp->setOperand(SrcIdx, NewV);
1221  continue;
1222  }
1223  }
1224 
1225  // Even if the type mismatches, we can cast the constant.
1226  if (auto *KOtherSrc = dyn_cast<Constant>(OtherSrc)) {
1227  if (isSafeToCastConstAddrSpace(KOtherSrc, NewAS)) {
1228  Cmp->setOperand(SrcIdx, NewV);
1229  Cmp->setOperand(OtherIdx,
1230  ConstantExpr::getAddrSpaceCast(KOtherSrc, NewV->getType()));
1231  continue;
1232  }
1233  }
1234  }
1235 
1236  if (AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(CurUser)) {
1237  unsigned NewAS = NewV->getType()->getPointerAddressSpace();
1238  if (ASC->getDestAddressSpace() == NewAS) {
1239  if (!cast<PointerType>(ASC->getType())
1240  ->hasSameElementTypeAs(
1241  cast<PointerType>(NewV->getType()))) {
1242  NewV = CastInst::Create(Instruction::BitCast, NewV,
1243  ASC->getType(), "", ASC);
1244  }
1245  ASC->replaceAllUsesWith(NewV);
1246  DeadInstructions.push_back(ASC);
1247  continue;
1248  }
1249  }
1250 
1251  // Otherwise, replaces the use with flat(NewV).
1252  if (Instruction *Inst = dyn_cast<Instruction>(V)) {
1253  // Don't create a copy of the original addrspacecast.
1254  if (U == V && isa<AddrSpaceCastInst>(V))
1255  continue;
1256 
1257  BasicBlock::iterator InsertPos = std::next(Inst->getIterator());
1258  while (isa<PHINode>(InsertPos))
1259  ++InsertPos;
1260  U.set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
1261  } else {
1262  U.set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
1263  V->getType()));
1264  }
1265  }
1266  }
1267 
1268  if (V->use_empty()) {
1269  if (Instruction *I = dyn_cast<Instruction>(V))
1270  DeadInstructions.push_back(I);
1271  }
1272  }
1273 
1274  for (Instruction *I : DeadInstructions)
1275     RecursivelyDeleteTriviallyDeadInstructions(I);
1276 
1277  return true;
1278 }
1279 
1280 bool InferAddressSpaces::runOnFunction(Function &F) {
1281   if (skipFunction(F))
1282  return false;
1283 
1284  auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
1285  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
1286  return InferAddressSpacesImpl(
1287  getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), DT,
1288  &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F),
1289  FlatAddrSpace)
1290  .run(F);
1291 }
1292 
1293 FunctionPass *llvm::createInferAddressSpacesPass(unsigned AddressSpace) {
1294   return new InferAddressSpaces(AddressSpace);
1295 }
1296 
1297 InferAddressSpacesPass::InferAddressSpacesPass()
1298     : FlatAddrSpace(UninitializedAddressSpace) {}
1299 InferAddressSpacesPass::InferAddressSpacesPass(unsigned AddressSpace)
1300     : FlatAddrSpace(AddressSpace) {}
1301 
1302 PreservedAnalyses InferAddressSpacesPass::run(Function &F,
1303                                               FunctionAnalysisManager &AM) {
1304   bool Changed =
1305  InferAddressSpacesImpl(AM.getResult<AssumptionAnalysis>(F),
1307  &AM.getResult<TargetIRAnalysis>(F), FlatAddrSpace)
1308  .run(F);
1309  if (Changed) {
1310  PreservedAnalyses PA;
1311  PA.preserveSet<CFGAnalyses>();
1312     PA.preserve<DominatorTreeAnalysis>();
1313     return PA;
1314  }
1315  return PreservedAnalyses::all();
1316 }
llvm::PreservedAnalyses
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:155
llvm::RecursivelyDeleteTriviallyDeadInstructions
bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition: Local.cpp:523
AssumptionCache.h
llvm::TargetIRAnalysis
Analysis pass providing the TargetTransformInfo.
Definition: TargetTransformInfo.h:2420
llvm::GetElementPtrInst::setIsInBounds
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
Definition: Instructions.cpp:1806
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:105
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AllocatorList.h:22
llvm::Operator
This is a utility class that provides an abstraction for the common functionality between Instruction...
Definition: Operator.h:31
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
Optional.h
llvm::WeakTrackingVH
Value handle that is nullable, but tries to track the Value.
Definition: ValueHandle.h:204
ValueMapper.h
llvm::CastInst::isNoopCast
static bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
Definition: Instructions.cpp:2887
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::Intrinsic::getDeclaration
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1400
llvm::BasicBlock::iterator
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:90
llvm::BasicBlock::getParent
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:107
IntrinsicInst.h
llvm::Type::isPointerTy
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:217
llvm::AnalysisManager::getResult
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:783
Scalar.h
InstIterator.h
llvm::Function
Definition: Function.h:62
Pass.h
llvm::IntrinsicInst::getIntrinsicID
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:52
llvm::CallBase::setCalledFunction
void setCalledFunction(Function *Fn)
Sets the function called, including updating the function type.
Definition: InstrTypes.h:1434
InferAddressSpaces.h
llvm::BitCastInst
This class represents a no-op cast from one type to another.
Definition: Instructions.h:5218
C1
instcombine should handle this C2 when C1
Definition: README.txt:263
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1176
llvm::PHINode::getOperandNumForIncomingValue
static unsigned getOperandNumForIncomingValue(unsigned i)
Definition: Instructions.h:2763
ErrorHandling.h
llvm::TargetTransformInfo
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Definition: TargetTransformInfo.h:168
llvm::Type::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:736
llvm::IRBuilder<>
llvm::Use::get
Value * get() const
Definition: Use.h:67
llvm::CastInst::Create
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Definition: Instructions.cpp:3152
llvm::ConstantExpr::getBitCast
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2253
llvm::TargetTransformInfo::getAssumedAddrSpace
unsigned getAssumedAddrSpace(const Value *V) const
Definition: TargetTransformInfo.cpp:267
ValueTracking.h
Local.h
llvm::ValueMap::empty
bool empty() const
Definition: ValueMap.h:140
llvm::ConstantExpr::getSelect
static Constant * getSelect(Constant *C, Constant *V1, Constant *V2, Type *OnlyIfReducedTy=nullptr)
Select constant expr.
Definition: Constants.cpp:2440
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:151
llvm::CallBase::getArgOperandUse
const Use & getArgOperandUse(unsigned i) const
Wrappers for getting the Use of a call argument.
Definition: InstrTypes.h:1354
INITIALIZE_PASS_BEGIN
INITIALIZE_PASS_BEGIN(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces", false, false) INITIALIZE_PASS_END(InferAddressSpaces
isNoopPtrIntCastPair
Infer address static false bool isNoopPtrIntCastPair(const Operator *I2P, const DataLayout &DL, const TargetTransformInfo *TTI)
Definition: InferAddressSpaces.cpp:263
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
DenseMap.h
llvm::AtomicRMWInst::getPointerOperandIndex
static unsigned getPointerOperandIndex()
Definition: Instructions.h:872
llvm::LoadInst::getPointerOperandIndex
static unsigned getPointerOperandIndex()
Definition: Instructions.h:274
llvm::Use::getOperandNo
unsigned getOperandNo() const
Return the operand # of this use in its User.
Definition: Use.cpp:33
llvm::MemIntrinsic
This is the common base class for memset/memcpy/memmove.
Definition: IntrinsicInst.h:926
Operator.h
llvm::detail::DenseSetImpl::insert
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:205
llvm::cl::ReallyHidden
@ ReallyHidden
Definition: CommandLine.h:144
Use.h
operandWithNewAddressSpaceOrCreateUndef
static Value * operandWithNewAddressSpaceOrCreateUndef(const Use &OperandUse, unsigned NewAddrSpace, const ValueToValueMapTy &ValueWithNewAddrSpace, const PredicatedAddrSpaceMapTy &PredicatedAS, SmallVectorImpl< const Use * > *UndefUsesToFix)
Definition: InferAddressSpaces.cpp:527
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
F
#define F(x, y, z)
Definition: MD5.cpp:55
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
Instruction.h
CommandLine.h
llvm::createInferAddressSpacesPass
FunctionPass * createInferAddressSpacesPass(unsigned AddressSpace=~0u)
Definition: InferAddressSpaces.cpp:1293
llvm::isValidAssumeForContext
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
Definition: ValueTracking.cpp:537
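As a hedged illustration (not code from this file): before an address-space predicate supplied by an @llvm.assume call is trusted at a memory access, the assumption must be valid at that context instruction. AssumeCall, MemInst, DT and the helper name below are placeholders.
  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/Dominators.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;
  // Only trust an address-space predicate carried by an @llvm.assume if the
  // assumption is valid at the instruction we intend to rewrite.
  static bool assumeGovernsUse(CallInst *AssumeCall, Instruction *MemInst,
                               const DominatorTree &DT) {
    return isValidAssumeForContext(AssumeCall, MemInst, &DT);
  }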
llvm::SelectInst::Create
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", Instruction *InsertBefore=nullptr, Instruction *MDFrom=nullptr)
Definition: Instructions.h:1772
llvm::AddrSpaceCastInst
This class represents a conversion between pointers from one address space to another.
Definition: Instructions.h:5258
Constants.h
llvm::Value::use_iterator
use_iterator_impl< Use > use_iterator
Definition: Value.h:353
llvm::User
Definition: User.h:44
Intrinsics.h
llvm::AnalysisUsage
Represent the analysis usage information of a pass.
Definition: PassAnalysisSupport.h:47
cloneConstantExprWithNewAddressSpace
static Value * cloneConstantExprWithNewAddressSpace(ConstantExpr *CE, unsigned NewAddrSpace, const ValueToValueMapTy &ValueWithNewAddrSpace, const DataLayout *DL, const TargetTransformInfo *TTI)
Definition: InferAddressSpaces.cpp:671
DenseSet.h
llvm::MaybeAlign
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:109
llvm::Instruction::CastOps
CastOps
Definition: Instruction.h:803
llvm::Instruction
Definition: Instruction.h:45
llvm::DominatorTreeWrapperPass
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:287
llvm::UndefValue::get
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1775
llvm::Use::getUser
User * getUser() const
Returns the User that contains this Use.
Definition: Use.h:73
llvm::PHINode::getNumIncomingValues
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Definition: Instructions.h:2749
llvm::Value::use_empty
bool use_empty() const
Definition: Value.h:344
Type.h
INITIALIZE_PASS_END
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:58
llvm::TargetTransformInfo::hasVolatileVariant
bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const
Return true if the given instruction (assumed to be a memory access instruction) has a volatile varia...
Definition: TargetTransformInfo.cpp:437
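A minimal sketch of the kind of query this supports, assuming a vetting helper is run before a volatile access has its pointer operand retargeted to a specific address space; volatileUseIsRewritable and its parameters are invented names.
  #include "llvm/Analysis/TargetTransformInfo.h"
  #include "llvm/IR/Instructions.h"
  #include "llvm/Support/Casting.h"
  using namespace llvm;
  // Skip volatile loads/stores unless the target can still express them as
  // volatile in the new (specific) address space.
  static bool volatileUseIsRewritable(const TargetTransformInfo &TTI,
                                      Instruction *I, unsigned AS) {
    if (auto *LI = dyn_cast<LoadInst>(I))
      if (LI->isVolatile())
        return TTI.hasVolatileVariant(I, AS);
    if (auto *SI = dyn_cast<StoreInst>(I))
      if (SI->isVolatile())
        return TTI.hasVolatileVariant(I, AS);
    return true; // non-volatile accesses impose no extra constraint here
  }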
DEBUG_TYPE
#define DEBUG_TYPE
Definition: InferAddressSpaces.cpp:138
llvm::TargetTransformInfo::getFlatAddressSpace
unsigned getFlatAddressSpace() const
Returns the address space ID for a target's 'flat' address space.
Definition: TargetTransformInfo.cpp:248
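A small sketch of how a pass might query this once per function and bail out when the target exposes no flat address space; treating ~0u as the "none/uninitialized" marker is an assumption mirroring the UninitializedAddressSpace constant referenced elsewhere on this page.
  #include "llvm/Analysis/TargetTransformInfo.h"
  using namespace llvm;
  // Query the target's generic ("flat") address space once; if the target does
  // not expose one, there is nothing for the inference to do.
  static bool hasUsableFlatAddrSpace(const TargetTransformInfo &TTI,
                                     unsigned &FlatAS) {
    FlatAS = TTI.getFlatAddressSpace();
    return FlatAS != ~0u; // assumption: ~0u marks "no flat address space"
  }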
llvm::DenseSet< Value * >
llvm::Use::set
void set(Value *Val)
Definition: Value.h:868
BasicBlock.h
llvm::cl::opt< bool >
llvm::instructions
inst_range instructions(Function *F)
Definition: InstIterator.h:133
llvm::Constant
This is an important base class in LLVM.
Definition: Constant.h:41
llvm::ICmpInst
This instruction compares its operands according to the predicate given to the constructor.
Definition: Instructions.h:1190
llvm::TargetTransformInfo::collectFlatAddressOperands
bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const
Return any intrinsic address operand indexes which may be rewritten if they use a flat address space ...
Definition: TargetTransformInfo.cpp:252
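A rough sketch of how the collected operand indexes could be consumed; visitRewritableIntrinsicOperands is a hypothetical helper and II stands for an intrinsic call under inspection.
  #include "llvm/ADT/SmallVector.h"
  #include "llvm/Analysis/TargetTransformInfo.h"
  #include "llvm/IR/IntrinsicInst.h"
  using namespace llvm;
  // Ask the target which operands of this intrinsic are flat pointers that may
  // be rewritten, then visit each such operand.
  static void visitRewritableIntrinsicOperands(const TargetTransformInfo &TTI,
                                               IntrinsicInst *II) {
    SmallVector<int, 2> OpIndexes;
    if (!TTI.collectFlatAddressOperands(OpIndexes, II->getIntrinsicID()))
      return; // target reports no rewritable flat-address operands
    for (int Idx : OpIndexes) {
      Value *PtrOp = II->getArgOperand(Idx);
      (void)PtrOp; // a real pass would push this operand onto its worklist
    }
  }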
llvm::TargetTransformInfoWrapperPass
Wrapper pass for TargetTransformInfo.
Definition: TargetTransformInfo.h:2476
llvm::GlobalValue::getParent
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:578
llvm::AssumptionAnalysis
A function analysis which provides an AssumptionCache.
Definition: AssumptionCache.h:173
llvm::PreservedAnalyses::preserve
void preserve()
Mark an analysis as preserved.
Definition: PassManager.h:176
INITIALIZE_PASS_DEPENDENCY
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
llvm::ConstantExpr::getAddrSpaceCast
static Constant * getAddrSpaceCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2265
llvm::PHINode::addIncoming
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
Definition: Instructions.h:2807
llvm::DenseMap< const Value *, unsigned >
isAddressExpression
static bool isAddressExpression(const Value &V, const DataLayout &DL, const TargetTransformInfo *TTI)
Definition: InferAddressSpaces.cpp:297
llvm::GetElementPtrInst
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:933
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:441
llvm::AtomicCmpXchgInst::getPointerOperandIndex
static unsigned getPointerOperandIndex()
Definition: Instructions.h:658
ArrayRef.h
llvm::TargetTransformInfo::rewriteIntrinsicWithAddressSpace
Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const
Rewrite intrinsic call II such that OldV will be replaced with NewV, which has a different address sp...
Definition: TargetTransformInfo.cpp:276
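A hedged sketch of the update step this hook enables: once a replacement pointer NewV in a specific address space exists for the operand OldV, the target may rewrite the whole call; tryRewriteIntrinsic is an invented wrapper, not the pass's own code.
  #include "llvm/Analysis/TargetTransformInfo.h"
  #include "llvm/IR/IntrinsicInst.h"
  using namespace llvm;
  // Let the target rewrite the intrinsic for the new address space; if it
  // returns a replacement value, forward all uses of the old call to it.
  static bool tryRewriteIntrinsic(const TargetTransformInfo &TTI,
                                  IntrinsicInst *II, Value *OldV, Value *NewV) {
    Value *Rewritten = TTI.rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
    if (!Rewritten)
      return false;
    if (Rewritten != II)
      II->replaceAllUsesWith(Rewritten);
    return true;
  }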
IRBuilder.h
llvm::Value::use_begin
use_iterator use_begin()
Definition: Value.h:360
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
llvm::MDNode
Metadata node.
Definition: Metadata.h:906
llvm::User::setOperand
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
llvm::GetElementPtrInst::Create
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Definition: Instructions.h:959
llvm::AssumptionCacheTracker
An immutable pass that tracks lazily created AssumptionCache objects.
Definition: AssumptionCache.h:202
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
None.h
llvm::AnalysisUsage::setPreservesCFG
void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition: Pass.cpp:253
llvm::AssumptionCache
A cache of @llvm.assume calls within a function.
Definition: AssumptionCache.h:42
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:134
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
llvm::TargetTransformInfo::getPredicatedAddrSpace
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const
Definition: TargetTransformInfo.cpp:272
llvm::CFGAnalyses
Represents analyses that only rely on functions' control flow.
Definition: PassManager.h:116
llvm::AnalysisUsage::addPreserved
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
Definition: PassAnalysisSupport.h:98
llvm::Value::replaceAllUsesWith
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:532
Compiler.h
llvm::Value::use_end
use_iterator use_end()
Definition: Value.h:368
llvm::ValueMap< const Value *, WeakTrackingVH >
ValueHandle.h
skipToNextUser
static Value::use_iterator skipToNextUser(Value::use_iterator I, Value::use_iterator End)
Definition: InferAddressSpaces.cpp:1095
llvm::CallBase::setArgOperand
void setArgOperand(unsigned i, Value *v)
Definition: InstrTypes.h:1348
llvm::Value::stripInBoundsOffsets
const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
Definition: Value.cpp:777
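A brief sketch combining this with getAssumedAddrSpace above: peel pointer casts and inbounds GEPs first so the address-space query is made on the underlying object rather than a derived pointer; assumedAddrSpaceOfBase is a hypothetical helper.
  #include "llvm/Analysis/TargetTransformInfo.h"
  #include "llvm/IR/Value.h"
  using namespace llvm;
  // Strip casts and inbounds GEPs, then ask the target what address space the
  // underlying object can be assumed to live in.
  static unsigned assumedAddrSpaceOfBase(const TargetTransformInfo &TTI,
                                         const Value *Ptr) {
    const Value *Base = Ptr->stripInBoundsOffsets();
    return TTI.getAssumedAddrSpace(Base);
  }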
Constant.h
llvm::ConstantExpr
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:971
getPointerOperands
static SmallVector< Value *, 2 > getPointerOperands(const Value &V, const DataLayout &DL, const TargetTransformInfo *TTI)
Definition: InferAddressSpaces.cpp:329
llvm::PreservedAnalyses::all
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:161
llvm::PHINode::Create
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Definition: Instructions.h:2699
llvm::ArrayRef::begin
iterator begin() const
Definition: ArrayRef.h:151
llvm::X86::FirstMacroFusionInstKind::Cmp
@ Cmp
llvm::TargetTransformInfo::isNoopAddrSpaceCast
bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Definition: TargetTransformInfo.cpp:257
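A short sketch of a typical use, assuming we want to know whether a particular addrspacecast can be looked through; castIsNoop is an invented name.
  #include "llvm/Analysis/TargetTransformInfo.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;
  // An addrspacecast whose source and destination spaces the target considers
  // interchangeable does not change the underlying address.
  static bool castIsNoop(const TargetTransformInfo &TTI,
                         const AddrSpaceCastInst *ASC) {
    unsigned SrcAS = ASC->getSrcTy()->getPointerAddressSpace();
    unsigned DstAS = ASC->getDestTy()->getPointerAddressSpace();
    return TTI.isNoopAddrSpaceCast(SrcAS, DstAS);
  }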
llvm::AnalysisManager::getCachedResult
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:802
Casting.h
Function.h
PassManager.h
isSimplePointerUseValidToReplace
static bool isSimplePointerUseValidToReplace(const TargetTransformInfo &TTI, Use &U, unsigned AddrSpace)
returns true if U is the pointer operand of a memory instruction with a single pointer operand that c...
Definition: InferAddressSpaces.cpp:985
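As a simplified sketch of the idea behind this helper, restricted to plain loads and stores (the real helper covers more instruction kinds and volatility); isPointerOperandOfLoadOrStore is an invented name.
  #include "llvm/IR/Instructions.h"
  #include "llvm/IR/Use.h"
  using namespace llvm;
  // Is U the pointer operand of a plain load or store?  Such a use can be
  // retargeted to a pointer in a different address space by mutating the use.
  static bool isPointerOperandOfLoadOrStore(const Use &U) {
    const User *Usr = U.getUser();
    if (isa<LoadInst>(Usr))
      return U.getOperandNo() == LoadInst::getPointerOperandIndex();
    if (isa<StoreInst>(Usr))
      return U.getOperandNo() == StoreInst::getPointerOperandIndex();
    return false;
  }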
llvm::IntrinsicInst
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:45
llvm::DominatorTreeAnalysis
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:252
Instructions.h
llvm::PreservedAnalyses::preserveSet
void preserveSet()
Mark an analysis set as preserved.
Definition: PassManager.h:191
SmallVector.h
User.h
handleMemIntrinsicPtrUse
static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV, Value *NewV)
Update memory intrinsic uses that require more complex processing than simple memory instructions.
Definition: InferAddressSpaces.cpp:1015
Dominators.h
llvm::PointerType::getWithSamePointeeType
static PointerType * getWithSamePointeeType(PointerType *PT, unsigned AddressSpace)
This constructs a pointer type with the same pointee type as input PointerType (or opaque pointer is ...
Definition: DerivedTypes.h:666
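A minimal sketch of how an inferred address space NewAS could be applied to a pointer value's type while preserving its pointee type; typeWithInferredAddrSpace is a hypothetical helper and V is assumed to be a plain (non-vector) pointer.
  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/Value.h"
  using namespace llvm;
  // Build the pointer type V would have in the inferred address space NewAS,
  // keeping the pointee type (or opaqueness) of the original pointer type.
  static Type *typeWithInferredAddrSpace(Value *V, unsigned NewAS) {
    auto *OldPtrTy = cast<PointerType>(V->getType());
    return PointerType::getWithSamePointeeType(OldPtrTy, NewAS);
  }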
llvm::CallBase::getArgOperand
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1343
llvm::Instruction::getParent
const BasicBlock * getParent() const
Definition: Instruction.h:94
llvm::PHINode::getIncomingBlock
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Definition: Instructions.h:2773
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
TargetTransformInfo.h
llvm::PHINode
Definition: Instructions.h:2657
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:43
llvm::AnalysisManager
A container for analyses that lazily runs them and caches their results.
Definition: InstructionSimplify.h:44
AssumeDefaultIsFlatAddressSpace
static cl::opt< bool > AssumeDefaultIsFlatAddressSpace("assume-default-is-flat-addrspace", cl::init(false), cl::ReallyHidden, cl::desc("The default address space is assumed as the flat address space. " "This is mainly for test purpose."))
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:298
llvm::CallInst
This class represents a function call, abstracting a target machine's calling convention.
Definition: Instructions.h:1478
UninitializedAddressSpace
static const unsigned UninitializedAddressSpace
Definition: InferAddressSpaces.cpp:147
llvm::ValueMap::lookup
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: ValueMap.h:165
llvm::AnalysisUsage::addRequired
AnalysisUsage & addRequired()
Definition: PassAnalysisSupport.h:75
LLVMContext.h
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition: User.h:169
llvm::cl::desc
Definition: CommandLine.h:412
raw_ostream.h
llvm::StoreInst::getPointerOperandIndex
static unsigned getPointerOperandIndex()
Definition: Instructions.h:408
llvm::SetVector< Value * >
Value.h
InitializePasses.h
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
llvm::InferAddressSpacesPass::run
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Definition: InferAddressSpaces.cpp:1302
llvm::InferAddressSpacesPass::InferAddressSpacesPass
InferAddressSpacesPass()
Definition: InferAddressSpaces.cpp:1297
Debug.h
llvm::Value::users
iterator_range< user_iterator > users()
Definition: Value.h:421
llvm::ArrayRef::end
iterator end() const
Definition: ArrayRef.h:152
SetVector.h
llvm::Use
A Use represents the edge between a Value definition and its users.
Definition: Use.h:44
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:38