NVPTXInferAddressSpaces.cpp
//===-- NVPTXInferAddressSpaces.cpp - --------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// CUDA C/C++ includes memory space designation as variable type qualifiers
// (such as __global__ and __shared__). Knowing the space of a memory access
// allows CUDA compilers to emit faster PTX loads and stores. For example, a
// load from shared memory can be translated to `ld.shared`, which is roughly
// 10% faster than a generic `ld` on an NVIDIA Tesla K40c.
//
// Unfortunately, type qualifiers only apply to variable declarations, so CUDA
// compilers must infer the memory space of an address expression from
// type-qualified variables.
//
// LLVM IR uses non-zero (so-called specific) address spaces to represent
// memory spaces (e.g. addrspace(3) means shared memory). The Clang frontend
// places only type-qualified variables in specific address spaces, and then
// conservatively `addrspacecast`s each type-qualified variable to addrspace(0)
// (the so-called generic address space) for other instructions to use.
//
// For example, Clang translates the following CUDA code
//   __shared__ float a[10];
//   float v = a[i];
// to
//   %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
//   %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
//   %v = load float, float* %1 ; emits ld.f32
// @a is in addrspace(3) since it's type-qualified, but its use from %1 is
// redirected to %0 (the generic version of @a).
//
// The optimization implemented in this file propagates specific address spaces
// from type-qualified variable declarations to their users. For example, it
// optimizes the above IR to
//   %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
//   %v = load float addrspace(3)* %1 ; emits ld.shared.f32
// propagating the addrspace(3) from @a to %1. As a result, the NVPTX codegen
// is able to emit ld.shared.f32 for %v.
//
// Address space inference works in two steps. First, it uses a data-flow
// analysis to infer as many generic pointers as possible to point to only one
// specific address space. In the above example, it can prove that %1 only
// points to addrspace(3). This algorithm was published in
//   CUDA: Compiling and optimizing for a GPU platform
//   Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang
//   ICCS 2012
//
// Then, address space inference replaces all refinable generic pointers with
// equivalent specific pointers.
//
// The major challenge of implementing this optimization is handling PHINodes,
// which may create loops in the data flow graph. This brings two complications.
//
// First, the data flow analysis in Step 1 needs to be circular. For example,
//     %generic.input = addrspacecast float addrspace(3)* %input to float*
//   loop:
//     %y = phi [ %generic.input, %y2 ]
//     %y2 = getelementptr %y, 1
//     %v = load %y2
//     br ..., label %loop, ...
// proving %y specific requires proving both %generic.input and %y2 specific,
// but proving %y2 specific circles back to %y. To address this complication,
// the data flow analysis operates on a lattice:
//   uninitialized > specific address spaces > generic.
// All address expressions (our implementation only considers phi, bitcast,
// addrspacecast, and getelementptr) start with the uninitialized address space.
// The monotone transfer function moves the address space of a pointer down a
// lattice path from uninitialized to specific and then to generic. A join
// operation of two different specific address spaces pushes the expression down
// to the generic address space. The analysis completes once it reaches a fixed
// point.
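// For instance, joining a specific space such as addrspace(3) with the
// uninitialized state yields addrspace(3), whereas joining two different
// specific spaces yields the generic address space (see joinAddressSpaces
// below).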
//
// Second, IR rewriting in Step 2 also needs to be circular. For example,
// converting %y to addrspace(3) requires the compiler to know the converted
// %y2, but converting %y2 needs the converted %y. To address this complication,
// we break these cycles using "undef" placeholders. When converting an
// instruction `I` to a new address space, if its operand `Op` is not converted
// yet, we let `I` temporarily use `undef` and fix all the uses of undef later.
// For instance, our algorithm first converts %y to
//   %y' = phi float addrspace(3)* [ %input, undef ]
// Then, it converts %y2 to
//   %y2' = getelementptr %y', 1
// Finally, it fixes the undef in %y' so that
//   %y' = phi float addrspace(3)* [ %input, %y2' ]
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "nvptx-infer-addrspace"

#include "NVPTX.h"
#include "MCTargetDesc/NVPTXBaseInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"

using namespace llvm;

namespace {
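// The "uninitialized" state below is the top element of the lattice described
// in the file header (uninitialized > specific address spaces > generic); all
// address expressions start in this state.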
const unsigned ADDRESS_SPACE_UNINITIALIZED = (unsigned)-1;

using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;

/// \brief NVPTXInferAddressSpaces
class NVPTXInferAddressSpaces : public FunctionPass {
public:
  static char ID;

  NVPTXInferAddressSpaces() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
  }

  bool runOnFunction(Function &F) override;

private:
  // Returns the new address space of V if updated; otherwise, returns None.
  Optional<unsigned>
  updateAddressSpace(const Value &V,
                     const ValueToAddrSpaceMapTy &InferredAddrSpace);

  // Tries to infer the specific address space of each address expression in
  // Postorder.
  void inferAddressSpaces(const std::vector<Value *> &Postorder,
                          ValueToAddrSpaceMapTy *InferredAddrSpace);

  // Changes the generic address expressions in function F to point to specific
  // address spaces if InferredAddrSpace says so. Postorder is the postorder of
  // all generic address expressions in the use-def graph of function F.
  bool
  rewriteWithNewAddressSpaces(const std::vector<Value *> &Postorder,
                              const ValueToAddrSpaceMapTy &InferredAddrSpace,
                              Function *F);
};
} // end anonymous namespace

char NVPTXInferAddressSpaces::ID = 0;

namespace llvm {
void initializeNVPTXInferAddressSpacesPass(PassRegistry &);
}
INITIALIZE_PASS(NVPTXInferAddressSpaces, "nvptx-infer-addrspace",
                "Infer address spaces", false, false)

// Returns true if V is an address expression.
// TODO: Currently, we consider only phi, bitcast, addrspacecast, and
// getelementptr operators.
static bool isAddressExpression(const Value &V) {
  if (!isa<Operator>(V))
    return false;

  switch (cast<Operator>(V).getOpcode()) {
  case Instruction::PHI:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}

// Returns the pointer operands of V.
//
// Precondition: V is an address expression.
static SmallVector<Value *, 2> getPointerOperands(const Value &V) {
  assert(isAddressExpression(V));
  const Operator &Op = cast<Operator>(V);
  switch (Op.getOpcode()) {
  case Instruction::PHI: {
    auto IncomingValues = cast<PHINode>(Op).incoming_values();
    return SmallVector<Value *, 2>(IncomingValues.begin(),
                                   IncomingValues.end());
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return {Op.getOperand(0)};
  default:
    llvm_unreachable("Unexpected instruction type.");
  }
}

// If V is an unvisited generic address expression, appends V to PostorderStack
// and marks it as visited.
static void appendsGenericAddressExpressionToPostorderStack(
    Value *V, std::vector<std::pair<Value *, bool>> *PostorderStack,
    DenseSet<Value *> *Visited) {
  assert(V->getType()->isPointerTy());
  if (isAddressExpression(*V) &&
      V->getType()->getPointerAddressSpace() ==
          AddressSpace::ADDRESS_SPACE_GENERIC) {
    if (Visited->insert(V).second)
      PostorderStack->push_back(std::make_pair(V, false));
  }
}

// Returns all generic address expressions in function F. The elements are
// ordered in postorder.
static std::vector<Value *> collectGenericAddressExpressions(Function &F) {
  // This function implements a non-recursive postorder traversal of a partial
  // use-def graph of function F.
  std::vector<std::pair<Value *, bool>> PostorderStack;
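  // The boolean in each stack entry records whether the value's pointer
  // operands have already been pushed, i.e. whether the value is ready to be
  // emitted in postorder.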
  // The set of visited expressions.
  DenseSet<Value *> Visited;
  // We only explore address expressions that are reachable from loads and
  // stores for now because we aim at generating faster loads and stores.
  for (Instruction &I : instructions(F)) {
    if (isa<LoadInst>(I)) {
      appendsGenericAddressExpressionToPostorderStack(
          I.getOperand(0), &PostorderStack, &Visited);
    } else if (isa<StoreInst>(I)) {
      appendsGenericAddressExpressionToPostorderStack(
          I.getOperand(1), &PostorderStack, &Visited);
    }
  }

  std::vector<Value *> Postorder; // The resultant postorder.
  while (!PostorderStack.empty()) {
    // If the operands of the expression on top of the stack are already
    // explored, adds that expression to the resultant postorder.
    if (PostorderStack.back().second) {
      Postorder.push_back(PostorderStack.back().first);
      PostorderStack.pop_back();
      continue;
    }
    // Otherwise, adds its operands to the stack and explores them.
    PostorderStack.back().second = true;
    for (Value *PtrOperand : getPointerOperands(*PostorderStack.back().first)) {
      appendsGenericAddressExpressionToPostorderStack(
          PtrOperand, &PostorderStack, &Visited);
    }
  }
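  // In the running example from the file header, the load's pointer operand %1
  // (a gep) and its operand %0 (an addrspacecast) are both generic, so the
  // returned postorder is {%0, %1}: operands precede their users.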
  return Postorder;
}

// A helper function for cloneInstructionWithNewAddressSpace. Returns the clone
// of OperandUse.get() in the new address space. If the clone is not ready yet,
// returns an undef in the new address space as a placeholder.
static Value *operandWithNewAddressSpaceOrCreateUndef(
    const Use &OperandUse, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) {
  Value *Operand = OperandUse.get();
  if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
    return NewOperand;

  UndefUsesToFix->push_back(&OperandUse);
  return UndefValue::get(
      Operand->getType()->getPointerElementType()->getPointerTo(NewAddrSpace));
}

// Returns a clone of `I` with its operands converted to those specified in
// ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
// operand whose address space needs to be modified might not exist in
// ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and
// adds that operand use to UndefUsesToFix so that the caller can fix them
// later.
//
// Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
// from a pointer whose type already matches. Therefore, this function returns
// a Value* instead of an Instruction*.
static Value *cloneInstructionWithNewAddressSpace(
    Instruction *I, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) {
  Type *NewPtrType =
      I->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (I->getOpcode() == Instruction::AddrSpaceCast) {
    Value *Src = I->getOperand(0);
    // Because `I` is generic, the source address space must be specific.
    // Therefore, the inferred address space must be the source space,
    // according to our algorithm.
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    if (Src->getType() != NewPtrType)
      return new BitCastInst(Src, NewPtrType);
    return Src;
  }

  // Computes the converted pointer operands.
  SmallVector<Value *, 4> NewPointerOperands;
  for (const Use &OperandUse : I->operands()) {
    if (!OperandUse.get()->getType()->isPointerTy())
      NewPointerOperands.push_back(nullptr);
    else
      NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
          OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix));
  }

  switch (I->getOpcode()) {
  case Instruction::BitCast:
    return new BitCastInst(NewPointerOperands[0], NewPtrType);
  case Instruction::PHI: {
    assert(I->getType()->isPointerTy());
    PHINode *PHI = cast<PHINode>(I);
    PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
    for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
      unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
      NewPHI->addIncoming(NewPointerOperands[OperandNo],
                          PHI->getIncomingBlock(Index));
    }
    return NewPHI;
  }
  case Instruction::GetElementPtr: {
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
    GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
        GEP->getSourceElementType(), NewPointerOperands[0],
        SmallVector<Value *, 4>(GEP->idx_begin(), GEP->idx_end()));
    NewGEP->setIsInBounds(GEP->isInBounds());
    return NewGEP;
  }
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

// Similar to cloneInstructionWithNewAddressSpace, returns a clone of the
// constant expression `CE` with its operands replaced as specified in
// ValueWithNewAddrSpace.
static Value *cloneConstantExprWithNewAddressSpace(
    ConstantExpr *CE, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace) {
  Type *TargetType =
      CE->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
    // Because CE is generic, the source address space must be specific.
    // Therefore, the inferred address space must be the source space according
    // to our algorithm.
    assert(CE->getOperand(0)->getType()->getPointerAddressSpace() ==
           NewAddrSpace);
    return ConstantExpr::getBitCast(CE->getOperand(0), TargetType);
  }

  // Computes the operands of the new constant expression.
  SmallVector<Constant *, 4> NewOperands;
  for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) {
    Constant *Operand = CE->getOperand(Index);
    // If the address space of `Operand` needs to be modified, the new operand
    // with the new address space should already be in ValueWithNewAddrSpace
    // because (1) the constant expressions we consider (i.e. addrspacecast,
    // bitcast, and getelementptr) do not incur cycles in the data flow graph
    // and (2) this function is called on constant expressions in postorder.
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) {
      NewOperands.push_back(cast<Constant>(NewOperand));
    } else {
      // Otherwise, reuses the old operand.
      NewOperands.push_back(Operand);
    }
  }

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    // Needs to specify the source type while constructing a getelementptr
    // constant expression.
    return CE->getWithOperands(
        NewOperands, TargetType, /*OnlyIfReduced=*/false,
        NewOperands[0]->getType()->getPointerElementType());
  }

  return CE->getWithOperands(NewOperands, TargetType);
}

// Returns a clone of the value `V`, with its operands replaced as specified in
// ValueWithNewAddrSpace. This function is called on every generic address
// expression whose address space needs to be modified, in postorder.
//
// See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix.
static Value *
cloneValueWithNewAddressSpace(Value *V, unsigned NewAddrSpace,
                              const ValueToValueMapTy &ValueWithNewAddrSpace,
                              SmallVectorImpl<const Use *> *UndefUsesToFix) {
  // All values in Postorder are generic address expressions.
  assert(isAddressExpression(*V) &&
         V->getType()->getPointerAddressSpace() ==
             AddressSpace::ADDRESS_SPACE_GENERIC);

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Value *NewV = cloneInstructionWithNewAddressSpace(
        I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix);
    if (Instruction *NewI = dyn_cast<Instruction>(NewV)) {
      if (NewI->getParent() == nullptr) {
        NewI->insertBefore(I);
        NewI->takeName(I);
      }
    }
    return NewV;
  }

  return cloneConstantExprWithNewAddressSpace(
      cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace);
}

// Defines the join operation on the address space lattice (see the file header
// comments).
static unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) {
  if (AS1 == AddressSpace::ADDRESS_SPACE_GENERIC ||
      AS2 == AddressSpace::ADDRESS_SPACE_GENERIC)
    return (unsigned)AddressSpace::ADDRESS_SPACE_GENERIC;

  if (AS1 == ADDRESS_SPACE_UNINITIALIZED)
    return AS2;
  if (AS2 == ADDRESS_SPACE_UNINITIALIZED)
    return AS1;

  // The join of two different specific address spaces is generic.
  return AS1 == AS2 ? AS1 : (unsigned)AddressSpace::ADDRESS_SPACE_GENERIC;
}

bool NVPTXInferAddressSpaces::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  // Collects all generic address expressions in postorder.
  std::vector<Value *> Postorder = collectGenericAddressExpressions(F);

  // Runs a data-flow analysis to refine the address spaces of every expression
  // in Postorder.
  ValueToAddrSpaceMapTy InferredAddrSpace;
  inferAddressSpaces(Postorder, &InferredAddrSpace);

  // Changes the address spaces of the generic address expressions that are
  // inferred to point to a specific address space.
  return rewriteWithNewAddressSpaces(Postorder, InferredAddrSpace, &F);
}

void NVPTXInferAddressSpaces::inferAddressSpaces(
    const std::vector<Value *> &Postorder,
    ValueToAddrSpaceMapTy *InferredAddrSpace) {
  SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
  // Initially, all expressions are in the uninitialized address space.
  for (Value *V : Postorder)
    (*InferredAddrSpace)[V] = ADDRESS_SPACE_UNINITIALIZED;

  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();

    // Tries to update the address space of V according to the address spaces
    // of its operands.
    DEBUG(dbgs() << "Updating the address space of\n"
                 << " " << *V << "\n");
    Optional<unsigned> NewAS = updateAddressSpace(*V, *InferredAddrSpace);
    if (!NewAS.hasValue())
      continue;
    // If any update is made, adds the users of V to the worklist because
    // their address spaces may need updating as well.
    DEBUG(dbgs() << " to " << NewAS.getValue() << "\n");
    (*InferredAddrSpace)[V] = NewAS.getValue();

    for (Value *User : V->users()) {
      // Skip if User is already in the worklist.
      if (Worklist.count(User))
        continue;

      auto Pos = InferredAddrSpace->find(User);
      // Our algorithm only updates the address spaces of generic address
      // expressions, which are those in InferredAddrSpace.
      if (Pos == InferredAddrSpace->end())
        continue;

      // Function updateAddressSpace moves the address space down a lattice
      // path. Therefore, nothing to do if User is already inferred as
      // generic (the bottom element in the lattice).
      if (Pos->second == AddressSpace::ADDRESS_SPACE_GENERIC)
        continue;

      Worklist.insert(User);
    }
  }
}

Optional<unsigned> NVPTXInferAddressSpaces::updateAddressSpace(
    const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) {
  assert(InferredAddrSpace.count(&V));

  // The new inferred address space equals the join of the address spaces
  // of all its pointer operands.
  unsigned NewAS = ADDRESS_SPACE_UNINITIALIZED;
  for (Value *PtrOperand : getPointerOperands(V)) {
    unsigned OperandAS;
    if (InferredAddrSpace.count(PtrOperand))
      OperandAS = InferredAddrSpace.lookup(PtrOperand);
    else
      OperandAS = PtrOperand->getType()->getPointerAddressSpace();
    NewAS = joinAddressSpaces(NewAS, OperandAS);
    // join(generic, *) = generic. So we can break if NewAS is already generic.
    if (NewAS == AddressSpace::ADDRESS_SPACE_GENERIC)
      break;
  }

  unsigned OldAS = InferredAddrSpace.lookup(&V);
  assert(OldAS != AddressSpace::ADDRESS_SPACE_GENERIC);
  if (OldAS == NewAS)
    return None;
  return NewAS;
}

bool NVPTXInferAddressSpaces::rewriteWithNewAddressSpaces(
    const std::vector<Value *> &Postorder,
    const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) {
  // For each address expression to be modified, creates a clone of it with its
  // pointer operands converted to the new address space. Since the pointer
  // operands are converted, the clone is naturally in the new address space by
  // construction.
  ValueToValueMapTy ValueWithNewAddrSpace;
  SmallVector<const Use *, 32> UndefUsesToFix;
  for (Value *V : Postorder) {
    unsigned NewAddrSpace = InferredAddrSpace.lookup(V);
    if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
      ValueWithNewAddrSpace[V] = cloneValueWithNewAddressSpace(
          V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix);
    }
  }

  if (ValueWithNewAddrSpace.empty())
    return false;

  // Fixes all the undef uses generated by cloneInstructionWithNewAddressSpace.
  for (const Use *UndefUse : UndefUsesToFix) {
    User *V = UndefUse->getUser();
    User *NewV = cast<User>(ValueWithNewAddrSpace.lookup(V));
    unsigned OperandNo = UndefUse->getOperandNo();
    assert(isa<UndefValue>(NewV->getOperand(OperandNo)));
    NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(UndefUse->get()));
  }

  // Replaces the uses of the old address expressions with the new ones.
  for (Value *V : Postorder) {
    Value *NewV = ValueWithNewAddrSpace.lookup(V);
    if (NewV == nullptr)
      continue;

    SmallVector<Use *, 4> Uses;
    for (Use &U : V->uses())
      Uses.push_back(&U);
    DEBUG(dbgs() << "Replacing the uses of " << *V << "\n to\n " << *NewV
                 << "\n");
    for (Use *U : Uses) {
      if (isa<LoadInst>(U->getUser()) ||
          (isa<StoreInst>(U->getUser()) && U->getOperandNo() == 1)) {
        // If V is used as the pointer operand of a load/store, sets the
        // pointer operand to NewV. This replacement does not change the
        // element type, so the resultant load/store is still valid.
        U->set(NewV);
      } else if (isa<Instruction>(U->getUser())) {
        // Otherwise, replaces the use with generic(NewV).
        // TODO: Some optimization opportunities are missed. For example, in
        //   %0 = icmp eq float* %p, %q
        // if both p and q are inferred to be shared, we can rewrite %0 as
        //   %0 = icmp eq float addrspace(3)* %new_p, %new_q
        // instead of the current
        //   %generic_p = addrspacecast float addrspace(3)* %new_p to float*
        //   %generic_q = addrspacecast float addrspace(3)* %new_q to float*
        //   %0 = icmp eq float* %generic_p, %generic_q
        if (Instruction *I = dyn_cast<Instruction>(V)) {
          BasicBlock::iterator InsertPos = std::next(I->getIterator());
          while (isa<PHINode>(InsertPos))
            ++InsertPos;
          U->set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
        } else {
          U->set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
                                                V->getType()));
        }
      }
    }
    if (V->use_empty())
      RecursivelyDeleteTriviallyDeadInstructions(V);
  }

  return true;
}

FunctionPass *llvm::createNVPTXInferAddressSpacesPass() {
  return new NVPTXInferAddressSpaces();
}