LLVM  4.0.0
NVPTXInferAddressSpaces.cpp
//===-- NVPTXInferAddressSpaces.cpp ------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// CUDA C/C++ includes memory space designation as variable type qualifiers
// (such as __global__ and __shared__). Knowing the space of a memory access
// allows CUDA compilers to emit faster PTX loads and stores. For example, a
// load from shared memory can be translated to `ld.shared`, which is roughly
// 10% faster than a generic `ld` on an NVIDIA Tesla K40c.
//
// Unfortunately, type qualifiers only apply to variable declarations, so CUDA
// compilers must infer the memory space of an address expression from
// type-qualified variables.
//
// LLVM IR uses non-zero, so-called specific address spaces to represent memory
// spaces (e.g. addrspace(3) means shared memory). The Clang frontend
// places only type-qualified variables in specific address spaces, and then
// conservatively `addrspacecast`s each type-qualified variable to addrspace(0)
// (the so-called generic address space) for other instructions to use.
//
// For example, Clang translates the following CUDA code
//   __shared__ float a[10];
//   float v = a[i];
// to
//   %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
//   %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
//   %v = load float, float* %1 ; emits ld.f32
// @a is in addrspace(3) since it's type-qualified, but its use in %1 is
// routed through %0 (the generic version of @a).
//
// The optimization implemented in this file propagates specific address spaces
// from type-qualified variable declarations to their users. For example, it
// optimizes the above IR to
//   %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
//   %v = load float addrspace(3)* %1 ; emits ld.shared.f32
// propagating the addrspace(3) from @a to %1. As a result, the NVPTX
// codegen is able to emit ld.shared.f32 for %v.
//
// Address space inference works in two steps. First, it runs a data-flow
// analysis that proves, for as many generic pointers as possible, that each
// points into only one specific address space. In the above example, it can
// prove that %1 only points to addrspace(3). This algorithm was published in
//   CUDA: Compiling and optimizing for a GPU platform
//   Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang
//   ICCS 2012
//
// Then, address space inference replaces all refinable generic pointers with
// equivalent specific pointers.
//
// The major challenge of implementing this optimization is handling PHINodes,
// which may create loops in the data flow graph. This brings two complications.
//
// First, the data flow analysis in Step 1 needs to be circular. For example,
//     %generic.input = addrspacecast float addrspace(3)* %input to float*
//   loop:
//     %y = phi [ %generic.input, %y2 ]
//     %y2 = getelementptr %y, 1
//     %v = load %y2
//     br ..., label %loop, ...
// proving %y specific requires proving both %generic.input and %y2 specific,
// but proving %y2 specific circles back to %y. To address this complication,
// the data flow analysis operates on a lattice:
//   uninitialized > specific address spaces > generic.
// All address expressions (our implementation only considers phi, bitcast,
// addrspacecast, and getelementptr) start with the uninitialized address space.
// The monotone transfer function moves the address space of a pointer down a
// lattice path from uninitialized to specific and then to generic. A join
// operation of two different specific address spaces pushes the expression down
// to the generic address space. The analysis completes once it reaches a fixed
// point.
//
// Second, IR rewriting in Step 2 also needs to be circular. For example,
// converting %y to addrspace(3) requires the compiler to know the converted
// %y2, but converting %y2 needs the converted %y. To address this complication,
// we break these cycles using "undef" placeholders. When converting an
// instruction `I` to a new address space, if its operand `Op` is not converted
// yet, we let `I` temporarily use `undef` and fix all the uses of undef later.
// For instance, our algorithm first converts %y to
//   %y' = phi float addrspace(3)* [ %input, undef ]
// Then, it converts %y2 to
//   %y2' = getelementptr %y', 1
// Finally, it fixes the undef in %y' so that
//   %y' = phi float addrspace(3)* [ %input, %y2' ]
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "nvptx-infer-addrspace"

#include "NVPTX.h"
#include "MCTargetDesc/NVPTXBaseInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"

using namespace llvm;

namespace {
const unsigned ADDRESS_SPACE_UNINITIALIZED = (unsigned)-1;

using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;

/// \brief Infers the specific address spaces of generic address expressions
/// in a function.
class NVPTXInferAddressSpaces : public FunctionPass {
public:
  static char ID;

  NVPTXInferAddressSpaces() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

private:
  // Returns the new address space of V if updated; otherwise, returns None.
  Optional<unsigned>
  updateAddressSpace(const Value &V,
                     const ValueToAddrSpaceMapTy &InferredAddrSpace);

  // Tries to infer the specific address space of each address expression in
  // Postorder.
  void inferAddressSpaces(const std::vector<Value *> &Postorder,
                          ValueToAddrSpaceMapTy *InferredAddrSpace);

  // Changes the generic address expressions in function F to point to specific
  // address spaces if InferredAddrSpace says so. Postorder is the postorder of
  // all generic address expressions in the use-def graph of function F.
  bool
  rewriteWithNewAddressSpaces(const std::vector<Value *> &Postorder,
                              const ValueToAddrSpaceMapTy &InferredAddrSpace,
                              Function *F);
};
} // end anonymous namespace

char NVPTXInferAddressSpaces::ID = 0;

namespace llvm {
void initializeNVPTXInferAddressSpacesPass(PassRegistry &);
}
INITIALIZE_PASS(NVPTXInferAddressSpaces, "nvptx-infer-addrspace",
                "Infer address spaces",
                false, false)
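
// A minimal usage sketch (an illustration, assuming the legacy pass manager
// and an llvm::Module M; createNVPTXInferAddressSpacesPass() is declared in
// NVPTX.h and defined at the end of this file):
//   legacy::FunctionPassManager FPM(&M); // needs llvm/IR/LegacyPassManager.h
//   FPM.add(createNVPTXInferAddressSpacesPass());
//   for (Function &F : M)
//     FPM.run(F);
// Because the pass is registered above as "nvptx-infer-addrspace", it can also
// be exercised in isolation with `opt -nvptx-infer-addrspace` in builds that
// include the NVPTX backend.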

// Returns true if V is an address expression.
// TODO: Currently, we consider only phi, bitcast, addrspacecast, and
// getelementptr operators.
static bool isAddressExpression(const Value &V) {
  if (!isa<Operator>(V))
    return false;

  switch (cast<Operator>(V).getOpcode()) {
  case Instruction::PHI:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}

// Returns the pointer operands of V.
//
// Precondition: V is an address expression.
static SmallVector<Value *, 2> getPointerOperands(const Value &V) {
  assert(isAddressExpression(V));
  const Operator &Op = cast<Operator>(V);
  switch (Op.getOpcode()) {
  case Instruction::PHI: {
    auto IncomingValues = cast<PHINode>(Op).incoming_values();
    return SmallVector<Value *, 2>(IncomingValues.begin(),
                                   IncomingValues.end());
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return {Op.getOperand(0)};
  default:
    llvm_unreachable("Unexpected instruction type.");
  }
}

// If V is an unvisited generic address expression, appends V to PostorderStack
// and marks it as visited.
static void appendsGenericAddressExpressionToPostorderStack(
    Value *V, std::vector<std::pair<Value *, bool>> *PostorderStack,
    DenseSet<Value *> *Visited) {
  assert(V->getType()->isPointerTy());
  if (isAddressExpression(*V) &&
      V->getType()->getPointerAddressSpace() ==
          AddressSpace::ADDRESS_SPACE_GENERIC) {
    if (Visited->insert(V).second)
      PostorderStack->push_back(std::make_pair(V, false));
  }
}

// Returns all generic address expressions in function F. The elements are
// ordered in postorder.
static std::vector<Value *> collectGenericAddressExpressions(Function &F) {
  // This function implements a non-recursive postorder traversal of a partial
  // use-def graph of function F.
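  // Each stack entry pairs a value with a flag recording whether the value's
  // operands have already been pushed; a value is appended to the resulting
  // postorder only when it is popped with the flag set.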
  std::vector<std::pair<Value *, bool>> PostorderStack;
  // The set of visited expressions.
  DenseSet<Value *> Visited;
  // We only explore address expressions that are reachable from loads and
  // stores for now because we aim at generating faster loads and stores.
  for (Instruction &I : instructions(F)) {
    if (isa<LoadInst>(I)) {
      appendsGenericAddressExpressionToPostorderStack(
          I.getOperand(0), &PostorderStack, &Visited);
    } else if (isa<StoreInst>(I)) {
      appendsGenericAddressExpressionToPostorderStack(
          I.getOperand(1), &PostorderStack, &Visited);
    }
  }

  std::vector<Value *> Postorder; // The resultant postorder.
  while (!PostorderStack.empty()) {
    // If the operands of the expression on top of the stack are already
    // explored, adds that expression to the resultant postorder.
    if (PostorderStack.back().second) {
      Postorder.push_back(PostorderStack.back().first);
      PostorderStack.pop_back();
      continue;
    }
    // Otherwise, adds its operands to the stack and explores them.
    PostorderStack.back().second = true;
    for (Value *PtrOperand : getPointerOperands(*PostorderStack.back().first)) {
      appendsGenericAddressExpressionToPostorderStack(
          PtrOperand, &PostorderStack, &Visited);
    }
  }
  return Postorder;
}

// A helper function for cloneInstructionWithNewAddressSpace. Returns the clone
// of OperandUse.get() in the new address space. If the clone is not ready yet,
// returns an undef in the new address space as a placeholder.
static Value *operandWithNewAddressSpaceOrCreateUndef(
    const Use &OperandUse, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) {
  Value *Operand = OperandUse.get();
  if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
    return NewOperand;

  UndefUsesToFix->push_back(&OperandUse);
  return UndefValue::get(
      Operand->getType()->getPointerElementType()->getPointerTo(NewAddrSpace));
}

// Returns a clone of `I` with its operands converted to those specified in
// ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
// operand whose address space needs to be modified might not exist in
// ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and
// adds that operand use to UndefUsesToFix so that the caller can fix them
// later.
//
// Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
// from a pointer whose type already matches. Therefore, this function returns a
// Value* instead of an Instruction*.
static Value *cloneInstructionWithNewAddressSpace(
    Instruction *I, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) {
  Type *NewPtrType =
      I->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (I->getOpcode() == Instruction::AddrSpaceCast) {
    Value *Src = I->getOperand(0);
    // Because `I` is generic, the source address space must be specific.
    // Therefore, the inferred address space must be the source space, according
    // to our algorithm.
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    if (Src->getType() != NewPtrType)
      return new BitCastInst(Src, NewPtrType);
    return Src;
  }

  // Computes the converted pointer operands.
  SmallVector<Value *, 4> NewPointerOperands;
  for (const Use &OperandUse : I->operands()) {
    if (!OperandUse.get()->getType()->isPointerTy())
      NewPointerOperands.push_back(nullptr);
    else
      NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
          OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix));
  }

  switch (I->getOpcode()) {
  case Instruction::BitCast:
    return new BitCastInst(NewPointerOperands[0], NewPtrType);
  case Instruction::PHI: {
    assert(I->getType()->isPointerTy());
    PHINode *PHI = cast<PHINode>(I);
    PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
    for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
      unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
      NewPHI->addIncoming(NewPointerOperands[OperandNo],
                          PHI->getIncomingBlock(Index));
    }
    return NewPHI;
  }
  case Instruction::GetElementPtr: {
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
    GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
        GEP->getSourceElementType(), NewPointerOperands[0],
        SmallVector<Value *, 4>(GEP->idx_begin(), GEP->idx_end()));
    NewGEP->setIsInBounds(GEP->isInBounds());
    return NewGEP;
  }
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

// Similar to cloneInstructionWithNewAddressSpace, returns a clone of the
// constant expression `CE` with its operands replaced as specified in
// ValueWithNewAddrSpace.
static Value *cloneConstantExprWithNewAddressSpace(
    ConstantExpr *CE, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace) {
  Type *TargetType =
      CE->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
    // Because CE is generic, the source address space must be specific.
    // Therefore, the inferred address space must be the source space according
    // to our algorithm.
    assert(CE->getOperand(0)->getType()->getPointerAddressSpace() ==
           NewAddrSpace);
    return ConstantExpr::getBitCast(CE->getOperand(0), TargetType);
  }

  // Computes the operands of the new constant expression.
  SmallVector<Constant *, 4> NewOperands;
  for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) {
    Constant *Operand = CE->getOperand(Index);
    // If the address space of `Operand` needs to be modified, the new operand
    // with the new address space should already be in ValueWithNewAddrSpace
    // because (1) the constant expressions we consider (i.e. addrspacecast,
    // bitcast, and getelementptr) do not incur cycles in the data flow graph
    // and (2) this function is called on constant expressions in postorder.
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) {
      NewOperands.push_back(cast<Constant>(NewOperand));
    } else {
      // Otherwise, reuses the old operand.
      NewOperands.push_back(Operand);
    }
  }

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    // Needs to specify the source type while constructing a getelementptr
    // constant expression.
    return CE->getWithOperands(
        NewOperands, TargetType, /*OnlyIfReduced=*/false,
        NewOperands[0]->getType()->getPointerElementType());
  }

  return CE->getWithOperands(NewOperands, TargetType);
}

// Returns a clone of the value `V`, with its operands replaced as specified in
// ValueWithNewAddrSpace. This function is called on every generic address
// expression whose address space needs to be modified, in postorder.
//
// See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix.
static Value *
cloneValueWithNewAddressSpace(Value *V, unsigned NewAddrSpace,
                              const ValueToValueMapTy &ValueWithNewAddrSpace,
                              SmallVectorImpl<const Use *> *UndefUsesToFix) {
  // All values in Postorder are generic address expressions.
  assert(isAddressExpression(*V) &&
         V->getType()->getPointerAddressSpace() ==
             AddressSpace::ADDRESS_SPACE_GENERIC);

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Value *NewV = cloneInstructionWithNewAddressSpace(
        I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix);
    if (Instruction *NewI = dyn_cast<Instruction>(NewV)) {
      if (NewI->getParent() == nullptr) {
        NewI->insertBefore(I);
        NewI->takeName(I);
      }
    }
    return NewV;
  }

  return cloneConstantExprWithNewAddressSpace(
      cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace);
}

// Defines the join operation on the address space lattice (see the file header
// comments).
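// For reference, NVPTX numbers these spaces as 0 = generic, 1 = global,
// 3 = shared, 4 = const, and 5 = local (see MCTargetDesc/NVPTXBaseInfo.h).
// For example, joinAddressSpaces(3, 3) == 3, while joinAddressSpaces(3, 1)
// falls down to the generic space 0.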
static unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) {
  if (AS1 == AddressSpace::ADDRESS_SPACE_GENERIC ||
      AS2 == AddressSpace::ADDRESS_SPACE_GENERIC)
    return (unsigned)AddressSpace::ADDRESS_SPACE_GENERIC;

  if (AS1 == ADDRESS_SPACE_UNINITIALIZED)
    return AS2;
  if (AS2 == ADDRESS_SPACE_UNINITIALIZED)
    return AS1;

  // The join of two different specific address spaces is generic.
  return AS1 == AS2 ? AS1 : (unsigned)AddressSpace::ADDRESS_SPACE_GENERIC;
}

bool NVPTXInferAddressSpaces::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  // Collects all generic address expressions in postorder.
  std::vector<Value *> Postorder = collectGenericAddressExpressions(F);

  // Runs a data-flow analysis to refine the address spaces of every expression
  // in Postorder.
  ValueToAddrSpaceMapTy InferredAddrSpace;
  inferAddressSpaces(Postorder, &InferredAddrSpace);

  // Changes the address spaces of the generic address expressions that are
  // inferred to point to a specific address space.
  return rewriteWithNewAddressSpaces(Postorder, InferredAddrSpace, &F);
}

void NVPTXInferAddressSpaces::inferAddressSpaces(
    const std::vector<Value *> &Postorder,
    ValueToAddrSpaceMapTy *InferredAddrSpace) {
  SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
  // Initially, all expressions are in the uninitialized address space.
  for (Value *V : Postorder)
    (*InferredAddrSpace)[V] = ADDRESS_SPACE_UNINITIALIZED;

  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();

    // Tries to update the address space of V according to the address spaces
    // of its operands.
    DEBUG(dbgs() << "Updating the address space of\n"
                 << "  " << *V << "\n");
    Optional<unsigned> NewAS = updateAddressSpace(*V, *InferredAddrSpace);
    if (!NewAS.hasValue())
      continue;
    // If any update is made, adds the users of V to the worklist because
    // their address spaces may need to be updated as well.
    DEBUG(dbgs() << "  to " << NewAS.getValue() << "\n");
    (*InferredAddrSpace)[V] = NewAS.getValue();

    for (Value *User : V->users()) {
      // Skip if User is already in the worklist.
      if (Worklist.count(User))
        continue;

      auto Pos = InferredAddrSpace->find(User);
      // Our algorithm only updates the address spaces of generic address
      // expressions, which are those in InferredAddrSpace.
      if (Pos == InferredAddrSpace->end())
        continue;

      // Function updateAddressSpace moves the address space down a lattice
      // path. Therefore, there is nothing to do if User is already inferred
      // as generic (the bottom element in the lattice).
      if (Pos->second == AddressSpace::ADDRESS_SPACE_GENERIC)
        continue;

      Worklist.insert(User);
    }
  }
}

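// For example, a PHI whose incoming pointers have all been inferred as shared
// joins to shared and becomes refinable; as soon as one operand joins in a
// different specific space (or in generic), NewAS falls to generic and the
// PHI keeps its original generic type during rewriting.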
Optional<unsigned> NVPTXInferAddressSpaces::updateAddressSpace(
    const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) {
  assert(InferredAddrSpace.count(&V));

  // The new inferred address space equals the join of the address spaces
  // of all its pointer operands.
  unsigned NewAS = ADDRESS_SPACE_UNINITIALIZED;
  for (Value *PtrOperand : getPointerOperands(V)) {
    unsigned OperandAS;
    if (InferredAddrSpace.count(PtrOperand))
      OperandAS = InferredAddrSpace.lookup(PtrOperand);
    else
      OperandAS = PtrOperand->getType()->getPointerAddressSpace();
    NewAS = joinAddressSpaces(NewAS, OperandAS);
    // join(generic, *) = generic. So we can break if NewAS is already generic.
    if (NewAS == AddressSpace::ADDRESS_SPACE_GENERIC)
      break;
  }

  unsigned OldAS = InferredAddrSpace.lookup(&V);
  assert(OldAS != AddressSpace::ADDRESS_SPACE_GENERIC);
  if (OldAS == NewAS)
    return None;
  return NewAS;
}

bool NVPTXInferAddressSpaces::rewriteWithNewAddressSpaces(
    const std::vector<Value *> &Postorder,
    const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) {
  // For each address expression to be modified, creates a clone of it with its
  // pointer operands converted to the new address space. Since the pointer
  // operands are converted, the clone is naturally in the new address space by
  // construction.
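  // In the file-header example, %y is cloned first with an undef incoming
  // value (recorded in UndefUsesToFix), %y2 is then cloned against the new
  // %y', and the undef is patched afterwards.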
  ValueToValueMapTy ValueWithNewAddrSpace;
  SmallVector<const Use *, 32> UndefUsesToFix;
  for (Value *V : Postorder) {
    unsigned NewAddrSpace = InferredAddrSpace.lookup(V);
    if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
      ValueWithNewAddrSpace[V] = cloneValueWithNewAddressSpace(
          V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix);
    }
  }

  if (ValueWithNewAddrSpace.empty())
    return false;

  // Fixes all the undef uses generated by cloneInstructionWithNewAddressSpace.
  for (const Use *UndefUse : UndefUsesToFix) {
    User *V = UndefUse->getUser();
    User *NewV = cast<User>(ValueWithNewAddrSpace.lookup(V));
    unsigned OperandNo = UndefUse->getOperandNo();
    assert(isa<UndefValue>(NewV->getOperand(OperandNo)));
    NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(UndefUse->get()));
  }

  // Replaces the uses of the old address expressions with the new ones.
  for (Value *V : Postorder) {
    Value *NewV = ValueWithNewAddrSpace.lookup(V);
    if (NewV == nullptr)
      continue;

    SmallVector<Use *, 4> Uses;
    for (Use &U : V->uses())
      Uses.push_back(&U);
    DEBUG(dbgs() << "Replacing the uses of " << *V << "\n  to\n  " << *NewV
                 << "\n");
    for (Use *U : Uses) {
      if (isa<LoadInst>(U->getUser()) ||
          (isa<StoreInst>(U->getUser()) && U->getOperandNo() == 1)) {
        // If V is used as the pointer operand of a load/store, sets the
        // pointer operand to NewV. This replacement does not change the
        // element type, so the resultant load/store is still valid.
        U->set(NewV);
      } else if (isa<Instruction>(U->getUser())) {
        // Otherwise, replaces the use with generic(NewV).
        // TODO: Some optimization opportunities are missed. For example, in
        //   %0 = icmp eq float* %p, %q
        // if both p and q are inferred to be shared, we can rewrite %0 as
        //   %0 = icmp eq float addrspace(3)* %new_p, %new_q
        // instead of currently
        //   %generic_p = addrspacecast float addrspace(3)* %new_p to float*
        //   %generic_q = addrspacecast float addrspace(3)* %new_q to float*
        //   %0 = icmp eq float* %generic_p, %generic_q
        if (Instruction *I = dyn_cast<Instruction>(V)) {
          BasicBlock::iterator InsertPos = std::next(I->getIterator());
          while (isa<PHINode>(InsertPos))
            ++InsertPos;
          U->set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
        } else {
          U->set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
                                                V->getType()));
        }
      }
    }
    if (V->use_empty())
      RecursivelyDeleteTriviallyDeadInstructions(V);
  }

  return true;
}

FunctionPass *llvm::createNVPTXInferAddressSpacesPass() {
  return new NVPTXInferAddressSpaces();
}