// Source: LLVM 14.0.0git — llvm/IR/Instructions.h (reconstructed from the
// doxygen "documentation of this file" rendering).
1 //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file exposes the class definitions of all of the subclasses of the
10 // Instruction class. This is meant to be an easy way to get access to all
11 // instruction subclasses.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifndef LLVM_IR_INSTRUCTIONS_H
16 #define LLVM_IR_INSTRUCTIONS_H
17 
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Bitfields.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
49 
50 namespace llvm {
51 
52 class APInt;
53 class ConstantInt;
54 class DataLayout;
55 class LLVMContext;
56 
57 //===----------------------------------------------------------------------===//
58 // AllocaInst Class
59 //===----------------------------------------------------------------------===//
60 
61 /// an instruction to allocate memory on the stack
62 class AllocaInst : public UnaryInstruction {
63  Type *AllocatedType;
64 
65  using AlignmentField = AlignmentBitfieldElementT<0>;
66  using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
68  static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
69  SwiftErrorField>(),
70  "Bitfields must be contiguous");
71 
72 protected:
73  // Note: Instruction needs to be a friend here to call cloneImpl.
74  friend class Instruction;
75 
76  AllocaInst *cloneImpl() const;
77 
78 public:
79  explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
80  const Twine &Name, Instruction *InsertBefore);
81  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
82  const Twine &Name, BasicBlock *InsertAtEnd);
83 
84  AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
85  Instruction *InsertBefore);
86  AllocaInst(Type *Ty, unsigned AddrSpace,
87  const Twine &Name, BasicBlock *InsertAtEnd);
88 
89  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
90  const Twine &Name = "", Instruction *InsertBefore = nullptr);
91  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
92  const Twine &Name, BasicBlock *InsertAtEnd);
93 
94  /// Return true if there is an allocation size parameter to the allocation
95  /// instruction that is not 1.
96  bool isArrayAllocation() const;
97 
98  /// Get the number of elements allocated. For a simple allocation of a single
99  /// element, this will return a constant 1 value.
100  const Value *getArraySize() const { return getOperand(0); }
101  Value *getArraySize() { return getOperand(0); }
102 
103  /// Overload to return most specific pointer type.
104  PointerType *getType() const {
105  return cast<PointerType>(Instruction::getType());
106  }
107 
108  /// Get allocation size in bits. Returns None if size can't be determined,
109  /// e.g. in case of a VLA.
111 
112  /// Return the type that is being allocated by the instruction.
113  Type *getAllocatedType() const { return AllocatedType; }
114  /// for use only in special circumstances that need to generically
115  /// transform a whole instruction (eg: IR linking and vectorization).
116  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
117 
118  /// Return the alignment of the memory that is being allocated by the
119  /// instruction.
120  Align getAlign() const {
121  return Align(1ULL << getSubclassData<AlignmentField>());
122  }
123 
125  setSubclassData<AlignmentField>(Log2(Align));
126  }
127 
128  // FIXME: Remove this one transition to Align is over.
129  unsigned getAlignment() const { return getAlign().value(); }
130 
131  /// Return true if this alloca is in the entry block of the function and is a
132  /// constant size. If so, the code generator will fold it into the
133  /// prolog/epilog code, so it is basically free.
134  bool isStaticAlloca() const;
135 
136  /// Return true if this alloca is used as an inalloca argument to a call. Such
137  /// allocas are never considered static even if they are in the entry block.
138  bool isUsedWithInAlloca() const {
139  return getSubclassData<UsedWithInAllocaField>();
140  }
141 
142  /// Specify whether this alloca is used to represent the arguments to a call.
143  void setUsedWithInAlloca(bool V) {
144  setSubclassData<UsedWithInAllocaField>(V);
145  }
146 
147  /// Return true if this alloca is used as a swifterror argument to a call.
148  bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
149  /// Specify whether this alloca is used to represent a swifterror.
150  void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
151 
152  // Methods for support type inquiry through isa, cast, and dyn_cast:
153  static bool classof(const Instruction *I) {
154  return (I->getOpcode() == Instruction::Alloca);
155  }
156  static bool classof(const Value *V) {
157  return isa<Instruction>(V) && classof(cast<Instruction>(V));
158  }
159 
160 private:
161  // Shadow Instruction::setInstructionSubclassData with a private forwarding
162  // method so that subclasses cannot accidentally use it.
163  template <typename Bitfield>
164  void setSubclassData(typename Bitfield::Type Value) {
165  Instruction::setSubclassData<Bitfield>(Value);
166  }
167 };
168 
169 //===----------------------------------------------------------------------===//
170 // LoadInst Class
171 //===----------------------------------------------------------------------===//
172 
173 /// An instruction for reading from memory. This uses the SubclassData field in
174 /// Value to store whether or not the load is volatile.
175 class LoadInst : public UnaryInstruction {
176  using VolatileField = BoolBitfieldElementT<0>;
179  static_assert(
180  Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
181  "Bitfields must be contiguous");
182 
183  void AssertOK();
184 
185 protected:
186  // Note: Instruction needs to be a friend here to call cloneImpl.
187  friend class Instruction;
188 
189  LoadInst *cloneImpl() const;
190 
191 public:
192  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
193  Instruction *InsertBefore);
194  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
195  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
196  Instruction *InsertBefore);
197  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
198  BasicBlock *InsertAtEnd);
199  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
200  Align Align, Instruction *InsertBefore = nullptr);
201  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
202  Align Align, BasicBlock *InsertAtEnd);
203  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
204  Align Align, AtomicOrdering Order,
206  Instruction *InsertBefore = nullptr);
207  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
209  BasicBlock *InsertAtEnd);
210 
211  /// Return true if this is a load from a volatile memory location.
212  bool isVolatile() const { return getSubclassData<VolatileField>(); }
213 
214  /// Specify whether this is a volatile load or not.
215  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
216 
217  /// Return the alignment of the access that is being performed.
218  /// FIXME: Remove this function once transition to Align is over.
219  /// Use getAlign() instead.
220  unsigned getAlignment() const { return getAlign().value(); }
221 
222  /// Return the alignment of the access that is being performed.
223  Align getAlign() const {
224  return Align(1ULL << (getSubclassData<AlignmentField>()));
225  }
226 
228  setSubclassData<AlignmentField>(Log2(Align));
229  }
230 
231  /// Returns the ordering constraint of this load instruction.
233  return getSubclassData<OrderingField>();
234  }
235  /// Sets the ordering constraint of this load instruction. May not be Release
236  /// or AcquireRelease.
237  void setOrdering(AtomicOrdering Ordering) {
238  setSubclassData<OrderingField>(Ordering);
239  }
240 
241  /// Returns the synchronization scope ID of this load instruction.
243  return SSID;
244  }
245 
246  /// Sets the synchronization scope ID of this load instruction.
248  this->SSID = SSID;
249  }
250 
251  /// Sets the ordering constraint and the synchronization scope ID of this load
252  /// instruction.
253  void setAtomic(AtomicOrdering Ordering,
255  setOrdering(Ordering);
256  setSyncScopeID(SSID);
257  }
258 
259  bool isSimple() const { return !isAtomic() && !isVolatile(); }
260 
261  bool isUnordered() const {
262  return (getOrdering() == AtomicOrdering::NotAtomic ||
264  !isVolatile();
265  }
266 
268  const Value *getPointerOperand() const { return getOperand(0); }
269  static unsigned getPointerOperandIndex() { return 0U; }
271 
272  /// Returns the address space of the pointer operand.
273  unsigned getPointerAddressSpace() const {
275  }
276 
277  // Methods for support type inquiry through isa, cast, and dyn_cast:
278  static bool classof(const Instruction *I) {
279  return I->getOpcode() == Instruction::Load;
280  }
281  static bool classof(const Value *V) {
282  return isa<Instruction>(V) && classof(cast<Instruction>(V));
283  }
284 
285 private:
286  // Shadow Instruction::setInstructionSubclassData with a private forwarding
287  // method so that subclasses cannot accidentally use it.
288  template <typename Bitfield>
289  void setSubclassData(typename Bitfield::Type Value) {
290  Instruction::setSubclassData<Bitfield>(Value);
291  }
292 
293  /// The synchronization scope ID of this load instruction. Not quite enough
294  /// room in SubClassData for everything, so synchronization scope ID gets its
295  /// own field.
296  SyncScope::ID SSID;
297 };
298 
299 //===----------------------------------------------------------------------===//
300 // StoreInst Class
301 //===----------------------------------------------------------------------===//
302 
303 /// An instruction for storing to memory.
304 class StoreInst : public Instruction {
305  using VolatileField = BoolBitfieldElementT<0>;
308  static_assert(
309  Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
310  "Bitfields must be contiguous");
311 
312  void AssertOK();
313 
314 protected:
315  // Note: Instruction needs to be a friend here to call cloneImpl.
316  friend class Instruction;
317 
318  StoreInst *cloneImpl() const;
319 
320 public:
321  StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
322  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
323  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
324  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
325  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
326  Instruction *InsertBefore = nullptr);
327  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
328  BasicBlock *InsertAtEnd);
329  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
331  Instruction *InsertBefore = nullptr);
332  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
333  AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
334 
335  // allocate space for exactly two operands
336  void *operator new(size_t S) { return User::operator new(S, 2); }
337  void operator delete(void *Ptr) { User::operator delete(Ptr); }
338 
339  /// Return true if this is a store to a volatile memory location.
340  bool isVolatile() const { return getSubclassData<VolatileField>(); }
341 
342  /// Specify whether this is a volatile store or not.
343  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
344 
345  /// Transparently provide more efficient getOperand methods.
347 
348  /// Return the alignment of the access that is being performed
349  /// FIXME: Remove this function once transition to Align is over.
350  /// Use getAlign() instead.
351  unsigned getAlignment() const { return getAlign().value(); }
352 
353  Align getAlign() const {
354  return Align(1ULL << (getSubclassData<AlignmentField>()));
355  }
356 
358  setSubclassData<AlignmentField>(Log2(Align));
359  }
360 
361  /// Returns the ordering constraint of this store instruction.
363  return getSubclassData<OrderingField>();
364  }
365 
366  /// Sets the ordering constraint of this store instruction. May not be
367  /// Acquire or AcquireRelease.
368  void setOrdering(AtomicOrdering Ordering) {
369  setSubclassData<OrderingField>(Ordering);
370  }
371 
372  /// Returns the synchronization scope ID of this store instruction.
374  return SSID;
375  }
376 
377  /// Sets the synchronization scope ID of this store instruction.
379  this->SSID = SSID;
380  }
381 
382  /// Sets the ordering constraint and the synchronization scope ID of this
383  /// store instruction.
384  void setAtomic(AtomicOrdering Ordering,
386  setOrdering(Ordering);
387  setSyncScopeID(SSID);
388  }
389 
390  bool isSimple() const { return !isAtomic() && !isVolatile(); }
391 
392  bool isUnordered() const {
393  return (getOrdering() == AtomicOrdering::NotAtomic ||
395  !isVolatile();
396  }
397 
398  Value *getValueOperand() { return getOperand(0); }
399  const Value *getValueOperand() const { return getOperand(0); }
400 
402  const Value *getPointerOperand() const { return getOperand(1); }
403  static unsigned getPointerOperandIndex() { return 1U; }
405 
406  /// Returns the address space of the pointer operand.
407  unsigned getPointerAddressSpace() const {
409  }
410 
411  // Methods for support type inquiry through isa, cast, and dyn_cast:
412  static bool classof(const Instruction *I) {
413  return I->getOpcode() == Instruction::Store;
414  }
415  static bool classof(const Value *V) {
416  return isa<Instruction>(V) && classof(cast<Instruction>(V));
417  }
418 
419 private:
420  // Shadow Instruction::setInstructionSubclassData with a private forwarding
421  // method so that subclasses cannot accidentally use it.
422  template <typename Bitfield>
423  void setSubclassData(typename Bitfield::Type Value) {
424  Instruction::setSubclassData<Bitfield>(Value);
425  }
426 
427  /// The synchronization scope ID of this store instruction. Not quite enough
428  /// room in SubClassData for everything, so synchronization scope ID gets its
429  /// own field.
430  SyncScope::ID SSID;
431 };
432 
433 template <>
434 struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
435 };
436 
438 
439 //===----------------------------------------------------------------------===//
440 // FenceInst Class
441 //===----------------------------------------------------------------------===//
442 
443 /// An instruction for ordering other memory operations.
444 class FenceInst : public Instruction {
445  using OrderingField = AtomicOrderingBitfieldElementT<0>;
446 
447  void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
448 
449 protected:
450  // Note: Instruction needs to be a friend here to call cloneImpl.
451  friend class Instruction;
452 
453  FenceInst *cloneImpl() const;
454 
455 public:
456  // Ordering may only be Acquire, Release, AcquireRelease, or
457  // SequentiallyConsistent.
460  Instruction *InsertBefore = nullptr);
462  BasicBlock *InsertAtEnd);
463 
464  // allocate space for exactly zero operands
465  void *operator new(size_t S) { return User::operator new(S, 0); }
466  void operator delete(void *Ptr) { User::operator delete(Ptr); }
467 
468  /// Returns the ordering constraint of this fence instruction.
470  return getSubclassData<OrderingField>();
471  }
472 
473  /// Sets the ordering constraint of this fence instruction. May only be
474  /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
475  void setOrdering(AtomicOrdering Ordering) {
476  setSubclassData<OrderingField>(Ordering);
477  }
478 
479  /// Returns the synchronization scope ID of this fence instruction.
481  return SSID;
482  }
483 
484  /// Sets the synchronization scope ID of this fence instruction.
486  this->SSID = SSID;
487  }
488 
489  // Methods for support type inquiry through isa, cast, and dyn_cast:
490  static bool classof(const Instruction *I) {
491  return I->getOpcode() == Instruction::Fence;
492  }
493  static bool classof(const Value *V) {
494  return isa<Instruction>(V) && classof(cast<Instruction>(V));
495  }
496 
497 private:
498  // Shadow Instruction::setInstructionSubclassData with a private forwarding
499  // method so that subclasses cannot accidentally use it.
500  template <typename Bitfield>
501  void setSubclassData(typename Bitfield::Type Value) {
502  Instruction::setSubclassData<Bitfield>(Value);
503  }
504 
505  /// The synchronization scope ID of this fence instruction. Not quite enough
506  /// room in SubClassData for everything, so synchronization scope ID gets its
507  /// own field.
508  SyncScope::ID SSID;
509 };
510 
511 //===----------------------------------------------------------------------===//
512 // AtomicCmpXchgInst Class
513 //===----------------------------------------------------------------------===//
514 
515 /// An instruction that atomically checks whether a
516 /// specified value is in a memory location, and, if it is, stores a new value
517 /// there. The value returned by this instruction is a pair containing the
518 /// original value as first element, and an i1 indicating success (true) or
519 /// failure (false) as second element.
520 ///
522  void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
523  AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
524  SyncScope::ID SSID);
525 
526  template <unsigned Offset>
527  using AtomicOrderingBitfieldElement =
530 
531 protected:
532  // Note: Instruction needs to be a friend here to call cloneImpl.
533  friend class Instruction;
534 
535  AtomicCmpXchgInst *cloneImpl() const;
536 
537 public:
538  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
539  AtomicOrdering SuccessOrdering,
540  AtomicOrdering FailureOrdering, SyncScope::ID SSID,
541  Instruction *InsertBefore = nullptr);
542  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
543  AtomicOrdering SuccessOrdering,
544  AtomicOrdering FailureOrdering, SyncScope::ID SSID,
545  BasicBlock *InsertAtEnd);
546 
547  // allocate space for exactly three operands
548  void *operator new(size_t S) { return User::operator new(S, 3); }
549  void operator delete(void *Ptr) { User::operator delete(Ptr); }
550 
553  using SuccessOrderingField =
555  using FailureOrderingField =
557  using AlignmentField =
559  static_assert(
562  "Bitfields must be contiguous");
563 
564  /// Return the alignment of the memory that is being allocated by the
565  /// instruction.
566  Align getAlign() const {
567  return Align(1ULL << getSubclassData<AlignmentField>());
568  }
569 
571  setSubclassData<AlignmentField>(Log2(Align));
572  }
573 
574  /// Return true if this is a cmpxchg from a volatile memory
575  /// location.
576  ///
577  bool isVolatile() const { return getSubclassData<VolatileField>(); }
578 
579  /// Specify whether this is a volatile cmpxchg.
580  ///
581  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
582 
583  /// Return true if this cmpxchg may spuriously fail.
584  bool isWeak() const { return getSubclassData<WeakField>(); }
585 
586  void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
587 
588  /// Transparently provide more efficient getOperand methods.
590 
591  static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
592  return Ordering != AtomicOrdering::NotAtomic &&
593  Ordering != AtomicOrdering::Unordered;
594  }
595 
596  static bool isValidFailureOrdering(AtomicOrdering Ordering) {
597  return Ordering != AtomicOrdering::NotAtomic &&
598  Ordering != AtomicOrdering::Unordered &&
599  Ordering != AtomicOrdering::AcquireRelease &&
600  Ordering != AtomicOrdering::Release;
601  }
602 
603  /// Returns the success ordering constraint of this cmpxchg instruction.
605  return getSubclassData<SuccessOrderingField>();
606  }
607 
608  /// Sets the success ordering constraint of this cmpxchg instruction.
610  assert(isValidSuccessOrdering(Ordering) &&
611  "invalid CmpXchg success ordering");
612  setSubclassData<SuccessOrderingField>(Ordering);
613  }
614 
615  /// Returns the failure ordering constraint of this cmpxchg instruction.
617  return getSubclassData<FailureOrderingField>();
618  }
619 
620  /// Sets the failure ordering constraint of this cmpxchg instruction.
622  assert(isValidFailureOrdering(Ordering) &&
623  "invalid CmpXchg failure ordering");
624  setSubclassData<FailureOrderingField>(Ordering);
625  }
626 
627  /// Returns a single ordering which is at least as strong as both the
628  /// success and failure orderings for this cmpxchg.
637  }
638  return getSuccessOrdering();
639  }
640 
641  /// Returns the synchronization scope ID of this cmpxchg instruction.
643  return SSID;
644  }
645 
646  /// Sets the synchronization scope ID of this cmpxchg instruction.
648  this->SSID = SSID;
649  }
650 
652  const Value *getPointerOperand() const { return getOperand(0); }
653  static unsigned getPointerOperandIndex() { return 0U; }
654 
656  const Value *getCompareOperand() const { return getOperand(1); }
657 
659  const Value *getNewValOperand() const { return getOperand(2); }
660 
661  /// Returns the address space of the pointer operand.
662  unsigned getPointerAddressSpace() const {
664  }
665 
666  /// Returns the strongest permitted ordering on failure, given the
667  /// desired ordering on success.
668  ///
669  /// If the comparison in a cmpxchg operation fails, there is no atomic store
670  /// so release semantics cannot be provided. So this function drops explicit
671  /// Release requests from the AtomicOrdering. A SequentiallyConsistent
672  /// operation would remain SequentiallyConsistent.
673  static AtomicOrdering
675  switch (SuccessOrdering) {
676  default:
677  llvm_unreachable("invalid cmpxchg success ordering");
686  }
687  }
688 
689  // Methods for support type inquiry through isa, cast, and dyn_cast:
690  static bool classof(const Instruction *I) {
691  return I->getOpcode() == Instruction::AtomicCmpXchg;
692  }
693  static bool classof(const Value *V) {
694  return isa<Instruction>(V) && classof(cast<Instruction>(V));
695  }
696 
697 private:
698  // Shadow Instruction::setInstructionSubclassData with a private forwarding
699  // method so that subclasses cannot accidentally use it.
700  template <typename Bitfield>
701  void setSubclassData(typename Bitfield::Type Value) {
702  Instruction::setSubclassData<Bitfield>(Value);
703  }
704 
705  /// The synchronization scope ID of this cmpxchg instruction. Not quite
706  /// enough room in SubClassData for everything, so synchronization scope ID
707  /// gets its own field.
708  SyncScope::ID SSID;
709 };
710 
711 template <>
713  public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
714 };
715 
717 
718 //===----------------------------------------------------------------------===//
719 // AtomicRMWInst Class
720 //===----------------------------------------------------------------------===//
721 
722 /// an instruction that atomically reads a memory location,
723 /// combines it with another value, and then stores the result back. Returns
724 /// the old value.
725 ///
726 class AtomicRMWInst : public Instruction {
727 protected:
728  // Note: Instruction needs to be a friend here to call cloneImpl.
729  friend class Instruction;
730 
731  AtomicRMWInst *cloneImpl() const;
732 
733 public:
734  /// This enumeration lists the possible modifications atomicrmw can make. In
735  /// the descriptions, 'p' is the pointer to the instruction's memory location,
736  /// 'old' is the initial value of *p, and 'v' is the other value passed to the
737  /// instruction. These instructions always return 'old'.
738  enum BinOp : unsigned {
739  /// *p = v
741  /// *p = old + v
743  /// *p = old - v
745  /// *p = old & v
747  /// *p = ~(old & v)
749  /// *p = old | v
750  Or,
751  /// *p = old ^ v
753  /// *p = old >signed v ? old : v
755  /// *p = old <signed v ? old : v
757  /// *p = old >unsigned v ? old : v
759  /// *p = old <unsigned v ? old : v
761 
762  /// *p = old + v
764 
765  /// *p = old - v
767 
768  FIRST_BINOP = Xchg,
769  LAST_BINOP = FSub,
770  BAD_BINOP
771  };
772 
773 private:
774  template <unsigned Offset>
775  using AtomicOrderingBitfieldElement =
778 
779  template <unsigned Offset>
780  using BinOpBitfieldElement =
782 
783 public:
784  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
785  AtomicOrdering Ordering, SyncScope::ID SSID,
786  Instruction *InsertBefore = nullptr);
787  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
788  AtomicOrdering Ordering, SyncScope::ID SSID,
789  BasicBlock *InsertAtEnd);
790 
791  // allocate space for exactly two operands
792  void *operator new(size_t S) { return User::operator new(S, 2); }
793  void operator delete(void *Ptr) { User::operator delete(Ptr); }
794 
796  using AtomicOrderingField =
798  using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
802  "Bitfields must be contiguous");
803 
804  BinOp getOperation() const { return getSubclassData<OperationField>(); }
805 
806  static StringRef getOperationName(BinOp Op);
807 
808  static bool isFPOperation(BinOp Op) {
809  switch (Op) {
810  case AtomicRMWInst::FAdd:
811  case AtomicRMWInst::FSub:
812  return true;
813  default:
814  return false;
815  }
816  }
817 
819  setSubclassData<OperationField>(Operation);
820  }
821 
822  /// Return the alignment of the memory that is being allocated by the
823  /// instruction.
824  Align getAlign() const {
825  return Align(1ULL << getSubclassData<AlignmentField>());
826  }
827 
829  setSubclassData<AlignmentField>(Log2(Align));
830  }
831 
832  /// Return true if this is a RMW on a volatile memory location.
833  ///
834  bool isVolatile() const { return getSubclassData<VolatileField>(); }
835 
836  /// Specify whether this is a volatile RMW or not.
837  ///
838  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
839 
840  /// Transparently provide more efficient getOperand methods.
842 
843  /// Returns the ordering constraint of this rmw instruction.
845  return getSubclassData<AtomicOrderingField>();
846  }
847 
848  /// Sets the ordering constraint of this rmw instruction.
849  void setOrdering(AtomicOrdering Ordering) {
850  assert(Ordering != AtomicOrdering::NotAtomic &&
851  "atomicrmw instructions can only be atomic.");
852  setSubclassData<AtomicOrderingField>(Ordering);
853  }
854 
855  /// Returns the synchronization scope ID of this rmw instruction.
857  return SSID;
858  }
859 
860  /// Sets the synchronization scope ID of this rmw instruction.
862  this->SSID = SSID;
863  }
864 
865  Value *getPointerOperand() { return getOperand(0); }
866  const Value *getPointerOperand() const { return getOperand(0); }
867  static unsigned getPointerOperandIndex() { return 0U; }
868 
869  Value *getValOperand() { return getOperand(1); }
870  const Value *getValOperand() const { return getOperand(1); }
871 
872  /// Returns the address space of the pointer operand.
873  unsigned getPointerAddressSpace() const {
875  }
876 
878  return isFPOperation(getOperation());
879  }
880 
881  // Methods for support type inquiry through isa, cast, and dyn_cast:
882  static bool classof(const Instruction *I) {
883  return I->getOpcode() == Instruction::AtomicRMW;
884  }
885  static bool classof(const Value *V) {
886  return isa<Instruction>(V) && classof(cast<Instruction>(V));
887  }
888 
889 private:
890  void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
891  AtomicOrdering Ordering, SyncScope::ID SSID);
892 
893  // Shadow Instruction::setInstructionSubclassData with a private forwarding
894  // method so that subclasses cannot accidentally use it.
895  template <typename Bitfield>
896  void setSubclassData(typename Bitfield::Type Value) {
897  Instruction::setSubclassData<Bitfield>(Value);
898  }
899 
900  /// The synchronization scope ID of this rmw instruction. Not quite enough
901  /// room in SubClassData for everything, so synchronization scope ID gets its
902  /// own field.
903  SyncScope::ID SSID;
904 };
905 
906 template <>
908  : public FixedNumOperandTraits<AtomicRMWInst,2> {
909 };
910 
912 
913 //===----------------------------------------------------------------------===//
914 // GetElementPtrInst Class
915 //===----------------------------------------------------------------------===//
916 
917 // checkGEPType - Simple wrapper function to give a better assertion failure
918 // message on bad indexes for a gep instruction.
919 //
921  assert(Ty && "Invalid GetElementPtrInst indices for type!");
922  return Ty;
923 }
924 
925 /// an instruction for type-safe pointer arithmetic to
926 /// access elements of arrays and structs
927 ///
929  Type *SourceElementType;
930  Type *ResultElementType;
931 
933 
934  /// Constructors - Create a getelementptr instruction with a base pointer an
935  /// list of indices. The first ctor can optionally insert before an existing
936  /// instruction, the second appends the new instruction to the specified
937  /// BasicBlock.
938  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
939  ArrayRef<Value *> IdxList, unsigned Values,
940  const Twine &NameStr, Instruction *InsertBefore);
941  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
942  ArrayRef<Value *> IdxList, unsigned Values,
943  const Twine &NameStr, BasicBlock *InsertAtEnd);
944 
945  void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
946 
947 protected:
948  // Note: Instruction needs to be a friend here to call cloneImpl.
949  friend class Instruction;
950 
951  GetElementPtrInst *cloneImpl() const;
952 
953 public:
954  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
955  ArrayRef<Value *> IdxList,
956  const Twine &NameStr = "",
957  Instruction *InsertBefore = nullptr) {
958  unsigned Values = 1 + unsigned(IdxList.size());
959  assert(PointeeType && "Must specify element type");
960  assert(cast<PointerType>(Ptr->getType()->getScalarType())
961  ->isOpaqueOrPointeeTypeMatches(PointeeType));
962  return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
963  NameStr, InsertBefore);
964  }
965 
966  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
967  ArrayRef<Value *> IdxList,
968  const Twine &NameStr,
969  BasicBlock *InsertAtEnd) {
970  unsigned Values = 1 + unsigned(IdxList.size());
971  assert(PointeeType && "Must specify element type");
972  assert(cast<PointerType>(Ptr->getType()->getScalarType())
973  ->isOpaqueOrPointeeTypeMatches(PointeeType));
974  return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
975  NameStr, InsertAtEnd);
976  }
977 
979  Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr = "",
980  Instruction *InsertBefore = nullptr),
981  "Use the version with explicit element type instead") {
982  return CreateInBounds(
983  Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
984  NameStr, InsertBefore);
985  }
986 
987  /// Create an "inbounds" getelementptr. See the documentation for the
988  /// "inbounds" flag in LangRef.html for details.
989  static GetElementPtrInst *
990  CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
991  const Twine &NameStr = "",
992  Instruction *InsertBefore = nullptr) {
994  Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
995  GEP->setIsInBounds(true);
996  return GEP;
997  }
998 
1000  Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr,
1001  BasicBlock *InsertAtEnd),
1002  "Use the version with explicit element type instead") {
1003  return CreateInBounds(
1004  Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
1005  NameStr, InsertAtEnd);
1006  }
1007 
1008  static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
1009  ArrayRef<Value *> IdxList,
1010  const Twine &NameStr,
1011  BasicBlock *InsertAtEnd) {
1013  Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1014  GEP->setIsInBounds(true);
1015  return GEP;
1016  }
1017 
1018  /// Transparently provide more efficient getOperand methods.
1020 
1021  Type *getSourceElementType() const { return SourceElementType; }
1022 
1023  void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1024  void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1025 
1027  assert(cast<PointerType>(getType()->getScalarType())
1028  ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1029  return ResultElementType;
1030  }
1031 
1032  /// Returns the address space of this instruction's pointer type.
1033  unsigned getAddressSpace() const {
1034  // Note that this is always the same as the pointer operand's address space
1035  // and that is cheaper to compute, so cheat here.
1036  return getPointerAddressSpace();
1037  }
1038 
1039  /// Returns the result type of a getelementptr with the given source
1040  /// element type and indexes.
1041  ///
1042  /// Null is returned if the indices are invalid for the specified
1043  /// source element type.
1044  static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1045  static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1046  static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1047 
1048  /// Return the type of the element at the given index of an indexable
1049  /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1050  ///
1051  /// Returns null if the type can't be indexed, or the given index is not
1052  /// legal for the given type.
1053  static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1054  static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1055 
1056  inline op_iterator idx_begin() { return op_begin()+1; }
1057  inline const_op_iterator idx_begin() const { return op_begin()+1; }
1058  inline op_iterator idx_end() { return op_end(); }
1059  inline const_op_iterator idx_end() const { return op_end(); }
1060 
1062  return make_range(idx_begin(), idx_end());
1063  }
1064 
1066  return make_range(idx_begin(), idx_end());
1067  }
1068 
1070  return getOperand(0);
1071  }
1072  const Value *getPointerOperand() const {
1073  return getOperand(0);
1074  }
1075  static unsigned getPointerOperandIndex() {
1076  return 0U; // get index for modifying correct operand.
1077  }
1078 
1079  /// Method to return the pointer operand as a
1080  /// PointerType.
1082  return getPointerOperand()->getType();
1083  }
1084 
1085  /// Returns the address space of the pointer operand.
1086  unsigned getPointerAddressSpace() const {
1088  }
1089 
1090  /// Returns the pointer type returned by the GEP
1091  /// instruction, which may be a vector of pointers.
1092  static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1093  ArrayRef<Value *> IdxList) {
1094  PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1095  unsigned AddrSpace = OrigPtrTy->getAddressSpace();
1096  Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
1097  Type *PtrTy = OrigPtrTy->isOpaque()
1098  ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
1099  : PointerType::get(ResultElemTy, AddrSpace);
1100  // Vector GEP
1101  if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1102  ElementCount EltCount = PtrVTy->getElementCount();
1103  return VectorType::get(PtrTy, EltCount);
1104  }
1105  for (Value *Index : IdxList)
1106  if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1107  ElementCount EltCount = IndexVTy->getElementCount();
1108  return VectorType::get(PtrTy, EltCount);
1109  }
1110  // Scalar GEP
1111  return PtrTy;
1112  }
1113 
1114  unsigned getNumIndices() const { // Note: always non-negative
1115  return getNumOperands() - 1;
1116  }
1117 
1118  bool hasIndices() const {
1119  return getNumOperands() > 1;
1120  }
1121 
1122  /// Return true if all of the indices of this GEP are
1123  /// zeros. If so, the result pointer and the first operand have the same
1124  /// value, just potentially different types.
1125  bool hasAllZeroIndices() const;
1126 
1127  /// Return true if all of the indices of this GEP are
1128  /// constant integers. If so, the result pointer and the first operand have
1129  /// a constant offset between them.
1130  bool hasAllConstantIndices() const;
1131 
1132  /// Set or clear the inbounds flag on this GEP instruction.
1133  /// See LangRef.html for the meaning of inbounds on a getelementptr.
1134  void setIsInBounds(bool b = true);
1135 
1136  /// Determine whether the GEP has the inbounds flag.
1137  bool isInBounds() const;
1138 
1139  /// Accumulate the constant address offset of this GEP if possible.
1140  ///
1141  /// This routine accepts an APInt into which it will accumulate the constant
1142  /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1143  /// all-constant, it returns false and the value of the offset APInt is
1144  /// undefined (it is *not* preserved!). The APInt passed into this routine
1145  /// must be at least as wide as the IntPtr type for the address space of
1146  /// the base GEP pointer.
1147  bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1148  bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1149  MapVector<Value *, APInt> &VariableOffsets,
1150  APInt &ConstantOffset) const;
1151  // Methods for support type inquiry through isa, cast, and dyn_cast:
1152  static bool classof(const Instruction *I) {
1153  return (I->getOpcode() == Instruction::GetElementPtr);
1154  }
1155  static bool classof(const Value *V) {
1156  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1157  }
1158 };
1159 
1160 template <>
1162  public VariadicOperandTraits<GetElementPtrInst, 1> {
1163 };
1164 
1165 GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1166  ArrayRef<Value *> IdxList, unsigned Values,
1167  const Twine &NameStr,
1168  Instruction *InsertBefore)
1169  : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1170  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1171  Values, InsertBefore),
1172  SourceElementType(PointeeType),
1173  ResultElementType(getIndexedType(PointeeType, IdxList)) {
1174  assert(cast<PointerType>(getType()->getScalarType())
1175  ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1176  init(Ptr, IdxList, NameStr);
1177 }
1178 
1179 GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1180  ArrayRef<Value *> IdxList, unsigned Values,
1181  const Twine &NameStr,
1182  BasicBlock *InsertAtEnd)
1183  : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1184  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1185  Values, InsertAtEnd),
1186  SourceElementType(PointeeType),
1187  ResultElementType(getIndexedType(PointeeType, IdxList)) {
1188  assert(cast<PointerType>(getType()->getScalarType())
1189  ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1190  init(Ptr, IdxList, NameStr);
1191 }
1192 
1193 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1194 
1195 //===----------------------------------------------------------------------===//
1196 // ICmpInst Class
1197 //===----------------------------------------------------------------------===//
1198 
1199 /// This instruction compares its operands according to the predicate given
1200 /// to the constructor. It only operates on integers or pointers. The operands
1201 /// must be identical types.
1202 /// Represent an integer comparison operator.
1203 class ICmpInst: public CmpInst {
1204  void AssertOK() {
1205  assert(isIntPredicate() &&
1206  "Invalid ICmp predicate value");
1207  assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1208  "Both operands to ICmp instruction are not of the same type!");
1209  // Check that the operands are the right type
1210  assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1211  getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1212  "Invalid operand types for ICmp instruction");
1213  }
1214 
1215 protected:
1216  // Note: Instruction needs to be a friend here to call cloneImpl.
1217  friend class Instruction;
1218 
1219  /// Clone an identical ICmpInst
1220  ICmpInst *cloneImpl() const;
1221 
1222 public:
1223  /// Constructor with insert-before-instruction semantics.
1225  Instruction *InsertBefore, ///< Where to insert
1226  Predicate pred, ///< The predicate to use for the comparison
1227  Value *LHS, ///< The left-hand-side of the expression
1228  Value *RHS, ///< The right-hand-side of the expression
1229  const Twine &NameStr = "" ///< Name of the instruction
1230  ) : CmpInst(makeCmpResultType(LHS->getType()),
1231  Instruction::ICmp, pred, LHS, RHS, NameStr,
1232  InsertBefore) {
1233 #ifndef NDEBUG
1234  AssertOK();
1235 #endif
1236  }
1237 
1238  /// Constructor with insert-at-end semantics.
1240  BasicBlock &InsertAtEnd, ///< Block to insert into.
1241  Predicate pred, ///< The predicate to use for the comparison
1242  Value *LHS, ///< The left-hand-side of the expression
1243  Value *RHS, ///< The right-hand-side of the expression
1244  const Twine &NameStr = "" ///< Name of the instruction
1245  ) : CmpInst(makeCmpResultType(LHS->getType()),
1246  Instruction::ICmp, pred, LHS, RHS, NameStr,
1247  &InsertAtEnd) {
1248 #ifndef NDEBUG
1249  AssertOK();
1250 #endif
1251  }
1252 
1253  /// Constructor with no-insertion semantics
1255  Predicate pred, ///< The predicate to use for the comparison
1256  Value *LHS, ///< The left-hand-side of the expression
1257  Value *RHS, ///< The right-hand-side of the expression
1258  const Twine &NameStr = "" ///< Name of the instruction
1259  ) : CmpInst(makeCmpResultType(LHS->getType()),
1260  Instruction::ICmp, pred, LHS, RHS, NameStr) {
1261 #ifndef NDEBUG
1262  AssertOK();
1263 #endif
1264  }
1265 
1266  /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1267  /// @returns the predicate that would be the result if the operand were
1268  /// regarded as signed.
1269  /// Return the signed version of the predicate
1271  return getSignedPredicate(getPredicate());
1272  }
1273 
1274  /// This is a static version that you can use without an instruction.
1275  /// Return the signed version of the predicate.
1276  static Predicate getSignedPredicate(Predicate pred);
1277 
1278  /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1279  /// @returns the predicate that would be the result if the operand were
1280  /// regarded as unsigned.
1281  /// Return the unsigned version of the predicate
1283  return getUnsignedPredicate(getPredicate());
1284  }
1285 
1286  /// This is a static version that you can use without an instruction.
1287  /// Return the unsigned version of the predicate.
1288  static Predicate getUnsignedPredicate(Predicate pred);
1289 
1290  /// Return true if this predicate is either EQ or NE. This also
1291  /// tests for commutativity.
1292  static bool isEquality(Predicate P) {
1293  return P == ICMP_EQ || P == ICMP_NE;
1294  }
1295 
1296  /// Return true if this predicate is either EQ or NE. This also
1297  /// tests for commutativity.
1298  bool isEquality() const {
1299  return isEquality(getPredicate());
1300  }
1301 
1302  /// @returns true if the predicate of this ICmpInst is commutative
1303  /// Determine if this relation is commutative.
1304  bool isCommutative() const { return isEquality(); }
1305 
1306  /// Return true if the predicate is relational (not EQ or NE).
1307  ///
1308  bool isRelational() const {
1309  return !isEquality();
1310  }
1311 
1312  /// Return true if the predicate is relational (not EQ or NE).
1313  ///
1314  static bool isRelational(Predicate P) {
1315  return !isEquality(P);
1316  }
1317 
1318  /// Return true if the predicate is SGT or UGT.
1319  ///
1320  static bool isGT(Predicate P) {
1321  return P == ICMP_SGT || P == ICMP_UGT;
1322  }
1323 
1324  /// Return true if the predicate is SLT or ULT.
1325  ///
1326  static bool isLT(Predicate P) {
1327  return P == ICMP_SLT || P == ICMP_ULT;
1328  }
1329 
1330  /// Return true if the predicate is SGE or UGE.
1331  ///
1332  static bool isGE(Predicate P) {
1333  return P == ICMP_SGE || P == ICMP_UGE;
1334  }
1335 
1336  /// Return true if the predicate is SLE or ULE.
1337  ///
1338  static bool isLE(Predicate P) {
1339  return P == ICMP_SLE || P == ICMP_ULE;
1340  }
1341 
1342  /// Exchange the two operands to this instruction in such a way that it does
1343  /// not modify the semantics of the instruction. The predicate value may be
1344  /// changed to retain the same result if the predicate is order dependent
1345  /// (e.g. ult).
1346  /// Swap operands and adjust predicate.
1347  void swapOperands() {
1348  setPredicate(getSwappedPredicate());
1349  Op<0>().swap(Op<1>());
1350  }
1351 
1352  // Methods for support type inquiry through isa, cast, and dyn_cast:
1353  static bool classof(const Instruction *I) {
1354  return I->getOpcode() == Instruction::ICmp;
1355  }
1356  static bool classof(const Value *V) {
1357  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1358  }
1359 };
1360 
1361 //===----------------------------------------------------------------------===//
1362 // FCmpInst Class
1363 //===----------------------------------------------------------------------===//
1364 
1365 /// This instruction compares its operands according to the predicate given
1366 /// to the constructor. It only operates on floating point values or packed
1367 /// vectors of floating point values. The operands must be identical types.
1368 /// Represents a floating point comparison operator.
1369 class FCmpInst: public CmpInst {
1370  void AssertOK() {
1371  assert(isFPPredicate() && "Invalid FCmp predicate value");
1372  assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1373  "Both operands to FCmp instruction are not of the same type!");
1374  // Check that the operands are the right type
1375  assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1376  "Invalid operand types for FCmp instruction");
1377  }
1378 
1379 protected:
1380  // Note: Instruction needs to be a friend here to call cloneImpl.
1381  friend class Instruction;
1382 
1383  /// Clone an identical FCmpInst
1384  FCmpInst *cloneImpl() const;
1385 
1386 public:
1387  /// Constructor with insert-before-instruction semantics.
1389  Instruction *InsertBefore, ///< Where to insert
1390  Predicate pred, ///< The predicate to use for the comparison
1391  Value *LHS, ///< The left-hand-side of the expression
1392  Value *RHS, ///< The right-hand-side of the expression
1393  const Twine &NameStr = "" ///< Name of the instruction
1394  ) : CmpInst(makeCmpResultType(LHS->getType()),
1395  Instruction::FCmp, pred, LHS, RHS, NameStr,
1396  InsertBefore) {
1397  AssertOK();
1398  }
1399 
1400  /// Constructor with insert-at-end semantics.
1402  BasicBlock &InsertAtEnd, ///< Block to insert into.
1403  Predicate pred, ///< The predicate to use for the comparison
1404  Value *LHS, ///< The left-hand-side of the expression
1405  Value *RHS, ///< The right-hand-side of the expression
1406  const Twine &NameStr = "" ///< Name of the instruction
1407  ) : CmpInst(makeCmpResultType(LHS->getType()),
1408  Instruction::FCmp, pred, LHS, RHS, NameStr,
1409  &InsertAtEnd) {
1410  AssertOK();
1411  }
1412 
1413  /// Constructor with no-insertion semantics
1415  Predicate Pred, ///< The predicate to use for the comparison
1416  Value *LHS, ///< The left-hand-side of the expression
1417  Value *RHS, ///< The right-hand-side of the expression
1418  const Twine &NameStr = "", ///< Name of the instruction
1419  Instruction *FlagsSource = nullptr
1420  ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1421  RHS, NameStr, nullptr, FlagsSource) {
1422  AssertOK();
1423  }
1424 
1425  /// @returns true if the predicate of this instruction is EQ or NE.
1426  /// Determine if this is an equality predicate.
1427  static bool isEquality(Predicate Pred) {
1428  return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1429  Pred == FCMP_UNE;
1430  }
1431 
1432  /// @returns true if the predicate of this instruction is EQ or NE.
1433  /// Determine if this is an equality predicate.
1434  bool isEquality() const { return isEquality(getPredicate()); }
1435 
1436  /// @returns true if the predicate of this instruction is commutative.
1437  /// Determine if this is a commutative predicate.
1438  bool isCommutative() const {
1439  return isEquality() ||
1440  getPredicate() == FCMP_FALSE ||
1441  getPredicate() == FCMP_TRUE ||
1442  getPredicate() == FCMP_ORD ||
1443  getPredicate() == FCMP_UNO;
1444  }
1445 
1446  /// @returns true if the predicate is relational (not EQ or NE).
1447  /// Determine if this a relational predicate.
1448  bool isRelational() const { return !isEquality(); }
1449 
1450  /// Exchange the two operands to this instruction in such a way that it does
1451  /// not modify the semantics of the instruction. The predicate value may be
1452  /// changed to retain the same result if the predicate is order dependent
1453  /// (e.g. ult).
1454  /// Swap operands and adjust predicate.
1455  void swapOperands() {
1457  Op<0>().swap(Op<1>());
1458  }
1459 
1460  /// Methods for support type inquiry through isa, cast, and dyn_cast:
1461  static bool classof(const Instruction *I) {
1462  return I->getOpcode() == Instruction::FCmp;
1463  }
1464  static bool classof(const Value *V) {
1465  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1466  }
1467 };
1468 
1469 //===----------------------------------------------------------------------===//
1470 /// This class represents a function call, abstracting a target
1471 /// machine's calling convention. This class uses low bit of the SubClassData
1472 /// field to indicate whether or not this is a tail call. The rest of the bits
1473 /// hold the calling convention of the call.
1474 ///
1475 class CallInst : public CallBase {
1476  CallInst(const CallInst &CI);
1477 
1478  /// Construct a CallInst given a range of arguments.
1479  /// Construct a CallInst from a range of arguments
1480  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1481  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1482  Instruction *InsertBefore);
1483 
1484  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1485  const Twine &NameStr, Instruction *InsertBefore)
1486  : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1487 
1488  /// Construct a CallInst given a range of arguments.
1489  /// Construct a CallInst from a range of arguments
1490  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1491  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1492  BasicBlock *InsertAtEnd);
1493 
1494  explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1495  Instruction *InsertBefore);
1496 
1497  CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1498  BasicBlock *InsertAtEnd);
1499 
1500  void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1501  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1502  void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1503 
1504  /// Compute the number of operands to allocate.
1505  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1506  // We need one operand for the called function, plus the input operand
1507  // counts provided.
1508  return 1 + NumArgs + NumBundleInputs;
1509  }
1510 
1511 protected:
1512  // Note: Instruction needs to be a friend here to call cloneImpl.
1513  friend class Instruction;
1514 
1515  CallInst *cloneImpl() const;
1516 
1517 public:
1518  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1519  Instruction *InsertBefore = nullptr) {
1520  return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1521  }
1522 
1524  const Twine &NameStr,
1525  Instruction *InsertBefore = nullptr) {
1526  return new (ComputeNumOperands(Args.size()))
1527  CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1528  }
1529 
1531  ArrayRef<OperandBundleDef> Bundles = None,
1532  const Twine &NameStr = "",
1533  Instruction *InsertBefore = nullptr) {
1534  const int NumOperands =
1535  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1536  const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1537 
1538  return new (NumOperands, DescriptorBytes)
1539  CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1540  }
1541 
1542  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1543  BasicBlock *InsertAtEnd) {
1544  return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1545  }
1546 
1548  const Twine &NameStr, BasicBlock *InsertAtEnd) {
1549  return new (ComputeNumOperands(Args.size()))
1550  CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1551  }
1552 
1555  const Twine &NameStr, BasicBlock *InsertAtEnd) {
1556  const int NumOperands =
1557  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1558  const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1559 
1560  return new (NumOperands, DescriptorBytes)
1561  CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1562  }
1563 
1564  static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1565  Instruction *InsertBefore = nullptr) {
1566  return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1567  InsertBefore);
1568  }
1569 
1571  ArrayRef<OperandBundleDef> Bundles = None,
1572  const Twine &NameStr = "",
1573  Instruction *InsertBefore = nullptr) {
1574  return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1575  NameStr, InsertBefore);
1576  }
1577 
1579  const Twine &NameStr,
1580  Instruction *InsertBefore = nullptr) {
1581  return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1582  InsertBefore);
1583  }
1584 
1585  static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1586  BasicBlock *InsertAtEnd) {
1587  return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1588  InsertAtEnd);
1589  }
1590 
1592  const Twine &NameStr, BasicBlock *InsertAtEnd) {
1593  return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1594  InsertAtEnd);
1595  }
1596 
1599  const Twine &NameStr, BasicBlock *InsertAtEnd) {
1600  return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1601  NameStr, InsertAtEnd);
1602  }
1603 
1604  /// Create a clone of \p CI with a different set of operand bundles and
1605  /// insert it before \p InsertPt.
1606  ///
1607  /// The returned call instruction is identical \p CI in every way except that
1608  /// the operand bundles for the new instruction are set to the operand bundles
1609  /// in \p Bundles.
1610  static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1611  Instruction *InsertPt = nullptr);
1612 
1613  /// Generate the IR for a call to malloc:
1614  /// 1. Compute the malloc call's argument as the specified type's size,
1615  /// possibly multiplied by the array size if the array size is not
1616  /// constant 1.
1617  /// 2. Call malloc with that argument.
1618  /// 3. Bitcast the result of the malloc call to the specified type.
1619  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1620  Type *AllocTy, Value *AllocSize,
1621  Value *ArraySize = nullptr,
1622  Function *MallocF = nullptr,
1623  const Twine &Name = "");
1624  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1625  Type *AllocTy, Value *AllocSize,
1626  Value *ArraySize = nullptr,
1627  Function *MallocF = nullptr,
1628  const Twine &Name = "");
1629  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1630  Type *AllocTy, Value *AllocSize,
1631  Value *ArraySize = nullptr,
1632  ArrayRef<OperandBundleDef> Bundles = None,
1633  Function *MallocF = nullptr,
1634  const Twine &Name = "");
1635  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1636  Type *AllocTy, Value *AllocSize,
1637  Value *ArraySize = nullptr,
1638  ArrayRef<OperandBundleDef> Bundles = None,
1639  Function *MallocF = nullptr,
1640  const Twine &Name = "");
1641  /// Generate the IR for a call to the builtin free function.
1642  static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1643  static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1646  Instruction *InsertBefore);
1649  BasicBlock *InsertAtEnd);
1650 
1651  // Note that 'musttail' implies 'tail'.
1652  enum TailCallKind : unsigned {
1658  };
1659 
1661  static_assert(
1662  Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1663  "Bitfields must be contiguous");
1664 
1666  return getSubclassData<TailCallKindField>();
1667  }
1668 
1669  bool isTailCall() const {
1671  return Kind == TCK_Tail || Kind == TCK_MustTail;
1672  }
1673 
1674  bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1675 
1676  bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1677 
1679  setSubclassData<TailCallKindField>(TCK);
1680  }
1681 
1682  void setTailCall(bool IsTc = true) {
1683  setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1684  }
1685 
1686  /// Return true if the call can return twice
1687  bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1688  void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1689 
1690  // Methods for support type inquiry through isa, cast, and dyn_cast:
1691  static bool classof(const Instruction *I) {
1692  return I->getOpcode() == Instruction::Call;
1693  }
1694  static bool classof(const Value *V) {
1695  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1696  }
1697 
1698  /// Updates profile metadata by scaling it by \p S / \p T.
1700 
1701 private:
1702  // Shadow Instruction::setInstructionSubclassData with a private forwarding
1703  // method so that subclasses cannot accidentally use it.
1704  template <typename Bitfield>
1705  void setSubclassData(typename Bitfield::Type Value) {
1706  Instruction::setSubclassData<Bitfield>(Value);
1707  }
1708 };
1709 
1710 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1711  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1712  BasicBlock *InsertAtEnd)
1713  : CallBase(Ty->getReturnType(), Instruction::Call,
1714  OperandTraits<CallBase>::op_end(this) -
1715  (Args.size() + CountBundleInputs(Bundles) + 1),
1716  unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1717  InsertAtEnd) {
1718  init(Ty, Func, Args, Bundles, NameStr);
1719 }
1720 
1721 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1722  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1723  Instruction *InsertBefore)
1724  : CallBase(Ty->getReturnType(), Instruction::Call,
1725  OperandTraits<CallBase>::op_end(this) -
1726  (Args.size() + CountBundleInputs(Bundles) + 1),
1727  unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1728  InsertBefore) {
1729  init(Ty, Func, Args, Bundles, NameStr);
1730 }
1731 
1732 //===----------------------------------------------------------------------===//
1733 // SelectInst Class
1734 //===----------------------------------------------------------------------===//
1735 
1736 /// This class represents the LLVM 'select' instruction.
1737 ///
1738 class SelectInst : public Instruction {
1739  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1740  Instruction *InsertBefore)
1742  &Op<0>(), 3, InsertBefore) {
1743  init(C, S1, S2);
1744  setName(NameStr);
1745  }
1746 
1747  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1748  BasicBlock *InsertAtEnd)
1750  &Op<0>(), 3, InsertAtEnd) {
1751  init(C, S1, S2);
1752  setName(NameStr);
1753  }
1754 
1755  void init(Value *C, Value *S1, Value *S2) {
1756  assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1757  Op<0>() = C;
1758  Op<1>() = S1;
1759  Op<2>() = S2;
1760  }
1761 
1762 protected:
1763  // Note: Instruction needs to be a friend here to call cloneImpl.
1764  friend class Instruction;
1765 
1766  SelectInst *cloneImpl() const;
1767 
1768 public:
1769  static SelectInst *Create(Value *C, Value *S1, Value *S2,
1770  const Twine &NameStr = "",
1771  Instruction *InsertBefore = nullptr,
1772  Instruction *MDFrom = nullptr) {
1773  SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1774  if (MDFrom)
1775  Sel->copyMetadata(*MDFrom);
1776  return Sel;
1777  }
1778 
1779  static SelectInst *Create(Value *C, Value *S1, Value *S2,
1780  const Twine &NameStr,
1781  BasicBlock *InsertAtEnd) {
1782  return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1783  }
1784 
1785  const Value *getCondition() const { return Op<0>(); }
1786  const Value *getTrueValue() const { return Op<1>(); }
1787  const Value *getFalseValue() const { return Op<2>(); }
1788  Value *getCondition() { return Op<0>(); }
1789  Value *getTrueValue() { return Op<1>(); }
1790  Value *getFalseValue() { return Op<2>(); }
1791 
1792  void setCondition(Value *V) { Op<0>() = V; }
1793  void setTrueValue(Value *V) { Op<1>() = V; }
1794  void setFalseValue(Value *V) { Op<2>() = V; }
1795 
1796  /// Swap the true and false values of the select instruction.
1797  /// This doesn't swap prof metadata.
1798  void swapValues() { Op<1>().swap(Op<2>()); }
1799 
1800  /// Return a string if the specified operands are invalid
1801  /// for a select operation, otherwise return null.
1802  static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1803 
1804  /// Transparently provide more efficient getOperand methods.
1806 
1808  return static_cast<OtherOps>(Instruction::getOpcode());
1809  }
1810 
1811  // Methods for support type inquiry through isa, cast, and dyn_cast:
1812  static bool classof(const Instruction *I) {
1813  return I->getOpcode() == Instruction::Select;
1814  }
1815  static bool classof(const Value *V) {
1816  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1817  }
1818 };
1819 
1820 template <>
1821 struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1822 };
1823 
1825 
1826 //===----------------------------------------------------------------------===//
1827 // VAArgInst Class
1828 //===----------------------------------------------------------------------===//
1829 
1830 /// This class represents the va_arg llvm instruction, which returns
1831 /// an argument of the specified type given a va_list and increments that list
1832 ///
class VAArgInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  VAArgInst *cloneImpl() const;

public:
  /// Construct a va_arg instruction producing a value of type \p Ty from the
  /// va_list operand \p List, optionally inserted before \p InsertBefore.
  VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
            Instruction *InsertBefore = nullptr)
      : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
    setName(NameStr);
  }

  /// As above, but appends the new instruction to \p InsertAtEnd.
  VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
            BasicBlock *InsertAtEnd)
      : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
    setName(NameStr);
  }

  // The va_list pointer is the instruction's single operand (operand 0).
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == VAArg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1865 
1866 //===----------------------------------------------------------------------===//
1867 // ExtractElementInst Class
1868 //===----------------------------------------------------------------------===//
1869 
1870 /// This instruction extracts a single (scalar)
1871 /// element from a VectorType value
1872 ///
1874  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1875  Instruction *InsertBefore = nullptr);
1876  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1877  BasicBlock *InsertAtEnd);
1878 
1879 protected:
1880  // Note: Instruction needs to be a friend here to call cloneImpl.
1881  friend class Instruction;
1882 
1883  ExtractElementInst *cloneImpl() const;
1884 
1885 public:
1886  static ExtractElementInst *Create(Value *Vec, Value *Idx,
1887  const Twine &NameStr = "",
1888  Instruction *InsertBefore = nullptr) {
1889  return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1890  }
1891 
1892  static ExtractElementInst *Create(Value *Vec, Value *Idx,
1893  const Twine &NameStr,
1894  BasicBlock *InsertAtEnd) {
1895  return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1896  }
1897 
1898  /// Return true if an extractelement instruction can be
1899  /// formed with the specified operands.
1900  static bool isValidOperands(const Value *Vec, const Value *Idx);
1901 
1902  Value *getVectorOperand() { return Op<0>(); }
1903  Value *getIndexOperand() { return Op<1>(); }
1904  const Value *getVectorOperand() const { return Op<0>(); }
1905  const Value *getIndexOperand() const { return Op<1>(); }
1906 
1908  return cast<VectorType>(getVectorOperand()->getType());
1909  }
1910 
1911  /// Transparently provide more efficient getOperand methods.
1913 
1914  // Methods for support type inquiry through isa, cast, and dyn_cast:
1915  static bool classof(const Instruction *I) {
1916  return I->getOpcode() == Instruction::ExtractElement;
1917  }
1918  static bool classof(const Value *V) {
1919  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1920  }
1921 };
1922 
1923 template <>
1925  public FixedNumOperandTraits<ExtractElementInst, 2> {
1926 };
1927 
1929 
1930 //===----------------------------------------------------------------------===//
1931 // InsertElementInst Class
1932 //===----------------------------------------------------------------------===//
1933 
1934 /// This instruction inserts a single (scalar)
1935 /// element into a VectorType value
1936 ///
1938  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1939  const Twine &NameStr = "",
1940  Instruction *InsertBefore = nullptr);
1941  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1942  BasicBlock *InsertAtEnd);
1943 
1944 protected:
1945  // Note: Instruction needs to be a friend here to call cloneImpl.
1946  friend class Instruction;
1947 
1948  InsertElementInst *cloneImpl() const;
1949 
1950 public:
1951  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1952  const Twine &NameStr = "",
1953  Instruction *InsertBefore = nullptr) {
1954  return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1955  }
1956 
1957  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1958  const Twine &NameStr,
1959  BasicBlock *InsertAtEnd) {
1960  return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1961  }
1962 
1963  /// Return true if an insertelement instruction can be
1964  /// formed with the specified operands.
1965  static bool isValidOperands(const Value *Vec, const Value *NewElt,
1966  const Value *Idx);
1967 
1968  /// Overload to return most specific vector type.
1969  ///
1970  VectorType *getType() const {
1971  return cast<VectorType>(Instruction::getType());
1972  }
1973 
1974  /// Transparently provide more efficient getOperand methods.
1976 
1977  // Methods for support type inquiry through isa, cast, and dyn_cast:
1978  static bool classof(const Instruction *I) {
1979  return I->getOpcode() == Instruction::InsertElement;
1980  }
1981  static bool classof(const Value *V) {
1982  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1983  }
1984 };
1985 
1986 template <>
1988  public FixedNumOperandTraits<InsertElementInst, 3> {
1989 };
1990 
1992 
1993 //===----------------------------------------------------------------------===//
1994 // ShuffleVectorInst Class
1995 //===----------------------------------------------------------------------===//
1996 
/// Sentinel shuffle-mask element: the corresponding result element is
/// undefined (the mask predicates below treat -1 as "don't care").
constexpr int UndefMaskElem = -1;
1998 
1999 /// This instruction constructs a fixed permutation of two
2000 /// input vectors.
2001 ///
2002 /// For each element of the result vector, the shuffle mask selects an element
2003 /// from one of the input vectors to copy to the result. Non-negative elements
2004 /// in the mask represent an index into the concatenated pair of input vectors.
2005 /// UndefMaskElem (-1) specifies that the result element is undefined.
2006 ///
2007 /// For scalable vectors, all the elements of the mask must be 0 or -1. This
2008 /// requirement may be relaxed in the future.
2010  SmallVector<int, 4> ShuffleMask;
2011  Constant *ShuffleMaskForBitcode;
2012 
2013 protected:
2014  // Note: Instruction needs to be a friend here to call cloneImpl.
2015  friend class Instruction;
2016 
2017  ShuffleVectorInst *cloneImpl() const;
2018 
2019 public:
2020  ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
2021  Instruction *InsertBefore = nullptr);
2022  ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2023  BasicBlock *InsertAtEnd);
2024  ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
2025  Instruction *InsertBefore = nullptr);
2026  ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2027  BasicBlock *InsertAtEnd);
2029  const Twine &NameStr = "",
2030  Instruction *InsertBefor = nullptr);
2032  const Twine &NameStr, BasicBlock *InsertAtEnd);
2034  const Twine &NameStr = "",
2035  Instruction *InsertBefor = nullptr);
2037  const Twine &NameStr, BasicBlock *InsertAtEnd);
2038 
2039  void *operator new(size_t S) { return User::operator new(S, 2); }
2040  void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2041 
2042  /// Swap the operands and adjust the mask to preserve the semantics
2043  /// of the instruction.
2044  void commute();
2045 
2046  /// Return true if a shufflevector instruction can be
2047  /// formed with the specified operands.
2048  static bool isValidOperands(const Value *V1, const Value *V2,
2049  const Value *Mask);
2050  static bool isValidOperands(const Value *V1, const Value *V2,
2052 
2053  /// Overload to return most specific vector type.
2054  ///
2055  VectorType *getType() const {
2056  return cast<VectorType>(Instruction::getType());
2057  }
2058 
2059  /// Transparently provide more efficient getOperand methods.
2061 
  /// Return the shuffle mask value of this instruction for the given element
  /// index. Return UndefMaskElem if the element is undef.
  int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }

  /// Convert the input shuffle mask operand to a vector of integers. Undefined
  /// elements of the mask are returned as UndefMaskElem.
  static void getShuffleMask(const Constant *Mask,
                             SmallVectorImpl<int> &Result);

  /// Return the mask for this instruction as a vector of integers. Undefined
  /// elements of the mask are returned as UndefMaskElem.
  void getShuffleMask(SmallVectorImpl<int> &Result) const {
    Result.assign(ShuffleMask.begin(), ShuffleMask.end());
  }

  /// Return the mask for this instruction, for use in bitcode.
  ///
  /// TODO: This is temporary until we decide a new bitcode encoding for
  /// shufflevector.
  Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }

  /// Convert an integer mask into the equivalent constant-vector form used
  /// by the bitcode encoding.
  static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
                                                Type *ResultTy);

  /// Replace this instruction's shuffle mask.
  void setShuffleMask(ArrayRef<int> Mask);

  /// Return the integer shuffle mask without copying it.
  ArrayRef<int> getShuffleMask() const { return ShuffleMask; }

  /// Return true if this shuffle returns a vector with a different number of
  /// elements than its source vectors.
  /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
  ///           shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
  bool changesLength() const {
    // For scalable vectors this compares known-minimum element counts.
    unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
                                 ->getElementCount()
                                 .getKnownMinValue();
    unsigned NumMaskElts = ShuffleMask.size();
    return NumSourceElts != NumMaskElts;
  }

  /// Return true if this shuffle returns a vector with a greater number of
  /// elements than its source vectors.
  /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
  bool increasesLength() const {
    unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
                                 ->getElementCount()
                                 .getKnownMinValue();
    unsigned NumMaskElts = ShuffleMask.size();
    return NumSourceElts < NumMaskElts;
  }
2112 
  /// Return true if this shuffle mask chooses elements from exactly one source
  /// vector.
  /// Example: <7,5,undef,7>
  /// This assumes that vector operands are the same length as the mask.
  static bool isSingleSourceMask(ArrayRef<int> Mask);
  static bool isSingleSourceMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isSingleSourceMask(MaskAsInts);
  }

  /// Return true if this shuffle chooses elements from exactly one source
  /// vector without changing the length of that vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
  /// TODO: Optionally allow length-changing shuffles.
  bool isSingleSource() const {
    return !changesLength() && isSingleSourceMask(ShuffleMask);
  }

  /// Return true if this shuffle mask chooses elements from exactly one source
  /// vector without lane crossings. A shuffle using this mask is not
  /// necessarily a no-op because it may change the number of elements from its
  /// input vectors or it may provide demanded bits knowledge via undef lanes.
  /// Example: <undef,undef,2,3>
  static bool isIdentityMask(ArrayRef<int> Mask);
  static bool isIdentityMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isIdentityMask(MaskAsInts);
  }

  /// Return true if this shuffle chooses elements from exactly one source
  /// vector without lane crossings and does not change the number of elements
  /// from its input vectors.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
  bool isIdentity() const {
    return !changesLength() && isIdentityMask(ShuffleMask);
  }

  /// Return true if this shuffle lengthens exactly one source vector with
  /// undefs in the high elements.
  bool isIdentityWithPadding() const;

  /// Return true if this shuffle extracts the first N elements of exactly one
  /// source vector.
  bool isIdentityWithExtract() const;

  /// Return true if this shuffle concatenates its 2 source vectors. This
  /// returns false if either input is undefined. In that case, the shuffle is
  /// better classified as an identity with padding operation.
  bool isConcat() const;

  /// Return true if this shuffle mask chooses elements from its source vectors
  /// without lane crossings. A shuffle using this mask would be
  /// equivalent to a vector select with a constant condition operand.
  /// Example: <4,1,6,undef>
  /// This returns false if the mask does not choose from both input vectors.
  /// In that case, the shuffle is better classified as an identity shuffle.
  /// This assumes that vector operands are the same length as the mask
  /// (a length-changing shuffle can never be equivalent to a vector select).
  static bool isSelectMask(ArrayRef<int> Mask);
  static bool isSelectMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isSelectMask(MaskAsInts);
  }

  /// Return true if this shuffle chooses elements from its source vectors
  /// without lane crossings and all operands have the same number of elements.
  /// In other words, this shuffle is equivalent to a vector select with a
  /// constant condition operand.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
  /// This returns false if the mask does not choose from both input vectors.
  /// In that case, the shuffle is better classified as an identity shuffle.
  /// TODO: Optionally allow length-changing shuffles.
  bool isSelect() const {
    return !changesLength() && isSelectMask(ShuffleMask);
  }

  /// Return true if this shuffle mask swaps the order of elements from exactly
  /// one source vector.
  /// Example: <7,6,undef,4>
  /// This assumes that vector operands are the same length as the mask.
  static bool isReverseMask(ArrayRef<int> Mask);
  static bool isReverseMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isReverseMask(MaskAsInts);
  }

  /// Return true if this shuffle swaps the order of elements from exactly
  /// one source vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
  /// TODO: Optionally allow length-changing shuffles.
  bool isReverse() const {
    return !changesLength() && isReverseMask(ShuffleMask);
  }

  /// Return true if this shuffle mask chooses all elements with the same value
  /// as the first element of exactly one source vector.
  /// Example: <4,undef,undef,4>
  /// This assumes that vector operands are the same length as the mask.
  static bool isZeroEltSplatMask(ArrayRef<int> Mask);
  static bool isZeroEltSplatMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isZeroEltSplatMask(MaskAsInts);
  }

  /// Return true if all elements of this shuffle are the same value as the
  /// first element of exactly one source vector without changing the length
  /// of that vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
  /// TODO: Optionally allow length-changing shuffles.
  /// TODO: Optionally allow splats from other elements.
  bool isZeroEltSplat() const {
    return !changesLength() && isZeroEltSplatMask(ShuffleMask);
  }
2236 
  /// Return true if this shuffle mask is a transpose mask.
  /// Transpose vector masks transpose a 2xn matrix. They read corresponding
  /// even- or odd-numbered vector elements from two n-dimensional source
  /// vectors and write each result into consecutive elements of an
  /// n-dimensional destination vector. Two shuffles are necessary to complete
  /// the transpose, one for the even elements and another for the odd elements.
  /// This description closely follows how the TRN1 and TRN2 AArch64
  /// instructions operate.
  ///
  /// For example, a simple 2x2 matrix can be transposed with:
  ///
  ///   ; Original matrix
  ///   m0 = < a, b >
  ///   m1 = < c, d >
  ///
  ///   ; Transposed matrix
  ///   t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
  ///   t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
  ///
  /// For matrices having greater than n columns, the resulting nx2 transposed
  /// matrix is stored in two result vectors such that one vector contains
  /// interleaved elements from all the even-numbered rows and the other vector
  /// contains interleaved elements from all the odd-numbered rows. For example,
  /// a 2x4 matrix can be transposed with:
  ///
  ///   ; Original matrix
  ///   m0 = < a, b, c, d >
  ///   m1 = < e, f, g, h >
  ///
  ///   ; Transposed matrix
  ///   t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
  ///   t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
  static bool isTransposeMask(ArrayRef<int> Mask);
  static bool isTransposeMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isTransposeMask(MaskAsInts);
  }

  /// Return true if this shuffle transposes the elements of its inputs without
  /// changing the length of the vectors. This operation may also be known as a
  /// merge or interleave. See the description for isTransposeMask() for the
  /// exact specification.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
  bool isTranspose() const {
    return !changesLength() && isTransposeMask(ShuffleMask);
  }
2285 
  /// Return true if this shuffle mask is an extract subvector mask.
  /// A valid extract subvector mask returns a smaller vector from a single
  /// source operand. The base extraction index is returned as well.
  static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
                                     int &Index);
  static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
                                     int &Index) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
  }

  /// Return true if this shuffle mask is an extract subvector mask.
  bool isExtractSubvectorMask(int &Index) const {
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(getType()))
      return false;

    int NumSrcElts =
        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
    return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
  }

  /// Return true if this shuffle mask is an insert subvector mask.
  /// A valid insert subvector mask inserts the lowest elements of a second
  /// source operand into an in-place first source operand.
  /// Both the sub vector width and the insertion index is returned.
  static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
                                    int &NumSubElts, int &Index);
  static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
                                    int &NumSubElts, int &Index) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
  }

  /// Return true if this shuffle mask is an insert subvector mask.
  bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(getType()))
      return false;

    int NumSrcElts =
        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
    return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
  }
2344 
2345  /// Change values in a shuffle permute mask assuming the two vector operands
2346  /// of length InVecNumElts have swapped position.
2348  unsigned InVecNumElts) {
2349  for (int &Idx : Mask) {
2350  if (Idx == -1)
2351  continue;
2352  Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2353  assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2354  "shufflevector mask index out of range");
2355  }
2356  }
2357 
2358  // Methods for support type inquiry through isa, cast, and dyn_cast:
2359  static bool classof(const Instruction *I) {
2360  return I->getOpcode() == Instruction::ShuffleVector;
2361  }
2362  static bool classof(const Value *V) {
2363  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2364  }
2365 };
2366 
2367 template <>
2369  : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2370 
2372 
2373 //===----------------------------------------------------------------------===//
2374 // ExtractValueInst Class
2375 //===----------------------------------------------------------------------===//
2376 
2377 /// This instruction extracts a struct member or array
2378 /// element value from an aggregate value.
2379 ///
2381  SmallVector<unsigned, 4> Indices;
2382 
2383  ExtractValueInst(const ExtractValueInst &EVI);
2384 
2385  /// Constructors - Create a extractvalue instruction with a base aggregate
2386  /// value and a list of indices. The first ctor can optionally insert before
2387  /// an existing instruction, the second appends the new instruction to the
2388  /// specified BasicBlock.
2389  inline ExtractValueInst(Value *Agg,
2390  ArrayRef<unsigned> Idxs,
2391  const Twine &NameStr,
2392  Instruction *InsertBefore);
2393  inline ExtractValueInst(Value *Agg,
2394  ArrayRef<unsigned> Idxs,
2395  const Twine &NameStr, BasicBlock *InsertAtEnd);
2396 
2397  void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2398 
2399 protected:
2400  // Note: Instruction needs to be a friend here to call cloneImpl.
2401  friend class Instruction;
2402 
2403  ExtractValueInst *cloneImpl() const;
2404 
2405 public:
2407  ArrayRef<unsigned> Idxs,
2408  const Twine &NameStr = "",
2409  Instruction *InsertBefore = nullptr) {
2410  return new
2411  ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2412  }
2413 
2415  ArrayRef<unsigned> Idxs,
2416  const Twine &NameStr,
2417  BasicBlock *InsertAtEnd) {
2418  return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2419  }
2420 
2421  /// Returns the type of the element that would be extracted
2422  /// with an extractvalue instruction with the specified parameters.
2423  ///
2424  /// Null is returned if the indices are invalid for the specified type.
2425  static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2426 
2427  using idx_iterator = const unsigned*;
2428 
2429  inline idx_iterator idx_begin() const { return Indices.begin(); }
2430  inline idx_iterator idx_end() const { return Indices.end(); }
2432  return make_range(idx_begin(), idx_end());
2433  }
2434 
2436  return getOperand(0);
2437  }
2438  const Value *getAggregateOperand() const {
2439  return getOperand(0);
2440  }
2441  static unsigned getAggregateOperandIndex() {
2442  return 0U; // get index for modifying correct operand
2443  }
2444 
2446  return Indices;
2447  }
2448 
2449  unsigned getNumIndices() const {
2450  return (unsigned)Indices.size();
2451  }
2452 
2453  bool hasIndices() const {
2454  return true;
2455  }
2456 
2457  // Methods for support type inquiry through isa, cast, and dyn_cast:
2458  static bool classof(const Instruction *I) {
2459  return I->getOpcode() == Instruction::ExtractValue;
2460  }
2461  static bool classof(const Value *V) {
2462  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2463  }
2464 };
2465 
ExtractValueInst::ExtractValueInst(Value *Agg,
                                   ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   Instruction *InsertBefore)
    // The result type is derived from the aggregate type and the index list;
    // getIndexedType returns null for invalid indices (see its doc above),
    // which checkGEPType is expected to reject.
    : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                       ExtractValue, Agg, InsertBefore) {
  init(Idxs, NameStr);
}

ExtractValueInst::ExtractValueInst(Value *Agg,
                                   ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd)
    : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                       ExtractValue, Agg, InsertAtEnd) {
  init(Idxs, NameStr);
}
2483 
2484 //===----------------------------------------------------------------------===//
2485 // InsertValueInst Class
2486 //===----------------------------------------------------------------------===//
2487 
2488 /// This instruction inserts a struct field of array element
2489 /// value into an aggregate value.
2490 ///
2492  SmallVector<unsigned, 4> Indices;
2493 
2494  InsertValueInst(const InsertValueInst &IVI);
2495 
2496  /// Constructors - Create a insertvalue instruction with a base aggregate
2497  /// value, a value to insert, and a list of indices. The first ctor can
2498  /// optionally insert before an existing instruction, the second appends
2499  /// the new instruction to the specified BasicBlock.
2500  inline InsertValueInst(Value *Agg, Value *Val,
2501  ArrayRef<unsigned> Idxs,
2502  const Twine &NameStr,
2503  Instruction *InsertBefore);
2504  inline InsertValueInst(Value *Agg, Value *Val,
2505  ArrayRef<unsigned> Idxs,
2506  const Twine &NameStr, BasicBlock *InsertAtEnd);
2507 
2508  /// Constructors - These two constructors are convenience methods because one
2509  /// and two index insertvalue instructions are so common.
2510  InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2511  const Twine &NameStr = "",
2512  Instruction *InsertBefore = nullptr);
2513  InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2514  BasicBlock *InsertAtEnd);
2515 
2516  void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2517  const Twine &NameStr);
2518 
2519 protected:
2520  // Note: Instruction needs to be a friend here to call cloneImpl.
2521  friend class Instruction;
2522 
2523  InsertValueInst *cloneImpl() const;
2524 
2525 public:
2526  // allocate space for exactly two operands
2527  void *operator new(size_t S) { return User::operator new(S, 2); }
2528  void operator delete(void *Ptr) { User::operator delete(Ptr); }
2529 
2530  static InsertValueInst *Create(Value *Agg, Value *Val,
2531  ArrayRef<unsigned> Idxs,
2532  const Twine &NameStr = "",
2533  Instruction *InsertBefore = nullptr) {
2534  return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2535  }
2536 
2537  static InsertValueInst *Create(Value *Agg, Value *Val,
2538  ArrayRef<unsigned> Idxs,
2539  const Twine &NameStr,
2540  BasicBlock *InsertAtEnd) {
2541  return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2542  }
2543 
2544  /// Transparently provide more efficient getOperand methods.
2546 
2547  using idx_iterator = const unsigned*;
2548 
2549  inline idx_iterator idx_begin() const { return Indices.begin(); }
2550  inline idx_iterator idx_end() const { return Indices.end(); }
2552  return make_range(idx_begin(), idx_end());
2553  }
2554 
2556  return getOperand(0);
2557  }
2558  const Value *getAggregateOperand() const {
2559  return getOperand(0);
2560  }
2561  static unsigned getAggregateOperandIndex() {
2562  return 0U; // get index for modifying correct operand
2563  }
2564 
2566  return getOperand(1);
2567  }
2569  return getOperand(1);
2570  }
2571  static unsigned getInsertedValueOperandIndex() {
2572  return 1U; // get index for modifying correct operand
2573  }
2574 
2576  return Indices;
2577  }
2578 
2579  unsigned getNumIndices() const {
2580  return (unsigned)Indices.size();
2581  }
2582 
2583  bool hasIndices() const {
2584  return true;
2585  }
2586 
2587  // Methods for support type inquiry through isa, cast, and dyn_cast:
2588  static bool classof(const Instruction *I) {
2589  return I->getOpcode() == Instruction::InsertValue;
2590  }
2591  static bool classof(const Value *V) {
2592  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2593  }
2594 };
2595 
2596 template <>
2598  public FixedNumOperandTraits<InsertValueInst, 2> {
2599 };
2600 
2601 InsertValueInst::InsertValueInst(Value *Agg,
2602  Value *Val,
2603  ArrayRef<unsigned> Idxs,
2604  const Twine &NameStr,
2605  Instruction *InsertBefore)
2606  : Instruction(Agg->getType(), InsertValue,
2607  OperandTraits<InsertValueInst>::op_begin(this),
2608  2, InsertBefore) {
2609  init(Agg, Val, Idxs, NameStr);
2610 }
2611 
2612 InsertValueInst::InsertValueInst(Value *Agg,
2613  Value *Val,
2614  ArrayRef<unsigned> Idxs,
2615  const Twine &NameStr,
2616  BasicBlock *InsertAtEnd)
2617  : Instruction(Agg->getType(), InsertValue,
2618  OperandTraits<InsertValueInst>::op_begin(this),
2619  2, InsertAtEnd) {
2620  init(Agg, Val, Idxs, NameStr);
2621 }
2622 
2624 
2625 //===----------------------------------------------------------------------===//
2626 // PHINode Class
2627 //===----------------------------------------------------------------------===//
2628 
2629 // PHINode - The PHINode class is used to represent the magical mystical PHI
2630 // node, that can not exist in nature, but can be synthesized in a computer
2631 // scientist's overactive imagination.
2632 //
class PHINode : public Instruction {
  /// The number of operands actually allocated. NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;

  PHINode(const PHINode &PN);

  // PHI uses are "hung off" the instruction (see allocHungoffUses below), so
  // the operand list can grow as incoming edges are added.
  explicit PHINode(Type *Ty, unsigned NumReservedValues,
                   const Twine &NameStr = "",
                   Instruction *InsertBefore = nullptr)
      : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
        ReservedSpace(NumReservedValues) {
    assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
    setName(NameStr);
    allocHungoffUses(ReservedSpace);
  }

  PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
          BasicBlock *InsertAtEnd)
      : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
        ReservedSpace(NumReservedValues) {
    assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
    setName(NameStr);
    allocHungoffUses(ReservedSpace);
  }
2658 
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  PHINode *cloneImpl() const;

  // allocHungoffUses - this is more complicated than the generic
  // User::allocHungoffUses, because we have to allocate Uses for the incoming
  // values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }
2671 
2672 public:
2673  /// Constructors - NumReservedValues is a hint for the number of incoming
2674  /// edges that this phi node will have (use 0 if you really have no idea).
2675  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2676  const Twine &NameStr = "",
2677  Instruction *InsertBefore = nullptr) {
2678  return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2679  }
2680 
2681  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2682  const Twine &NameStr, BasicBlock *InsertAtEnd) {
2683  return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2684  }
2685 
2686  /// Provide fast operand accessors
2688 
2689  // Block iterator interface. This provides access to the list of incoming
2690  // basic blocks, which parallels the list of incoming values.
2691 
2694 
2696  return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2697  }
2698 
2700  return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2701  }
2702 
2704  return block_begin() + getNumOperands();
2705  }
2706 
2708  return block_begin() + getNumOperands();
2709  }
2710 
2712  return make_range(block_begin(), block_end());
2713  }
2714 
2716  return make_range(block_begin(), block_end());
2717  }
2718 
2719  op_range incoming_values() { return operands(); }
2720 
2721  const_op_range incoming_values() const { return operands(); }
2722 
2723  /// Return the number of incoming edges
2724  ///
2725  unsigned getNumIncomingValues() const { return getNumOperands(); }
2726 
2727  /// Return incoming value number x
2728  ///
2729  Value *getIncomingValue(unsigned i) const {
2730  return getOperand(i);
2731  }
2732  void setIncomingValue(unsigned i, Value *V) {
2733  assert(V && "PHI node got a null value!");
2734  assert(getType() == V->getType() &&
2735  "All operands to PHI node must be the same type as the PHI node!");
2736  setOperand(i, V);
2737  }
2738 
2739  static unsigned getOperandNumForIncomingValue(unsigned i) {
2740  return i;
2741  }
2742 
2743  static unsigned getIncomingValueNumForOperand(unsigned i) {
2744  return i;
2745  }
2746 
2747  /// Return incoming basic block number @p i.
2748  ///
2749  BasicBlock *getIncomingBlock(unsigned i) const {
2750  return block_begin()[i];
2751  }
2752 
2753  /// Return incoming basic block corresponding
2754  /// to an operand of the PHI.
2755  ///
2756  BasicBlock *getIncomingBlock(const Use &U) const {
2757  assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2758  return getIncomingBlock(unsigned(&U - op_begin()));
2759  }
2760 
2761  /// Return incoming basic block corresponding
2762  /// to value use iterator.
2763  ///
2765  return getIncomingBlock(I.getUse());
2766  }
2767 
2768  void setIncomingBlock(unsigned i, BasicBlock *BB) {
2769  assert(BB && "PHI node got a null basic block!");
2770  block_begin()[i] = BB;
2771  }
2772 
2773  /// Replace every incoming basic block \p Old to basic block \p New.
2775  assert(New && Old && "PHI node got a null basic block!");
2776  for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2777  if (getIncomingBlock(Op) == Old)
2778  setIncomingBlock(Op, New);
2779  }
2780 
2781  /// Add an incoming value to the end of the PHI list
2782  ///
2784  if (getNumOperands() == ReservedSpace)
2785  growOperands(); // Get more space!
2786  // Initialize some new operands.
2787  setNumHungOffUseOperands(getNumOperands() + 1);
2788  setIncomingValue(getNumOperands() - 1, V);
2789  setIncomingBlock(getNumOperands() - 1, BB);
2790  }
2791 
2792  /// Remove an incoming value. This is useful if a
2793  /// predecessor basic block is deleted. The value removed is returned.
2794  ///
2795  /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2796  /// is true), the PHI node is destroyed and any uses of it are replaced with
2797  /// dummy values. The only time there should be zero incoming values to a PHI
2798  /// node is when the block is dead, so this strategy is sound.
2799  ///
2800  Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2801 
2802  Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2803  int Idx = getBasicBlockIndex(BB);
2804  assert(Idx >= 0 && "Invalid basic block argument to remove!");
2805  return removeIncomingValue(Idx, DeletePHIIfEmpty);
2806  }
2807 
2808  /// Return the first index of the specified basic
2809  /// block in the value list for this PHI. Returns -1 if no instance.
2810  ///
2811  int getBasicBlockIndex(const BasicBlock *BB) const {
2812  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2813  if (block_begin()[i] == BB)
2814  return i;
2815  return -1;
2816  }
2817 
2819  int Idx = getBasicBlockIndex(BB);
2820  assert(Idx >= 0 && "Invalid basic block argument!");
2821  return getIncomingValue(Idx);
2822  }
2823 
2824  /// Set every incoming value(s) for block \p BB to \p V.
2826  assert(BB && "PHI node got a null basic block!");
2827  bool Found = false;
2828  for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2829  if (getIncomingBlock(Op) == BB) {
2830  Found = true;
2831  setIncomingValue(Op, V);
2832  }
2833  (void)Found;
2834  assert(Found && "Invalid basic block argument to set!");
2835  }
2836 
2837  /// If the specified PHI node always merges together the
2838  /// same value, return the value, otherwise return null.
2839  Value *hasConstantValue() const;
2840 
2841  /// Whether the specified PHI node always merges
2842  /// together the same value, assuming undefs are equal to a unique
2843  /// non-undef value.
2844  bool hasConstantOrUndefValue() const;
2845 
2846  /// If the PHI node is complete which means all of its parent's predecessors
2847  /// have incoming value in this PHI, return true, otherwise return false.
2848  bool isComplete() const {
2850  [this](const BasicBlock *Pred) {
2851  return getBasicBlockIndex(Pred) >= 0;
2852  });
2853  }
2854 
2855  /// Methods for support type inquiry through isa, cast, and dyn_cast:
2856  static bool classof(const Instruction *I) {
2857  return I->getOpcode() == Instruction::PHI;
2858  }
2859  static bool classof(const Value *V) {
2860  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2861  }
2862 
2863 private:
2864  void growOperands();
2865 };
2866 
2867 template <>
2869 };
2870 
2872 
2873 //===----------------------------------------------------------------------===//
2874 // LandingPadInst Class
2875 //===----------------------------------------------------------------------===//
2876 
2877 //===---------------------------------------------------------------------------
2878 /// The landingpad instruction holds all of the information
2879 /// necessary to generate correct exception handling. The landingpad instruction
2880 /// cannot be moved from the top of a landing pad block, which itself is
2881 /// accessible only from the 'unwind' edge of an invoke. This uses the
2882 /// SubclassData field in Value to store whether or not the landingpad is a
2883 /// cleanup.
2884 ///
2885 class LandingPadInst : public Instruction {
2886  using CleanupField = BoolBitfieldElementT<0>;
2887 
2888  /// The number of operands actually allocated. NumOperands is
2889  /// the number actually in use.
2890  unsigned ReservedSpace;
2891 
2892  LandingPadInst(const LandingPadInst &LP);
2893 
2894 public:
2896 
2897 private:
2898  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2899  const Twine &NameStr, Instruction *InsertBefore);
2900  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2901  const Twine &NameStr, BasicBlock *InsertAtEnd);
2902 
2903  // Allocate space for exactly zero operands.
2904  void *operator new(size_t S) { return User::operator new(S); }
2905 
2906  void growOperands(unsigned Size);
2907  void init(unsigned NumReservedValues, const Twine &NameStr);
2908 
2909 protected:
2910  // Note: Instruction needs to be a friend here to call cloneImpl.
2911  friend class Instruction;
2912 
2913  LandingPadInst *cloneImpl() const;
2914 
2915 public:
2916  void operator delete(void *Ptr) { User::operator delete(Ptr); }
2917 
2918  /// Constructors - NumReservedClauses is a hint for the number of incoming
2919  /// clauses that this landingpad will have (use 0 if you really have no idea).
2920  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2921  const Twine &NameStr = "",
2922  Instruction *InsertBefore = nullptr);
2923  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2924  const Twine &NameStr, BasicBlock *InsertAtEnd);
2925 
2926  /// Provide fast operand accessors
2928 
2929  /// Return 'true' if this landingpad instruction is a
2930  /// cleanup. I.e., it should be run when unwinding even if its landing pad
2931  /// doesn't catch the exception.
2932  bool isCleanup() const { return getSubclassData<CleanupField>(); }
2933 
2934  /// Indicate that this landingpad instruction is a cleanup.
2935  void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2936 
2937  /// Add a catch or filter clause to the landing pad.
2938  void addClause(Constant *ClauseVal);
2939 
2940  /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2941  /// determine what type of clause this is.
2942  Constant *getClause(unsigned Idx) const {
2943  return cast<Constant>(getOperandList()[Idx]);
2944  }
2945 
2946  /// Return 'true' if the clause and index Idx is a catch clause.
2947  bool isCatch(unsigned Idx) const {
2948  return !isa<ArrayType>(getOperandList()[Idx]->getType());
2949  }
2950 
2951  /// Return 'true' if the clause and index Idx is a filter clause.
2952  bool isFilter(unsigned Idx) const {
2953  return isa<ArrayType>(getOperandList()[Idx]->getType());
2954  }
2955 
2956  /// Get the number of clauses for this landing pad.
2957  unsigned getNumClauses() const { return getNumOperands(); }
2958 
2959  /// Grow the size of the operand list to accommodate the new
2960  /// number of clauses.
2961  void reserveClauses(unsigned Size) { growOperands(Size); }
2962 
2963  // Methods for support type inquiry through isa, cast, and dyn_cast:
2964  static bool classof(const Instruction *I) {
2965  return I->getOpcode() == Instruction::LandingPad;
2966  }
2967  static bool classof(const Value *V) {
2968  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2969  }
2970 };
2971 
2972 template <>
2974 };
2975 
2977 
2978 //===----------------------------------------------------------------------===//
2979 // ReturnInst Class
2980 //===----------------------------------------------------------------------===//
2981 
2982 //===---------------------------------------------------------------------------
2983 /// Return a value (possibly void), from a function. Execution
2984 /// does not continue in this function any longer.
2985 ///
2986 class ReturnInst : public Instruction {
2987  ReturnInst(const ReturnInst &RI);
2988 
2989 private:
2990  // ReturnInst constructors:
2991  // ReturnInst() - 'ret void' instruction
2992  // ReturnInst( null) - 'ret void' instruction
2993  // ReturnInst(Value* X) - 'ret X' instruction
2994  // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2995  // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2996  // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2997  // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2998  //
2999  // NOTE: If the Value* passed is of type void then the constructor behaves as
3000  // if it was passed NULL.
3001  explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3002  Instruction *InsertBefore = nullptr);
3003  ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3004  explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3005 
3006 protected:
3007  // Note: Instruction needs to be a friend here to call cloneImpl.
3008  friend class Instruction;
3009 
3010  ReturnInst *cloneImpl() const;
3011 
3012 public:
3013  static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3014  Instruction *InsertBefore = nullptr) {
3015  return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3016  }
3017 
3018  static ReturnInst* Create(LLVMContext &C, Value *retVal,
3019  BasicBlock *InsertAtEnd) {
3020  return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3021  }
3022 
3023  static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3024  return new(0) ReturnInst(C, InsertAtEnd);
3025  }
3026 
3027  /// Provide fast operand accessors
3029 
3030  /// Convenience accessor. Returns null if there is no return value.
3032  return getNumOperands() != 0 ? getOperand(0) : nullptr;
3033  }
3034 
3035  unsigned getNumSuccessors() const { return 0; }
3036 
3037  // Methods for support type inquiry through isa, cast, and dyn_cast:
3038  static bool classof(const Instruction *I) {
3039  return (I->getOpcode() == Instruction::Ret);
3040  }
3041  static bool classof(const Value *V) {
3042  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3043  }
3044 
3045 private:
3046  BasicBlock *getSuccessor(unsigned idx) const {
3047  llvm_unreachable("ReturnInst has no successors!");
3048  }
3049 
3050  void setSuccessor(unsigned idx, BasicBlock *B) {
3051  llvm_unreachable("ReturnInst has no successors!");
3052  }
3053 };
3054 
3055 template <>
3056 struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3057 };
3058 
3060 
3061 //===----------------------------------------------------------------------===//
3062 // BranchInst Class
3063 //===----------------------------------------------------------------------===//
3064 
3065 //===---------------------------------------------------------------------------
3066 /// Conditional or Unconditional Branch instruction.
3067 ///
3068 class BranchInst : public Instruction {
3069  /// Ops list - Branches are strange. The operands are ordered:
3070  /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3071  /// they don't have to check for cond/uncond branchness. These are mostly
3072  /// accessed relative from op_end().
3073  BranchInst(const BranchInst &BI);
3074  // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3075  // BranchInst(BB *B) - 'br B'
3076  // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3077  // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3078  // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3079  // BranchInst(BB* B, BB *I) - 'br B' insert at end
3080  // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3081  explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3082  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3083  Instruction *InsertBefore = nullptr);
3084  BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3085  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3086  BasicBlock *InsertAtEnd);
3087 
3088  void AssertOK();
3089 
3090 protected:
3091  // Note: Instruction needs to be a friend here to call cloneImpl.
3092  friend class Instruction;
3093 
3094  BranchInst *cloneImpl() const;
3095 
3096 public:
3097  /// Iterator type that casts an operand to a basic block.
3098  ///
3099  /// This only makes sense because the successors are stored as adjacent
3100  /// operands for branch instructions.
3102  : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3103  std::random_access_iterator_tag, BasicBlock *,
3104  ptrdiff_t, BasicBlock *, BasicBlock *> {
3105  explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3106 
3107  BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3108  BasicBlock *operator->() const { return operator*(); }
3109  };
3110 
3111  /// The const version of `succ_op_iterator`.
3113  : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3114  std::random_access_iterator_tag,
3115  const BasicBlock *, ptrdiff_t, const BasicBlock *,
3116  const BasicBlock *> {
3118  : iterator_adaptor_base(I) {}
3119 
3120  const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3121  const BasicBlock *operator->() const { return operator*(); }
3122  };
3123 
3124  static BranchInst *Create(BasicBlock *IfTrue,
3125  Instruction *InsertBefore = nullptr) {
3126  return new(1) BranchInst(IfTrue, InsertBefore);
3127  }
3128 
3129  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3130  Value *Cond, Instruction *InsertBefore = nullptr) {
3131  return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3132  }
3133 
3134  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3135  return new(1) BranchInst(IfTrue, InsertAtEnd);
3136  }
3137 
3138  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3139  Value *Cond, BasicBlock *InsertAtEnd) {
3140  return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3141  }
3142 
3143  /// Transparently provide more efficient getOperand methods.
3145 
3146  bool isUnconditional() const { return getNumOperands() == 1; }
3147  bool isConditional() const { return getNumOperands() == 3; }
3148 
3149  Value *getCondition() const {
3150  assert(isConditional() && "Cannot get condition of an uncond branch!");
3151  return Op<-3>();
3152  }
3153 
3154  void setCondition(Value *V) {
3155  assert(isConditional() && "Cannot set condition of unconditional branch!");
3156  Op<-3>() = V;
3157  }
3158 
3159  unsigned getNumSuccessors() const { return 1+isConditional(); }
3160 
3161  BasicBlock *getSuccessor(unsigned i) const {
3162  assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3163  return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3164  }
3165 
3166  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3167  assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3168  *(&Op<-1>() - idx) = NewSucc;
3169  }
3170 
3171  /// Swap the successors of this branch instruction.
3172  ///
3173  /// Swaps the successors of the branch instruction. This also swaps any
3174  /// branch weight metadata associated with the instruction so that it
3175  /// continues to map correctly to each operand.
3176  void swapSuccessors();
3177 
3179  return make_range(
3180  succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3181  succ_op_iterator(value_op_end()));
3182  }
3183 
3186  std::next(value_op_begin(), isConditional() ? 1 : 0)),
3187  const_succ_op_iterator(value_op_end()));
3188  }
3189 
3190  // Methods for support type inquiry through isa, cast, and dyn_cast:
3191  static bool classof(const Instruction *I) {
3192  return (I->getOpcode() == Instruction::Br);
3193  }
3194  static bool classof(const Value *V) {
3195  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3196  }
3197 };
3198 
3199 template <>
3200 struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3201 };
3202 
3204 
3205 //===----------------------------------------------------------------------===//
3206 // SwitchInst Class
3207 //===----------------------------------------------------------------------===//
3208 
3209 //===---------------------------------------------------------------------------
3210 /// Multiway switch
3211 ///
3212 class SwitchInst : public Instruction {
3213  unsigned ReservedSpace;
3214 
3215  // Operand[0] = Value to switch on
3216  // Operand[1] = Default basic block destination
3217  // Operand[2n ] = Value to match
3218  // Operand[2n+1] = BasicBlock to go to on match
3219  SwitchInst(const SwitchInst &SI);
3220 
3221  /// Create a new switch instruction, specifying a value to switch on and a
3222  /// default destination. The number of additional cases can be specified here
3223  /// to make memory allocation more efficient. This constructor can also
3224  /// auto-insert before another instruction.
3225  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3226  Instruction *InsertBefore);
3227 
3228  /// Create a new switch instruction, specifying a value to switch on and a
3229  /// default destination. The number of additional cases can be specified here
3230  /// to make memory allocation more efficient. This constructor also
3231  /// auto-inserts at the end of the specified BasicBlock.
3232  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3233  BasicBlock *InsertAtEnd);
3234 
3235  // allocate space for exactly zero operands
3236  void *operator new(size_t S) { return User::operator new(S); }
3237 
3238  void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3239  void growOperands();
3240 
3241 protected:
3242  // Note: Instruction needs to be a friend here to call cloneImpl.
3243  friend class Instruction;
3244 
3245  SwitchInst *cloneImpl() const;
3246 
3247 public:
3248  void operator delete(void *Ptr) { User::operator delete(Ptr); }
3249 
3250  // -2
3251  static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3252 
3253  template <typename CaseHandleT> class CaseIteratorImpl;
3254 
3255  /// A handle to a particular switch case. It exposes a convenient interface
3256  /// to both the case value and the successor block.
3257  ///
3258  /// We define this as a template and instantiate it to form both a const and
3259  /// non-const handle.
3260  template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3262  // Directly befriend both const and non-const iterators.
3263  friend class SwitchInst::CaseIteratorImpl<
3264  CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3265 
3266  protected:
3267  // Expose the switch type we're parameterized with to the iterator.
3268  using SwitchInstType = SwitchInstT;
3269 
3270  SwitchInstT *SI;
3272 
3273  CaseHandleImpl() = default;
3274  CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3275 
3276  public:
3277  /// Resolves case value for current case.
3278  ConstantIntT *getCaseValue() const {
3279  assert((unsigned)Index < SI->getNumCases() &&
3280  "Index out the number of cases.");
3281  return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3282  }
3283 
3284  /// Resolves successor for current case.
3285  BasicBlockT *getCaseSuccessor() const {
3286  assert(((unsigned)Index < SI->getNumCases() ||
3287  (unsigned)Index == DefaultPseudoIndex) &&
3288  "Index out the number of cases.");
3289  return SI->getSuccessor(getSuccessorIndex());
3290  }
3291 
3292  /// Returns number of current case.
3293  unsigned getCaseIndex() const { return Index; }
3294 
3295  /// Returns successor index for current case successor.
3296  unsigned getSuccessorIndex() const {
3297  assert(((unsigned)Index == DefaultPseudoIndex ||
3298  (unsigned)Index < SI->getNumCases()) &&
3299  "Index out the number of cases.");
3300  return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3301  }
3302 
3303  bool operator==(const CaseHandleImpl &RHS) const {
3304  assert(SI == RHS.SI && "Incompatible operators.");
3305  return Index == RHS.Index;
3306  }
3307  };
3308 
3309  using ConstCaseHandle =
3311 
3313  : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3315 
3316  public:
3318 
3319  /// Sets the new value for current case.
3321  assert((unsigned)Index < SI->getNumCases() &&
3322  "Index out the number of cases.");
3323  SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3324  }
3325 
3326  /// Sets the new successor for current case.
3328  SI->setSuccessor(getSuccessorIndex(), S);
3329  }
3330  };
3331 
3332  template <typename CaseHandleT>
3333  class CaseIteratorImpl
3334  : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3335  std::random_access_iterator_tag,
3336  CaseHandleT> {
3337  using SwitchInstT = typename CaseHandleT::SwitchInstType;
3338 
3339  CaseHandleT Case;
3340 
3341  public:
3342  /// Default constructed iterator is in an invalid state until assigned to
3343  /// a case for a particular switch.
3344  CaseIteratorImpl() = default;
3345 
3346  /// Initializes case iterator for given SwitchInst and for given
3347  /// case number.
3348  CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3349 
3350  /// Initializes case iterator for given SwitchInst and for given
3351  /// successor index.
3353  unsigned SuccessorIndex) {
3354  assert(SuccessorIndex < SI->getNumSuccessors() &&
3355  "Successor index # out of range!");
3356  return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3357  : CaseIteratorImpl(SI, DefaultPseudoIndex);
3358  }
3359 
3360  /// Support converting to the const variant. This will be a no-op for const
3361  /// variant.
3363  return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3364  }
3365 
3367  // Check index correctness after addition.
3368  // Note: Index == getNumCases() means end().
3369  assert(Case.Index + N >= 0 &&
3370  (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3371  "Case.Index out the number of cases.");
3372  Case.Index += N;
3373  return *this;
3374  }
3376  // Check index correctness after subtraction.
3377  // Note: Case.Index == getNumCases() means end().
3378  assert(Case.Index - N >= 0 &&
3379  (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3380  "Case.Index out the number of cases.");
3381  Case.Index -= N;
3382  return *this;
3383  }
3385  assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3386  return Case.Index - RHS.Case.Index;
3387  }
3388  bool operator==(const CaseIteratorImpl &RHS) const {
3389  return Case == RHS.Case;
3390  }
3391  bool operator<(const CaseIteratorImpl &RHS) const {
3392  assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3393  return Case.Index < RHS.Case.Index;
3394  }
3395  CaseHandleT &operator*() { return Case; }
3396  const CaseHandleT &operator*() const { return Case; }
3397  };
3398 
3401 
3403  unsigned NumCases,
3404  Instruction *InsertBefore = nullptr) {
3405  return new SwitchInst(Value, Default, NumCases, InsertBefore);
3406  }
3407 
3409  unsigned NumCases, BasicBlock *InsertAtEnd) {
3410  return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3411  }
3412 
3413  /// Provide fast operand accessors
3415 
3416  // Accessor Methods for Switch stmt
3417  Value *getCondition() const { return getOperand(0); }
3418  void setCondition(Value *V) { setOperand(0, V); }
3419 
3421  return cast<BasicBlock>(getOperand(1));
3422  }
3423 
3424  void setDefaultDest(BasicBlock *DefaultCase) {
3425  setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3426  }
3427 
3428  /// Return the number of 'cases' in this switch instruction, excluding the
3429  /// default case.
3430  unsigned getNumCases() const {
3431  return getNumOperands()/2 - 1;
3432  }
3433 
3434  /// Returns a read/write iterator that points to the first case in the
3435  /// SwitchInst.
3437  return CaseIt(this, 0);
3438  }
3439 
3440  /// Returns a read-only iterator that points to the first case in the
3441  /// SwitchInst.
3443  return ConstCaseIt(this, 0);
3444  }
3445 
3446  /// Returns a read/write iterator that points one past the last in the
3447  /// SwitchInst.
3449  return CaseIt(this, getNumCases());
3450  }
3451 
3452  /// Returns a read-only iterator that points one past the last in the
3453  /// SwitchInst.
3455  return ConstCaseIt(this, getNumCases());
3456  }
3457 
3458  /// Iteration adapter for range-for loops.
3460  return make_range(case_begin(), case_end());
3461  }
3462 
3463  /// Constant iteration adapter for range-for loops.
3465  return make_range(case_begin(), case_end());
3466  }
3467 
3468  /// Returns an iterator that points to the default case.
3469  /// Note: this iterator allows to resolve successor only. Attempt
3470  /// to resolve case value causes an assertion.
3471  /// Also note, that increment and decrement also causes an assertion and
3472  /// makes iterator invalid.
3474  return CaseIt(this, DefaultPseudoIndex);
3475  }
3477  return ConstCaseIt(this, DefaultPseudoIndex);
3478  }
3479 
3480  /// Search all of the case values for the specified constant. If it is
3481  /// explicitly handled, return the case iterator of it, otherwise return
3482  /// default case iterator to indicate that it is handled by the default
3483  /// handler.
3486  cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
3487  if (I != case_end())
3488  return I;
3489 
3490  return case_default();
3491  }
3494  return Case.getCaseValue() == C;
3495  });
3496  if (I != case_end())
3497  return I;
3498 
3499  return case_default();
3500  }
3501 
3502  /// Finds the unique case value for a given successor. Returns null if the
3503  /// successor is not found, not unique, or is the default case.
3505  if (BB == getDefaultDest())
3506  return nullptr;
3507 
3508  ConstantInt *CI = nullptr;
3509  for (auto Case : cases()) {
3510  if (Case.getCaseSuccessor() != BB)
3511  continue;
3512 
3513  if (CI)
3514  return nullptr; // Multiple cases lead to BB.
3515 
3516  CI = Case.getCaseValue();
3517  }
3518 
3519  return CI;
3520  }
3521 
3522  /// Add an entry to the switch instruction.
3523  /// Note:
3524  /// This action invalidates case_end(). Old case_end() iterator will
3525  /// point to the added case.
3526  void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3527 
3528  /// This method removes the specified case and its successor from the switch
3529  /// instruction. Note that this operation may reorder the remaining cases at
3530  /// index idx and above.
3531  /// Note:
3532  /// This action invalidates iterators for all cases following the one removed,
3533  /// including the case_end() iterator. It returns an iterator for the next
3534  /// case.
3535  CaseIt removeCase(CaseIt I);
3536 
3537  unsigned getNumSuccessors() const { return getNumOperands()/2; }
3538  BasicBlock *getSuccessor(unsigned idx) const {
3539  assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3540  return cast<BasicBlock>(getOperand(idx*2+1));
3541  }
3542  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3543  assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3544  setOperand(idx * 2 + 1, NewSucc);
3545  }
3546 
3547  // Methods for support type inquiry through isa, cast, and dyn_cast:
3548  static bool classof(const Instruction *I) {
3549  return I->getOpcode() == Instruction::Switch;
3550  }
3551  static bool classof(const Value *V) {
3552  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3553  }
3554 };
3555 
3556 /// A wrapper class to simplify modification of SwitchInst cases along with
3557 /// their prof branch_weights metadata.
 // NOTE(review): this rendering elides the class declaration line and several
 // member declarations (the CaseWeightOpt alias, buildProfBranchWeightsMD(),
 // the constructor, the destructor, and the removeCase()/eraseFromParent()
 // signatures). Only the surviving lines are annotated below.
 // The wrapped switch instruction; all accessors forward to it.
3559  SwitchInst &SI;
 // Set whenever a case/weight is modified; checked before rewriting metadata.
3561  bool Changed = false;
3562 
3563 protected:
 // Fetch the existing !prof branch_weights node of \p SI, if any.
3564  static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3565 
3567 
 // Populate the wrapper's weight bookkeeping from the instruction's metadata.
3568  void init();
3569 
3570 public:
 // Smart-pointer-style access so the wrapper can be used wherever a
 // SwitchInst* is expected.
3572  SwitchInst *operator->() { return &SI; }
3573  SwitchInst &operator*() { return SI; }
3574  operator SwitchInst *() { return &SI; }
3575 
3577 
 // On destruction (signature elided above), flush accumulated weight changes
 // back into the instruction's !prof metadata — but only if something changed.
3579  if (Changed)
3580  SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3581  }
3582 
3583  /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3584  /// correspondent branch weight.
3586 
3587  /// Delegate the call to the underlying SwitchInst::addCase() and set the
3588  /// specified branch weight for the added case.
3589  void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3590 
3591  /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3592  /// this object to not touch the underlying SwitchInst in destructor.
3594 
 /// Set/get the branch weight recorded for successor \p idx.
3595  void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3596  CaseWeightOpt getSuccessorWeight(unsigned idx);
3597 
 /// Static variant usable without constructing a wrapper.
3598  static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3599 };
3600 
3601 template <>
3603 };
3604 
3606 
3607 //===----------------------------------------------------------------------===//
3608 // IndirectBrInst Class
3609 //===----------------------------------------------------------------------===//
3610 
3611 //===---------------------------------------------------------------------------
3612 /// Indirect Branch Instruction.
3613 ///
3614 class IndirectBrInst : public Instruction {
 // Number of operand slots currently allocated; operands grow on demand via
 // growOperands(), so this may exceed getNumOperands().
3615  unsigned ReservedSpace;
3616 
3617  // Operand[0] = Address to jump to
3618  // Operand[n+1] = n-th destination
3619  IndirectBrInst(const IndirectBrInst &IBI);
3620 
3621  /// Create a new indirectbr instruction, specifying an
3622  /// Address to jump to. The number of expected destinations can be specified
3623  /// here to make memory allocation more efficient. This constructor can also
3624  /// autoinsert before another instruction.
3625  IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3626 
3627  /// Create a new indirectbr instruction, specifying an
3628  /// Address to jump to. The number of expected destinations can be specified
3629  /// here to make memory allocation more efficient. This constructor also
3630  /// autoinserts at the end of the specified BasicBlock.
3631  IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3632 
3633  // allocate space for exactly zero operands
3634  void *operator new(size_t S) { return User::operator new(S); }
3635 
3636  void init(Value *Address, unsigned NumDests);
3637  void growOperands();
3638 
3639 protected:
3640  // Note: Instruction needs to be a friend here to call cloneImpl.
3641  friend class Instruction;
3642 
3643  IndirectBrInst *cloneImpl() const;
3644 
3645 public:
3646  void operator delete(void *Ptr) { User::operator delete(Ptr); }
3647 
3648  /// Iterator type that casts an operand to a basic block.
3649  ///
3650  /// This only makes sense because the successors are stored as adjacent
3651  /// operands for indirectbr instructions.
 // NOTE(review): the `struct succ_op_iterator` declaration line and its
 // constructor are elided in this rendering.
3653  : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3654  std::random_access_iterator_tag, BasicBlock *,
3655  ptrdiff_t, BasicBlock *, BasicBlock *> {
3657 
 // Dereference the underlying value operand and cast it to a block.
3658  BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3659  BasicBlock *operator->() const { return operator*(); }
3660  };
3661 
3662  /// The const version of `succ_op_iterator`.
 // NOTE(review): the `struct const_succ_op_iterator` declaration line is
 // elided in this rendering; only the base-class list survives below.
3664  : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3665  std::random_access_iterator_tag,
3666  const BasicBlock *, ptrdiff_t, const BasicBlock *,
3667  const BasicBlock *> {
3669  : iterator_adaptor_base(I) {}
3670 
3671  const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3672  const BasicBlock *operator->() const { return operator*(); }
3673  };
3674 
 /// Construct an indirectbr, optionally inserting before \p InsertBefore.
3675  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3676  Instruction *InsertBefore = nullptr) {
3677  return new IndirectBrInst(Address, NumDests, InsertBefore);
3678  }
3679 
 /// Construct an indirectbr appended to \p InsertAtEnd.
3680  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3681  BasicBlock *InsertAtEnd) {
3682  return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3683  }
3684 
3685  /// Provide fast operand accessors.
 // NOTE(review): the DECLARE_TRANSPARENT_OPERAND_ACCESSORS macro invocation
 // is elided in this rendering.
3687 
3688  // Accessor Methods for IndirectBrInst instruction.
 // Operand 0 is the computed address being branched on.
3689  Value *getAddress() { return getOperand(0); }
3690  const Value *getAddress() const { return getOperand(0); }
3691  void setAddress(Value *V) { setOperand(0, V); }
3692 
3693  /// return the number of possible destinations in this
3694  /// indirectbr instruction.
3695  unsigned getNumDestinations() const { return getNumOperands()-1; }
3696 
3697  /// Return the specified destination.
3698  BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3699  const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3700 
3701  /// Add a destination.
3702  ///
3703  void addDestination(BasicBlock *Dest);
3704 
3705  /// This method removes the specified successor from the
3706  /// indirectbr instruction.
3707  void removeDestination(unsigned i);
3708 
 // Successors occupy operands 1..N; operand 0 is the address.
3709  unsigned getNumSuccessors() const { return getNumOperands()-1; }
3710  BasicBlock *getSuccessor(unsigned i) const {
3711  return cast<BasicBlock>(getOperand(i+1));
3712  }
3713  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3714  setOperand(i + 1, NewSucc);
3715  }
3716 
 // Range over all successor blocks (skips the address operand). The
 // signature lines of these two range methods are elided in this rendering.
3718  return make_range(succ_op_iterator(std::next(value_op_begin())),
3719  succ_op_iterator(value_op_end()));
3720  }
3721 
3723  return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3724  const_succ_op_iterator(value_op_end()));
3725  }
3726 
3727  // Methods for support type inquiry through isa, cast, and dyn_cast:
3728  static bool classof(const Instruction *I) {
3729  return I->getOpcode() == Instruction::IndirectBr;
3730  }
3731  static bool classof(const Value *V) {
3732  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3733  }
3734 };
3735 
3736 template <>
3738 };
3739 
3741 
3742 //===----------------------------------------------------------------------===//
3743 // InvokeInst Class
3744 //===----------------------------------------------------------------------===//
3745 
3746 /// Invoke instruction. The SubclassData field is used to hold the
3747 /// calling convention of the call.
3748 ///
3749 class InvokeInst : public CallBase {
3750  /// The number of operands for this call beyond the called function,
3751  /// arguments, and operand bundles.
 // The two extras are the normal and unwind destination blocks (see the
 // *OpEndIdx constants below).
3752  static constexpr int NumExtraOperands = 2;
3753 
3754  /// The index from the end of the operand array to the normal destination.
3755  static constexpr int NormalDestOpEndIdx = -3;
3756 
3757  /// The index from the end of the operand array to the unwind destination.
3758  static constexpr int UnwindDestOpEndIdx = -2;
3759 
3760  InvokeInst(const InvokeInst &BI);
3761 
3762  /// Construct an InvokeInst given a range of arguments.
3763  ///
3764  /// Construct an InvokeInst from a range of arguments
3765  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3766  BasicBlock *IfException, ArrayRef<Value *> Args,
3767  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3768  const Twine &NameStr, Instruction *InsertBefore);
3769 
3770  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3771  BasicBlock *IfException, ArrayRef<Value *> Args,
3772  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3773  const Twine &NameStr, BasicBlock *InsertAtEnd);
3774 
3775  void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3776  BasicBlock *IfException, ArrayRef<Value *> Args,
3777  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3778 
3779  /// Compute the number of operands to allocate.
3780  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3781  // We need one operand for the called function, plus our extra operands and
3782  // the input operand counts provided.
3783  return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3784  }
3785 
3786 protected:
3787  // Note: Instruction needs to be a friend here to call cloneImpl.
3788  friend class Instruction;
3789 
3790  InvokeInst *cloneImpl() const;
3791 
3792 public:
 /// Create an invoke with no operand bundles, inserted before \p InsertBefore.
3793  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3794  BasicBlock *IfException, ArrayRef<Value *> Args,
3795  const Twine &NameStr,
3796  Instruction *InsertBefore = nullptr) {
3797  int NumOperands = ComputeNumOperands(Args.size());
3798  return new (NumOperands)
3799  InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3800  NameStr, InsertBefore);
3801  }
3802 
 /// Create an invoke carrying operand bundles; extra descriptor space is
 /// allocated alongside the operands for the bundle bookkeeping.
3803  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3804  BasicBlock *IfException, ArrayRef<Value *> Args,
3805  ArrayRef<OperandBundleDef> Bundles = None,
3806  const Twine &NameStr = "",
3807  Instruction *InsertBefore = nullptr) {
3808  int NumOperands =
3809  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3810  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3811 
3812  return new (NumOperands, DescriptorBytes)
3813  InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3814  NameStr, InsertBefore);
3815  }
3816 
 /// As above, but appended to \p InsertAtEnd.
3817  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3818  BasicBlock *IfException, ArrayRef<Value *> Args,
3819  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3820  int NumOperands = ComputeNumOperands(Args.size());
3821  return new (NumOperands)
3822  InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3823  NameStr, InsertAtEnd);
3824  }
3825 
 // NOTE(review): the Bundles parameter line of this overload is elided in
 // this rendering.
3826  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3827  BasicBlock *IfException, ArrayRef<Value *> Args,
3829  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3830  int NumOperands =
3831  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3832  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3833 
3834  return new (NumOperands, DescriptorBytes)
3835  InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3836  NameStr, InsertAtEnd);
3837  }
3838 
 /// Convenience overloads taking a FunctionCallee; they unpack the callee's
 /// function type and delegate to the primary Create overloads above.
3839  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3840  BasicBlock *IfException, ArrayRef<Value *> Args,
3841  const Twine &NameStr,
3842  Instruction *InsertBefore = nullptr) {
3843  return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3844  IfException, Args, None, NameStr, InsertBefore);
3845  }
3846 
3847  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3848  BasicBlock *IfException, ArrayRef<Value *> Args,
3849  ArrayRef<OperandBundleDef> Bundles = None,
3850  const Twine &NameStr = "",
3851  Instruction *InsertBefore = nullptr) {
3852  return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3853  IfException, Args, Bundles, NameStr, InsertBefore);
3854  }
3855 
3856  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3857  BasicBlock *IfException, ArrayRef<Value *> Args,
3858  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3859  return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3860  IfException, Args, NameStr, InsertAtEnd);
3861  }
3862 
 // NOTE(review): the Bundles parameter line of this overload is elided in
 // this rendering.
3863  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3864  BasicBlock *IfException, ArrayRef<Value *> Args,
3866  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3867  return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3868  IfException, Args, Bundles, NameStr, InsertAtEnd);
3869  }
3870 
3871  /// Create a clone of \p II with a different set of operand bundles and
3872  /// insert it before \p InsertPt.
3873  ///
3874  /// The returned invoke instruction is identical to \p II in every way except
3875  /// that the operand bundles for the new instruction are set to the operand
3876  /// bundles in \p Bundles.
3877  static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3878  Instruction *InsertPt = nullptr);
3879 
3880  // get*Dest - Return the destination basic blocks...
 // (The accessor signature lines are elided in this rendering; bodies read
 // the destination operands at the fixed end-relative indices above.)
3882  return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3883  }
3885  return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3886  }
 // The block pointer is stored directly in the Value operand slot; the
 // reinterpret_cast to Value* follows this file's existing pattern.
3888  Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3889  }
3891  Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3892  }
3893 
3894  /// Get the landingpad instruction from the landing pad
3895  /// block (the unwind destination).
3896  LandingPadInst *getLandingPadInst() const;
3897 
 // Successor 0 is the normal destination, successor 1 the unwind destination.
3898  BasicBlock *getSuccessor(unsigned i) const {
3899  assert(i < 2 && "Successor # out of range for invoke!");
3900  return i == 0 ? getNormalDest() : getUnwindDest();
3901  }
3902 
3903  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3904  assert(i < 2 && "Successor # out of range for invoke!");
3905  if (i == 0)
3906  setNormalDest(NewSucc);
3907  else
3908  setUnwindDest(NewSucc);
3909  }
3910 
3911  unsigned getNumSuccessors() const { return 2; }
3912 
3913  // Methods for support type inquiry through isa, cast, and dyn_cast:
3914  static bool classof(const Instruction *I) {
3915  return (I->getOpcode() == Instruction::Invoke);
3916  }
3917  static bool classof(const Value *V) {
3918  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3919  }
3920 
3921 private:
3922  // Shadow Instruction::setInstructionSubclassData with a private forwarding
3923  // method so that subclasses cannot accidentally use it.
3924  template <typename Bitfield>
3925  void setSubclassData(typename Bitfield::Type Value) {
3926  Instruction::setSubclassData<Bitfield>(Value);
3927  }
3928 };
3929 
 // Inline definition of the insert-before constructor. The operand array is
 // co-allocated with the object: the first use is NumOperands slots before
 // op_end(this). All operand/bundle wiring is delegated to init().
3930 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3931  BasicBlock *IfException, ArrayRef<Value *> Args,
3932  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3933  const Twine &NameStr, Instruction *InsertBefore)
3934  : CallBase(Ty->getReturnType(), Instruction::Invoke,
3935  OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3936  InsertBefore) {
3937  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3938 }
3939 
 // Inline definition of the append-to-block constructor; identical to the
 // insert-before variant above except for the insertion point.
3940 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3941  BasicBlock *IfException, ArrayRef<Value *> Args,
3942  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3943  const Twine &NameStr, BasicBlock *InsertAtEnd)
3944  : CallBase(Ty->getReturnType(), Instruction::Invoke,
3945  OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3946  InsertAtEnd) {
3947  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3948 }
3949 
3950 //===----------------------------------------------------------------------===//
3951 // CallBrInst Class
3952 //===----------------------------------------------------------------------===//
3953 
3954 /// CallBr instruction, tracking function calls that may not return control but
3955 /// instead transfer it to a third location. The SubclassData field is used to
3956 /// hold the calling convention of the call.
3957 ///
3958 class CallBrInst : public CallBase {
3959 
 // Count of indirect destination blocks; the default destination is extra.
3960  unsigned NumIndirectDests;
3961 
3962  CallBrInst(const CallBrInst &BI);
3963 
3964  /// Construct a CallBrInst given a range of arguments.
3965  ///
3966  /// Construct a CallBrInst from a range of arguments
 // NOTE(review): the ArrayRef<Value *> Args parameter lines of these
 // constructors/init are elided in this rendering.
3967  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3968  ArrayRef<BasicBlock *> IndirectDests,
3970  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3971  const Twine &NameStr, Instruction *InsertBefore);
3972 
3973  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3974  ArrayRef<BasicBlock *> IndirectDests,
3976  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3977  const Twine &NameStr, BasicBlock *InsertAtEnd);
3978 
3979  void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3981  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3982 
3983  /// Should the Indirect Destinations change, scan + update the Arg list.
3984  void updateArgBlockAddresses(unsigned i, BasicBlock *B);
3985 
3986  /// Compute the number of operands to allocate.
3987  static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
3988  int NumBundleInputs = 0) {
3989  // We need one operand for the called function, plus our extra operands and
3990  // the input operand counts provided.
 // The "2" covers the callee and the default destination.
3991  return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
3992  }
3993 
3994 protected:
3995  // Note: Instruction needs to be a friend here to call cloneImpl.
3996  friend class Instruction;
3997 
3998  CallBrInst *cloneImpl() const;
3999 
4000 public:
 /// Create a callbr with no operand bundles, inserted before \p InsertBefore.
4001  static CallBrInst *Create(FunctionType *Ty, Value *Func,
4002  BasicBlock *DefaultDest,
4003  ArrayRef<BasicBlock *> IndirectDests,
4004  ArrayRef<Value *> Args, const Twine &NameStr,
4005  Instruction *InsertBefore = nullptr) {
4006  int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4007  return new (NumOperands)
4008  CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4009  NumOperands, NameStr, InsertBefore);
4010  }
4011 
 /// Create a callbr carrying operand bundles; extra descriptor space is
 /// allocated for the bundle bookkeeping. (The Args parameter line is elided
 /// in this rendering.)
4012  static CallBrInst *Create(FunctionType *Ty, Value *Func,
4013  BasicBlock *DefaultDest,
4014  ArrayRef<BasicBlock *> IndirectDests,
4016  ArrayRef<OperandBundleDef> Bundles = None,
4017  const Twine &NameStr = "",
4018  Instruction *InsertBefore = nullptr) {
4019  int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4020  CountBundleInputs(Bundles));
4021  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4022 
4023  return new (NumOperands, DescriptorBytes)
4024  CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4025  NumOperands, NameStr, InsertBefore);
4026  }
4027 
 /// As above, but appended to \p InsertAtEnd.
4028  static CallBrInst *Create(FunctionType *Ty, Value *Func,
4029  BasicBlock *DefaultDest,
4030  ArrayRef<BasicBlock *> IndirectDests,
4031  ArrayRef<Value *> Args, const Twine &NameStr,
4032  BasicBlock *InsertAtEnd) {
4033  int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4034  return new (NumOperands)
4035  CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4036  NumOperands, NameStr, InsertAtEnd);
4037  }
4038 
 // NOTE(review): the Args/Bundles parameter lines of this overload are
 // elided in this rendering.
4039  static CallBrInst *Create(FunctionType *Ty, Value *Func,
4040  BasicBlock *DefaultDest,
4041  ArrayRef<BasicBlock *> IndirectDests,
4044  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4045  int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4046  CountBundleInputs(Bundles));
4047  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4048 
4049  return new (NumOperands, DescriptorBytes)
4050  CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4051  NumOperands, NameStr, InsertAtEnd);
4052  }
4053 
 /// Convenience overloads taking a FunctionCallee; they unpack the callee's
 /// function type and delegate to the primary Create overloads above.
4054  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4055  ArrayRef<BasicBlock *> IndirectDests,
4056  ArrayRef<Value *> Args, const Twine &NameStr,
4057  Instruction *InsertBefore = nullptr) {
4058  return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4059  IndirectDests, Args, NameStr, InsertBefore);
4060  }
4061 
4062  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4063  ArrayRef<BasicBlock *> IndirectDests,
4065  ArrayRef<OperandBundleDef> Bundles = None,
4066  const Twine &NameStr = "",
4067  Instruction *InsertBefore = nullptr) {
4068  return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4069  IndirectDests, Args, Bundles, NameStr, InsertBefore);
4070  }
4071 
4072  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4073  ArrayRef<BasicBlock *> IndirectDests,
4074  ArrayRef<Value *> Args, const Twine &NameStr,
4075  BasicBlock *InsertAtEnd) {
4076  return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4077  IndirectDests, Args, NameStr, InsertAtEnd);
4078  }
4079 
 // NOTE(review): the leading "static CallBrInst *Create(FunctionCallee Func,"
 // and Args/Bundles parameter lines of this overload are elided in this
 // rendering.
4081  BasicBlock *DefaultDest,
4082  ArrayRef<BasicBlock *> IndirectDests,
4085  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4086  return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4087  IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4088  }
4089 
4090  /// Create a clone of \p CBI with a different set of operand bundles and
4091  /// insert it before \p InsertPt.
4092  ///
4093  /// The returned callbr instruction is identical to \p CBI in every way
4094  /// except that the operand bundles for the new instruction are set to the
4095  /// operand bundles in \p Bundles.
4096  static CallBrInst *Create(CallBrInst *CBI,
4098  Instruction *InsertPt = nullptr);
4099 
4100  /// Return the number of callbr indirect dest labels.
4101  ///
4102  unsigned getNumIndirectDests() const { return NumIndirectDests; }
4103 
4104  /// getIndirectDestLabel - Return the i-th indirect dest label.
4105  ///
4106  Value *getIndirectDestLabel(unsigned i) const {
4107  assert(i < getNumIndirectDests() && "Out of bounds!");
4109  1);
4110  }
4111 
4112  Value *getIndirectDestLabelUse(unsigned i) const {
4113  assert(i < getNumIndirectDests() && "Out of bounds!");
4115  1);
4116  }
4117 
4118  // Return the destination basic blocks...
 // Destinations live at the tail of the operand array: the default
 // destination sits just before the block of indirect destinations, which
 // immediately precede the callee operand at Op<-1>().
4120  return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4121  }
4122  BasicBlock *getIndirectDest(unsigned i) const {
4123  return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4124  }
 // Collect all indirect destinations into a vector (signature line elided).
4126  SmallVector<BasicBlock *, 16> IndirectDests;
4127  for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4128  IndirectDests.push_back(getIndirectDest(i));
4129  return IndirectDests;
4130  }
 // Setter for the default destination (signature line elided).
4132  *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4133  }
 // Also rewrites any blockaddress arguments that refer to the old block.
4134  void setIndirectDest(unsigned i, BasicBlock *B) {
4135  updateArgBlockAddresses(i, B);
4136  *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4137  }
4138 
4139  BasicBlock *getSuccessor(unsigned i) const {
4140  assert(i < getNumSuccessors() + 1 &&
4141  "Successor # out of range for callbr!");
4142  return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4143  }
4144 
 // Successor 0 is the default destination; successors 1..N are the indirect
 // destinations 0..N-1.
4145  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4146  assert(i < getNumIndirectDests() + 1 &&
4147  "Successor # out of range for callbr!");
4148  return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4149  }
4150 
4151  unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4152 
4153  // Methods for support type inquiry through isa, cast, and dyn_cast:
4154  static bool classof(const Instruction *I) {
4155  return (I->getOpcode() == Instruction::CallBr);
4156  }
4157  static bool classof(const Value *V) {
4158  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4159  }
4160 
4161 private:
4162  // Shadow Instruction::setInstructionSubclassData with a private forwarding
4163  // method so that subclasses cannot accidentally use it.
4164  template <typename Bitfield>
4165  void setSubclassData(typename Bitfield::Type Value) {
4166  Instruction::setSubclassData<Bitfield>(Value);
4167  }
4168 };
4169 
 // Inline definition of the insert-before constructor. Operands are
 // co-allocated with the object (first use is NumOperands slots before
 // op_end(this)); all wiring is delegated to init().
4170 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4171  ArrayRef<BasicBlock *> IndirectDests,
4172  ArrayRef<Value *> Args,
4173  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4174  const Twine &NameStr, Instruction *InsertBefore)
4175  : CallBase(Ty->getReturnType(), Instruction::CallBr,
4176  OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4177  InsertBefore) {
4178  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4179 }
4180 
 // Inline definition of the append-to-block constructor; identical to the
 // insert-before variant above except for the insertion point.
4181 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4182  ArrayRef<BasicBlock *> IndirectDests,
4183  ArrayRef<Value *> Args,
4184  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4185  const Twine &NameStr, BasicBlock *InsertAtEnd)
4186  : CallBase(Ty->getReturnType(), Instruction::CallBr,
4187  OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4188  InsertAtEnd) {
4189  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4190 }
4191 
4192 //===----------------------------------------------------------------------===//
4193 // ResumeInst Class
4194 //===----------------------------------------------------------------------===//
4195 
4196 //===---------------------------------------------------------------------------
4197 /// Resume the propagation of an exception.
4198 ///
4199 class ResumeInst : public Instruction {
4200  ResumeInst(const ResumeInst &RI);
4201 
 // Single-operand instruction: the exception value being propagated.
4202  explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4203  ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4204 
4205 protected:
4206  // Note: Instruction needs to be a friend here to call cloneImpl.
4207  friend class Instruction;
4208 
4209  ResumeInst *cloneImpl() const;
4210 
4211 public:
 /// Create a resume of \p Exn, optionally inserted before \p InsertBefore.
 /// new(1) allocates space for exactly one operand.
4212  static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4213  return new(1) ResumeInst(Exn, InsertBefore);
4214  }
4215 
 /// Create a resume of \p Exn appended to \p InsertAtEnd.
4216  static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4217  return new(1) ResumeInst(Exn, InsertAtEnd);
4218  }
4219 
4220  /// Provide fast operand accessors
 // NOTE(review): the DECLARE_TRANSPARENT_OPERAND_ACCESSORS macro invocation
 // is elided in this rendering.
4222 
4223  /// Convenience accessor.
 // Returns the exception value operand.
4224  Value *getValue() const { return Op<0>(); }
4225 
 // resume is a terminator that unwinds; it has no CFG successors.
4226  unsigned getNumSuccessors() const { return 0; }
4227 
4228  // Methods for support type inquiry through isa, cast, and dyn_cast:
4229  static bool classof(const Instruction *I) {
4230  return I->getOpcode() == Instruction::Resume;
4231  }
4232  static bool classof(const Value *V) {
4233  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4234  }
4235 
4236 private:
 // Private so generic successor iteration cannot be used on ResumeInst;
 // calling either is a hard error.
4237  BasicBlock *getSuccessor(unsigned idx) const {
4238  llvm_unreachable("ResumeInst has no successors!");
4239  }
4240 
4241  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4242  llvm_unreachable("ResumeInst has no successors!");
4243  }
4244 };
4245 
4246 template <>
4248  public FixedNumOperandTraits<ResumeInst, 1> {
4249 };
4250 
4252 
4253 //===----------------------------------------------------------------------===//
4254 // CatchSwitchInst Class
4255 //===----------------------------------------------------------------------===//
 // NOTE(review): the `class CatchSwitchInst : public Instruction {` line is
 // elided in this rendering.
 // Bit 0 of the subclass data records whether an unwind destination exists.
4257  using UnwindDestField = BoolBitfieldElementT<0>;
4258 
4259  /// The number of operands actually allocated. NumOperands is
4260  /// the number actually in use.
4261  unsigned ReservedSpace;
4262 
4263  // Operand[0] = Outer scope
4264  // Operand[1] = Unwind block destination
4265  // Operand[n] = BasicBlock to go to on match
4266  CatchSwitchInst(const CatchSwitchInst &CSI);
4267 
4268  /// Create a new switch instruction, specifying a
4269  /// default destination. The number of additional handlers can be specified
4270  /// here to make memory allocation more efficient.
4271  /// This constructor can also autoinsert before another instruction.
4272  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4273  unsigned NumHandlers, const Twine &NameStr,
4274  Instruction *InsertBefore);
4275 
4276  /// Create a new switch instruction, specifying a
4277  /// default destination. The number of additional handlers can be specified
4278  /// here to make memory allocation more efficient.
4279  /// This constructor also autoinserts at the end of the specified BasicBlock.
4280  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4281  unsigned NumHandlers, const Twine &NameStr,
4282  BasicBlock *InsertAtEnd);
4283 
4284  // allocate space for exactly zero operands
4285  void *operator new(size_t S) { return User::operator new(S); }
4286 
4287  void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4288  void growOperands(unsigned Size);
4289 
4290 protected:
4291  // Note: Instruction needs to be a friend here to call cloneImpl.
4292  friend class Instruction;
4293 
4294  CatchSwitchInst *cloneImpl() const;
4295 
4296 public:
4297  void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4298 
 /// Construct a catchswitch, optionally inserted before \p InsertBefore.
 /// \p UnwindDest may be null, meaning the catchswitch unwinds to the caller.
4299  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4300  unsigned NumHandlers,
4301  const Twine &NameStr = "",
4302  Instruction *InsertBefore = nullptr) {
4303  return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4304  InsertBefore);
4305  }
4306 
 /// Construct a catchswitch appended to \p InsertAtEnd.
4307  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4308  unsigned NumHandlers, const Twine &NameStr,
4309  BasicBlock *InsertAtEnd) {
4310  return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4311  InsertAtEnd);
4312  }
4313 
4314  /// Provide fast operand accessors
 // NOTE(review): the DECLARE_TRANSPARENT_OPERAND_ACCESSORS macro invocation
 // is elided in this rendering.
4316 
4317  // Accessor Methods for CatchSwitch stmt
 // Operand 0 is the enclosing funclet pad (or the function entry token).
4318  Value *getParentPad() const { return getOperand(0); }
4319  void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4320 
4321  // Accessor Methods for CatchSwitch stmt
4322  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4323  bool unwindsToCaller() const { return !hasUnwindDest(); }
 // Operand 1 is the unwind destination — but only when one exists; the
 // accessor signature line is elided in this rendering.
4325  if (hasUnwindDest())
4326  return cast<BasicBlock>(getOperand(1));
4327  return nullptr;
4328  }
 // May only replace an existing unwind destination; it cannot add one.
4329  void setUnwindDest(BasicBlock *UnwindDest) {
4330  assert(UnwindDest);
4331  assert(hasUnwindDest());
4332  setOperand(1, UnwindDest);
4333  }
4334 
4335  /// return the number of 'handlers' in this catchswitch
4336  /// instruction, except the default handler
 // Handlers follow the parent pad (and the unwind dest, when present).
4337  unsigned getNumHandlers() const {
4338  if (hasUnwindDest())
4339  return getNumOperands() - 2;
4340  return getNumOperands() - 1;
4341  }
4342 
4343 private:
 // Deref helpers used by the mapped handler iterators below.
4344  static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4345  static const BasicBlock *handler_helper(const Value *V) {
4346  return cast<BasicBlock>(V);
4347  }
4348 
4349 public:
4350  using DerefFnTy = BasicBlock *(*)(Value *);
 // NOTE(review): the handler_iterator alias line is elided in this rendering.
4353  using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4354  using const_handler_iterator =
4357 
4358  /// Returns an iterator that points to the first handler in CatchSwitchInst.
 // Handlers start after operand 0 (parent pad), skipping the unwind dest if
 // present. The method signature line is elided in this rendering.
4360  op_iterator It = op_begin() + 1;
4361  if (hasUnwindDest())
4362  ++It;
4363  return handler_iterator(It, DerefFnTy(handler_helper));
4364  }
4365 
4366  /// Returns an iterator that points to the first handler in the
4367  /// CatchSwitchInst.
4369  const_op_iterator It = op_begin() + 1;
4370  if (hasUnwindDest())
4371  ++It;
4372  return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4373  }
4374 
4375  /// Returns a read-only iterator that points one past the last
4376  /// handler in the CatchSwitchInst.
4378  return handler_iterator(op_end(), DerefFnTy(handler_helper));
4379  }
4380 
4381  /// Returns an iterator that points one past the last handler in the
4382  /// CatchSwitchInst.
4384  return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4385  }
4386 
4387  /// iteration adapter for range-for loops.
4389  return make_range(handler_begin(), handler_end());
4390  }
4391 
4392  /// iteration adapter for range-for loops.
4394  return make_range(handler_begin(), handler_end());
4395  }
4396 
4397  /// Add an entry to the switch instruction...
4398  /// Note:
4399  /// This action invalidates handler_end(). Old handler_end() iterator will
4400  /// point to the added handler.
4401  void addHandler(BasicBlock *Dest);
4402 
4403  void removeHandler(handler_iterator HI);
4404 
 // Successors are every operand except the parent pad: the unwind dest (if
 // any) plus all handlers.
4405  unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4406  BasicBlock *getSuccessor(unsigned Idx) const {
4407  assert(Idx < getNumSuccessors() &&
4408  "Successor # out of range for catchswitch!");
4409  return cast<BasicBlock>(getOperand(Idx + 1));
4410  }
4411  void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4412  assert(Idx < getNumSuccessors() &&
4413  "Successor # out of range for catchswitch!");
4414  setOperand(Idx + 1, NewSucc);
4415  }
4416 
4417  // Methods for support type inquiry through isa, cast, and dyn_cast:
4418  static bool classof(const Instruction *I) {
4419  return I->getOpcode() == Instruction::CatchSwitch;
4420  }
4421  static bool classof(const Value *V) {
4422  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4423  }
4424 };
4425 
4426 template <>
4428 
4430 
4431 //===----------------------------------------------------------------------===//
4432 // CleanupPadInst Class
4433 //===----------------------------------------------------------------------===//
 // NOTE(review): the `class CleanupPadInst : public FuncletPadInst {` line is
 // elided in this rendering.
4435 private:
 // Thin constructors that tag the FuncletPadInst base with the CleanupPad
 // opcode; Values = 1 (parent pad) + number of extra args.
4436  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4437  unsigned Values, const Twine &NameStr,
4438  Instruction *InsertBefore)
4439  : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4440  NameStr, InsertBefore) {}
4441  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4442  unsigned Values, const Twine &NameStr,
4443  BasicBlock *InsertAtEnd)
4444  : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4445  NameStr, InsertAtEnd) {}
4446 
4447 public:
 // NOTE(review): the leading `static CleanupPadInst *Create(Value *ParentPad,
 // ArrayRef<Value *> Args = None,` lines of these factories are elided in
 // this rendering.
4449  const Twine &NameStr = "",
4450  Instruction *InsertBefore = nullptr) {
 // One operand slot for the parent pad plus one per argument.
4451  unsigned Values = 1 + Args.size();
4452  return new (Values)
4453  CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4454  }
4455 
4457  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4458  unsigned Values = 1 + Args.size();
4459  return new (Values)
4460  CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4461  }
4462 
4463  /// Methods for support type inquiry through isa, cast, and dyn_cast:
4464  static bool classof(const Instruction *I) {
4465  return I->getOpcode() == Instruction::CleanupPad;
4466  }
4467  static bool classof(const Value *V) {
4468  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4469  }
4470 };
4471 
4472 //===----------------------------------------------------------------------===//
4473 // CatchPadInst Class
4474 //===----------------------------------------------------------------------===//
4476 private:
4477  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4478  unsigned Values, const Twine &NameStr,
4479  Instruction *InsertBefore)
4480  : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4481  NameStr, InsertBefore) {}
4482  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4483  unsigned Values, const Twine &NameStr,
4484  BasicBlock *InsertAtEnd)
4485  : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4486  NameStr, InsertAtEnd) {}
4487 
4488 public:
4490  const Twine &NameStr = "",
4491  Instruction *InsertBefore = nullptr) {
4492  unsigned Values = 1 + Args.size();
4493  return new (Values)
4494  CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4495  }
4496 
4498  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4499  unsigned Values = 1 + Args.size();
4500  return new (Values)
4501  CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4502  }
4503 
4504  /// Convenience accessors
4506  return cast<CatchSwitchInst>(Op<-1>());
4507  }
4508  void setCatchSwitch(Value *CatchSwitch) {
4509  assert(CatchSwitch);
4510  Op<-1>() = CatchSwitch;
4511  }
4512 
4513  /// Methods for support type inquiry through isa, cast, and dyn_cast:
4514  static bool classof(const Instruction *I) {
4515  return I->getOpcode() == Instruction::CatchPad;
4516  }
4517  static bool classof(const Value *V) {
4518  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4519  }
4520 };
4521 
4522 //===----------------------------------------------------------------------===//
4523 // CatchReturnInst Class
4524 //===----------------------------------------------------------------------===//
4525 
4527  CatchReturnInst(const CatchReturnInst &RI);
4528  CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4529  CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4530 
4531  void init(Value *CatchPad, BasicBlock *BB);
4532 
4533 protected:
4534  // Note: Instruction needs to be a friend here to call cloneImpl.
4535  friend class Instruction;
4536 
4537  CatchReturnInst *cloneImpl() const;
4538 
4539 public:
4541  Instruction *InsertBefore = nullptr) {
4542  assert(CatchPad);
4543  assert(BB);
4544  return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4545  }
4546 
4548  BasicBlock *InsertAtEnd) {
4549  assert(CatchPad);
4550  assert(BB);
4551  return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4552  }
4553 
4554  /// Provide fast operand accessors
4556 
4557  /// Convenience accessors.
4558  CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4559  void setCatchPad(CatchPadInst *CatchPad) {
4560  assert(CatchPad);
4561  Op<0>() = CatchPad;
4562  }
4563 
4564  BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4565  void setSuccessor(BasicBlock *NewSucc) {
4566  assert(NewSucc);
4567  Op<1>() = NewSucc;
4568  }
4569  unsigned getNumSuccessors() const { return 1; }
4570 
4571  /// Get the parentPad of this catchret's catchpad's catchswitch.
4572  /// The successor block is implicitly a member of this funclet.
4574  return getCatchPad()->getCatchSwitch()->getParentPad();
4575  }
4576 
4577  // Methods for support type inquiry through isa, cast, and dyn_cast:
4578  static bool classof(const Instruction *I) {
4579  return (I->getOpcode() == Instruction::CatchRet);
4580  }
4581  static bool classof(const Value *V) {
4582  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4583  }
4584 
4585 private:
4586  BasicBlock *getSuccessor(unsigned Idx) const {
4587  assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4588  return getSuccessor();
4589  }
4590 
4591  void setSuccessor(unsigned Idx, BasicBlock *B) {
4592  assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4593  setSuccessor(B);
4594  }
4595 };
4596 
4597 template <>
4599  : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4600 
4602 
4603 //===----------------------------------------------------------------------===//
4604 // CleanupReturnInst Class
4605 //===----------------------------------------------------------------------===//
4606 
4608  using UnwindDestField = BoolBitfieldElementT<0>;
4609 
4610 private:
4612  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4613  Instruction *InsertBefore = nullptr);
4614  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4615  BasicBlock *InsertAtEnd);
4616 
4617  void init(Value *CleanupPad, BasicBlock *UnwindBB);
4618 
4619 protected:
4620  // Note: Instruction needs to be a friend here to call cloneImpl.
4621  friend class Instruction;
4622 
4623  CleanupReturnInst *cloneImpl() const;
4624 
4625 public:
4626  static CleanupReturnInst *Create(Value *CleanupPad,
4627  BasicBlock *UnwindBB = nullptr,
4628  Instruction *InsertBefore = nullptr) {
4629  assert(CleanupPad);
4630  unsigned Values = 1;
4631  if (UnwindBB)
4632  ++Values;
4633  return new (Values)
4634  CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4635  }
4636 
4637  static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4638  BasicBlock *InsertAtEnd) {
4639  assert(CleanupPad);
4640  unsigned Values = 1;
4641  if (UnwindBB)
4642  ++Values;
4643  return new (Values)
4644  CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4645  }
4646 
4647  /// Provide fast operand accessors
4648</