// LLVM 14.0.0git — Instructions.h
// (Doxygen "documentation of this file" page header; rendered docs at
//  https://llvm.org/doxygen/)
//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file exposes the class definitions of all of the subclasses of the
// Instruction class. This is meant to be an easy way to get access to all
// instruction subclasses.
//
//===----------------------------------------------------------------------===//
14 
#ifndef LLVM_IR_INSTRUCTIONS_H
#define LLVM_IR_INSTRUCTIONS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Bitfields.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
49 
50 namespace llvm {
51 
52 class APInt;
53 class ConstantInt;
54 class DataLayout;
55 class LLVMContext;
56 
57 //===----------------------------------------------------------------------===//
58 // AllocaInst Class
59 //===----------------------------------------------------------------------===//
60 
61 /// an instruction to allocate memory on the stack
62 class AllocaInst : public UnaryInstruction {
63  Type *AllocatedType;
64 
65  using AlignmentField = AlignmentBitfieldElementT<0>;
66  using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
68  static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
69  SwiftErrorField>(),
70  "Bitfields must be contiguous");
71 
72 protected:
73  // Note: Instruction needs to be a friend here to call cloneImpl.
74  friend class Instruction;
75 
76  AllocaInst *cloneImpl() const;
77 
78 public:
79  explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
80  const Twine &Name, Instruction *InsertBefore);
81  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
82  const Twine &Name, BasicBlock *InsertAtEnd);
83 
84  AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
85  Instruction *InsertBefore);
86  AllocaInst(Type *Ty, unsigned AddrSpace,
87  const Twine &Name, BasicBlock *InsertAtEnd);
88 
89  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
90  const Twine &Name = "", Instruction *InsertBefore = nullptr);
91  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
92  const Twine &Name, BasicBlock *InsertAtEnd);
93 
94  /// Return true if there is an allocation size parameter to the allocation
95  /// instruction that is not 1.
96  bool isArrayAllocation() const;
97 
98  /// Get the number of elements allocated. For a simple allocation of a single
99  /// element, this will return a constant 1 value.
100  const Value *getArraySize() const { return getOperand(0); }
101  Value *getArraySize() { return getOperand(0); }
102 
103  /// Overload to return most specific pointer type.
104  PointerType *getType() const {
105  return cast<PointerType>(Instruction::getType());
106  }
107 
108  /// Return the address space for the allocation.
109  unsigned getAddressSpace() const {
110  return getType()->getAddressSpace();
111  }
112 
113  /// Get allocation size in bits. Returns None if size can't be determined,
114  /// e.g. in case of a VLA.
116 
117  /// Return the type that is being allocated by the instruction.
118  Type *getAllocatedType() const { return AllocatedType; }
119  /// for use only in special circumstances that need to generically
120  /// transform a whole instruction (eg: IR linking and vectorization).
121  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
122 
123  /// Return the alignment of the memory that is being allocated by the
124  /// instruction.
125  Align getAlign() const {
126  return Align(1ULL << getSubclassData<AlignmentField>());
127  }
128 
130  setSubclassData<AlignmentField>(Log2(Align));
131  }
132 
133  // FIXME: Remove this one transition to Align is over.
134  uint64_t getAlignment() const { return getAlign().value(); }
135 
136  /// Return true if this alloca is in the entry block of the function and is a
137  /// constant size. If so, the code generator will fold it into the
138  /// prolog/epilog code, so it is basically free.
139  bool isStaticAlloca() const;
140 
141  /// Return true if this alloca is used as an inalloca argument to a call. Such
142  /// allocas are never considered static even if they are in the entry block.
143  bool isUsedWithInAlloca() const {
144  return getSubclassData<UsedWithInAllocaField>();
145  }
146 
147  /// Specify whether this alloca is used to represent the arguments to a call.
148  void setUsedWithInAlloca(bool V) {
149  setSubclassData<UsedWithInAllocaField>(V);
150  }
151 
152  /// Return true if this alloca is used as a swifterror argument to a call.
153  bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
154  /// Specify whether this alloca is used to represent a swifterror.
155  void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
156 
157  // Methods for support type inquiry through isa, cast, and dyn_cast:
158  static bool classof(const Instruction *I) {
159  return (I->getOpcode() == Instruction::Alloca);
160  }
161  static bool classof(const Value *V) {
162  return isa<Instruction>(V) && classof(cast<Instruction>(V));
163  }
164 
165 private:
166  // Shadow Instruction::setInstructionSubclassData with a private forwarding
167  // method so that subclasses cannot accidentally use it.
168  template <typename Bitfield>
169  void setSubclassData(typename Bitfield::Type Value) {
170  Instruction::setSubclassData<Bitfield>(Value);
171  }
172 };
173 
174 //===----------------------------------------------------------------------===//
175 // LoadInst Class
176 //===----------------------------------------------------------------------===//
177 
178 /// An instruction for reading from memory. This uses the SubclassData field in
179 /// Value to store whether or not the load is volatile.
180 class LoadInst : public UnaryInstruction {
181  using VolatileField = BoolBitfieldElementT<0>;
184  static_assert(
185  Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
186  "Bitfields must be contiguous");
187 
188  void AssertOK();
189 
190 protected:
191  // Note: Instruction needs to be a friend here to call cloneImpl.
192  friend class Instruction;
193 
194  LoadInst *cloneImpl() const;
195 
196 public:
197  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
198  Instruction *InsertBefore);
199  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
200  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
201  Instruction *InsertBefore);
202  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
203  BasicBlock *InsertAtEnd);
204  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
205  Align Align, Instruction *InsertBefore = nullptr);
206  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
207  Align Align, BasicBlock *InsertAtEnd);
208  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
209  Align Align, AtomicOrdering Order,
211  Instruction *InsertBefore = nullptr);
212  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
214  BasicBlock *InsertAtEnd);
215 
216  /// Return true if this is a load from a volatile memory location.
217  bool isVolatile() const { return getSubclassData<VolatileField>(); }
218 
219  /// Specify whether this is a volatile load or not.
220  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
221 
222  /// Return the alignment of the access that is being performed.
223  /// FIXME: Remove this function once transition to Align is over.
224  /// Use getAlign() instead.
225  uint64_t getAlignment() const { return getAlign().value(); }
226 
227  /// Return the alignment of the access that is being performed.
228  Align getAlign() const {
229  return Align(1ULL << (getSubclassData<AlignmentField>()));
230  }
231 
233  setSubclassData<AlignmentField>(Log2(Align));
234  }
235 
236  /// Returns the ordering constraint of this load instruction.
238  return getSubclassData<OrderingField>();
239  }
240  /// Sets the ordering constraint of this load instruction. May not be Release
241  /// or AcquireRelease.
242  void setOrdering(AtomicOrdering Ordering) {
243  setSubclassData<OrderingField>(Ordering);
244  }
245 
246  /// Returns the synchronization scope ID of this load instruction.
248  return SSID;
249  }
250 
251  /// Sets the synchronization scope ID of this load instruction.
253  this->SSID = SSID;
254  }
255 
256  /// Sets the ordering constraint and the synchronization scope ID of this load
257  /// instruction.
258  void setAtomic(AtomicOrdering Ordering,
260  setOrdering(Ordering);
261  setSyncScopeID(SSID);
262  }
263 
264  bool isSimple() const { return !isAtomic() && !isVolatile(); }
265 
266  bool isUnordered() const {
267  return (getOrdering() == AtomicOrdering::NotAtomic ||
269  !isVolatile();
270  }
271 
273  const Value *getPointerOperand() const { return getOperand(0); }
274  static unsigned getPointerOperandIndex() { return 0U; }
276 
277  /// Returns the address space of the pointer operand.
278  unsigned getPointerAddressSpace() const {
280  }
281 
282  // Methods for support type inquiry through isa, cast, and dyn_cast:
283  static bool classof(const Instruction *I) {
284  return I->getOpcode() == Instruction::Load;
285  }
286  static bool classof(const Value *V) {
287  return isa<Instruction>(V) && classof(cast<Instruction>(V));
288  }
289 
290 private:
291  // Shadow Instruction::setInstructionSubclassData with a private forwarding
292  // method so that subclasses cannot accidentally use it.
293  template <typename Bitfield>
294  void setSubclassData(typename Bitfield::Type Value) {
295  Instruction::setSubclassData<Bitfield>(Value);
296  }
297 
298  /// The synchronization scope ID of this load instruction. Not quite enough
299  /// room in SubClassData for everything, so synchronization scope ID gets its
300  /// own field.
301  SyncScope::ID SSID;
302 };
303 
304 //===----------------------------------------------------------------------===//
305 // StoreInst Class
306 //===----------------------------------------------------------------------===//
307 
308 /// An instruction for storing to memory.
309 class StoreInst : public Instruction {
310  using VolatileField = BoolBitfieldElementT<0>;
313  static_assert(
314  Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
315  "Bitfields must be contiguous");
316 
317  void AssertOK();
318 
319 protected:
320  // Note: Instruction needs to be a friend here to call cloneImpl.
321  friend class Instruction;
322 
323  StoreInst *cloneImpl() const;
324 
325 public:
326  StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
327  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
328  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
329  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
330  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
331  Instruction *InsertBefore = nullptr);
332  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
333  BasicBlock *InsertAtEnd);
334  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
336  Instruction *InsertBefore = nullptr);
337  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
338  AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
339 
340  // allocate space for exactly two operands
341  void *operator new(size_t S) { return User::operator new(S, 2); }
342  void operator delete(void *Ptr) { User::operator delete(Ptr); }
343 
344  /// Return true if this is a store to a volatile memory location.
345  bool isVolatile() const { return getSubclassData<VolatileField>(); }
346 
347  /// Specify whether this is a volatile store or not.
348  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
349 
350  /// Transparently provide more efficient getOperand methods.
352 
353  /// Return the alignment of the access that is being performed
354  /// FIXME: Remove this function once transition to Align is over.
355  /// Use getAlign() instead.
356  uint64_t getAlignment() const { return getAlign().value(); }
357 
358  Align getAlign() const {
359  return Align(1ULL << (getSubclassData<AlignmentField>()));
360  }
361 
363  setSubclassData<AlignmentField>(Log2(Align));
364  }
365 
366  /// Returns the ordering constraint of this store instruction.
368  return getSubclassData<OrderingField>();
369  }
370 
371  /// Sets the ordering constraint of this store instruction. May not be
372  /// Acquire or AcquireRelease.
373  void setOrdering(AtomicOrdering Ordering) {
374  setSubclassData<OrderingField>(Ordering);
375  }
376 
377  /// Returns the synchronization scope ID of this store instruction.
379  return SSID;
380  }
381 
382  /// Sets the synchronization scope ID of this store instruction.
384  this->SSID = SSID;
385  }
386 
387  /// Sets the ordering constraint and the synchronization scope ID of this
388  /// store instruction.
389  void setAtomic(AtomicOrdering Ordering,
391  setOrdering(Ordering);
392  setSyncScopeID(SSID);
393  }
394 
395  bool isSimple() const { return !isAtomic() && !isVolatile(); }
396 
397  bool isUnordered() const {
398  return (getOrdering() == AtomicOrdering::NotAtomic ||
400  !isVolatile();
401  }
402 
403  Value *getValueOperand() { return getOperand(0); }
404  const Value *getValueOperand() const { return getOperand(0); }
405 
407  const Value *getPointerOperand() const { return getOperand(1); }
408  static unsigned getPointerOperandIndex() { return 1U; }
410 
411  /// Returns the address space of the pointer operand.
412  unsigned getPointerAddressSpace() const {
414  }
415 
416  // Methods for support type inquiry through isa, cast, and dyn_cast:
417  static bool classof(const Instruction *I) {
418  return I->getOpcode() == Instruction::Store;
419  }
420  static bool classof(const Value *V) {
421  return isa<Instruction>(V) && classof(cast<Instruction>(V));
422  }
423 
424 private:
425  // Shadow Instruction::setInstructionSubclassData with a private forwarding
426  // method so that subclasses cannot accidentally use it.
427  template <typename Bitfield>
428  void setSubclassData(typename Bitfield::Type Value) {
429  Instruction::setSubclassData<Bitfield>(Value);
430  }
431 
432  /// The synchronization scope ID of this store instruction. Not quite enough
433  /// room in SubClassData for everything, so synchronization scope ID gets its
434  /// own field.
435  SyncScope::ID SSID;
436 };
437 
438 template <>
439 struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
440 };
441 
443 
444 //===----------------------------------------------------------------------===//
445 // FenceInst Class
446 //===----------------------------------------------------------------------===//
447 
448 /// An instruction for ordering other memory operations.
449 class FenceInst : public Instruction {
450  using OrderingField = AtomicOrderingBitfieldElementT<0>;
451 
452  void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
453 
454 protected:
455  // Note: Instruction needs to be a friend here to call cloneImpl.
456  friend class Instruction;
457 
458  FenceInst *cloneImpl() const;
459 
460 public:
461  // Ordering may only be Acquire, Release, AcquireRelease, or
462  // SequentiallyConsistent.
465  Instruction *InsertBefore = nullptr);
467  BasicBlock *InsertAtEnd);
468 
469  // allocate space for exactly zero operands
470  void *operator new(size_t S) { return User::operator new(S, 0); }
471  void operator delete(void *Ptr) { User::operator delete(Ptr); }
472 
473  /// Returns the ordering constraint of this fence instruction.
475  return getSubclassData<OrderingField>();
476  }
477 
478  /// Sets the ordering constraint of this fence instruction. May only be
479  /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
480  void setOrdering(AtomicOrdering Ordering) {
481  setSubclassData<OrderingField>(Ordering);
482  }
483 
484  /// Returns the synchronization scope ID of this fence instruction.
486  return SSID;
487  }
488 
489  /// Sets the synchronization scope ID of this fence instruction.
491  this->SSID = SSID;
492  }
493 
494  // Methods for support type inquiry through isa, cast, and dyn_cast:
495  static bool classof(const Instruction *I) {
496  return I->getOpcode() == Instruction::Fence;
497  }
498  static bool classof(const Value *V) {
499  return isa<Instruction>(V) && classof(cast<Instruction>(V));
500  }
501 
502 private:
503  // Shadow Instruction::setInstructionSubclassData with a private forwarding
504  // method so that subclasses cannot accidentally use it.
505  template <typename Bitfield>
506  void setSubclassData(typename Bitfield::Type Value) {
507  Instruction::setSubclassData<Bitfield>(Value);
508  }
509 
510  /// The synchronization scope ID of this fence instruction. Not quite enough
511  /// room in SubClassData for everything, so synchronization scope ID gets its
512  /// own field.
513  SyncScope::ID SSID;
514 };
515 
516 //===----------------------------------------------------------------------===//
517 // AtomicCmpXchgInst Class
518 //===----------------------------------------------------------------------===//
519 
520 /// An instruction that atomically checks whether a
521 /// specified value is in a memory location, and, if it is, stores a new value
522 /// there. The value returned by this instruction is a pair containing the
523 /// original value as first element, and an i1 indicating success (true) or
524 /// failure (false) as second element.
525 ///
527  void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
528  AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
529  SyncScope::ID SSID);
530 
531  template <unsigned Offset>
532  using AtomicOrderingBitfieldElement =
535 
536 protected:
537  // Note: Instruction needs to be a friend here to call cloneImpl.
538  friend class Instruction;
539 
540  AtomicCmpXchgInst *cloneImpl() const;
541 
542 public:
543  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
544  AtomicOrdering SuccessOrdering,
545  AtomicOrdering FailureOrdering, SyncScope::ID SSID,
546  Instruction *InsertBefore = nullptr);
547  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
548  AtomicOrdering SuccessOrdering,
549  AtomicOrdering FailureOrdering, SyncScope::ID SSID,
550  BasicBlock *InsertAtEnd);
551 
552  // allocate space for exactly three operands
553  void *operator new(size_t S) { return User::operator new(S, 3); }
554  void operator delete(void *Ptr) { User::operator delete(Ptr); }
555 
558  using SuccessOrderingField =
560  using FailureOrderingField =
562  using AlignmentField =
564  static_assert(
567  "Bitfields must be contiguous");
568 
569  /// Return the alignment of the memory that is being allocated by the
570  /// instruction.
571  Align getAlign() const {
572  return Align(1ULL << getSubclassData<AlignmentField>());
573  }
574 
576  setSubclassData<AlignmentField>(Log2(Align));
577  }
578 
579  /// Return true if this is a cmpxchg from a volatile memory
580  /// location.
581  ///
582  bool isVolatile() const { return getSubclassData<VolatileField>(); }
583 
584  /// Specify whether this is a volatile cmpxchg.
585  ///
586  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
587 
588  /// Return true if this cmpxchg may spuriously fail.
589  bool isWeak() const { return getSubclassData<WeakField>(); }
590 
591  void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
592 
593  /// Transparently provide more efficient getOperand methods.
595 
596  static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
597  return Ordering != AtomicOrdering::NotAtomic &&
598  Ordering != AtomicOrdering::Unordered;
599  }
600 
601  static bool isValidFailureOrdering(AtomicOrdering Ordering) {
602  return Ordering != AtomicOrdering::NotAtomic &&
603  Ordering != AtomicOrdering::Unordered &&
604  Ordering != AtomicOrdering::AcquireRelease &&
605  Ordering != AtomicOrdering::Release;
606  }
607 
608  /// Returns the success ordering constraint of this cmpxchg instruction.
610  return getSubclassData<SuccessOrderingField>();
611  }
612 
613  /// Sets the success ordering constraint of this cmpxchg instruction.
615  assert(isValidSuccessOrdering(Ordering) &&
616  "invalid CmpXchg success ordering");
617  setSubclassData<SuccessOrderingField>(Ordering);
618  }
619 
620  /// Returns the failure ordering constraint of this cmpxchg instruction.
622  return getSubclassData<FailureOrderingField>();
623  }
624 
625  /// Sets the failure ordering constraint of this cmpxchg instruction.
627  assert(isValidFailureOrdering(Ordering) &&
628  "invalid CmpXchg failure ordering");
629  setSubclassData<FailureOrderingField>(Ordering);
630  }
631 
632  /// Returns a single ordering which is at least as strong as both the
633  /// success and failure orderings for this cmpxchg.
642  }
643  return getSuccessOrdering();
644  }
645 
646  /// Returns the synchronization scope ID of this cmpxchg instruction.
648  return SSID;
649  }
650 
651  /// Sets the synchronization scope ID of this cmpxchg instruction.
653  this->SSID = SSID;
654  }
655 
657  const Value *getPointerOperand() const { return getOperand(0); }
658  static unsigned getPointerOperandIndex() { return 0U; }
659 
661  const Value *getCompareOperand() const { return getOperand(1); }
662 
664  const Value *getNewValOperand() const { return getOperand(2); }
665 
666  /// Returns the address space of the pointer operand.
667  unsigned getPointerAddressSpace() const {
669  }
670 
671  /// Returns the strongest permitted ordering on failure, given the
672  /// desired ordering on success.
673  ///
674  /// If the comparison in a cmpxchg operation fails, there is no atomic store
675  /// so release semantics cannot be provided. So this function drops explicit
676  /// Release requests from the AtomicOrdering. A SequentiallyConsistent
677  /// operation would remain SequentiallyConsistent.
678  static AtomicOrdering
680  switch (SuccessOrdering) {
681  default:
682  llvm_unreachable("invalid cmpxchg success ordering");
691  }
692  }
693 
694  // Methods for support type inquiry through isa, cast, and dyn_cast:
695  static bool classof(const Instruction *I) {
696  return I->getOpcode() == Instruction::AtomicCmpXchg;
697  }
698  static bool classof(const Value *V) {
699  return isa<Instruction>(V) && classof(cast<Instruction>(V));
700  }
701 
702 private:
703  // Shadow Instruction::setInstructionSubclassData with a private forwarding
704  // method so that subclasses cannot accidentally use it.
705  template <typename Bitfield>
706  void setSubclassData(typename Bitfield::Type Value) {
707  Instruction::setSubclassData<Bitfield>(Value);
708  }
709 
710  /// The synchronization scope ID of this cmpxchg instruction. Not quite
711  /// enough room in SubClassData for everything, so synchronization scope ID
712  /// gets its own field.
713  SyncScope::ID SSID;
714 };
715 
716 template <>
718  public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
719 };
720 
722 
723 //===----------------------------------------------------------------------===//
724 // AtomicRMWInst Class
725 //===----------------------------------------------------------------------===//
726 
727 /// an instruction that atomically reads a memory location,
728 /// combines it with another value, and then stores the result back. Returns
729 /// the old value.
730 ///
731 class AtomicRMWInst : public Instruction {
732 protected:
733  // Note: Instruction needs to be a friend here to call cloneImpl.
734  friend class Instruction;
735 
736  AtomicRMWInst *cloneImpl() const;
737 
738 public:
739  /// This enumeration lists the possible modifications atomicrmw can make. In
740  /// the descriptions, 'p' is the pointer to the instruction's memory location,
741  /// 'old' is the initial value of *p, and 'v' is the other value passed to the
742  /// instruction. These instructions always return 'old'.
743  enum BinOp : unsigned {
744  /// *p = v
746  /// *p = old + v
748  /// *p = old - v
750  /// *p = old & v
752  /// *p = ~(old & v)
754  /// *p = old | v
755  Or,
756  /// *p = old ^ v
758  /// *p = old >signed v ? old : v
760  /// *p = old <signed v ? old : v
762  /// *p = old >unsigned v ? old : v
764  /// *p = old <unsigned v ? old : v
766 
767  /// *p = old + v
769 
770  /// *p = old - v
772 
773  FIRST_BINOP = Xchg,
774  LAST_BINOP = FSub,
775  BAD_BINOP
776  };
777 
778 private:
779  template <unsigned Offset>
780  using AtomicOrderingBitfieldElement =
783 
784  template <unsigned Offset>
785  using BinOpBitfieldElement =
787 
788 public:
789  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
790  AtomicOrdering Ordering, SyncScope::ID SSID,
791  Instruction *InsertBefore = nullptr);
792  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
793  AtomicOrdering Ordering, SyncScope::ID SSID,
794  BasicBlock *InsertAtEnd);
795 
796  // allocate space for exactly two operands
797  void *operator new(size_t S) { return User::operator new(S, 2); }
798  void operator delete(void *Ptr) { User::operator delete(Ptr); }
799 
801  using AtomicOrderingField =
803  using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
807  "Bitfields must be contiguous");
808 
809  BinOp getOperation() const { return getSubclassData<OperationField>(); }
810 
811  static StringRef getOperationName(BinOp Op);
812 
813  static bool isFPOperation(BinOp Op) {
814  switch (Op) {
815  case AtomicRMWInst::FAdd:
816  case AtomicRMWInst::FSub:
817  return true;
818  default:
819  return false;
820  }
821  }
822 
824  setSubclassData<OperationField>(Operation);
825  }
826 
827  /// Return the alignment of the memory that is being allocated by the
828  /// instruction.
829  Align getAlign() const {
830  return Align(1ULL << getSubclassData<AlignmentField>());
831  }
832 
834  setSubclassData<AlignmentField>(Log2(Align));
835  }
836 
837  /// Return true if this is a RMW on a volatile memory location.
838  ///
839  bool isVolatile() const { return getSubclassData<VolatileField>(); }
840 
841  /// Specify whether this is a volatile RMW or not.
842  ///
843  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
844 
845  /// Transparently provide more efficient getOperand methods.
847 
848  /// Returns the ordering constraint of this rmw instruction.
850  return getSubclassData<AtomicOrderingField>();
851  }
852 
853  /// Sets the ordering constraint of this rmw instruction.
854  void setOrdering(AtomicOrdering Ordering) {
855  assert(Ordering != AtomicOrdering::NotAtomic &&
856  "atomicrmw instructions can only be atomic.");
857  setSubclassData<AtomicOrderingField>(Ordering);
858  }
859 
860  /// Returns the synchronization scope ID of this rmw instruction.
862  return SSID;
863  }
864 
865  /// Sets the synchronization scope ID of this rmw instruction.
867  this->SSID = SSID;
868  }
869 
870  Value *getPointerOperand() { return getOperand(0); }
871  const Value *getPointerOperand() const { return getOperand(0); }
872  static unsigned getPointerOperandIndex() { return 0U; }
873 
874  Value *getValOperand() { return getOperand(1); }
875  const Value *getValOperand() const { return getOperand(1); }
876 
877  /// Returns the address space of the pointer operand.
878  unsigned getPointerAddressSpace() const {
880  }
881 
883  return isFPOperation(getOperation());
884  }
885 
886  // Methods for support type inquiry through isa, cast, and dyn_cast:
887  static bool classof(const Instruction *I) {
888  return I->getOpcode() == Instruction::AtomicRMW;
889  }
890  static bool classof(const Value *V) {
891  return isa<Instruction>(V) && classof(cast<Instruction>(V));
892  }
893 
894 private:
895  void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
896  AtomicOrdering Ordering, SyncScope::ID SSID);
897 
898  // Shadow Instruction::setInstructionSubclassData with a private forwarding
899  // method so that subclasses cannot accidentally use it.
900  template <typename Bitfield>
901  void setSubclassData(typename Bitfield::Type Value) {
902  Instruction::setSubclassData<Bitfield>(Value);
903  }
904 
905  /// The synchronization scope ID of this rmw instruction. Not quite enough
906  /// room in SubClassData for everything, so synchronization scope ID gets its
907  /// own field.
908  SyncScope::ID SSID;
909 };
910 
911 template <>
913  : public FixedNumOperandTraits<AtomicRMWInst,2> {
914 };
915 
917 
918 //===----------------------------------------------------------------------===//
919 // GetElementPtrInst Class
920 //===----------------------------------------------------------------------===//
921 
922 // checkGEPType - Simple wrapper function to give a better assertion failure
923 // message on bad indexes for a gep instruction.
924 //
926  assert(Ty && "Invalid GetElementPtrInst indices for type!");
927  return Ty;
928 }
929 
/// an instruction for type-safe pointer arithmetic to
/// access elements of arrays and structs
///
// NOTE(review): the doc extraction dropped the class header line here;
// upstream reads "class GetElementPtrInst : public Instruction {" — confirm
// against the original llvm/IR/Instructions.h before compiling.
  /// Static type the base pointer operand is declared to point at.
  Type *SourceElementType;
  /// Element type produced after applying all indices to SourceElementType.
  Type *ResultElementType;

  // NOTE(review): a private member declaration was dropped here by the
  // extraction (presumably the unimplemented copy constructor) — verify.

  /// Constructors - Create a getelementptr instruction with a base pointer and
  /// a list of indices. The first ctor can optionally insert before an existing
  /// instruction, the second appends the new instruction to the specified
  /// BasicBlock.
  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
                           ArrayRef<Value *> IdxList, unsigned Values,
                           const Twine &NameStr, Instruction *InsertBefore);
  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
                           ArrayRef<Value *> IdxList, unsigned Values,
                           const Twine &NameStr, BasicBlock *InsertAtEnd);

  /// Fill in the operands (pointer first, then the indices) and set the name.
  void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  GetElementPtrInst *cloneImpl() const;

public:
  /// Create a getelementptr, allocating one operand slot for the base
  /// pointer plus one per index; optionally insert before \p InsertBefore.
  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    // One operand for the pointer plus one per index.
    unsigned Values = 1 + unsigned(IdxList.size());
    assert(PointeeType && "Must specify element type");
    assert(cast<PointerType>(Ptr->getType()->getScalarType())
               ->isOpaqueOrPointeeTypeMatches(PointeeType));
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertBefore);
  }

  /// As above, but append the new instruction to \p InsertAtEnd.
  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    unsigned Values = 1 + unsigned(IdxList.size());
    assert(PointeeType && "Must specify element type");
    assert(cast<PointerType>(Ptr->getType()->getScalarType())
               ->isOpaqueOrPointeeTypeMatches(PointeeType));
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertAtEnd);
  }

  /// Create an "inbounds" getelementptr. See the documentation for the
  /// "inbounds" flag in LangRef.html for details.
  static GetElementPtrInst *
  CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
                 const Twine &NameStr = "",
                 Instruction *InsertBefore = nullptr) {
    // NOTE(review): the extraction dropped the result-variable declaration
    // line here (upstream: "GetElementPtrInst *GEP =") — verify.
    Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
    GEP->setIsInBounds(true);
    return GEP;
  }

  static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
                                           ArrayRef<Value *> IdxList,
                                           const Twine &NameStr,
                                           BasicBlock *InsertAtEnd) {
    // NOTE(review): result-variable declaration line dropped here as well
    // (upstream: "GetElementPtrInst *GEP =") — verify.
    Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
    GEP->setIsInBounds(true);
    return GEP;
  }

  /// Transparently provide more efficient getOperand methods.
  // NOTE(review): a macro invocation line was dropped here (upstream:
  // "DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);") — verify.

  Type *getSourceElementType() const { return SourceElementType; }

  void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
  void setResultElementType(Type *Ty) { ResultElementType = Ty; }

  // NOTE(review): the declaration line for this accessor was dropped
  // (upstream: "Type *getResultElementType() const {") — verify.
    assert(cast<PointerType>(getType()->getScalarType())
               ->isOpaqueOrPointeeTypeMatches(ResultElementType));
    return ResultElementType;
  }

  /// Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    // Note that this is always the same as the pointer operand's address space
    // and that is cheaper to compute, so cheat here.
    return getPointerAddressSpace();
  }

  /// Returns the result type of a getelementptr with the given source
  /// element type and indexes.
  ///
  /// Null is returned if the indices are invalid for the specified
  /// source element type.
  static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);

  /// Return the type of the element at the given index of an indexable
  /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
  ///
  /// Returns null if the type can't be indexed, or the given index is not
  /// legal for the given type.
  static Type *getTypeAtIndex(Type *Ty, Value *Idx);
  static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);

  // Operand 0 is the base pointer; operands [1, N) are the indices.
  inline op_iterator idx_begin() { return op_begin()+1; }
  inline const_op_iterator idx_begin() const { return op_begin()+1; }
  inline op_iterator idx_end() { return op_end(); }
  inline const_op_iterator idx_end() const { return op_end(); }

  // NOTE(review): the declaration line was dropped here (upstream:
  // "inline iterator_range<op_iterator> indices() {") — verify.
    return make_range(idx_begin(), idx_end());
  }

  // NOTE(review): the declaration line was dropped here (upstream:
  // "inline iterator_range<const_op_iterator> indices() const {") — verify.
    return make_range(idx_begin(), idx_end());
  }

  // NOTE(review): the declaration line was dropped here (upstream:
  // "Value *getPointerOperand() {") — verify.
    return getOperand(0);
  }
  const Value *getPointerOperand() const {
    return getOperand(0);
  }
  static unsigned getPointerOperandIndex() {
    return 0U; // get index for modifying correct operand.
  }

  /// Method to return the pointer operand as a
  /// PointerType.
  // NOTE(review): the declaration line was dropped here (upstream:
  // "Type *getPointerOperandType() const {") — verify.
    return getPointerOperand()->getType();
  }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
  // NOTE(review): the body line was dropped here (upstream:
  // "return getPointerOperandType()->getPointerAddressSpace();") — verify.
  }

  /// Returns the pointer type returned by the GEP
  /// instruction, which may be a vector of pointers.
  static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
                                ArrayRef<Value *> IdxList) {
    PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
    unsigned AddrSpace = OrigPtrTy->getAddressSpace();
    Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
    // Opaque pointers carry no pointee type; typed pointers are rebuilt with
    // the indexed result element type.
    Type *PtrTy = OrigPtrTy->isOpaque()
                      ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
                      : PointerType::get(ResultElemTy, AddrSpace);
    // Vector GEP
    if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
      ElementCount EltCount = PtrVTy->getElementCount();
      return VectorType::get(PtrTy, EltCount);
    }
    // A scalar base with any vector index also yields a vector of pointers.
    for (Value *Index : IdxList)
      if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
        ElementCount EltCount = IndexVTy->getElementCount();
        return VectorType::get(PtrTy, EltCount);
      }
    // Scalar GEP
    return PtrTy;
  }

  unsigned getNumIndices() const { // Note: always non-negative
    return getNumOperands() - 1;
  }

  bool hasIndices() const {
    return getNumOperands() > 1;
  }

  /// Return true if all of the indices of this GEP are
  /// zeros. If so, the result pointer and the first operand have the same
  /// value, just potentially different types.
  bool hasAllZeroIndices() const;

  /// Return true if all of the indices of this GEP are
  /// constant integers. If so, the result pointer and the first operand have
  /// a constant offset between them.
  bool hasAllConstantIndices() const;

  /// Set or clear the inbounds flag on this GEP instruction.
  /// See LangRef.html for the meaning of inbounds on a getelementptr.
  void setIsInBounds(bool b = true);

  /// Determine whether the GEP has the inbounds flag.
  bool isInBounds() const;

  /// Accumulate the constant address offset of this GEP if possible.
  ///
  /// This routine accepts an APInt into which it will accumulate the constant
  /// offset of this GEP if the GEP is in fact constant. If the GEP is not
  /// all-constant, it returns false and the value of the offset APInt is
  /// undefined (it is *not* preserved!). The APInt passed into this routine
  /// must be at least as wide as the IntPtr type for the address space of
  /// the base GEP pointer.
  bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
  /// Split the total offset into a per-variable map (\p VariableOffsets) and
  /// a constant remainder (\p ConstantOffset).
  bool collectOffset(const DataLayout &DL, unsigned BitWidth,
                     MapVector<Value *, APInt> &VariableOffsets,
                     APInt &ConstantOffset) const;
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::GetElementPtr);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1146 
// GetElementPtrInst has a variable operand count: the base pointer plus at
// least zero indices (minimum arity 1).
template <>
// NOTE(review): the extraction dropped the opening line of this
// specialization (upstream: "struct OperandTraits<GetElementPtrInst> :") —
// verify against the original header.
    public VariadicOperandTraits<GetElementPtrInst, 1> {
};
1151 
// Out-of-line inline definitions of the GetElementPtrInst constructors.
GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     Instruction *InsertBefore)
    // Operands are co-allocated immediately before `this` (placement new in
    // the Create methods); `Values` counts the pointer plus all indices.
    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertBefore),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  assert(cast<PointerType>(getType()->getScalarType())
             ->isOpaqueOrPointeeTypeMatches(ResultElementType));
  init(Ptr, IdxList, NameStr);
}

GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     BasicBlock *InsertAtEnd)
    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertAtEnd),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  assert(cast<PointerType>(getType()->getScalarType())
             ->isOpaqueOrPointeeTypeMatches(ResultElementType));
  init(Ptr, IdxList, NameStr);
}

// Expands to the efficient getOperand/setOperand accessors declared above.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1181 
1182 //===----------------------------------------------------------------------===//
1183 // ICmpInst Class
1184 //===----------------------------------------------------------------------===//
1185 
/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on integers or pointers. The operands
/// must be identical types.
/// Represent an integer comparison operator.
class ICmpInst: public CmpInst {
  /// Debug-build invariant checks shared by all constructors: the predicate
  /// must be an integer predicate and both operands must have the same
  /// integer, pointer, or vector-thereof type.
  void AssertOK() {
    assert(isIntPredicate() &&
           "Invalid ICmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
           "Both operands to ICmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
            getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
           "Invalid operand types for ICmp instruction");
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical ICmpInst
  ICmpInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  // NOTE(review): the extraction dropped the constructor name line here
  // (upstream: "ICmpInst(") — restore before compiling.
    Instruction *InsertBefore,  ///< Where to insert
    Predicate pred,             ///< The predicate to use for the comparison
    Value *LHS,                 ///< The left-hand-side of the expression
    Value *RHS,                 ///< The right-hand-side of the expression
    const Twine &NameStr = ""   ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr,
              InsertBefore) {
#ifndef NDEBUG
    AssertOK();
#endif
  }

  /// Constructor with insert-at-end semantics.
  // NOTE(review): constructor name line dropped here (upstream: "ICmpInst(").
    BasicBlock &InsertAtEnd,    ///< Block to insert into.
    Predicate pred,             ///< The predicate to use for the comparison
    Value *LHS,                 ///< The left-hand-side of the expression
    Value *RHS,                 ///< The right-hand-side of the expression
    const Twine &NameStr = ""   ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr,
              &InsertAtEnd) {
#ifndef NDEBUG
    AssertOK();
#endif
  }

  /// Constructor with no-insertion semantics
  // NOTE(review): constructor name line dropped here (upstream: "ICmpInst(").
    Predicate pred,             ///< The predicate to use for the comparison
    Value *LHS,                 ///< The left-hand-side of the expression
    Value *RHS,                 ///< The right-hand-side of the expression
    const Twine &NameStr = ""   ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr) {
#ifndef NDEBUG
    AssertOK();
#endif
  }

  /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
  /// @returns the predicate that would be the result if the operand were
  /// regarded as signed.
  /// Return the signed version of the predicate
  // NOTE(review): the declaration line was dropped here (upstream:
  // "Predicate getSignedPredicate() const {").
    return getSignedPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction.
  /// Return the signed version of the predicate.
  static Predicate getSignedPredicate(Predicate pred);

  /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
  /// @returns the predicate that would be the result if the operand were
  /// regarded as unsigned.
  /// Return the unsigned version of the predicate
  // NOTE(review): the declaration line was dropped here (upstream:
  // "Predicate getUnsignedPredicate() const {").
    return getUnsignedPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction.
  /// Return the unsigned version of the predicate.
  static Predicate getUnsignedPredicate(Predicate pred);

  /// Return true if this predicate is either EQ or NE. This also
  /// tests for commutativity.
  static bool isEquality(Predicate P) {
    return P == ICMP_EQ || P == ICMP_NE;
  }

  /// Return true if this predicate is either EQ or NE. This also
  /// tests for commutativity.
  bool isEquality() const {
    return isEquality(getPredicate());
  }

  /// @returns true if the predicate of this ICmpInst is commutative
  /// Determine if this relation is commutative.
  bool isCommutative() const { return isEquality(); }

  /// Return true if the predicate is relational (not EQ or NE).
  ///
  bool isRelational() const {
    return !isEquality();
  }

  /// Return true if the predicate is relational (not EQ or NE).
  ///
  static bool isRelational(Predicate P) {
    return !isEquality(P);
  }

  /// Return true if the predicate is SGT or UGT.
  ///
  static bool isGT(Predicate P) {
    return P == ICMP_SGT || P == ICMP_UGT;
  }

  /// Return true if the predicate is SLT or ULT.
  ///
  static bool isLT(Predicate P) {
    return P == ICMP_SLT || P == ICMP_ULT;
  }

  /// Return true if the predicate is SGE or UGE.
  ///
  static bool isGE(Predicate P) {
    return P == ICMP_SGE || P == ICMP_UGE;
  }

  /// Return true if the predicate is SLE or ULE.
  ///
  static bool isLE(Predicate P) {
    return P == ICMP_SLE || P == ICMP_ULE;
  }

  /// Returns the sequence of all ICmp predicates.
  ///
  static auto predicates() { return ICmpPredicates(); }

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// Swap operands and adjust predicate.
  void swapOperands() {
    setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
  }

  /// Return result of `LHS Pred RHS` comparison.
  static bool compare(const APInt &LHS, const APInt &RHS,
                      ICmpInst::Predicate Pred);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ICmp;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1355 
1356 //===----------------------------------------------------------------------===//
1357 // FCmpInst Class
1358 //===----------------------------------------------------------------------===//
1359 
1360 /// This instruction compares its operands according to the predicate given
1361 /// to the constructor. It only operates on floating point values or packed
1362 /// vectors of floating point values. The operands must be identical types.
1363 /// Represents a floating point comparison operator.
class FCmpInst: public CmpInst {
  /// Debug-build invariant checks shared by all constructors: the predicate
  /// must be a floating-point predicate and both operands must have the same
  /// FP (or FP-vector) type.
  void AssertOK() {
    assert(isFPPredicate() && "Invalid FCmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
           "Both operands to FCmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
           "Invalid operand types for FCmp instruction");
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FCmpInst
  FCmpInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  // NOTE(review): the extraction dropped the constructor name line here
  // (upstream: "FCmpInst(") — restore before compiling.
    Instruction *InsertBefore,  ///< Where to insert
    Predicate pred,             ///< The predicate to use for the comparison
    Value *LHS,                 ///< The left-hand-side of the expression
    Value *RHS,                 ///< The right-hand-side of the expression
    const Twine &NameStr = ""   ///< Name of the instruction
  // NOTE(review): the init-list opener was dropped here (upstream:
  // ") : CmpInst(makeCmpResultType(LHS->getType()),").
              Instruction::FCmp, pred, LHS, RHS, NameStr,
              InsertBefore) {
    AssertOK();
  }

  /// Constructor with insert-at-end semantics.
  // NOTE(review): constructor name line dropped here (upstream: "FCmpInst(").
    BasicBlock &InsertAtEnd,    ///< Block to insert into.
    Predicate pred,             ///< The predicate to use for the comparison
    Value *LHS,                 ///< The left-hand-side of the expression
    Value *RHS,                 ///< The right-hand-side of the expression
    const Twine &NameStr = ""   ///< Name of the instruction
  // NOTE(review): init-list opener dropped here (upstream:
  // ") : CmpInst(makeCmpResultType(LHS->getType()),").
              Instruction::FCmp, pred, LHS, RHS, NameStr,
              &InsertAtEnd) {
    AssertOK();
  }

  /// Constructor with no-insertion semantics
  // NOTE(review): constructor name line dropped here (upstream: "FCmpInst(").
    Predicate Pred,             ///< The predicate to use for the comparison
    Value *LHS,                 ///< The left-hand-side of the expression
    Value *RHS,                 ///< The right-hand-side of the expression
    const Twine &NameStr = "",  ///< Name of the instruction
    Instruction *FlagsSource = nullptr
  ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
              RHS, NameStr, nullptr, FlagsSource) {
    AssertOK();
  }

  /// @returns true if the predicate of this instruction is EQ or NE.
  /// Determine if this is an equality predicate.
  static bool isEquality(Predicate Pred) {
    return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
           Pred == FCMP_UNE;
  }

  /// @returns true if the predicate of this instruction is EQ or NE.
  /// Determine if this is an equality predicate.
  bool isEquality() const { return isEquality(getPredicate()); }

  /// @returns true if the predicate of this instruction is commutative.
  /// Determine if this is a commutative predicate.
  bool isCommutative() const {
    return isEquality() ||
           getPredicate() == FCMP_FALSE ||
           getPredicate() == FCMP_TRUE ||
           getPredicate() == FCMP_ORD ||
           getPredicate() == FCMP_UNO;
  }

  /// @returns true if the predicate is relational (not EQ or NE).
  /// Determine if this a relational predicate.
  bool isRelational() const { return !isEquality(); }

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// Swap operands and adjust predicate.
  void swapOperands() {
    // NOTE(review): a statement was dropped here by the extraction
    // (upstream: "setPredicate(getSwappedPredicate());") — without it the
    // operand swap below would not preserve semantics for ordered
    // predicates. Restore before compiling.
    Op<0>().swap(Op<1>());
  }

  /// Returns the sequence of all FCmp predicates.
  ///
  static auto predicates() { return FCmpPredicates(); }

  /// Return result of `LHS Pred RHS` comparison.
  static bool compare(const APFloat &LHS, const APFloat &RHS,
                      FCmpInst::Predicate Pred);

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::FCmp;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1471 
1472 //===----------------------------------------------------------------------===//
1473 /// This class represents a function call, abstracting a target
1474 /// machine's calling convention. This class uses low bit of the SubClassData
1475 /// field to indicate whether or not this is a tail call. The rest of the bits
1476 /// hold the calling convention of the call.
1477 ///
class CallInst : public CallBase {
  // Private copy constructor; presumably used by cloneImpl — confirm in
  // Instructions.cpp.
  CallInst(const CallInst &CI);

  /// Construct a CallInst given a range of arguments.
  /// Construct a CallInst from a range of arguments
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                  Instruction *InsertBefore);

  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  const Twine &NameStr, Instruction *InsertBefore)
      : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}

  /// Construct a CallInst given a range of arguments.
  /// Construct a CallInst from a range of arguments
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
                    Instruction *InsertBefore);

  CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
           BasicBlock *InsertAtEnd);

  void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
  void init(FunctionType *FTy, Value *Func, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
    // We need one operand for the called function, plus the input operand
    // counts provided.
    return 1 + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CallInst *cloneImpl() const;

public:
  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
  }

  // NOTE(review): the extraction dropped the declaration line here (upstream:
  // "static CallInst *Create(FunctionType *Ty, Value *Func,
  //  ArrayRef<Value *> Args,") — restore before compiling.
                          const Twine &NameStr,
                          Instruction *InsertBefore = nullptr) {
    return new (ComputeNumOperands(Args.size()))
        CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
  }

  // NOTE(review): declaration line dropped here (upstream: "static CallInst
  // *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,").
                          ArrayRef<OperandBundleDef> Bundles = None,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    const int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    // Bundle bookkeeping lives in a descriptor co-allocated with the
    // operands (see CallBase's operator new).
    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
  }

  // NOTE(review): declaration line dropped here (upstream: "static CallInst
  // *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,").
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return new (ComputeNumOperands(Args.size()))
        CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
  }

  // NOTE(review): two declaration lines dropped here (upstream: "static
  // CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,"
  // and "ArrayRef<OperandBundleDef> Bundles,").
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    const int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertBefore);
  }

  // NOTE(review): declaration line dropped here (upstream: "static CallInst
  // *Create(FunctionCallee Func, ArrayRef<Value *> Args,").
                          ArrayRef<OperandBundleDef> Bundles = None,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertBefore);
  }

  // NOTE(review): declaration line dropped here (upstream: "static CallInst
  // *Create(FunctionCallee Func, ArrayRef<Value *> Args,").
                          const Twine &NameStr,
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertAtEnd);
  }

  // NOTE(review): declaration line dropped here (upstream: "static CallInst
  // *Create(FunctionCallee Func, ArrayRef<Value *> Args,").
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertAtEnd);
  }

  // NOTE(review): two declaration lines dropped here (upstream: "static
  // CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args," and
  // "ArrayRef<OperandBundleDef> Bundles,").
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertAtEnd);
  }

  /// Create a clone of \p CI with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned call instruction is identical to \p CI in every way except
  /// that the operand bundles for the new instruction are set to the operand
  /// bundles in \p Bundles.
  static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
                          Instruction *InsertPt = nullptr);

  /// Generate the IR for a call to malloc:
  /// 1. Compute the malloc call's argument as the specified type's size,
  ///    possibly multiplied by the array size if the array size is not
  ///    constant 1.
  /// 2. Call malloc with that argument.
  /// 3. Bitcast the result of the malloc call to the specified type.
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   ArrayRef<OperandBundleDef> Bundles = None,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   ArrayRef<OperandBundleDef> Bundles = None,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  /// Generate the IR for a call to the builtin free function.
  static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
  // NOTE(review): the leading lines of two further bundle-taking CreateFree
  // overloads were dropped here (upstream: "static Instruction
  // *CreateFree(Value *Source, ArrayRef<OperandBundleDef> Bundles,").
                                 Instruction *InsertBefore);
                                 BasicBlock *InsertAtEnd);

  // Note that 'musttail' implies 'tail'.
  enum TailCallKind : unsigned {
    // NOTE(review): the enumerator lines were dropped here by the extraction
    // (upstream: TCK_None = 0, TCK_Tail = 1, TCK_MustTail = 2,
    // TCK_NoTail = 3, TCK_LastTailCallKind = TCK_NoTail) — restore before
    // compiling.
  };

  // NOTE(review): the bitfield alias was dropped here (upstream:
  // "using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2,
  //  TCK_LastTailCallKind>;").
  static_assert(
      Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
      "Bitfields must be contiguous");

  // NOTE(review): declaration line dropped here (upstream:
  // "TailCallKind getTailCallKind() const {").
    return getSubclassData<TailCallKindField>();
  }

  bool isTailCall() const {
    // NOTE(review): a statement was dropped here (upstream:
    // "TailCallKind Kind = getTailCallKind();").
    return Kind == TCK_Tail || Kind == TCK_MustTail;
  }

  bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }

  bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }

  // NOTE(review): declaration line dropped here (upstream:
  // "void setTailCallKind(TailCallKind TCK) {").
    setSubclassData<TailCallKindField>(TCK);
  }

  void setTailCall(bool IsTc = true) {
    setTailCallKind(IsTc ? TCK_Tail : TCK_None);
  }

  /// Return true if the call can return twice
  bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
  void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Call;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Updates profile metadata by scaling it by \p S / \p T.
  // NOTE(review): the declaration line was dropped here (upstream:
  // "void updateProfWeight(uint64_t S, uint64_t T);").

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};
1712 
// Out-of-line inline definitions of the bundle-taking CallInst constructors.
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   BasicBlock *InsertAtEnd)
    // Operands (args + bundle inputs + the callee) are co-allocated
    // immediately before `this`, hence op_end(this) minus the total count.
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertAtEnd) {
  init(Ty, Func, Args, Bundles, NameStr);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertBefore) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1734 
1735 //===----------------------------------------------------------------------===//
1736 // SelectInst Class
1737 //===----------------------------------------------------------------------===//
1738 
/// This class represents the LLVM 'select' instruction.
///
class SelectInst : public Instruction {
  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             Instruction *InsertBefore)
  // NOTE(review): the init-list opener was dropped here by the extraction
  // (upstream: ": Instruction(S1->getType(), Instruction::Select,") —
  // restore before compiling.
        &Op<0>(), 3, InsertBefore) {
    init(C, S1, S2);
    setName(NameStr);
  }

  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             BasicBlock *InsertAtEnd)
  // NOTE(review): init-list opener dropped here as well (upstream:
  // ": Instruction(S1->getType(), Instruction::Select,").
        &Op<0>(), 3, InsertAtEnd) {
    init(C, S1, S2);
    setName(NameStr);
  }

  /// Validate and store the three operands: condition, true value, false
  /// value.
  void init(Value *C, Value *S1, Value *S2) {
    assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
    Op<0>() = C;
    Op<1>() = S1;
    Op<2>() = S2;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  SelectInst *cloneImpl() const;

public:
  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr,
                            Instruction *MDFrom = nullptr) {
    SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
    // Optionally copy metadata from another instruction onto the new select.
    if (MDFrom)
      Sel->copyMetadata(*MDFrom);
    return Sel;
  }

  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
  }

  const Value *getCondition() const { return Op<0>(); }
  const Value *getTrueValue() const { return Op<1>(); }
  const Value *getFalseValue() const { return Op<2>(); }
  Value *getCondition() { return Op<0>(); }
  Value *getTrueValue() { return Op<1>(); }
  Value *getFalseValue() { return Op<2>(); }

  void setCondition(Value *V) { Op<0>() = V; }
  void setTrueValue(Value *V) { Op<1>() = V; }
  void setFalseValue(Value *V) { Op<2>() = V; }

  /// Swap the true and false values of the select instruction.
  /// This doesn't swap prof metadata.
  void swapValues() { Op<1>().swap(Op<2>()); }

  /// Return a string if the specified operands are invalid
  /// for a select operation, otherwise return null.
  static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);

  /// Transparently provide more efficient getOperand methods.
  // NOTE(review): a macro invocation line was dropped here (upstream:
  // "DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);").

  // NOTE(review): the declaration line was dropped here (upstream:
  // "OtherOps getOpcode() const {").
    return static_cast<OtherOps>(Instruction::getOpcode());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Select;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1822 
// SelectInst always has exactly three operands: condition, true value, and
// false value.
template <>
struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
};
1826 
1828 
1829 //===----------------------------------------------------------------------===//
1830 // VAArgInst Class
1831 //===----------------------------------------------------------------------===//
1832 
1833 /// This class represents the va_arg llvm instruction, which returns
1834 /// an argument of the specified type given a va_list and increments that list
1835 ///
1836 class VAArgInst : public UnaryInstruction {
1837 protected:
1838  // Note: Instruction needs to be a friend here to call cloneImpl.
1839  friend class Instruction;
1840 
1841  VAArgInst *cloneImpl() const;
1842 
1843 public:
1844  VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1845  Instruction *InsertBefore = nullptr)
1846  : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1847  setName(NameStr);
1848  }
1849 
1850  VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1851  BasicBlock *InsertAtEnd)
1852  : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1853  setName(NameStr);
1854  }
1855 
1856  Value *getPointerOperand() { return getOperand(0); }
1857  const Value *getPointerOperand() const { return getOperand(0); }
1858  static unsigned getPointerOperandIndex() { return 0U; }
1859 
1860  // Methods for support type inquiry through isa, cast, and dyn_cast:
1861  static bool classof(const Instruction *I) {
1862  return I->getOpcode() == VAArg;
1863  }
1864  static bool classof(const Value *V) {
1865  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1866  }
1867 };
1868 
1869 //===----------------------------------------------------------------------===//
1870 // ExtractElementInst Class
1871 //===----------------------------------------------------------------------===//
1872 
1873 /// This instruction extracts a single (scalar)
1874 /// element from a VectorType value
1875 ///
1877  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1878  Instruction *InsertBefore = nullptr);
1879  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1880  BasicBlock *InsertAtEnd);
1881 
1882 protected:
1883  // Note: Instruction needs to be a friend here to call cloneImpl.
1884  friend class Instruction;
1885 
1886  ExtractElementInst *cloneImpl() const;
1887 
1888 public:
1889  static ExtractElementInst *Create(Value *Vec, Value *Idx,
1890  const Twine &NameStr = "",
1891  Instruction *InsertBefore = nullptr) {
1892  return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1893  }
1894 
1895  static ExtractElementInst *Create(Value *Vec, Value *Idx,
1896  const Twine &NameStr,
1897  BasicBlock *InsertAtEnd) {
1898  return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1899  }
1900 
1901  /// Return true if an extractelement instruction can be
1902  /// formed with the specified operands.
1903  static bool isValidOperands(const Value *Vec, const Value *Idx);
1904 
1905  Value *getVectorOperand() { return Op<0>(); }
1906  Value *getIndexOperand() { return Op<1>(); }
1907  const Value *getVectorOperand() const { return Op<0>(); }
1908  const Value *getIndexOperand() const { return Op<1>(); }
1909 
1911  return cast<VectorType>(getVectorOperand()->getType());
1912  }
1913 
1914  /// Transparently provide more efficient getOperand methods.
1916 
1917  // Methods for support type inquiry through isa, cast, and dyn_cast:
1918  static bool classof(const Instruction *I) {
1919  return I->getOpcode() == Instruction::ExtractElement;
1920  }
1921  static bool classof(const Value *V) {
1922  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1923  }
1924 };
1925 
1926 template <>
1928  public FixedNumOperandTraits<ExtractElementInst, 2> {
1929 };
1930 
1932 
1933 //===----------------------------------------------------------------------===//
1934 // InsertElementInst Class
1935 //===----------------------------------------------------------------------===//
1936 
1937 /// This instruction inserts a single (scalar)
1938 /// element into a VectorType value
1939 ///
1941  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1942  const Twine &NameStr = "",
1943  Instruction *InsertBefore = nullptr);
1944  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1945  BasicBlock *InsertAtEnd);
1946 
1947 protected:
1948  // Note: Instruction needs to be a friend here to call cloneImpl.
1949  friend class Instruction;
1950 
1951  InsertElementInst *cloneImpl() const;
1952 
1953 public:
1954  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1955  const Twine &NameStr = "",
1956  Instruction *InsertBefore = nullptr) {
1957  return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1958  }
1959 
1960  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1961  const Twine &NameStr,
1962  BasicBlock *InsertAtEnd) {
1963  return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1964  }
1965 
1966  /// Return true if an insertelement instruction can be
1967  /// formed with the specified operands.
1968  static bool isValidOperands(const Value *Vec, const Value *NewElt,
1969  const Value *Idx);
1970 
1971  /// Overload to return most specific vector type.
1972  ///
1973  VectorType *getType() const {
1974  return cast<VectorType>(Instruction::getType());
1975  }
1976 
1977  /// Transparently provide more efficient getOperand methods.
1979 
1980  // Methods for support type inquiry through isa, cast, and dyn_cast:
1981  static bool classof(const Instruction *I) {
1982  return I->getOpcode() == Instruction::InsertElement;
1983  }
1984  static bool classof(const Value *V) {
1985  return isa<Instruction>(V) && classof(cast<Instruction>(V));
1986  }
1987 };
1988 
1989 template <>
1991  public FixedNumOperandTraits<InsertElementInst, 3> {
1992 };
1993 
1995 
1996 //===----------------------------------------------------------------------===//
1997 // ShuffleVectorInst Class
1998 //===----------------------------------------------------------------------===//
1999 
2000 constexpr int UndefMaskElem = -1;
2001 
2002 /// This instruction constructs a fixed permutation of two
2003 /// input vectors.
2004 ///
2005 /// For each element of the result vector, the shuffle mask selects an element
2006 /// from one of the input vectors to copy to the result. Non-negative elements
2007 /// in the mask represent an index into the concatenated pair of input vectors.
2008 /// UndefMaskElem (-1) specifies that the result element is undefined.
2009 ///
2010 /// For scalable vectors, all the elements of the mask must be 0 or -1. This
2011 /// requirement may be relaxed in the future.
2013  SmallVector<int, 4> ShuffleMask;
2014  Constant *ShuffleMaskForBitcode;
2015 
2016 protected:
2017  // Note: Instruction needs to be a friend here to call cloneImpl.
2018  friend class Instruction;
2019 
2020  ShuffleVectorInst *cloneImpl() const;
2021 
2022 public:
2023  ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
2024  Instruction *InsertBefore = nullptr);
2025  ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2026  BasicBlock *InsertAtEnd);
2027  ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
2028  Instruction *InsertBefore = nullptr);
2029  ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2030  BasicBlock *InsertAtEnd);
2032  const Twine &NameStr = "",
2033  Instruction *InsertBefor = nullptr);
2035  const Twine &NameStr, BasicBlock *InsertAtEnd);
2037  const Twine &NameStr = "",
2038  Instruction *InsertBefor = nullptr);
2040  const Twine &NameStr, BasicBlock *InsertAtEnd);
2041 
2042  void *operator new(size_t S) { return User::operator new(S, 2); }
2043  void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2044 
2045  /// Swap the operands and adjust the mask to preserve the semantics
2046  /// of the instruction.
2047  void commute();
2048 
2049  /// Return true if a shufflevector instruction can be
2050  /// formed with the specified operands.
2051  static bool isValidOperands(const Value *V1, const Value *V2,
2052  const Value *Mask);
2053  static bool isValidOperands(const Value *V1, const Value *V2,
2055 
2056  /// Overload to return most specific vector type.
2057  ///
2058  VectorType *getType() const {
2059  return cast<VectorType>(Instruction::getType());
2060  }
2061 
2062  /// Transparently provide more efficient getOperand methods.
2064 
2065  /// Return the shuffle mask value of this instruction for the given element
2066  /// index. Return UndefMaskElem if the element is undef.
2067  int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2068 
2069  /// Convert the input shuffle mask operand to a vector of integers. Undefined
2070  /// elements of the mask are returned as UndefMaskElem.
2071  static void getShuffleMask(const Constant *Mask,
2072  SmallVectorImpl<int> &Result);
2073 
2074  /// Return the mask for this instruction as a vector of integers. Undefined
2075  /// elements of the mask are returned as UndefMaskElem.
2076  void getShuffleMask(SmallVectorImpl<int> &Result) const {
2077  Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2078  }
2079 
2080  /// Return the mask for this instruction, for use in bitcode.
2081  ///
2082  /// TODO: This is temporary until we decide a new bitcode encoding for
2083  /// shufflevector.
2084  Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2085 
2086  static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2087  Type *ResultTy);
2088 
2089  void setShuffleMask(ArrayRef<int> Mask);
2090 
2091  ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2092 
2093  /// Return true if this shuffle returns a vector with a different number of
2094  /// elements than its source vectors.
2095  /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2096  /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2097  bool changesLength() const {
2098  unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2099  ->getElementCount()
2100  .getKnownMinValue();
2101  unsigned NumMaskElts = ShuffleMask.size();
2102  return NumSourceElts != NumMaskElts;
2103  }
2104 
2105  /// Return true if this shuffle returns a vector with a greater number of
2106  /// elements than its source vectors.
2107  /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2108  bool increasesLength() const {
2109  unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2110  ->getElementCount()
2111  .getKnownMinValue();
2112  unsigned NumMaskElts = ShuffleMask.size();
2113  return NumSourceElts < NumMaskElts;
2114  }
2115 
2116  /// Return true if this shuffle mask chooses elements from exactly one source
2117  /// vector.
2118  /// Example: <7,5,undef,7>
2119  /// This assumes that vector operands are the same length as the mask.
2120  static bool isSingleSourceMask(ArrayRef<int> Mask);
2121  static bool isSingleSourceMask(const Constant *Mask) {
2122  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2123  SmallVector<int, 16> MaskAsInts;
2124  getShuffleMask(Mask, MaskAsInts);
2125  return isSingleSourceMask(MaskAsInts);
2126  }
2127 
2128  /// Return true if this shuffle chooses elements from exactly one source
2129  /// vector without changing the length of that vector.
2130  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2131  /// TODO: Optionally allow length-changing shuffles.
2132  bool isSingleSource() const {
2133  return !changesLength() && isSingleSourceMask(ShuffleMask);
2134  }
2135 
2136  /// Return true if this shuffle mask chooses elements from exactly one source
2137  /// vector without lane crossings. A shuffle using this mask is not
2138  /// necessarily a no-op because it may change the number of elements from its
2139  /// input vectors or it may provide demanded bits knowledge via undef lanes.
2140  /// Example: <undef,undef,2,3>
2141  static bool isIdentityMask(ArrayRef<int> Mask);
2142  static bool isIdentityMask(const Constant *Mask) {
2143  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2144  SmallVector<int, 16> MaskAsInts;
2145  getShuffleMask(Mask, MaskAsInts);
2146  return isIdentityMask(MaskAsInts);
2147  }
2148 
2149  /// Return true if this shuffle chooses elements from exactly one source
2150  /// vector without lane crossings and does not change the number of elements
2151  /// from its input vectors.
2152  /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2153  bool isIdentity() const {
2154  return !changesLength() && isIdentityMask(ShuffleMask);
2155  }
2156 
2157  /// Return true if this shuffle lengthens exactly one source vector with
2158  /// undefs in the high elements.
2159  bool isIdentityWithPadding() const;
2160 
2161  /// Return true if this shuffle extracts the first N elements of exactly one
2162  /// source vector.
2163  bool isIdentityWithExtract() const;
2164 
2165  /// Return true if this shuffle concatenates its 2 source vectors. This
2166  /// returns false if either input is undefined. In that case, the shuffle is
2167  /// is better classified as an identity with padding operation.
2168  bool isConcat() const;
2169 
2170  /// Return true if this shuffle mask chooses elements from its source vectors
2171  /// without lane crossings. A shuffle using this mask would be
2172  /// equivalent to a vector select with a constant condition operand.
2173  /// Example: <4,1,6,undef>
2174  /// This returns false if the mask does not choose from both input vectors.
2175  /// In that case, the shuffle is better classified as an identity shuffle.
2176  /// This assumes that vector operands are the same length as the mask
2177  /// (a length-changing shuffle can never be equivalent to a vector select).
2178  static bool isSelectMask(ArrayRef<int> Mask);
2179  static bool isSelectMask(const Constant *Mask) {
2180  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2181  SmallVector<int, 16> MaskAsInts;
2182  getShuffleMask(Mask, MaskAsInts);
2183  return isSelectMask(MaskAsInts);
2184  }
2185 
2186  /// Return true if this shuffle chooses elements from its source vectors
2187  /// without lane crossings and all operands have the same number of elements.
2188  /// In other words, this shuffle is equivalent to a vector select with a
2189  /// constant condition operand.
2190  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2191  /// This returns false if the mask does not choose from both input vectors.
2192  /// In that case, the shuffle is better classified as an identity shuffle.
2193  /// TODO: Optionally allow length-changing shuffles.
2194  bool isSelect() const {
2195  return !changesLength() && isSelectMask(ShuffleMask);
2196  }
2197 
2198  /// Return true if this shuffle mask swaps the order of elements from exactly
2199  /// one source vector.
2200  /// Example: <7,6,undef,4>
2201  /// This assumes that vector operands are the same length as the mask.
2202  static bool isReverseMask(ArrayRef<int> Mask);
2203  static bool isReverseMask(const Constant *Mask) {
2204  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2205  SmallVector<int, 16> MaskAsInts;
2206  getShuffleMask(Mask, MaskAsInts);
2207  return isReverseMask(MaskAsInts);
2208  }
2209 
2210  /// Return true if this shuffle swaps the order of elements from exactly
2211  /// one source vector.
2212  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2213  /// TODO: Optionally allow length-changing shuffles.
2214  bool isReverse() const {
2215  return !changesLength() && isReverseMask(ShuffleMask);
2216  }
2217 
2218  /// Return true if this shuffle mask chooses all elements with the same value
2219  /// as the first element of exactly one source vector.
2220  /// Example: <4,undef,undef,4>
2221  /// This assumes that vector operands are the same length as the mask.
2222  static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2223  static bool isZeroEltSplatMask(const Constant *Mask) {
2224  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2225  SmallVector<int, 16> MaskAsInts;
2226  getShuffleMask(Mask, MaskAsInts);
2227  return isZeroEltSplatMask(MaskAsInts);
2228  }
2229 
2230  /// Return true if all elements of this shuffle are the same value as the
2231  /// first element of exactly one source vector without changing the length
2232  /// of that vector.
2233  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2234  /// TODO: Optionally allow length-changing shuffles.
2235  /// TODO: Optionally allow splats from other elements.
2236  bool isZeroEltSplat() const {
2237  return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2238  }
2239 
2240  /// Return true if this shuffle mask is a transpose mask.
2241  /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2242  /// even- or odd-numbered vector elements from two n-dimensional source
2243  /// vectors and write each result into consecutive elements of an
2244  /// n-dimensional destination vector. Two shuffles are necessary to complete
2245  /// the transpose, one for the even elements and another for the odd elements.
2246  /// This description closely follows how the TRN1 and TRN2 AArch64
2247  /// instructions operate.
2248  ///
2249  /// For example, a simple 2x2 matrix can be transposed with:
2250  ///
2251  /// ; Original matrix
2252  /// m0 = < a, b >
2253  /// m1 = < c, d >
2254  ///
2255  /// ; Transposed matrix
2256  /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2257  /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2258  ///
2259  /// For matrices having greater than n columns, the resulting nx2 transposed
2260  /// matrix is stored in two result vectors such that one vector contains
2261  /// interleaved elements from all the even-numbered rows and the other vector
2262  /// contains interleaved elements from all the odd-numbered rows. For example,
2263  /// a 2x4 matrix can be transposed with:
2264  ///
2265  /// ; Original matrix
2266  /// m0 = < a, b, c, d >
2267  /// m1 = < e, f, g, h >
2268  ///
2269  /// ; Transposed matrix
2270  /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2271  /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2272  static bool isTransposeMask(ArrayRef<int> Mask);
2273  static bool isTransposeMask(const Constant *Mask) {
2274  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2275  SmallVector<int, 16> MaskAsInts;
2276  getShuffleMask(Mask, MaskAsInts);
2277  return isTransposeMask(MaskAsInts);
2278  }
2279 
2280  /// Return true if this shuffle transposes the elements of its inputs without
2281  /// changing the length of the vectors. This operation may also be known as a
2282  /// merge or interleave. See the description for isTransposeMask() for the
2283  /// exact specification.
2284  /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2285  bool isTranspose() const {
2286  return !changesLength() && isTransposeMask(ShuffleMask);
2287  }
2288 
2289  /// Return true if this shuffle mask is an extract subvector mask.
2290  /// A valid extract subvector mask returns a smaller vector from a single
2291  /// source operand. The base extraction index is returned as well.
2292  static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2293  int &Index);
2294  static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2295  int &Index) {
2296  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2297  // Not possible to express a shuffle mask for a scalable vector for this
2298  // case.
2299  if (isa<ScalableVectorType>(Mask->getType()))
2300  return false;
2301  SmallVector<int, 16> MaskAsInts;
2302  getShuffleMask(Mask, MaskAsInts);
2303  return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2304  }
2305 
2306  /// Return true if this shuffle mask is an extract subvector mask.
2307  bool isExtractSubvectorMask(int &Index) const {
2308  // Not possible to express a shuffle mask for a scalable vector for this
2309  // case.
2310  if (isa<ScalableVectorType>(getType()))
2311  return false;
2312 
2313  int NumSrcElts =
2314  cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2315  return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2316  }
2317 
2318  /// Return true if this shuffle mask is an insert subvector mask.
2319  /// A valid insert subvector mask inserts the lowest elements of a second
2320  /// source operand into an in-place first source operand operand.
2321  /// Both the sub vector width and the insertion index is returned.
2322  static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2323  int &NumSubElts, int &Index);
2324  static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2325  int &NumSubElts, int &Index) {
2326  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2327  // Not possible to express a shuffle mask for a scalable vector for this
2328  // case.
2329  if (isa<ScalableVectorType>(Mask->getType()))
2330  return false;
2331  SmallVector<int, 16> MaskAsInts;
2332  getShuffleMask(Mask, MaskAsInts);
2333  return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2334  }
2335 
2336  /// Return true if this shuffle mask is an insert subvector mask.
2337  bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2338  // Not possible to express a shuffle mask for a scalable vector for this
2339  // case.
2340  if (isa<ScalableVectorType>(getType()))
2341  return false;
2342 
2343  int NumSrcElts =
2344  cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2345  return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2346  }
2347 
2348  /// Return true if this shuffle mask replicates each of the \p VF elements
2349  /// in a vector \p ReplicationFactor times.
2350  /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2351  /// <0,0,0,1,1,1,2,2,2,3,3,3>
2352  static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2353  int &VF);
2354  static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2355  int &VF) {
2356  assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2357  // Not possible to express a shuffle mask for a scalable vector for this
2358  // case.
2359  if (isa<ScalableVectorType>(Mask->getType()))
2360  return false;
2361  SmallVector<int, 16> MaskAsInts;
2362  getShuffleMask(Mask, MaskAsInts);
2363  return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2364  }
2365 
2366  /// Return true if this shuffle mask is a replication mask.
2367  bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2368 
2369  /// Change values in a shuffle permute mask assuming the two vector operands
2370  /// of length InVecNumElts have swapped position.
2372  unsigned InVecNumElts) {
2373  for (int &Idx : Mask) {
2374  if (Idx == -1)
2375  continue;
2376  Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2377  assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2378  "shufflevector mask index out of range");
2379  }
2380  }
2381 
2382  // Methods for support type inquiry through isa, cast, and dyn_cast:
2383  static bool classof(const Instruction *I) {
2384  return I->getOpcode() == Instruction::ShuffleVector;
2385  }
2386  static bool classof(const Value *V) {
2387  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2388  }
2389 };
2390 
2391 template <>
2393  : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2394 
2396 
2397 //===----------------------------------------------------------------------===//
2398 // ExtractValueInst Class
2399 //===----------------------------------------------------------------------===//
2400 
2401 /// This instruction extracts a struct member or array
2402 /// element value from an aggregate value.
2403 ///
2405  SmallVector<unsigned, 4> Indices;
2406 
2407  ExtractValueInst(const ExtractValueInst &EVI);
2408 
2409  /// Constructors - Create a extractvalue instruction with a base aggregate
2410  /// value and a list of indices. The first ctor can optionally insert before
2411  /// an existing instruction, the second appends the new instruction to the
2412  /// specified BasicBlock.
2413  inline ExtractValueInst(Value *Agg,
2414  ArrayRef<unsigned> Idxs,
2415  const Twine &NameStr,
2416  Instruction *InsertBefore);
2417  inline ExtractValueInst(Value *Agg,
2418  ArrayRef<unsigned> Idxs,
2419  const Twine &NameStr, BasicBlock *InsertAtEnd);
2420 
2421  void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2422 
2423 protected:
2424  // Note: Instruction needs to be a friend here to call cloneImpl.
2425  friend class Instruction;
2426 
2427  ExtractValueInst *cloneImpl() const;
2428 
2429 public:
2431  ArrayRef<unsigned> Idxs,
2432  const Twine &NameStr = "",
2433  Instruction *InsertBefore = nullptr) {
2434  return new
2435  ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2436  }
2437 
2439  ArrayRef<unsigned> Idxs,
2440  const Twine &NameStr,
2441  BasicBlock *InsertAtEnd) {
2442  return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2443  }
2444 
2445  /// Returns the type of the element that would be extracted
2446  /// with an extractvalue instruction with the specified parameters.
2447  ///
2448  /// Null is returned if the indices are invalid for the specified type.
2449  static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2450 
2451  using idx_iterator = const unsigned*;
2452 
2453  inline idx_iterator idx_begin() const { return Indices.begin(); }
2454  inline idx_iterator idx_end() const { return Indices.end(); }
2456  return make_range(idx_begin(), idx_end());
2457  }
2458 
2460  return getOperand(0);
2461  }
2462  const Value *getAggregateOperand() const {
2463  return getOperand(0);
2464  }
2465  static unsigned getAggregateOperandIndex() {
2466  return 0U; // get index for modifying correct operand
2467  }
2468 
2470  return Indices;
2471  }
2472 
2473  unsigned getNumIndices() const {
2474  return (unsigned)Indices.size();
2475  }
2476 
2477  bool hasIndices() const {
2478  return true;
2479  }
2480 
2481  // Methods for support type inquiry through isa, cast, and dyn_cast:
2482  static bool classof(const Instruction *I) {
2483  return I->getOpcode() == Instruction::ExtractValue;
2484  }
2485  static bool classof(const Value *V) {
2486  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2487  }
2488 };
2489 
2490 ExtractValueInst::ExtractValueInst(Value *Agg,
2491  ArrayRef<unsigned> Idxs,
2492  const Twine &NameStr,
2493  Instruction *InsertBefore)
2494  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2495  ExtractValue, Agg, InsertBefore) {
2496  init(Idxs, NameStr);
2497 }
2498 
2499 ExtractValueInst::ExtractValueInst(Value *Agg,
2500  ArrayRef<unsigned> Idxs,
2501  const Twine &NameStr,
2502  BasicBlock *InsertAtEnd)
2503  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2504  ExtractValue, Agg, InsertAtEnd) {
2505  init(Idxs, NameStr);
2506 }
2507 
2508 //===----------------------------------------------------------------------===//
2509 // InsertValueInst Class
2510 //===----------------------------------------------------------------------===//
2511 
2512 /// This instruction inserts a struct field of array element
2513 /// value into an aggregate value.
2514 ///
2516  SmallVector<unsigned, 4> Indices;
2517 
2518  InsertValueInst(const InsertValueInst &IVI);
2519 
2520  /// Constructors - Create a insertvalue instruction with a base aggregate
2521  /// value, a value to insert, and a list of indices. The first ctor can
2522  /// optionally insert before an existing instruction, the second appends
2523  /// the new instruction to the specified BasicBlock.
2524  inline InsertValueInst(Value *Agg, Value *Val,
2525  ArrayRef<unsigned> Idxs,
2526  const Twine &NameStr,
2527  Instruction *InsertBefore);
2528  inline InsertValueInst(Value *Agg, Value *Val,
2529  ArrayRef<unsigned> Idxs,
2530  const Twine &NameStr, BasicBlock *InsertAtEnd);
2531 
2532  /// Constructors - These two constructors are convenience methods because one
2533  /// and two index insertvalue instructions are so common.
2534  InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2535  const Twine &NameStr = "",
2536  Instruction *InsertBefore = nullptr);
2537  InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2538  BasicBlock *InsertAtEnd);
2539 
2540  void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2541  const Twine &NameStr);
2542 
2543 protected:
2544  // Note: Instruction needs to be a friend here to call cloneImpl.
2545  friend class Instruction;
2546 
2547  InsertValueInst *cloneImpl() const;
2548 
2549 public:
2550  // allocate space for exactly two operands
2551  void *operator new(size_t S) { return User::operator new(S, 2); }
2552  void operator delete(void *Ptr) { User::operator delete(Ptr); }
2553 
2554  static InsertValueInst *Create(Value *Agg, Value *Val,
2555  ArrayRef<unsigned> Idxs,
2556  const Twine &NameStr = "",
2557  Instruction *InsertBefore = nullptr) {
2558  return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2559  }
2560 
2561  static InsertValueInst *Create(Value *Agg, Value *Val,
2562  ArrayRef<unsigned> Idxs,
2563  const Twine &NameStr,
2564  BasicBlock *InsertAtEnd) {
2565  return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2566  }
2567 
2568  /// Transparently provide more efficient getOperand methods.
2570 
2571  using idx_iterator = const unsigned*;
2572 
2573  inline idx_iterator idx_begin() const { return Indices.begin(); }
2574  inline idx_iterator idx_end() const { return Indices.end(); }
2576  return make_range(idx_begin(), idx_end());
2577  }
2578 
2580  return getOperand(0);
2581  }
2582  const Value *getAggregateOperand() const {
2583  return getOperand(0);
2584  }
2585  static unsigned getAggregateOperandIndex() {
2586  return 0U; // get index for modifying correct operand
2587  }
2588 
2590  return getOperand(1);
2591  }
2593  return getOperand(1);
2594  }
2595  static unsigned getInsertedValueOperandIndex() {
2596  return 1U; // get index for modifying correct operand
2597  }
2598 
2600  return Indices;
2601  }
2602 
2603  unsigned getNumIndices() const {
2604  return (unsigned)Indices.size();
2605  }
2606 
2607  bool hasIndices() const {
2608  return true;
2609  }
2610 
2611  // Methods for support type inquiry through isa, cast, and dyn_cast:
2612  static bool classof(const Instruction *I) {
2613  return I->getOpcode() == Instruction::InsertValue;
2614  }
2615  static bool classof(const Value *V) {
2616  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2617  }
2618 };
2619 
2620 template <>
2622  public FixedNumOperandTraits<InsertValueInst, 2> {
2623 };
2624 
2625 InsertValueInst::InsertValueInst(Value *Agg,
2626  Value *Val,
2627  ArrayRef<unsigned> Idxs,
2628  const Twine &NameStr,
2629  Instruction *InsertBefore)
2630  : Instruction(Agg->getType(), InsertValue,
2631  OperandTraits<InsertValueInst>::op_begin(this),
2632  2, InsertBefore) {
2633  init(Agg, Val, Idxs, NameStr);
2634 }
2635 
2636 InsertValueInst::InsertValueInst(Value *Agg,
2637  Value *Val,
2638  ArrayRef<unsigned> Idxs,
2639  const Twine &NameStr,
2640  BasicBlock *InsertAtEnd)
2641  : Instruction(Agg->getType(), InsertValue,
2642  OperandTraits<InsertValueInst>::op_begin(this),
2643  2, InsertAtEnd) {
2644  init(Agg, Val, Idxs, NameStr);
2645 }
2646 
2648 
2649 //===----------------------------------------------------------------------===//
2650 // PHINode Class
2651 //===----------------------------------------------------------------------===//
2652 
2653 // PHINode - The PHINode class is used to represent the magical mystical PHI
2654 // node, that can not exist in nature, but can be synthesized in a computer
2655 // scientist's overactive imagination.
2656 //
2657 class PHINode : public Instruction {
2658  /// The number of operands actually allocated. NumOperands is
2659  /// the number actually in use.
2660  unsigned ReservedSpace;
2661 
2662  PHINode(const PHINode &PN);
2663 
2664  explicit PHINode(Type *Ty, unsigned NumReservedValues,
2665  const Twine &NameStr = "",
2666  Instruction *InsertBefore = nullptr)
2667  : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2668  ReservedSpace(NumReservedValues) {
2669  assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2670  setName(NameStr);
2671  allocHungoffUses(ReservedSpace);
2672  }
2673 
2674  PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2675  BasicBlock *InsertAtEnd)
2676  : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2677  ReservedSpace(NumReservedValues) {
2678  assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2679  setName(NameStr);
2680  allocHungoffUses(ReservedSpace);
2681  }
2682 
2683 protected:
2684  // Note: Instruction needs to be a friend here to call cloneImpl.
2685  friend class Instruction;
2686 
2687  PHINode *cloneImpl() const;
2688 
2689  // allocHungoffUses - this is more complicated than the generic
2690  // User::allocHungoffUses, because we have to allocate Uses for the incoming
2691  // values and pointers to the incoming blocks, all in one allocation.
2692  void allocHungoffUses(unsigned N) {
2693  User::allocHungoffUses(N, /* IsPhi */ true);
2694  }
2695 
2696 public:
2697  /// Constructors - NumReservedValues is a hint for the number of incoming
2698  /// edges that this phi node will have (use 0 if you really have no idea).
2699  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2700  const Twine &NameStr = "",
2701  Instruction *InsertBefore = nullptr) {
2702  return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2703  }
2704 
2705  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2706  const Twine &NameStr, BasicBlock *InsertAtEnd) {
2707  return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2708  }
2709 
2710  /// Provide fast operand accessors
2712 
2713  // Block iterator interface. This provides access to the list of incoming
2714  // basic blocks, which parallels the list of incoming values.
2715 
2718 
2720  return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2721  }
2722 
2724  return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2725  }
2726 
2728  return block_begin() + getNumOperands();
2729  }
2730 
2732  return block_begin() + getNumOperands();
2733  }
2734 
2736  return make_range(block_begin(), block_end());
2737  }
2738 
2740  return make_range(block_begin(), block_end());
2741  }
2742 
2743  op_range incoming_values() { return operands(); }
2744 
2745  const_op_range incoming_values() const { return operands(); }
2746 
2747  /// Return the number of incoming edges
2748  ///
2749  unsigned getNumIncomingValues() const { return getNumOperands(); }
2750 
2751  /// Return incoming value number x
2752  ///
2753  Value *getIncomingValue(unsigned i) const {
2754  return getOperand(i);
2755  }
2756  void setIncomingValue(unsigned i, Value *V) {
2757  assert(V && "PHI node got a null value!");
2758  assert(getType() == V->getType() &&
2759  "All operands to PHI node must be the same type as the PHI node!");
2760  setOperand(i, V);
2761  }
2762 
2763  static unsigned getOperandNumForIncomingValue(unsigned i) {
2764  return i;
2765  }
2766 
2767  static unsigned getIncomingValueNumForOperand(unsigned i) {
2768  return i;
2769  }
2770 
2771  /// Return incoming basic block number @p i.
2772  ///
2773  BasicBlock *getIncomingBlock(unsigned i) const {
2774  return block_begin()[i];
2775  }
2776 
2777  /// Return incoming basic block corresponding
2778  /// to an operand of the PHI.
2779  ///
2780  BasicBlock *getIncomingBlock(const Use &U) const {
2781  assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2782  return getIncomingBlock(unsigned(&U - op_begin()));
2783  }
2784 
2785  /// Return incoming basic block corresponding
2786  /// to value use iterator.
2787  ///
2789  return getIncomingBlock(I.getUse());
2790  }
2791 
2792  void setIncomingBlock(unsigned i, BasicBlock *BB) {
2793  assert(BB && "PHI node got a null basic block!");
2794  block_begin()[i] = BB;
2795  }
2796 
2797  /// Replace every incoming basic block \p Old to basic block \p New.
2799  assert(New && Old && "PHI node got a null basic block!");
2800  for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2801  if (getIncomingBlock(Op) == Old)
2802  setIncomingBlock(Op, New);
2803  }
2804 
2805  /// Add an incoming value to the end of the PHI list
2806  ///
2808  if (getNumOperands() == ReservedSpace)
2809  growOperands(); // Get more space!
2810  // Initialize some new operands.
2811  setNumHungOffUseOperands(getNumOperands() + 1);
2812  setIncomingValue(getNumOperands() - 1, V);
2813  setIncomingBlock(getNumOperands() - 1, BB);
2814  }
2815 
2816  /// Remove an incoming value. This is useful if a
2817  /// predecessor basic block is deleted. The value removed is returned.
2818  ///
2819  /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2820  /// is true), the PHI node is destroyed and any uses of it are replaced with
2821  /// dummy values. The only time there should be zero incoming values to a PHI
2822  /// node is when the block is dead, so this strategy is sound.
2823  ///
2824  Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2825 
2826  Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2827  int Idx = getBasicBlockIndex(BB);
2828  assert(Idx >= 0 && "Invalid basic block argument to remove!");
2829  return removeIncomingValue(Idx, DeletePHIIfEmpty);
2830  }
2831 
2832  /// Return the first index of the specified basic
2833  /// block in the value list for this PHI. Returns -1 if no instance.
2834  ///
2835  int getBasicBlockIndex(const BasicBlock *BB) const {
2836  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2837  if (block_begin()[i] == BB)
2838  return i;
2839  return -1;
2840  }
2841 
2843  int Idx = getBasicBlockIndex(BB);
2844  assert(Idx >= 0 && "Invalid basic block argument!");
2845  return getIncomingValue(Idx);
2846  }
2847 
2848  /// Set every incoming value(s) for block \p BB to \p V.
2850  assert(BB && "PHI node got a null basic block!");
2851  bool Found = false;
2852  for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2853  if (getIncomingBlock(Op) == BB) {
2854  Found = true;
2855  setIncomingValue(Op, V);
2856  }
2857  (void)Found;
2858  assert(Found && "Invalid basic block argument to set!");
2859  }
2860 
2861  /// If the specified PHI node always merges together the
2862  /// same value, return the value, otherwise return null.
2863  Value *hasConstantValue() const;
2864 
2865  /// Whether the specified PHI node always merges
2866  /// together the same value, assuming undefs are equal to a unique
2867  /// non-undef value.
2868  bool hasConstantOrUndefValue() const;
2869 
2870  /// If the PHI node is complete which means all of its parent's predecessors
2871  /// have incoming value in this PHI, return true, otherwise return false.
2872  bool isComplete() const {
2874  [this](const BasicBlock *Pred) {
2875  return getBasicBlockIndex(Pred) >= 0;
2876  });
2877  }
2878 
2879  /// Methods for support type inquiry through isa, cast, and dyn_cast:
2880  static bool classof(const Instruction *I) {
2881  return I->getOpcode() == Instruction::PHI;
2882  }
2883  static bool classof(const Value *V) {
2884  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2885  }
2886 
2887 private:
2888  void growOperands();
2889 };
2890 
2891 template <>
2893 };
2894 
2896 
2897 //===----------------------------------------------------------------------===//
2898 // LandingPadInst Class
2899 //===----------------------------------------------------------------------===//
2900 
2901 //===---------------------------------------------------------------------------
2902 /// The landingpad instruction holds all of the information
2903 /// necessary to generate correct exception handling. The landingpad instruction
2904 /// cannot be moved from the top of a landing pad block, which itself is
2905 /// accessible only from the 'unwind' edge of an invoke. This uses the
2906 /// SubclassData field in Value to store whether or not the landingpad is a
2907 /// cleanup.
2908 ///
2909 class LandingPadInst : public Instruction {
2910  using CleanupField = BoolBitfieldElementT<0>;
2911 
2912  /// The number of operands actually allocated. NumOperands is
2913  /// the number actually in use.
2914  unsigned ReservedSpace;
2915 
2916  LandingPadInst(const LandingPadInst &LP);
2917 
2918 public:
2920 
2921 private:
2922  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2923  const Twine &NameStr, Instruction *InsertBefore);
2924  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2925  const Twine &NameStr, BasicBlock *InsertAtEnd);
2926 
2927  // Allocate space for exactly zero operands.
2928  void *operator new(size_t S) { return User::operator new(S); }
2929 
2930  void growOperands(unsigned Size);
2931  void init(unsigned NumReservedValues, const Twine &NameStr);
2932 
2933 protected:
2934  // Note: Instruction needs to be a friend here to call cloneImpl.
2935  friend class Instruction;
2936 
2937  LandingPadInst *cloneImpl() const;
2938 
2939 public:
2940  void operator delete(void *Ptr) { User::operator delete(Ptr); }
2941 
2942  /// Constructors - NumReservedClauses is a hint for the number of incoming
2943  /// clauses that this landingpad will have (use 0 if you really have no idea).
2944  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2945  const Twine &NameStr = "",
2946  Instruction *InsertBefore = nullptr);
2947  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2948  const Twine &NameStr, BasicBlock *InsertAtEnd);
2949 
2950  /// Provide fast operand accessors
2952 
2953  /// Return 'true' if this landingpad instruction is a
2954  /// cleanup. I.e., it should be run when unwinding even if its landing pad
2955  /// doesn't catch the exception.
2956  bool isCleanup() const { return getSubclassData<CleanupField>(); }
2957 
2958  /// Indicate that this landingpad instruction is a cleanup.
2959  void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2960 
2961  /// Add a catch or filter clause to the landing pad.
2962  void addClause(Constant *ClauseVal);
2963 
2964  /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2965  /// determine what type of clause this is.
2966  Constant *getClause(unsigned Idx) const {
2967  return cast<Constant>(getOperandList()[Idx]);
2968  }
2969 
2970  /// Return 'true' if the clause and index Idx is a catch clause.
2971  bool isCatch(unsigned Idx) const {
2972  return !isa<ArrayType>(getOperandList()[Idx]->getType());
2973  }
2974 
2975  /// Return 'true' if the clause and index Idx is a filter clause.
2976  bool isFilter(unsigned Idx) const {
2977  return isa<ArrayType>(getOperandList()[Idx]->getType());
2978  }
2979 
2980  /// Get the number of clauses for this landing pad.
2981  unsigned getNumClauses() const { return getNumOperands(); }
2982 
2983  /// Grow the size of the operand list to accommodate the new
2984  /// number of clauses.
2985  void reserveClauses(unsigned Size) { growOperands(Size); }
2986 
2987  // Methods for support type inquiry through isa, cast, and dyn_cast:
2988  static bool classof(const Instruction *I) {
2989  return I->getOpcode() == Instruction::LandingPad;
2990  }
2991  static bool classof(const Value *V) {
2992  return isa<Instruction>(V) && classof(cast<Instruction>(V));
2993  }
2994 };
2995 
2996 template <>
2998 };
2999 
3001 
3002 //===----------------------------------------------------------------------===//
3003 // ReturnInst Class
3004 //===----------------------------------------------------------------------===//
3005 
3006 //===---------------------------------------------------------------------------
3007 /// Return a value (possibly void), from a function. Execution
3008 /// does not continue in this function any longer.
3009 ///
3010 class ReturnInst : public Instruction {
3011  ReturnInst(const ReturnInst &RI);
3012 
3013 private:
3014  // ReturnInst constructors:
3015  // ReturnInst() - 'ret void' instruction
3016  // ReturnInst( null) - 'ret void' instruction
3017  // ReturnInst(Value* X) - 'ret X' instruction
3018  // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
3019  // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
3020  // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
3021  // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
3022  //
3023  // NOTE: If the Value* passed is of type void then the constructor behaves as
3024  // if it was passed NULL.
3025  explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3026  Instruction *InsertBefore = nullptr);
3027  ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3028  explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3029 
3030 protected:
3031  // Note: Instruction needs to be a friend here to call cloneImpl.
3032  friend class Instruction;
3033 
3034  ReturnInst *cloneImpl() const;
3035 
3036 public:
3037  static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3038  Instruction *InsertBefore = nullptr) {
3039  return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3040  }
3041 
3042  static ReturnInst* Create(LLVMContext &C, Value *retVal,
3043  BasicBlock *InsertAtEnd) {
3044  return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3045  }
3046 
3047  static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3048  return new(0) ReturnInst(C, InsertAtEnd);
3049  }
3050 
3051  /// Provide fast operand accessors
3053 
3054  /// Convenience accessor. Returns null if there is no return value.
3056  return getNumOperands() != 0 ? getOperand(0) : nullptr;
3057  }
3058 
3059  unsigned getNumSuccessors() const { return 0; }
3060 
3061  // Methods for support type inquiry through isa, cast, and dyn_cast:
3062  static bool classof(const Instruction *I) {
3063  return (I->getOpcode() == Instruction::Ret);
3064  }
3065  static bool classof(const Value *V) {
3066  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3067  }
3068 
3069 private:
3070  BasicBlock *getSuccessor(unsigned idx) const {
3071  llvm_unreachable("ReturnInst has no successors!");
3072  }
3073 
3074  void setSuccessor(unsigned idx, BasicBlock *B) {
3075  llvm_unreachable("ReturnInst has no successors!");
3076  }
3077 };
3078 
3079 template <>
3080 struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3081 };
3082 
3084 
3085 //===----------------------------------------------------------------------===//
3086 // BranchInst Class
3087 //===----------------------------------------------------------------------===//
3088 
3089 //===---------------------------------------------------------------------------
3090 /// Conditional or Unconditional Branch instruction.
3091 ///
3092 class BranchInst : public Instruction {
3093  /// Ops list - Branches are strange. The operands are ordered:
3094  /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3095  /// they don't have to check for cond/uncond branchness. These are mostly
3096  /// accessed relative from op_end().
3097  BranchInst(const BranchInst &BI);
3098  // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3099  // BranchInst(BB *B) - 'br B'
3100  // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3101  // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3102  // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3103  // BranchInst(BB* B, BB *I) - 'br B' insert at end
3104  // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3105  explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3106  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3107  Instruction *InsertBefore = nullptr);
3108  BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3109  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3110  BasicBlock *InsertAtEnd);
3111 
3112  void AssertOK();
3113 
3114 protected:
3115  // Note: Instruction needs to be a friend here to call cloneImpl.
3116  friend class Instruction;
3117 
3118  BranchInst *cloneImpl() const;
3119 
3120 public:
3121  /// Iterator type that casts an operand to a basic block.
3122  ///
3123  /// This only makes sense because the successors are stored as adjacent
3124  /// operands for branch instructions.
3126  : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3127  std::random_access_iterator_tag, BasicBlock *,
3128  ptrdiff_t, BasicBlock *, BasicBlock *> {
3129  explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3130 
3131  BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3132  BasicBlock *operator->() const { return operator*(); }
3133  };
3134 
3135  /// The const version of `succ_op_iterator`.
3137  : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3138  std::random_access_iterator_tag,
3139  const BasicBlock *, ptrdiff_t, const BasicBlock *,
3140  const BasicBlock *> {
3142  : iterator_adaptor_base(I) {}
3143 
3144  const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3145  const BasicBlock *operator->() const { return operator*(); }
3146  };
3147 
3148  static BranchInst *Create(BasicBlock *IfTrue,
3149  Instruction *InsertBefore = nullptr) {
3150  return new(1) BranchInst(IfTrue, InsertBefore);
3151  }
3152 
3153  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3154  Value *Cond, Instruction *InsertBefore = nullptr) {
3155  return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3156  }
3157 
3158  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3159  return new(1) BranchInst(IfTrue, InsertAtEnd);
3160  }
3161 
3162  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3163  Value *Cond, BasicBlock *InsertAtEnd) {
3164  return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3165  }
3166 
3167  /// Transparently provide more efficient getOperand methods.
3169 
3170  bool isUnconditional() const { return getNumOperands() == 1; }
3171  bool isConditional() const { return getNumOperands() == 3; }
3172 
3173  Value *getCondition() const {
3174  assert(isConditional() && "Cannot get condition of an uncond branch!");
3175  return Op<-3>();
3176  }
3177 
3178  void setCondition(Value *V) {
3179  assert(isConditional() && "Cannot set condition of unconditional branch!");
3180  Op<-3>() = V;
3181  }
3182 
3183  unsigned getNumSuccessors() const { return 1+isConditional(); }
3184 
3185  BasicBlock *getSuccessor(unsigned i) const {
3186  assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3187  return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3188  }
3189 
3190  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3191  assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3192  *(&Op<-1>() - idx) = NewSucc;
3193  }
3194 
3195  /// Swap the successors of this branch instruction.
3196  ///
3197  /// Swaps the successors of the branch instruction. This also swaps any
3198  /// branch weight metadata associated with the instruction so that it
3199  /// continues to map correctly to each operand.
3200  void swapSuccessors();
3201 
3203  return make_range(
3204  succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3205  succ_op_iterator(value_op_end()));
3206  }
3207 
3210  std::next(value_op_begin(), isConditional() ? 1 : 0)),
3211  const_succ_op_iterator(value_op_end()));
3212  }
3213 
3214  // Methods for support type inquiry through isa, cast, and dyn_cast:
3215  static bool classof(const Instruction *I) {
3216  return (I->getOpcode() == Instruction::Br);
3217  }
3218  static bool classof(const Value *V) {
3219  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3220  }
3221 };
3222 
3223 template <>
3224 struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3225 };
3226 
3228 
3229 //===----------------------------------------------------------------------===//
3230 // SwitchInst Class
3231 //===----------------------------------------------------------------------===//
3232 
3233 //===---------------------------------------------------------------------------
3234 /// Multiway switch
3235 ///
3236 class SwitchInst : public Instruction {
3237  unsigned ReservedSpace;
3238 
3239  // Operand[0] = Value to switch on
3240  // Operand[1] = Default basic block destination
3241  // Operand[2n ] = Value to match
3242  // Operand[2n+1] = BasicBlock to go to on match
3243  SwitchInst(const SwitchInst &SI);
3244 
3245  /// Create a new switch instruction, specifying a value to switch on and a
3246  /// default destination. The number of additional cases can be specified here
3247  /// to make memory allocation more efficient. This constructor can also
3248  /// auto-insert before another instruction.
3249  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3250  Instruction *InsertBefore);
3251 
3252  /// Create a new switch instruction, specifying a value to switch on and a
3253  /// default destination. The number of additional cases can be specified here
3254  /// to make memory allocation more efficient. This constructor also
3255  /// auto-inserts at the end of the specified BasicBlock.
3256  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3257  BasicBlock *InsertAtEnd);
3258 
3259  // allocate space for exactly zero operands
3260  void *operator new(size_t S) { return User::operator new(S); }
3261 
3262  void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3263  void growOperands();
3264 
3265 protected:
3266  // Note: Instruction needs to be a friend here to call cloneImpl.
3267  friend class Instruction;
3268 
3269  SwitchInst *cloneImpl() const;
3270 
3271 public:
3272  void operator delete(void *Ptr) { User::operator delete(Ptr); }
3273 
3274  // -2
3275  static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3276 
3277  template <typename CaseHandleT> class CaseIteratorImpl;
3278 
3279  /// A handle to a particular switch case. It exposes a convenient interface
3280  /// to both the case value and the successor block.
3281  ///
3282  /// We define this as a template and instantiate it to form both a const and
3283  /// non-const handle.
3284  template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3286  // Directly befriend both const and non-const iterators.
3287  friend class SwitchInst::CaseIteratorImpl<
3288  CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3289 
3290  protected:
3291  // Expose the switch type we're parameterized with to the iterator.
3292  using SwitchInstType = SwitchInstT;
3293 
3294  SwitchInstT *SI;
3296 
3297  CaseHandleImpl() = default;
3298  CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3299 
3300  public:
3301  /// Resolves case value for current case.
3302  ConstantIntT *getCaseValue() const {
3303  assert((unsigned)Index < SI->getNumCases() &&
3304  "Index out the number of cases.");
3305  return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3306  }
3307 
3308  /// Resolves successor for current case.
3309  BasicBlockT *getCaseSuccessor() const {
3310  assert(((unsigned)Index < SI->getNumCases() ||
3311  (unsigned)Index == DefaultPseudoIndex) &&
3312  "Index out the number of cases.");
3313  return SI->getSuccessor(getSuccessorIndex());
3314  }
3315 
3316  /// Returns number of current case.
3317  unsigned getCaseIndex() const { return Index; }
3318 
3319  /// Returns successor index for current case successor.
3320  unsigned getSuccessorIndex() const {
3321  assert(((unsigned)Index == DefaultPseudoIndex ||
3322  (unsigned)Index < SI->getNumCases()) &&
3323  "Index out the number of cases.");
3324  return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3325  }
3326 
3327  bool operator==(const CaseHandleImpl &RHS) const {
3328  assert(SI == RHS.SI && "Incompatible operators.");
3329  return Index == RHS.Index;
3330  }
3331  };
3332 
3333  using ConstCaseHandle =
3335 
3337  : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3339 
3340  public:
3342 
3343  /// Sets the new value for current case.
3344  void setValue(ConstantInt *V) const {
3345  assert((unsigned)Index < SI->getNumCases() &&
3346  "Index out the number of cases.");
3347  SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3348  }
3349 
3350  /// Sets the new successor for current case.
3351  void setSuccessor(BasicBlock *S) const {
3352  SI->setSuccessor(getSuccessorIndex(), S);
3353  }
3354  };
3355 
3356  template <typename CaseHandleT>
3357  class CaseIteratorImpl
3358  : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3359  std::random_access_iterator_tag,
3360  const CaseHandleT> {
3361  using SwitchInstT = typename CaseHandleT::SwitchInstType;
3362 
3363  CaseHandleT Case;
3364 
3365  public:
3366  /// Default constructed iterator is in an invalid state until assigned to
3367  /// a case for a particular switch.
3368  CaseIteratorImpl() = default;
3369 
3370  /// Initializes case iterator for given SwitchInst and for given
3371  /// case number.
3372  CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3373 
3374  /// Initializes case iterator for given SwitchInst and for given
3375  /// successor index.
3377  unsigned SuccessorIndex) {
3378  assert(SuccessorIndex < SI->getNumSuccessors() &&
3379  "Successor index # out of range!");
3380  return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3381  : CaseIteratorImpl(SI, DefaultPseudoIndex);
3382  }
3383 
3384  /// Support converting to the const variant. This will be a no-op for const
3385  /// variant.
3387  return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3388  }
3389 
3391  // Check index correctness after addition.
3392  // Note: Index == getNumCases() means end().
3393  assert(Case.Index + N >= 0 &&
3394  (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3395  "Case.Index out the number of cases.");
3396  Case.Index += N;
3397  return *this;
3398  }
3400  // Check index correctness after subtraction.
3401  // Note: Case.Index == getNumCases() means end().
3402  assert(Case.Index - N >= 0 &&
3403  (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3404  "Case.Index out the number of cases.");
3405  Case.Index -= N;
3406  return *this;
3407  }
3409  assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3410  return Case.Index - RHS.Case.Index;
3411  }
3412  bool operator==(const CaseIteratorImpl &RHS) const {
3413  return Case == RHS.Case;
3414  }
3415  bool operator<(const CaseIteratorImpl &RHS) const {
3416  assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3417  return Case.Index < RHS.Case.Index;
3418  }
3419  const CaseHandleT &operator*() const { return Case; }
3420  };
3421 
3424 
3426  unsigned NumCases,
3427  Instruction *InsertBefore = nullptr) {
3428  return new SwitchInst(Value, Default, NumCases, InsertBefore);
3429  }
3430 
3432  unsigned NumCases, BasicBlock *InsertAtEnd) {
3433  return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3434  }
3435 
3436  /// Provide fast operand accessors
3438 
3439  // Accessor Methods for Switch stmt
3440  Value *getCondition() const { return getOperand(0); }
3441  void setCondition(Value *V) { setOperand(0, V); }
3442 
3444  return cast<BasicBlock>(getOperand(1));
3445  }
3446 
3447  void setDefaultDest(BasicBlock *DefaultCase) {
3448  setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3449  }
3450 
3451  /// Return the number of 'cases' in this switch instruction, excluding the
3452  /// default case.
3453  unsigned getNumCases() const {
3454  return getNumOperands()/2 - 1;
3455  }
3456 
3457  /// Returns a read/write iterator that points to the first case in the
3458  /// SwitchInst.
3460  return CaseIt(this, 0);
3461  }
3462 
3463  /// Returns a read-only iterator that points to the first case in the
3464  /// SwitchInst.
3466  return ConstCaseIt(this, 0);
3467  }
3468 
3469  /// Returns a read/write iterator that points one past the last in the
3470  /// SwitchInst.
3472  return CaseIt(this, getNumCases());
3473  }
3474 
3475  /// Returns a read-only iterator that points one past the last in the
3476  /// SwitchInst.
3478  return ConstCaseIt(this, getNumCases());
3479  }
3480 
3481  /// Iteration adapter for range-for loops.
3483  return make_range(case_begin(), case_end());
3484  }
3485 
3486  /// Constant iteration adapter for range-for loops.
3488  return make_range(case_begin(), case_end());
3489  }
3490 
3491  /// Returns an iterator that points to the default case.
3492  /// Note: this iterator allows to resolve successor only. Attempt
3493  /// to resolve case value causes an assertion.
3494  /// Also note, that increment and decrement also causes an assertion and
3495  /// makes iterator invalid.
3497  return CaseIt(this, DefaultPseudoIndex);
3498  }
3500  return ConstCaseIt(this, DefaultPseudoIndex);
3501  }
3502 
3503  /// Search all of the case values for the specified constant. If it is
3504  /// explicitly handled, return the case iterator of it, otherwise return
3505  /// default case iterator to indicate that it is handled by the default
3506  /// handler.
3508  return CaseIt(
3509  this,
3510  const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3511  }
3513  ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3514  return Case.getCaseValue() == C;
3515  });
3516  if (I != case_end())
3517  return I;
3518 
3519  return case_default();
3520  }
3521 
3522  /// Finds the unique case value for a given successor. Returns null if the
3523  /// successor is not found, not unique, or is the default case.
3525  if (BB == getDefaultDest())
3526  return nullptr;
3527 
3528  ConstantInt *CI = nullptr;
3529  for (auto Case : cases()) {
3530  if (Case.getCaseSuccessor() != BB)
3531  continue;
3532 
3533  if (CI)
3534  return nullptr; // Multiple cases lead to BB.
3535 
3536  CI = Case.getCaseValue();
3537  }
3538 
3539  return CI;
3540  }
3541 
3542  /// Add an entry to the switch instruction.
3543  /// Note:
3544  /// This action invalidates case_end(). Old case_end() iterator will
3545  /// point to the added case.
3546  void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3547 
3548  /// This method removes the specified case and its successor from the switch
3549  /// instruction. Note that this operation may reorder the remaining cases at
3550  /// index idx and above.
3551  /// Note:
3552  /// This action invalidates iterators for all cases following the one removed,
3553  /// including the case_end() iterator. It returns an iterator for the next
3554  /// case.
3555  CaseIt removeCase(CaseIt I);
3556 
3557  unsigned getNumSuccessors() const { return getNumOperands()/2; }
3558  BasicBlock *getSuccessor(unsigned idx) const {
3559  assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3560  return cast<BasicBlock>(getOperand(idx*2+1));
3561  }
3562  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3563  assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3564  setOperand(idx * 2 + 1, NewSucc);
3565  }
3566 
3567  // Methods for support type inquiry through isa, cast, and dyn_cast:
3568  static bool classof(const Instruction *I) {
3569  return I->getOpcode() == Instruction::Switch;
3570  }
3571  static bool classof(const Value *V) {
3572  return isa<Instruction>(V) && classof(cast<Instruction>(V));
3573  }
3574 };
3575 
3576 /// A wrapper class to simplify modification of SwitchInst cases along with
3577 /// their prof branch_weights metadata.
3579  SwitchInst &SI;
3581  bool Changed = false;
3582 
3583 protected:
3584  static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3585 
3587 
3588  void init();
3589 
3590 public:
3592  SwitchInst *operator->() { return &SI; }
3593  SwitchInst &operator*() { return SI; }
3594  operator SwitchInst *() { return &SI; }
3595 
3597 
3599  if (Changed)
3600  SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3601  }
3602 
3603  /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3604  /// correspondent branch weight.
3606 
3607  /// Delegate the call to the underlying SwitchInst::addCase() and set the
3608  /// specified branch weight for the added case.
3609  void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3610 
3611  /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3612  /// this object to not touch the underlying SwitchInst in destructor.
3614 
3615  void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3616  CaseWeightOpt getSuccessorWeight(unsigned idx);
3617 
3618  static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3619 };
3620 
3621 template <>
3623 };
3624 
3626 
3627 //===----------------------------------------------------------------------===//
3628 // IndirectBrInst Class
3629 //===----------------------------------------------------------------------===//
3630 
3631 //===---------------------------------------------------------------------------
3632 /// Indirect Branch Instruction.
3633 ///
3634 class IndirectBrInst : public Instruction {
3635  unsigned ReservedSpace;
3636 
3637  // Operand[0] = Address to jump to
3638  // Operand[n+1] = n-th destination
3639  IndirectBrInst(const IndirectBrInst &IBI);
3640 
3641  /// Create a new indirectbr instruction, specifying an
3642  /// Address to jump to. The number of expected destinations can be specified
3643  /// here to make memory allocation more efficient. This constructor can also
3644  /// autoinsert before another instruction.
3645  IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3646 
3647  /// Create a new indirectbr instruction, specifying an
3648  /// Address to jump to. The number of expected destinations can be specified
3649  /// here to make memory allocation more efficient. This constructor also
3650  /// autoinserts at the end of the specified BasicBlock.
3651  IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3652 
3653  // allocate space for exactly zero operands
3654  void *operator new(size_t S) { return User::operator new(S); }
3655 
3656  void init(Value *Address, unsigned NumDests);
3657  void growOperands();
3658 
3659 protected:
3660  // Note: Instruction needs to be a friend here to call cloneImpl.
3661  friend class Instruction;
3662 
3663  IndirectBrInst *cloneImpl() const;
3664 
3665 public:
3666  void operator delete(void *Ptr) { User::operator delete(Ptr); }
3667 
3668  /// Iterator type that casts an operand to a basic block.
3669  ///
3670  /// This only makes sense because the successors are stored as adjacent
3671  /// operands for indirectbr instructions.
3673  : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3674  std::random_access_iterator_tag, BasicBlock *,
3675  ptrdiff_t, BasicBlock *, BasicBlock *> {
3677 
3678  BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3679  BasicBlock *operator->() const { return operator*(); }
3680  };
3681 
3682  /// The const version of `succ_op_iterator`.
3684  : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3685  std::random_access_iterator_tag,
3686  const BasicBlock *, ptrdiff_t, const BasicBlock *,
3687  const BasicBlock *> {
3689  : iterator_adaptor_base(I) {}
3690 
3691  const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3692  const BasicBlock *operator->() const { return operator*(); }
3693  };
3694 
  /// Construct an indirectbr jumping to \p Address, reserving room for
  /// \p NumDests destinations; optionally inserts before \p InsertBefore.
  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                Instruction *InsertBefore = nullptr) {
    return new IndirectBrInst(Address, NumDests, InsertBefore);
  }

  /// As above, but appends the new instruction to \p InsertAtEnd.
  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                BasicBlock *InsertAtEnd) {
    return new IndirectBrInst(Address, NumDests, InsertAtEnd);
  }
3704 
3705  /// Provide fast operand accessors.
3707 
  // Accessor Methods for IndirectBrInst instruction.
  // Operand 0 holds the address to jump to; the remaining operands are the
  // possible destinations.
  Value *getAddress() { return getOperand(0); }
  const Value *getAddress() const { return getOperand(0); }
  void setAddress(Value *V) { setOperand(0, V); }

  /// return the number of possible destinations in this
  /// indirectbr instruction.
  unsigned getNumDestinations() const { return getNumOperands()-1; }

  /// Return the specified destination. Destinations and successors are the
  /// same thing for indirectbr.
  BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
  const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }

  /// Add a destination.
  ///
  void addDestination(BasicBlock *Dest);

  /// This method removes the specified successor from the
  /// indirectbr instruction.
  void removeDestination(unsigned i);
3728 
  // Operand 0 is the jump address; every remaining operand is a successor.
  unsigned getNumSuccessors() const { return getNumOperands()-1; }
3730  BasicBlock *getSuccessor(unsigned i) const {
3731  return cast<BasicBlock>(getOperand(i+1));
3732  }
3733  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3734  setOperand(i + 1, NewSucc);
3735  }
3736 
3738  return make_range(succ_op_iterator(std::next(value_op_begin())),
3739  succ_op_iterator(value_op_end()));
3740  }
3741 
3743  return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3744  const_succ_op_iterator(value_op_end()));
3745  }
3746 
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::IndirectBr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
3754 };
3755 
3756 template <>
3758 };
3759 
3761 
3762 //===----------------------------------------------------------------------===//
3763 // InvokeInst Class
3764 //===----------------------------------------------------------------------===//
3765 
3766 /// Invoke instruction. The SubclassData field is used to hold the
3767 /// calling convention of the call.
3768 ///
3769 class InvokeInst : public CallBase {
3770  /// The number of operands for this call beyond the called function,
3771  /// arguments, and operand bundles.
3772  static constexpr int NumExtraOperands = 2;
3773 
3774  /// The index from the end of the operand array to the normal destination.
3775  static constexpr int NormalDestOpEndIdx = -3;
3776 
3777  /// The index from the end of the operand array to the unwind destination.
3778  static constexpr int UnwindDestOpEndIdx = -2;
3779 
3780  InvokeInst(const InvokeInst &BI);
3781 
3782  /// Construct an InvokeInst given a range of arguments.
3783  ///
3784  /// Construct an InvokeInst from a range of arguments
3785  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3786  BasicBlock *IfException, ArrayRef<Value *> Args,
3787  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3788  const Twine &NameStr, Instruction *InsertBefore);
3789 
3790  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3791  BasicBlock *IfException, ArrayRef<Value *> Args,
3792  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3793  const Twine &NameStr, BasicBlock *InsertAtEnd);
3794 
3795  void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3796  BasicBlock *IfException, ArrayRef<Value *> Args,
3797  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3798 
  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
    // We need one operand for the called function, plus our extra operands
    // (NumExtraOperands == 2: the normal and unwind destinations) and the
    // input operand counts provided.
    return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
  }
3805 
3806 protected:
3807  // Note: Instruction needs to be a friend here to call cloneImpl.
3808  friend class Instruction;
3809 
3810  InvokeInst *cloneImpl() const;
3811 
3812 public:
  /// Construct an invoke of \p Func without operand bundles, optionally
  /// inserting before \p InsertBefore.
  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size());
    return new (NumOperands)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
                   NameStr, InsertBefore);
  }

  /// Construct an invoke of \p Func carrying the given operand bundles.
  /// Extra descriptor bytes for the bundle bookkeeping are co-allocated.
  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = None,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
                   NameStr, InsertBefore);
  }

  /// Construct an invoke of \p Func without operand bundles and append it to
  /// \p InsertAtEnd.
  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands = ComputeNumOperands(Args.size());
    return new (NumOperands)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
                   NameStr, InsertAtEnd);
  }
3845 
3846  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3847  BasicBlock *IfException, ArrayRef<Value *> Args,
3849  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3850  int NumOperands =
3851  ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3852  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3853 
3854  return new (NumOperands, DescriptorBytes)
3855  InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3856  NameStr, InsertAtEnd);
3857  }
3858 
  /// Convenience overload: unpack a FunctionCallee into (type, callee) and
  /// delegate; no operand bundles.
  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, None, NameStr, InsertBefore);
  }

  /// Convenience overload: unpack a FunctionCallee and delegate, forwarding
  /// the operand bundles.
  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = None,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertBefore);
  }

  /// Convenience overload appending to \p InsertAtEnd; no operand bundles.
  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, NameStr, InsertAtEnd);
  }
3882 
3883  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3884  BasicBlock *IfException, ArrayRef<Value *> Args,
3886  const Twine &NameStr, BasicBlock *InsertAtEnd) {
3887  return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3888  IfException, Args, Bundles, NameStr, InsertAtEnd);
3889  }
3890 
3891  /// Create a clone of \p II with a different set of operand bundles and
3892  /// insert it before \p InsertPt.
3893  ///
3894  /// The returned invoke instruction is identical to \p II in every way except
3895  /// that the operand bundles for the new instruction are set to the operand
3896  /// bundles in \p Bundles.
3897  static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3898  Instruction *InsertPt = nullptr);
3899 
3900  // get*Dest - Return the destination basic blocks...
3902  return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3903  }
3905  return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3906  }
3908  Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3909  }
3911  Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3912  }
3913 
3914  /// Get the landingpad instruction from the landing pad
3915  /// block (the unwind destination).
3916  LandingPadInst *getLandingPadInst() const;
3917 
3918  BasicBlock *getSuccessor(unsigned i) const {
3919  assert(i < 2 && "Successor # out of range for invoke!");
3920  return i == 0 ? getNormalDest() : getUnwindDest();
3921  }
3922 
3923  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3924  assert(i < 2 && "Successor # out of range for invoke!");
3925  if (i == 0)
3926  setNormalDest(NewSucc);
3927  else
3928  setUnwindDest(NewSucc);
3929  }
3930 
  // An invoke always has exactly two successors: normal and unwind.
  unsigned getNumSuccessors() const { return 2; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Invoke);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
3948 };
3949 
// Constructor: the NumOperands operand slots end at
// OperandTraits<CallBase>::op_end(this), so backing up NumOperands yields the
// start of this instruction's operand list; init() then fills it in.
InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                       BasicBlock *IfException, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertBefore) {
  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
}
3959 
// Constructor variant that appends the new instruction to \p InsertAtEnd;
// operand-list placement is identical to the InsertBefore constructor.
InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                       BasicBlock *IfException, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertAtEnd) {
  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
}
3969 
3970 //===----------------------------------------------------------------------===//
3971 // CallBrInst Class
3972 //===----------------------------------------------------------------------===//
3973 
3974 /// CallBr instruction, tracking function calls that may not return control but
3975 /// instead transfer it to a third location. The SubclassData field is used to
3976 /// hold the calling convention of the call.
3977 ///
3978 class CallBrInst : public CallBase {
3979 
3980  unsigned NumIndirectDests;
3981 
3982  CallBrInst(const CallBrInst &BI);
3983 
3984  /// Construct a CallBrInst given a range of arguments.
3985  ///
3986  /// Construct a CallBrInst from a range of arguments
3987  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3988  ArrayRef<BasicBlock *> IndirectDests,
3990  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3991  const Twine &NameStr, Instruction *InsertBefore);
3992 
3993  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3994  ArrayRef<BasicBlock *> IndirectDests,
3996  ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3997  const Twine &NameStr, BasicBlock *InsertAtEnd);
3998 
3999  void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
4001  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
4002 
4003  /// Should the Indirect Destinations change, scan + update the Arg list.
4004  void updateArgBlockAddresses(unsigned i, BasicBlock *B);
4005 
  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
                                int NumBundleInputs = 0) {
    // We need one operand for the called function and one for the default
    // destination, plus one per indirect destination and the input operand
    // counts provided.
    return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
  }
4013 
4014 protected:
4015  // Note: Instruction needs to be a friend here to call cloneImpl.
4016  friend class Instruction;
4017 
4018  CallBrInst *cloneImpl() const;
4019 
4020 public:
  /// Construct a callbr to \p Func with the given default and indirect
  /// destinations, no operand bundles; optionally inserts before
  /// \p InsertBefore.
  static CallBrInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
    return new (NumOperands)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
                   NumOperands, NameStr, InsertBefore);
  }
4031 
4032  static CallBrInst *Create(FunctionType *Ty, Value *Func,
4033  BasicBlock *DefaultDest,
4034  ArrayRef<BasicBlock *> IndirectDests,
4036  ArrayRef<OperandBundleDef> Bundles = None,
4037  const Twine &NameStr = "",
4038  Instruction *InsertBefore = nullptr) {
4039  int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4040  CountBundleInputs(Bundles));
4041  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4042 
4043  return new (NumOperands, DescriptorBytes)
4044  CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4045  NumOperands, NameStr, InsertBefore);
4046  }
4047 
  /// Construct a callbr without operand bundles and append it to
  /// \p InsertAtEnd.
  static CallBrInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
    return new (NumOperands)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
                   NumOperands, NameStr, InsertAtEnd);
  }
4058 
4059  static CallBrInst *Create(FunctionType *Ty, Value *Func,
4060  BasicBlock *DefaultDest,
4061  ArrayRef<BasicBlock *> IndirectDests,
4064  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4065  int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4066  CountBundleInputs(Bundles));
4067  unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4068 
4069  return new (NumOperands, DescriptorBytes)
4070  CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4071  NumOperands, NameStr, InsertAtEnd);
4072  }
4073 
  /// Convenience overload: unpack a FunctionCallee into (type, callee) and
  /// delegate; no operand bundles.
  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, NameStr, InsertBefore);
  }
4081 
4082  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4083  ArrayRef<BasicBlock *> IndirectDests,
4085  ArrayRef<OperandBundleDef> Bundles = None,
4086  const Twine &NameStr = "",
4087  Instruction *InsertBefore = nullptr) {
4088  return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4089  IndirectDests, Args, Bundles, NameStr, InsertBefore);
4090  }
4091 
  /// Convenience overload appending to \p InsertAtEnd; no operand bundles.
  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, NameStr, InsertAtEnd);
  }
4099 
4101  BasicBlock *DefaultDest,
4102  ArrayRef<BasicBlock *> IndirectDests,
4105  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4106  return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4107  IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4108  }
4109 
4110  /// Create a clone of \p CBI with a different set of operand bundles and
4111  /// insert it before \p InsertPt.
4112  ///
4113  /// The returned callbr instruction is identical to \p CBI in every way
4114  /// except that the operand bundles for the new instruction are set to the
4115  /// operand bundles in \p Bundles.
4116  static CallBrInst *Create(CallBrInst *CBI,
4118  Instruction *InsertPt = nullptr);
4119 
  /// Return the number of callbr indirect dest labels.
  ///
  unsigned getNumIndirectDests() const { return NumIndirectDests; }

  /// getIndirectDestLabel - Return the i-th indirect dest label.
  ///
  // The label operands sit after the callee's arguments and any bundle
  // operands (hence the arg_size() + getNumTotalBundleOperands() + 1 offset).
  Value *getIndirectDestLabel(unsigned i) const {
    assert(i < getNumIndirectDests() && "Out of bounds!");
    return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
  }

  // Same operand as getIndirectDestLabel, fetched through its Use.
  Value *getIndirectDestLabelUse(unsigned i) const {
    assert(i < getNumIndirectDests() && "Out of bounds!");
    return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
  }
4135 
4136  // Return the destination basic blocks...
4138  return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4139  }
  // Indirect destinations occupy the getNumIndirectDests() operand slots
  // immediately before the last operand; cast_or_null tolerates a null slot.
  BasicBlock *getIndirectDest(unsigned i) const {
    return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
  }
4144  SmallVector<BasicBlock *, 16> IndirectDests;
4145  for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4146  IndirectDests.push_back(getIndirectDest(i));
4147  return IndirectDests;
4148  }
4150  *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4151  }
  // Replace indirect destination \p i, first rewriting any blockaddress
  // arguments that refer to the old block (see updateArgBlockAddresses).
  void setIndirectDest(unsigned i, BasicBlock *B) {
    updateArgBlockAddresses(i, B);
    *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
  }
4156 
4157  BasicBlock *getSuccessor(unsigned i) const {
4158  assert(i < getNumSuccessors() + 1 &&
4159  "Successor # out of range for callbr!");
4160  return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4161  }
4162 
4163  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4164  assert(i < getNumIndirectDests() + 1 &&
4165  "Successor # out of range for callbr!");
4166  return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4167  }
4168 
  // Successors = the default destination plus every indirect destination.
  unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::CallBr);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
4186 };
4187 
// Constructor: the NumOperands operand slots end at
// OperandTraits<CallBase>::op_end(this); backing up NumOperands yields the
// start of this instruction's operand list, which init() then fills in.
CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                       ArrayRef<BasicBlock *> IndirectDests,
                       ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertBefore) {
  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
}
4198 
// Constructor variant that appends the new instruction to \p InsertAtEnd;
// operand-list placement is identical to the InsertBefore constructor.
CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                       ArrayRef<BasicBlock *> IndirectDests,
                       ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertAtEnd) {
  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
}
4209 
4210 //===----------------------------------------------------------------------===//
4211 // ResumeInst Class
4212 //===----------------------------------------------------------------------===//
4213 
4214 //===---------------------------------------------------------------------------
4215 /// Resume the propagation of an exception.
4216 ///
4217 class ResumeInst : public Instruction {
4218  ResumeInst(const ResumeInst &RI);
4219 
4220  explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4221  ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4222 
4223 protected:
4224  // Note: Instruction needs to be a friend here to call cloneImpl.
4225  friend class Instruction;
4226 
4227  ResumeInst *cloneImpl() const;
4228 
4229 public:
  /// Construct a resume of exception value \p Exn; optionally inserts before
  /// \p InsertBefore. The placement new(1) allocates the single operand slot.
  static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
    return new(1) ResumeInst(Exn, InsertBefore);
  }

  /// As above, but appends the new instruction to \p InsertAtEnd.
  static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
    return new(1) ResumeInst(Exn, InsertAtEnd);
  }
4237 
4238  /// Provide fast operand accessors
4240 
  /// Convenience accessor. Returns the exception value being resumed
  /// (operand 0).
  Value *getValue() const { return Op<0>(); }

  // A resume terminates abnormally and never branches to a block.
  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Resume;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Private and unreachable: resume has no successors, so the generic
  // successor interface must never be called on it.
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("ResumeInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    llvm_unreachable("ResumeInst has no successors!");
  }
4262 };
4263 
4264 template <>
4266  public FixedNumOperandTraits<ResumeInst, 1> {
4267 };
4268 
4270 
4271 //===----------------------------------------------------------------------===//
4272 // CatchSwitchInst Class
4273 //===----------------------------------------------------------------------===//
4275  using UnwindDestField = BoolBitfieldElementT<0>;
4276 
4277  /// The number of operands actually allocated. NumOperands is
4278  /// the number actually in use.
4279  unsigned ReservedSpace;
4280 
4281  // Operand[0] = Outer scope
4282  // Operand[1] = Unwind block destination
4283  // Operand[n] = BasicBlock to go to on match
4284  CatchSwitchInst(const CatchSwitchInst &CSI);
4285 
4286  /// Create a new switch instruction, specifying a
4287  /// default destination. The number of additional handlers can be specified
4288  /// here to make memory allocation more efficient.
4289  /// This constructor can also autoinsert before another instruction.
4290  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4291  unsigned NumHandlers, const Twine &NameStr,
4292  Instruction *InsertBefore);
4293 
4294  /// Create a new switch instruction, specifying a
4295  /// default destination. The number of additional handlers can be specified
4296  /// here to make memory allocation more efficient.
4297  /// This constructor also autoinserts at the end of the specified BasicBlock.
4298  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4299  unsigned NumHandlers, const Twine &NameStr,
4300  BasicBlock *InsertAtEnd);
4301 
4302  // allocate space for exactly zero operands
4303  void *operator new(size_t S) { return User::operator new(S); }
4304 
4305  void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4306  void growOperands(unsigned Size);
4307 
4308 protected:
4309  // Note: Instruction needs to be a friend here to call cloneImpl.
4310  friend class Instruction;
4311 
4312  CatchSwitchInst *cloneImpl() const;
4313 
4314 public:
4315  void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4316 
  /// Construct a catchswitch with parent pad \p ParentPad and optional
  /// unwind destination, reserving room for \p NumHandlers handlers;
  /// optionally inserts before \p InsertBefore.
  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumHandlers,
                                 const Twine &NameStr = "",
                                 Instruction *InsertBefore = nullptr) {
    return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
                               InsertBefore);
  }

  /// As above, but appends the new instruction to \p InsertAtEnd.
  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumHandlers, const Twine &NameStr,
                                 BasicBlock *InsertAtEnd) {
    return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
                               InsertAtEnd);
  }
4331 
4332  /// Provide fast operand accessors
4334 
  // Accessor Methods for CatchSwitch stmt
  // Operand 0 holds the outer (parent) scope pad.
  Value *getParentPad() const { return getOperand(0); }
  void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }

  // Accessor methods for the optional unwind destination. Whether one is
  // present is tracked in the subclass-data bitfield (UnwindDestField).
  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
  bool unwindsToCaller() const { return !hasUnwindDest(); }
4343  if (hasUnwindDest())
4344  return cast<BasicBlock>(getOperand(1));
4345  return nullptr;
4346  }
  // Replace the unwind destination (operand 1). Only valid when an unwind
  // destination already exists; this cannot add one.
  void setUnwindDest(BasicBlock *UnwindDest) {
    assert(UnwindDest);
    assert(hasUnwindDest());
    setOperand(1, UnwindDest);
  }
4352 
4353  /// return the number of 'handlers' in this catchswitch
4354  /// instruction, except the default handler
4355  unsigned getNumHandlers() const {
4356  if (hasUnwindDest())
4357  return getNumOperands() - 2;
4358  return getNumOperands() - 1;
4359  }
4360 
private:
  // Deref functions for the mapped handler iterators: cast a handler operand
  // to its BasicBlock.
  static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
  static const BasicBlock *handler_helper(const Value *V) {
    return cast<BasicBlock>(V);
  }
4366 
4367 public:
4368  using DerefFnTy = BasicBlock *(*)(Value *);
4371  using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4372  using const_handler_iterator =
4375 
4376  /// Returns an iterator that points to the first handler in CatchSwitchInst.
4378  op_iterator It = op_begin() + 1;
4379  if (hasUnwindDest())
4380  ++It;
4381  return handler_iterator(It, DerefFnTy(handler_helper));
4382  }
4383 
4384  /// Returns an iterator that points to the first handler in the
4385  /// CatchSwitchInst.
4387  const_op_iterator It = op_begin() + 1;
4388  if (hasUnwindDest())
4389  ++It;
4390  return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4391  }
4392 
4393  /// Returns a read-only iterator that points one past the last
4394  /// handler in the CatchSwitchInst.
4396  return handler_iterator(op_end(), DerefFnTy(handler_helper));
4397  }
4398 
4399  /// Returns an iterator that points one past the last handler in the
4400  /// CatchSwitchInst.
4402  return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4403  }
4404 
4405  /// iteration adapter for range-for loops.
4407  return make_range(handler_begin(), handler_end());
4408  }
4409 
4410  /// iteration adapter for range-for loops.
4412  return make_range(handler_begin(), handler_end());
4413  }
4414 
4415  /// Add an entry to the switch instruction...
4416  /// Note:
4417  /// This action invalidates handler_end(). Old handler_end() iterator will
4418  /// point to the added handler.
4419  void addHandler(BasicBlock *Dest);
4420 
4421  void removeHandler(handler_iterator HI);
4422 
  // Operand 0 (the parent pad) is not a successor; everything after it is.
  unsigned getNumSuccessors() const { return getNumOperands() - 1; }
  BasicBlock *getSuccessor(unsigned Idx) const {
    assert(Idx < getNumSuccessors() &&
           "Successor # out of range for catchswitch!");
    return cast<BasicBlock>(getOperand(Idx + 1));
  }
  void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
    assert(Idx < getNumSuccessors() &&
           "Successor # out of range for catchswitch!");
    setOperand(Idx + 1, NewSucc);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::CatchSwitch;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
4442 };
4443 
4444 template <>
4446 
4448 
4449 //===----------------------------------------------------------------------===//
4450 // CleanupPadInst Class
4451 //===----------------------------------------------------------------------===//
4453 private:
4454  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4455  unsigned Values, const Twine &NameStr,
4456  Instruction *InsertBefore)
4457  : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4458  NameStr, InsertBefore) {}
4459  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4460  unsigned Values, const Twine &NameStr,
4461  BasicBlock *InsertAtEnd)
4462  : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4463  NameStr, InsertAtEnd) {}
4464 
4465 public:
4467  const Twine &NameStr = "",
4468  Instruction *InsertBefore = nullptr) {
4469  unsigned Values = 1 + Args.size();
4470  return new (Values)
4471  CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4472  }
4473 
4475  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4476  unsigned Values = 1 + Args.size();
4477  return new (Values)
4478  CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4479  }
4480 
4481  /// Methods for support type inquiry through isa, cast, and dyn_cast:
4482  static bool classof(const Instruction *I) {
4483  return I->getOpcode() == Instruction::CleanupPad;
4484  }
4485  static bool classof(const Value *V) {
4486  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4487  }
4488 };
4489 
4490 //===----------------------------------------------------------------------===//
4491 // CatchPadInst Class
4492 //===----------------------------------------------------------------------===//
4494 private:
4495  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4496  unsigned Values, const Twine &NameStr,
4497  Instruction *InsertBefore)
4498  : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4499  NameStr, InsertBefore) {}
4500  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4501  unsigned Values, const Twine &NameStr,
4502  BasicBlock *InsertAtEnd)
4503  : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4504  NameStr, InsertAtEnd) {}
4505 
4506 public:
4508  const Twine &NameStr = "",
4509  Instruction *InsertBefore = nullptr) {
4510  unsigned Values = 1 + Args.size();
4511  return new (Values)
4512  CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4513  }
4514 
4516  const Twine &NameStr, BasicBlock *InsertAtEnd) {
4517  unsigned Values = 1 + Args.size();
4518  return new (Values)
4519  CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4520  }
4521 
4522  /// Convenience accessors
4524  return cast<CatchSwitchInst>(Op<-1>());
4525  }
4526  void setCatchSwitch(Value *CatchSwitch) {
4527  assert(CatchSwitch);
4528  Op<-1>() = CatchSwitch;
4529  }
4530 
4531  /// Methods for support type inquiry through isa, cast, and dyn_cast:
4532  static bool classof(const Instruction *I) {
4533  return I->getOpcode() == Instruction::CatchPad;
4534  }
4535  static bool classof(const Value *V) {
4536  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4537  }
4538 };
4539 
4540 //===----------------------------------------------------------------------===//
4541 // CatchReturnInst Class
4542 //===----------------------------------------------------------------------===//
4543 
4545  CatchReturnInst(const CatchReturnInst &RI);
4546  CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4547  CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4548 
4549  void init(Value *CatchPad, BasicBlock *BB);
4550 
4551 protected:
4552  // Note: Instruction needs to be a friend here to call cloneImpl.
4553  friend class Instruction;
4554 
4555  CatchReturnInst *cloneImpl() const;
4556 
4557 public:
4559  Instruction *InsertBefore = nullptr) {
4560  assert(CatchPad);
4561  assert(BB);
4562  return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4563  }
4564 
4566  BasicBlock *InsertAtEnd) {
4567  assert(CatchPad);
4568  assert(BB);
4569  return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4570  }
4571 
4572  /// Provide fast operand accessors
4574 
4575  /// Convenience accessors.
4576  CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4577  void setCatchPad(CatchPadInst *CatchPad) {
4578  assert(CatchPad);
4579  Op<0>() = CatchPad;
4580  }
4581 
4582  BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4583  void setSuccessor(BasicBlock *NewSucc) {
4584  assert(NewSucc);
4585  Op<1>() = NewSucc;
4586  }
4587  unsigned getNumSuccessors() const { return 1; }
4588 
4589  /// Get the parentPad of this catchret's catchpad's catchswitch.
4590  /// The successor block is implicitly a member of this funclet.
4592  return getCatchPad()->getCatchSwitch()->getParentPad();
4593  }
4594 
4595  // Methods for support type inquiry through isa, cast, and dyn_cast:
4596  static bool classof(const Instruction *I) {
4597  return (I->getOpcode() == Instruction::CatchRet);
4598  }
4599  static bool classof(const Value *V) {
4600  return isa<Instruction>(V) && classof(cast<Instruction>(V));
4601  }
4602 
4603 private:
4604  BasicBlock *getSuccessor(unsigned Idx) const {
4605  assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4606  return getSuccessor();
4607  }
4608 
4609  void setSuccessor(unsigned Idx, BasicBlock *B) {
4610  assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4611  setSuccessor(B);
4612  }
4613 };
4614 
4615 template <>
4617  : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4618 
4620 
4621 //===----------------------------------------------------------------------===//
4622 // CleanupReturnInst Class
4623 //===----------------------------------------------------------------------===//
4624 
4626  using UnwindDestField = BoolBitfieldElementT<0>;
4627