1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/Twine.h"
24#include "llvm/ADT/iterator.h"
26#include "llvm/IR/CFG.h"
28#include "llvm/IR/Constant.h"
31#include "llvm/IR/InstrTypes.h"
32#include "llvm/IR/Instruction.h"
33#include "llvm/IR/Intrinsics.h"
35#include "llvm/IR/Use.h"
36#include "llvm/IR/User.h"
39#include <cassert>
40#include <cstddef>
41#include <cstdint>
42#include <iterator>
43#include <optional>
44
45namespace llvm {
46
47class APFloat;
48class APInt;
49class BasicBlock;
50class ConstantInt;
51class DataLayout;
52struct KnownBits;
53class StringRef;
54class Type;
55class Value;
56class UnreachableInst;
57
58//===----------------------------------------------------------------------===//
59// AllocaInst Class
60//===----------------------------------------------------------------------===//
61
62/// an instruction to allocate memory on the stack
63class AllocaInst : public UnaryInstruction {
64 Type *AllocatedType;
65
66 using AlignmentField = AlignmentBitfieldElementT<0>;
67 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
68 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
69 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
70 SwiftErrorField>(),
71 "Bitfields must be contiguous");
72
73protected:
74 // Note: Instruction needs to be a friend here to call cloneImpl.
75 friend class Instruction;
76
77 AllocaInst *cloneImpl() const;
78
79public:
80 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
81 const Twine &Name, InsertPosition InsertBefore);
82
83 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
84 InsertPosition InsertBefore);
85
86 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
87 const Twine &Name = "", InsertPosition InsertBefore = nullptr);
88
89 /// Return true if there is an allocation size parameter to the allocation
90 /// instruction that is not 1.
91 bool isArrayAllocation() const;
92
93 /// Get the number of elements allocated. For a simple allocation of a single
94 /// element, this will return a constant 1 value.
95 const Value *getArraySize() const { return getOperand(0); }
96 Value *getArraySize() { return getOperand(0); }
97
98 /// Overload to return most specific pointer type.
99 PointerType *getType() const {
100 return cast<PointerType>(Instruction::getType());
101 }
102
103 /// Return the address space for the allocation.
104 unsigned getAddressSpace() const {
105 return getType()->getAddressSpace();
106 }
107
108 /// Get allocation size in bytes. Returns std::nullopt if size can't be
109 /// determined, e.g. in case of a VLA.
110 std::optional<TypeSize> getAllocationSize(const DataLayout &DL) const;
111
112 /// Get allocation size in bits. Returns std::nullopt if size can't be
113 /// determined, e.g. in case of a VLA.
114 std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
115
116 /// Return the type that is being allocated by the instruction.
117 Type *getAllocatedType() const { return AllocatedType; }
118 /// for use only in special circumstances that need to generically
119 /// transform a whole instruction (eg: IR linking and vectorization).
120 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
121
122 /// Return the alignment of the memory that is being allocated by the
123 /// instruction.
124 Align getAlign() const {
125 return Align(1ULL << getSubclassData<AlignmentField>());
126 }
127
128 void setAlignment(Align Align) {
129 setSubclassData<AlignmentField>(Log2(Align));
130 }
131
132 /// Return true if this alloca is in the entry block of the function and is a
133 /// constant size. If so, the code generator will fold it into the
134 /// prolog/epilog code, so it is basically free.
135 bool isStaticAlloca() const;
136
137 /// Return true if this alloca is used as an inalloca argument to a call. Such
138 /// allocas are never considered static even if they are in the entry block.
139 bool isUsedWithInAlloca() const {
140 return getSubclassData<UsedWithInAllocaField>();
141 }
142
143 /// Specify whether this alloca is used to represent the arguments to a call.
144 void setUsedWithInAlloca(bool V) {
145 setSubclassData<UsedWithInAllocaField>(V);
146 }
147
148 /// Return true if this alloca is used as a swifterror argument to a call.
149 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
150 /// Specify whether this alloca is used to represent a swifterror.
151 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
152
153 // Methods for support type inquiry through isa, cast, and dyn_cast:
154 static bool classof(const Instruction *I) {
155 return (I->getOpcode() == Instruction::Alloca);
156 }
157 static bool classof(const Value *V) {
158 return isa<Instruction>(V) && classof(cast<Instruction>(V));
159 }
160
161private:
162 // Shadow Instruction::setInstructionSubclassData with a private forwarding
163 // method so that subclasses cannot accidentally use it.
164 template <typename Bitfield>
165 void setSubclassData(typename Bitfield::Type Value) {
166 Instruction::setSubclassData<Bitfield>(Value);
167 }
168};
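// Editor's note: a minimal usage sketch for AllocaInst (not part of this
// header). It relies only on the constructor and accessors declared above and
// assumes `Ctx` (LLVMContext), `DL` (DataLayout) and `BB` (BasicBlock*, used
// as the insertion position) already exist; all names are illustrative:
//
//   Type *I32 = Type::getInt32Ty(Ctx);
//   Value *NumElts = ConstantInt::get(Type::getInt64Ty(Ctx), 16);
//   AllocaInst *Buf = new AllocaInst(I32, DL.getAllocaAddrSpace(), NumElts,
//                                    Align(4), "buf", BB);
//   bool IsArray = Buf->isArrayAllocation();               // true: 16 elements
//   std::optional<TypeSize> Size = Buf->getAllocationSize(DL); // 64 bytes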
169
170//===----------------------------------------------------------------------===//
171// LoadInst Class
172//===----------------------------------------------------------------------===//
173
174/// An instruction for reading from memory. This uses the SubclassData field in
175/// Value to store whether or not the load is volatile.
176class LoadInst : public UnaryInstruction {
177 using VolatileField = BoolBitfieldElementT<0>;
178 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
179 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
180 static_assert(
181 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
182 "Bitfields must be contiguous");
183
184 void AssertOK();
185
186protected:
187 // Note: Instruction needs to be a friend here to call cloneImpl.
188 friend class Instruction;
189
190 LoadInst *cloneImpl() const;
191
192public:
193 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
194 InsertPosition InsertBefore);
195 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
196 InsertPosition InsertBefore);
197 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
198 Align Align, InsertPosition InsertBefore = nullptr);
199 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
200 Align Align, AtomicOrdering Order,
201 SyncScope::ID SSID = SyncScope::System,
202 InsertPosition InsertBefore = nullptr);
203
204 /// Return true if this is a load from a volatile memory location.
205 bool isVolatile() const { return getSubclassData<VolatileField>(); }
206
207 /// Specify whether this is a volatile load or not.
208 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
209
210 /// Return the alignment of the access that is being performed.
211 Align getAlign() const {
212 return Align(1ULL << (getSubclassData<AlignmentField>()));
213 }
214
215 void setAlignment(Align Align) {
216 setSubclassData<AlignmentField>(Log2(Align));
217 }
218
219 /// Returns the ordering constraint of this load instruction.
220 AtomicOrdering getOrdering() const {
221 return getSubclassData<OrderingField>();
222 }
223 /// Sets the ordering constraint of this load instruction. May not be Release
224 /// or AcquireRelease.
225 void setOrdering(AtomicOrdering Ordering) {
226 setSubclassData<OrderingField>(Ordering);
227 }
228
229 /// Returns the synchronization scope ID of this load instruction.
230 SyncScope::ID getSyncScopeID() const {
231 return SSID;
232 }
233
234 /// Sets the synchronization scope ID of this load instruction.
235 void setSyncScopeID(SyncScope::ID SSID) {
236 this->SSID = SSID;
237 }
238
239 /// Sets the ordering constraint and the synchronization scope ID of this load
240 /// instruction.
241 void setAtomic(AtomicOrdering Ordering,
242 SyncScope::ID SSID = SyncScope::System) {
243 setOrdering(Ordering);
244 setSyncScopeID(SSID);
245 }
246
247 bool isSimple() const { return !isAtomic() && !isVolatile(); }
248
249 bool isUnordered() const {
250 return (getOrdering() == AtomicOrdering::NotAtomic ||
251 getOrdering() == AtomicOrdering::Unordered) &&
252 !isVolatile();
253 }
254
255 Value *getPointerOperand() { return getOperand(0); }
256 const Value *getPointerOperand() const { return getOperand(0); }
257 static unsigned getPointerOperandIndex() { return 0U; }
258 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
259
260 /// Returns the address space of the pointer operand.
261 unsigned getPointerAddressSpace() const {
262 return getPointerOperandType()->getPointerAddressSpace();
263 }
264
265 // Methods for support type inquiry through isa, cast, and dyn_cast:
266 static bool classof(const Instruction *I) {
267 return I->getOpcode() == Instruction::Load;
268 }
269 static bool classof(const Value *V) {
270 return isa<Instruction>(V) && classof(cast<Instruction>(V));
271 }
272
273private:
274 // Shadow Instruction::setInstructionSubclassData with a private forwarding
275 // method so that subclasses cannot accidentally use it.
276 template <typename Bitfield>
277 void setSubclassData(typename Bitfield::Type Value) {
278 Instruction::setSubclassData<Bitfield>(Value);
279 }
280
281 /// The synchronization scope ID of this load instruction. Not quite enough
282 /// room in SubClassData for everything, so synchronization scope ID gets its
283 /// own field.
284 SyncScope::ID SSID;
285};
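// Editor's note: a hedged sketch of creating a LoadInst with the constructors
// above (not part of this header); assumes `Ctx`, a pointer Value `Ptr`, and a
// BasicBlock* `BB` used as the insertion position:
//
//   Type *I32 = Type::getInt32Ty(Ctx);
//   LoadInst *LI = new LoadInst(I32, Ptr, "val", /*isVolatile=*/false,
//                               Align(4), BB);
//   // Make the load atomic; Release/AcquireRelease are not valid here.
//   LI->setAtomic(AtomicOrdering::Acquire);
//   bool Simple = LI->isSimple();      // false once the load is atomic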
286
287//===----------------------------------------------------------------------===//
288// StoreInst Class
289//===----------------------------------------------------------------------===//
290
291/// An instruction for storing to memory.
292class StoreInst : public Instruction {
293 using VolatileField = BoolBitfieldElementT<0>;
294 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
295 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
296 static_assert(
297 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
298 "Bitfields must be contiguous");
299
300 void AssertOK();
301
302 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
303
304protected:
305 // Note: Instruction needs to be a friend here to call cloneImpl.
306 friend class Instruction;
307
308 StoreInst *cloneImpl() const;
309
310public:
311 StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore);
312 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
313 InsertPosition InsertBefore);
314 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
315 InsertPosition InsertBefore = nullptr);
316 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
317 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
318 InsertPosition InsertBefore = nullptr);
319
320 // allocate space for exactly two operands
321 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
322 void operator delete(void *Ptr) { User::operator delete(Ptr); }
323
324 /// Return true if this is a store to a volatile memory location.
325 bool isVolatile() const { return getSubclassData<VolatileField>(); }
326
327 /// Specify whether this is a volatile store or not.
328 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
329
330 /// Transparently provide more efficient getOperand methods.
331 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
332
333 Align getAlign() const {
334 return Align(1ULL << (getSubclassData<AlignmentField>()));
335 }
336
337 void setAlignment(Align Align) {
338 setSubclassData<AlignmentField>(Log2(Align));
339 }
340
341 /// Returns the ordering constraint of this store instruction.
342 AtomicOrdering getOrdering() const {
343 return getSubclassData<OrderingField>();
344 }
345
346 /// Sets the ordering constraint of this store instruction. May not be
347 /// Acquire or AcquireRelease.
348 void setOrdering(AtomicOrdering Ordering) {
349 setSubclassData<OrderingField>(Ordering);
350 }
351
352 /// Returns the synchronization scope ID of this store instruction.
353 SyncScope::ID getSyncScopeID() const {
354 return SSID;
355 }
356
357 /// Sets the synchronization scope ID of this store instruction.
358 void setSyncScopeID(SyncScope::ID SSID) {
359 this->SSID = SSID;
360 }
361
362 /// Sets the ordering constraint and the synchronization scope ID of this
363 /// store instruction.
364 void setAtomic(AtomicOrdering Ordering,
365 SyncScope::ID SSID = SyncScope::System) {
366 setOrdering(Ordering);
367 setSyncScopeID(SSID);
368 }
369
370 bool isSimple() const { return !isAtomic() && !isVolatile(); }
371
372 bool isUnordered() const {
373 return (getOrdering() == AtomicOrdering::NotAtomic ||
374 getOrdering() == AtomicOrdering::Unordered) &&
375 !isVolatile();
376 }
377
378 Value *getValueOperand() { return getOperand(0); }
379 const Value *getValueOperand() const { return getOperand(0); }
380
381 Value *getPointerOperand() { return getOperand(1); }
382 const Value *getPointerOperand() const { return getOperand(1); }
383 static unsigned getPointerOperandIndex() { return 1U; }
384 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
385
386 /// Returns the address space of the pointer operand.
387 unsigned getPointerAddressSpace() const {
388 return getPointerOperandType()->getPointerAddressSpace();
389 }
390
391 // Methods for support type inquiry through isa, cast, and dyn_cast:
392 static bool classof(const Instruction *I) {
393 return I->getOpcode() == Instruction::Store;
394 }
395 static bool classof(const Value *V) {
396 return isa<Instruction>(V) && classof(cast<Instruction>(V));
397 }
398
399private:
400 // Shadow Instruction::setInstructionSubclassData with a private forwarding
401 // method so that subclasses cannot accidentally use it.
402 template <typename Bitfield>
403 void setSubclassData(typename Bitfield::Type Value) {
404 Instruction::setSubclassData<Bitfield>(Value);
405 }
406
407 /// The synchronization scope ID of this store instruction. Not quite enough
408 /// room in SubClassData for everything, so synchronization scope ID gets its
409 /// own field.
410 SyncScope::ID SSID;
411};
412
413template <>
414struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
415};
416
418
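// Editor's note: a minimal StoreInst sketch using the declarations above (not
// part of this header); assumes `Val` and `Ptr` are Values of matching type
// and `BB` is a BasicBlock* insertion position:
//
//   StoreInst *SI = new StoreInst(Val, Ptr, BB);
//   SI->setAlignment(Align(4));
//   SI->setAtomic(AtomicOrdering::Release);  // Acquire/AcquireRelease invalid
//   unsigned AS = SI->getPointerAddressSpace();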
419//===----------------------------------------------------------------------===//
420// FenceInst Class
421//===----------------------------------------------------------------------===//
422
423/// An instruction for ordering other memory operations.
424class FenceInst : public Instruction {
425 using OrderingField = AtomicOrderingBitfieldElementT<0>;
426
427 constexpr static IntrusiveOperandsAllocMarker AllocMarker{0};
428
429 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
430
431protected:
432 // Note: Instruction needs to be a friend here to call cloneImpl.
433 friend class Instruction;
434
435 FenceInst *cloneImpl() const;
436
437public:
438 // Ordering may only be Acquire, Release, AcquireRelease, or
439 // SequentiallyConsistent.
440 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
441 SyncScope::ID SSID = SyncScope::System,
442 InsertPosition InsertBefore = nullptr);
443
444 // allocate space for exactly zero operands
445 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
446 void operator delete(void *Ptr) { User::operator delete(Ptr); }
447
448 /// Returns the ordering constraint of this fence instruction.
449 AtomicOrdering getOrdering() const {
450 return getSubclassData<OrderingField>();
451 }
452
453 /// Sets the ordering constraint of this fence instruction. May only be
454 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
455 void setOrdering(AtomicOrdering Ordering) {
456 setSubclassData<OrderingField>(Ordering);
457 }
458
459 /// Returns the synchronization scope ID of this fence instruction.
460 SyncScope::ID getSyncScopeID() const {
461 return SSID;
462 }
463
464 /// Sets the synchronization scope ID of this fence instruction.
465 void setSyncScopeID(SyncScope::ID SSID) {
466 this->SSID = SSID;
467 }
468
469 // Methods for support type inquiry through isa, cast, and dyn_cast:
470 static bool classof(const Instruction *I) {
471 return I->getOpcode() == Instruction::Fence;
472 }
473 static bool classof(const Value *V) {
474 return isa<Instruction>(V) && classof(cast<Instruction>(V));
475 }
476
477private:
478 // Shadow Instruction::setInstructionSubclassData with a private forwarding
479 // method so that subclasses cannot accidentally use it.
480 template <typename Bitfield>
481 void setSubclassData(typename Bitfield::Type Value) {
482 Instruction::setSubclassData<Bitfield>(Value);
483 }
484
485 /// The synchronization scope ID of this fence instruction. Not quite enough
486 /// room in SubClassData for everything, so synchronization scope ID gets its
487 /// own field.
488 SyncScope::ID SSID;
489};
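// Editor's note: fences take no operands, only an ordering and a scope. A
// hedged sketch (not part of this header), assuming `Ctx` and an insertion
// block `BB`:
//
//   FenceInst *FI = new FenceInst(Ctx, AtomicOrdering::AcquireRelease,
//                                 SyncScope::System, BB);
//   FI->setSyncScopeID(SyncScope::SingleThread); // e.g. signal-handler fence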
490
491//===----------------------------------------------------------------------===//
492// AtomicCmpXchgInst Class
493//===----------------------------------------------------------------------===//
494
495/// An instruction that atomically checks whether a
496/// specified value is in a memory location, and, if it is, stores a new value
497/// there. The value returned by this instruction is a pair containing the
498/// original value as first element, and an i1 indicating success (true) or
499/// failure (false) as second element.
500///
501class AtomicCmpXchgInst : public Instruction {
502 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
503 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
504 SyncScope::ID SSID);
505
506 template <unsigned Offset>
507 using AtomicOrderingBitfieldElement =
510
511 constexpr static IntrusiveOperandsAllocMarker AllocMarker{3};
512
513protected:
514 // Note: Instruction needs to be a friend here to call cloneImpl.
515 friend class Instruction;
516
518
519public:
520 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
521 AtomicOrdering SuccessOrdering,
522 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
523 InsertPosition InsertBefore = nullptr);
524
525 // allocate space for exactly three operands
526 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
527 void operator delete(void *Ptr) { User::operator delete(Ptr); }
528
528
529 using VolatileField = BoolBitfieldElementT<0>;
530 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
531 using SuccessOrderingField =
532 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
533 using FailureOrderingField =
534 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
535 using AlignmentField =
536 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
537 static_assert(
538 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
539 FailureOrderingField, AlignmentField>(),
540 "Bitfields must be contiguous");
541
542 /// Return the alignment of the memory that is being allocated by the
543 /// instruction.
544 Align getAlign() const {
545 return Align(1ULL << getSubclassData<AlignmentField>());
546 }
547
549 setSubclassData<AlignmentField>(Log2(Align));
550 }
551
552 /// Return true if this is a cmpxchg from a volatile memory
553 /// location.
554 ///
555 bool isVolatile() const { return getSubclassData<VolatileField>(); }
556
557 /// Specify whether this is a volatile cmpxchg.
558 ///
559 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
560
561 /// Return true if this cmpxchg may spuriously fail.
562 bool isWeak() const { return getSubclassData<WeakField>(); }
563
564 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
565
566 /// Transparently provide more efficient getOperand methods.
567 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
568
569 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
570 return Ordering != AtomicOrdering::NotAtomic &&
571 Ordering != AtomicOrdering::Unordered;
572 }
573
574 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
575 return Ordering != AtomicOrdering::NotAtomic &&
576 Ordering != AtomicOrdering::Unordered &&
577 Ordering != AtomicOrdering::AcquireRelease &&
578 Ordering != AtomicOrdering::Release;
579 }
580
581 /// Returns the success ordering constraint of this cmpxchg instruction.
582 AtomicOrdering getSuccessOrdering() const {
583 return getSubclassData<SuccessOrderingField>();
584 }
585
586 /// Sets the success ordering constraint of this cmpxchg instruction.
587 void setSuccessOrdering(AtomicOrdering Ordering) {
588 assert(isValidSuccessOrdering(Ordering) &&
589 "invalid CmpXchg success ordering");
590 setSubclassData<SuccessOrderingField>(Ordering);
591 }
592
593 /// Returns the failure ordering constraint of this cmpxchg instruction.
594 AtomicOrdering getFailureOrdering() const {
595 return getSubclassData<FailureOrderingField>();
596 }
597
598 /// Sets the failure ordering constraint of this cmpxchg instruction.
599 void setFailureOrdering(AtomicOrdering Ordering) {
600 assert(isValidFailureOrdering(Ordering) &&
601 "invalid CmpXchg failure ordering");
602 setSubclassData<FailureOrderingField>(Ordering);
603 }
604
605 /// Returns a single ordering which is at least as strong as both the
606 /// success and failure orderings for this cmpxchg.
607 AtomicOrdering getMergedOrdering() const {
608 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
609 return AtomicOrdering::SequentiallyConsistent;
610 if (getFailureOrdering() == AtomicOrdering::Acquire) {
611 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
612 return AtomicOrdering::Acquire;
613 if (getSuccessOrdering() == AtomicOrdering::Release)
614 return AtomicOrdering::AcquireRelease;
615 }
616 return getSuccessOrdering();
617 }
618
619 /// Returns the synchronization scope ID of this cmpxchg instruction.
620 SyncScope::ID getSyncScopeID() const {
621 return SSID;
622 }
623
624 /// Sets the synchronization scope ID of this cmpxchg instruction.
625 void setSyncScopeID(SyncScope::ID SSID) {
626 this->SSID = SSID;
627 }
628
629 Value *getPointerOperand() { return getOperand(0); }
630 const Value *getPointerOperand() const { return getOperand(0); }
631 static unsigned getPointerOperandIndex() { return 0U; }
632
633 Value *getCompareOperand() { return getOperand(1); }
634 const Value *getCompareOperand() const { return getOperand(1); }
635
636 Value *getNewValOperand() { return getOperand(2); }
637 const Value *getNewValOperand() const { return getOperand(2); }
638
639 /// Returns the address space of the pointer operand.
640 unsigned getPointerAddressSpace() const {
641 return getPointerOperand()->getType()->getPointerAddressSpace();
642 }
643
644 /// Returns the strongest permitted ordering on failure, given the
645 /// desired ordering on success.
646 ///
647 /// If the comparison in a cmpxchg operation fails, there is no atomic store
648 /// so release semantics cannot be provided. So this function drops explicit
649 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
650 /// operation would remain SequentiallyConsistent.
651 static AtomicOrdering
652 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
653 switch (SuccessOrdering) {
654 default:
655 llvm_unreachable("invalid cmpxchg success ordering");
656 case AtomicOrdering::Release:
657 case AtomicOrdering::Monotonic:
658 return AtomicOrdering::Monotonic;
659 case AtomicOrdering::AcquireRelease:
660 case AtomicOrdering::Acquire:
661 return AtomicOrdering::Acquire;
662 case AtomicOrdering::SequentiallyConsistent:
663 return AtomicOrdering::SequentiallyConsistent;
664 }
665 }
666
667 // Methods for support type inquiry through isa, cast, and dyn_cast:
668 static bool classof(const Instruction *I) {
669 return I->getOpcode() == Instruction::AtomicCmpXchg;
670 }
671 static bool classof(const Value *V) {
672 return isa<Instruction>(V) && classof(cast<Instruction>(V));
673 }
674
675private:
676 // Shadow Instruction::setInstructionSubclassData with a private forwarding
677 // method so that subclasses cannot accidentally use it.
678 template <typename Bitfield>
679 void setSubclassData(typename Bitfield::Type Value) {
680 Instruction::setSubclassData<Bitfield>(Value);
681 }
682
683 /// The synchronization scope ID of this cmpxchg instruction. Not quite
684 /// enough room in SubClassData for everything, so synchronization scope ID
685 /// gets its own field.
686 SyncScope::ID SSID;
687};
688
689template <>
691 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
692};
693
695
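// Editor's note: a sketch of building an i32 cmpxchg with the constructor
// above (not part of this header); assumes `Ctx`, an i32 pointer `Ptr` and a
// BasicBlock* `BB`. The result is a {i32, i1} pair: loaded value plus a
// success flag.
//
//   Type *I32 = Type::getInt32Ty(Ctx);
//   Value *Expected = ConstantInt::get(I32, 0);
//   Value *Desired = ConstantInt::get(I32, 1);
//   AtomicOrdering Succ = AtomicOrdering::SequentiallyConsistent;
//   AtomicOrdering Fail = AtomicCmpXchgInst::getStrongestFailureOrdering(Succ);
//   auto *CXI = new AtomicCmpXchgInst(Ptr, Expected, Desired, Align(4), Succ,
//                                     Fail, SyncScope::System, BB);
//   CXI->setWeak(true); // allow spurious failure, e.g. for LL/SC targets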
696//===----------------------------------------------------------------------===//
697// AtomicRMWInst Class
698//===----------------------------------------------------------------------===//
699
700/// an instruction that atomically reads a memory location,
701/// combines it with another value, and then stores the result back. Returns
702/// the old value.
703///
704class AtomicRMWInst : public Instruction {
705protected:
706 // Note: Instruction needs to be a friend here to call cloneImpl.
707 friend class Instruction;
708
709 AtomicRMWInst *cloneImpl() const;
710
711public:
712 /// This enumeration lists the possible modifications atomicrmw can make. In
713 /// the descriptions, 'p' is the pointer to the instruction's memory location,
714 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
715 /// instruction. These instructions always return 'old'.
716 enum BinOp : unsigned {
717 /// *p = v
718 Xchg,
719 /// *p = old + v
720 Add,
721 /// *p = old - v
722 Sub,
723 /// *p = old & v
724 And,
725 /// *p = ~(old & v)
726 Nand,
727 /// *p = old | v
728 Or,
729 /// *p = old ^ v
730 Xor,
731 /// *p = old >signed v ? old : v
732 Max,
733 /// *p = old <signed v ? old : v
734 Min,
735 /// *p = old >unsigned v ? old : v
736 UMax,
737 /// *p = old <unsigned v ? old : v
738 UMin,
739
740 /// *p = old + v
741 FAdd,
742
743 /// *p = old - v
744 FSub,
745
746 /// *p = maxnum(old, v)
747 /// \p maxnum matches the behavior of \p llvm.maxnum.*.
748 FMax,
749
750 /// *p = minnum(old, v)
751 /// \p minnum matches the behavior of \p llvm.minnum.*.
752 FMin,
753
754 /// Increment one up to a maximum value.
755 /// *p = (old u>= v) ? 0 : (old + 1)
756 UIncWrap,
757
758 /// Decrement one until a minimum value or zero.
759 /// *p = ((old == 0) || (old u> v)) ? v : (old - 1)
760 UDecWrap,
761
762 /// Subtract only if no unsigned overflow.
763 /// *p = (old u>= v) ? old - v : old
764 USubCond,
765
766 /// *p = usub.sat(old, v)
767 /// \p usub.sat matches the behavior of \p llvm.usub.sat.*.
768 USubSat,
769
770 FIRST_BINOP = Xchg,
771 LAST_BINOP = USubSat,
772 BAD_BINOP
773 };
774
775private:
776 template <unsigned Offset>
777 using AtomicOrderingBitfieldElement =
780
781 template <unsigned Offset>
782 using BinOpBitfieldElement =
784
785 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
786
787public:
788 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
789 AtomicOrdering Ordering, SyncScope::ID SSID,
790 InsertPosition InsertBefore = nullptr);
791
792 // allocate space for exactly two operands
793 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
794 void operator delete(void *Ptr) { User::operator delete(Ptr); }
795
795
796 using VolatileField = BoolBitfieldElementT<0>;
797 using AtomicOrderingField =
798 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
799 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
800 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
801 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
802 OperationField, AlignmentField>(),
803 "Bitfields must be contiguous");
804
805 BinOp getOperation() const { return getSubclassData<OperationField>(); }
806
807 static StringRef getOperationName(BinOp Op);
808
809 static bool isFPOperation(BinOp Op) {
810 switch (Op) {
811 case AtomicRMWInst::FAdd:
812 case AtomicRMWInst::FSub:
813 case AtomicRMWInst::FMax:
814 case AtomicRMWInst::FMin:
815 return true;
816 default:
817 return false;
818 }
819 }
820
821 void setOperation(BinOp Operation) {
822 setSubclassData<OperationField>(Operation);
823 }
824
825 /// Return the alignment of the memory that is being allocated by the
826 /// instruction.
827 Align getAlign() const {
828 return Align(1ULL << getSubclassData<AlignmentField>());
829 }
830
831 void setAlignment(Align Align) {
832 setSubclassData<AlignmentField>(Log2(Align));
833 }
834
835 /// Return true if this is a RMW on a volatile memory location.
836 ///
837 bool isVolatile() const { return getSubclassData<VolatileField>(); }
838
839 /// Specify whether this is a volatile RMW or not.
840 ///
841 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
842
843 /// Transparently provide more efficient getOperand methods.
844 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
845
846 /// Returns the ordering constraint of this rmw instruction.
847 AtomicOrdering getOrdering() const {
848 return getSubclassData<AtomicOrderingField>();
849 }
850
851 /// Sets the ordering constraint of this rmw instruction.
852 void setOrdering(AtomicOrdering Ordering) {
853 assert(Ordering != AtomicOrdering::NotAtomic &&
854 "atomicrmw instructions can only be atomic.");
855 assert(Ordering != AtomicOrdering::Unordered &&
856 "atomicrmw instructions cannot be unordered.");
857 setSubclassData<AtomicOrderingField>(Ordering);
858 }
859
860 /// Returns the synchronization scope ID of this rmw instruction.
861 SyncScope::ID getSyncScopeID() const {
862 return SSID;
863 }
864
865 /// Sets the synchronization scope ID of this rmw instruction.
866 void setSyncScopeID(SyncScope::ID SSID) {
867 this->SSID = SSID;
868 }
869
870 Value *getPointerOperand() { return getOperand(0); }
871 const Value *getPointerOperand() const { return getOperand(0); }
872 static unsigned getPointerOperandIndex() { return 0U; }
873
874 Value *getValOperand() { return getOperand(1); }
875 const Value *getValOperand() const { return getOperand(1); }
876
877 /// Returns the address space of the pointer operand.
878 unsigned getPointerAddressSpace() const {
879 return getPointerOperand()->getType()->getPointerAddressSpace();
880 }
881
882 bool isFloatingPointOperation() const {
883 return isFPOperation(getOperation());
884 }
885
886 // Methods for support type inquiry through isa, cast, and dyn_cast:
887 static bool classof(const Instruction *I) {
888 return I->getOpcode() == Instruction::AtomicRMW;
889 }
890 static bool classof(const Value *V) {
891 return isa<Instruction>(V) && classof(cast<Instruction>(V));
892 }
893
894private:
895 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
896 AtomicOrdering Ordering, SyncScope::ID SSID);
897
898 // Shadow Instruction::setInstructionSubclassData with a private forwarding
899 // method so that subclasses cannot accidentally use it.
900 template <typename Bitfield>
901 void setSubclassData(typename Bitfield::Type Value) {
902 Instruction::setSubclassData<Bitfield>(Value);
903 }
904
905 /// The synchronization scope ID of this rmw instruction. Not quite enough
906 /// room in SubClassData for everything, so synchronization scope ID gets its
907 /// own field.
908 SyncScope::ID SSID;
909};
910
911template <>
913 : public FixedNumOperandTraits<AtomicRMWInst,2> {
914};
915
917
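// Editor's note: an atomicrmw sketch (fetch-and-add) using the constructor and
// BinOp enum above (not part of this header); assumes `Ctx`, an i32 pointer
// `Ptr` and a BasicBlock* `BB`:
//
//   Type *I32 = Type::getInt32Ty(Ctx);
//   Value *One = ConstantInt::get(I32, 1);
//   auto *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr, One, Align(4),
//                                 AtomicOrdering::Monotonic,
//                                 SyncScope::System, BB);
//   // The instruction yields the *old* value; the ordering may not be
//   // NotAtomic or Unordered.
//   bool FP = RMW->isFloatingPointOperation();   // false for Add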
918//===----------------------------------------------------------------------===//
919// GetElementPtrInst Class
920//===----------------------------------------------------------------------===//
921
922// checkGEPType - Simple wrapper function to give a better assertion failure
923// message on bad indexes for a gep instruction.
924//
926 assert(Ty && "Invalid GetElementPtrInst indices for type!");
927 return Ty;
928}
929
930/// an instruction for type-safe pointer arithmetic to
931/// access elements of arrays and structs
932///
933class GetElementPtrInst : public Instruction {
934 Type *SourceElementType;
935 Type *ResultElementType;
936
938
939 /// Constructors - Create a getelementptr instruction with a base pointer an
940 /// list of indices. The first and second ctor can optionally insert before an
941 /// existing instruction, the third appends the new instruction to the
942 /// specified BasicBlock.
943 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
944 ArrayRef<Value *> IdxList, AllocInfo AllocInfo,
945 const Twine &NameStr, InsertPosition InsertBefore);
946
947 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
948
949protected:
950 // Note: Instruction needs to be a friend here to call cloneImpl.
951 friend class Instruction;
952
954
955public:
956 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
957 ArrayRef<Value *> IdxList,
958 const Twine &NameStr = "",
959 InsertPosition InsertBefore = nullptr) {
960 unsigned Values = 1 + unsigned(IdxList.size());
961 assert(PointeeType && "Must specify element type");
962 IntrusiveOperandsAllocMarker AllocMarker{Values};
963 return new (AllocMarker) GetElementPtrInst(
964 PointeeType, Ptr, IdxList, AllocMarker, NameStr, InsertBefore);
965 }
966
967 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
968 ArrayRef<Value *> IdxList, GEPNoWrapFlags NW,
969 const Twine &NameStr = "",
970 InsertPosition InsertBefore = nullptr) {
971 GetElementPtrInst *GEP =
972 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
973 GEP->setNoWrapFlags(NW);
974 return GEP;
975 }
976
977 /// Create an "inbounds" getelementptr. See the documentation for the
978 /// "inbounds" flag in LangRef.html for details.
979 static GetElementPtrInst *
981 const Twine &NameStr = "",
982 InsertPosition InsertBefore = nullptr) {
983 return Create(PointeeType, Ptr, IdxList, GEPNoWrapFlags::inBounds(),
984 NameStr, InsertBefore);
985 }
986
987 /// Transparently provide more efficient getOperand methods.
988 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
989
990 Type *getSourceElementType() const { return SourceElementType; }
991
992 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
993 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
994
995 Type *getResultElementType() const {
996 return ResultElementType;
997 }
998
999 /// Returns the address space of this instruction's pointer type.
1000 unsigned getAddressSpace() const {
1001 // Note that this is always the same as the pointer operand's address space
1002 // and that is cheaper to compute, so cheat here.
1003 return getPointerAddressSpace();
1004 }
1005
1006 /// Returns the result type of a getelementptr with the given source
1007 /// element type and indexes.
1008 ///
1009 /// Null is returned if the indices are invalid for the specified
1010 /// source element type.
1011 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1012 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1013 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1014
1015 /// Return the type of the element at the given index of an indexable
1016 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1017 ///
1018 /// Returns null if the type can't be indexed, or the given index is not
1019 /// legal for the given type.
1020 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1021 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1022
1023 inline op_iterator idx_begin() { return op_begin()+1; }
1024 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1025 inline op_iterator idx_end() { return op_end(); }
1026 inline const_op_iterator idx_end() const { return op_end(); }
1027
1028 iterator_range<op_iterator> indices() {
1029 return make_range(idx_begin(), idx_end());
1030 }
1031
1032 iterator_range<const_op_iterator> indices() const {
1033 return make_range(idx_begin(), idx_end());
1034 }
1035
1036 Value *getPointerOperand() {
1037 return getOperand(0);
1038 }
1039 const Value *getPointerOperand() const {
1040 return getOperand(0);
1041 }
1042 static unsigned getPointerOperandIndex() {
1043 return 0U; // get index for modifying correct operand.
1044 }
1045
1046 /// Method to return the pointer operand as a
1047 /// PointerType.
1048 Type *getPointerOperandType() const {
1049 return getPointerOperand()->getType();
1050 }
1051
1052 /// Returns the address space of the pointer operand.
1053 unsigned getPointerAddressSpace() const {
1054 return getPointerOperandType()->getPointerAddressSpace();
1055 }
1056
1057 /// Returns the pointer type returned by the GEP
1058 /// instruction, which may be a vector of pointers.
1059 static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
1060 // Vector GEP
1061 Type *Ty = Ptr->getType();
1062 if (Ty->isVectorTy())
1063 return Ty;
1064
1065 for (Value *Index : IdxList)
1066 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1067 ElementCount EltCount = IndexVTy->getElementCount();
1068 return VectorType::get(Ty, EltCount);
1069 }
1070 // Scalar GEP
1071 return Ty;
1072 }
1073
1074 unsigned getNumIndices() const { // Note: always non-negative
1075 return getNumOperands() - 1;
1076 }
1077
1078 bool hasIndices() const {
1079 return getNumOperands() > 1;
1080 }
1081
1082 /// Return true if all of the indices of this GEP are
1083 /// zeros. If so, the result pointer and the first operand have the same
1084 /// value, just potentially different types.
1085 bool hasAllZeroIndices() const;
1086
1087 /// Return true if all of the indices of this GEP are
1088 /// constant integers. If so, the result pointer and the first operand have
1089 /// a constant offset between them.
1090 bool hasAllConstantIndices() const;
1091
1092 /// Set nowrap flags for GEP instruction.
1093 void setNoWrapFlags(GEPNoWrapFlags NW);
1094
1095 /// Set or clear the inbounds flag on this GEP instruction.
1096 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1097 /// TODO: Remove this method in favor of setNoWrapFlags().
1098 void setIsInBounds(bool b = true);
1099
1100 /// Get the nowrap flags for the GEP instruction.
1101 GEPNoWrapFlags getNoWrapFlags() const;
1102
1103 /// Determine whether the GEP has the inbounds flag.
1104 bool isInBounds() const;
1105
1106 /// Determine whether the GEP has the nusw flag.
1107 bool hasNoUnsignedSignedWrap() const;
1108
1109 /// Determine whether the GEP has the nuw flag.
1110 bool hasNoUnsignedWrap() const;
1111
1112 /// Accumulate the constant address offset of this GEP if possible.
1113 ///
1114 /// This routine accepts an APInt into which it will accumulate the constant
1115 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1116 /// all-constant, it returns false and the value of the offset APInt is
1117 /// undefined (it is *not* preserved!). The APInt passed into this routine
1118 /// must be at least as wide as the IntPtr type for the address space of
1119 /// the base GEP pointer.
1120 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1121 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1122 SmallMapVector<Value *, APInt, 4> &VariableOffsets,
1123 APInt &ConstantOffset) const;
1124 // Methods for support type inquiry through isa, cast, and dyn_cast:
1125 static bool classof(const Instruction *I) {
1126 return (I->getOpcode() == Instruction::GetElementPtr);
1127 }
1128 static bool classof(const Value *V) {
1129 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1130 }
1131};
1132
1133template <>
1134struct OperandTraits<GetElementPtrInst>
1135 : public VariadicOperandTraits<GetElementPtrInst> {};
1136
1137GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1138 ArrayRef<Value *> IdxList,
1139 AllocInfo AllocInfo, const Twine &NameStr,
1140 InsertPosition InsertBefore)
1141 : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr, AllocInfo,
1142 InsertBefore),
1143 SourceElementType(PointeeType),
1144 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1145 init(Ptr, IdxList, NameStr);
1146}
1147
1148DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
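// Editor's note: a GEP sketch indexing element I of a [16 x i32] array, based
// on the Create overloads above (not part of this header); assumes `Ctx`, a
// pointer `Base`, an i64 index Value `I`, the module DataLayout `DL`, and a
// BasicBlock* `BB`:
//
//   Type *ArrTy = ArrayType::get(Type::getInt32Ty(Ctx), 16);
//   Value *Zero = ConstantInt::get(Type::getInt64Ty(Ctx), 0);
//   GetElementPtrInst *GEP = GetElementPtrInst::CreateInBounds(
//       ArrTy, Base, {Zero, I}, "elt.ptr", BB);
//   if (GEP->hasAllConstantIndices()) {      // false here: I is a variable
//     APInt Off(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
//     bool Known = GEP->accumulateConstantOffset(DL, Off);
//   }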
1149
1150//===----------------------------------------------------------------------===//
1151// ICmpInst Class
1152//===----------------------------------------------------------------------===//
1153
1154/// This instruction compares its operands according to the predicate given
1155/// to the constructor. It only operates on integers or pointers. The operands
1156/// must be identical types.
1157/// Represent an integer comparison operator.
1158class ICmpInst: public CmpInst {
1159 void AssertOK() {
1160 assert(isIntPredicate() &&
1161 "Invalid ICmp predicate value");
1162 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1163 "Both operands to ICmp instruction are not of the same type!");
1164 // Check that the operands are the right type
1165 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1166 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1167 "Invalid operand types for ICmp instruction");
1168 }
1169
1170 enum { SameSign = (1 << 0) };
1171
1172protected:
1173 // Note: Instruction needs to be a friend here to call cloneImpl.
1174 friend class Instruction;
1175
1176 /// Clone an identical ICmpInst
1177 ICmpInst *cloneImpl() const;
1178
1179public:
1180 /// Constructor with insertion semantics.
1181 ICmpInst(InsertPosition InsertBefore, ///< Where to insert
1182 Predicate pred, ///< The predicate to use for the comparison
1183 Value *LHS, ///< The left-hand-side of the expression
1184 Value *RHS, ///< The right-hand-side of the expression
1185 const Twine &NameStr = "" ///< Name of the instruction
1186 )
1187 : CmpInst(makeCmpResultType(LHS->getType()), Instruction::ICmp, pred, LHS,
1188 RHS, NameStr, InsertBefore) {
1189#ifndef NDEBUG
1190 AssertOK();
1191#endif
1192 }
1193
1194 /// Constructor with no-insertion semantics
1195 ICmpInst(
1196 Predicate pred, ///< The predicate to use for the comparison
1197 Value *LHS, ///< The left-hand-side of the expression
1198 Value *RHS, ///< The right-hand-side of the expression
1199 const Twine &NameStr = "" ///< Name of the instruction
1200 ) : CmpInst(makeCmpResultType(LHS->getType()),
1201 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1202#ifndef NDEBUG
1203 AssertOK();
1204#endif
1205 }
1206
1207 /// @returns the predicate along with samesign information.
1208 CmpPredicate getCmpPredicate() const {
1209 return {getPredicate(), hasSameSign()};
1210 }
1211
1212 /// @returns the inverse predicate along with samesign information: static
1213 /// variant.
1214 static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred) {
1215 return {getInversePredicate(Pred), Pred.hasSameSign()};
1216 }
1217
1218 /// @returns the inverse predicate along with samesign information.
1219 CmpPredicate getInverseCmpPredicate() const {
1220 return getInverseCmpPredicate(getCmpPredicate());
1221 }
1222
1223 /// @returns the swapped predicate along with samesign information: static
1224 /// variant.
1225 static CmpPredicate getSwappedCmpPredicate(CmpPredicate Pred) {
1226 return {getSwappedPredicate(Pred), Pred.hasSameSign()};
1227 }
1228
1229 /// @returns the swapped predicate along with samesign information.
1230 CmpPredicate getSwappedCmpPredicate() const {
1231 return getSwappedCmpPredicate(getCmpPredicate());
1232 }
1233
1234 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1235 /// @returns the predicate that would be the result if the operand were
1236 /// regarded as signed.
1237 /// Return the signed version of the predicate.
1238 Predicate getSignedPredicate() const {
1239 return getSignedPredicate(getPredicate());
1240 }
1241
1242 /// Return the signed version of the predicate: static variant.
1243 static Predicate getSignedPredicate(Predicate Pred);
1244
1245 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1246 /// @returns the predicate that would be the result if the operand were
1247 /// regarded as unsigned.
1248 /// Return the unsigned version of the predicate.
1249 Predicate getUnsignedPredicate() const {
1250 return getUnsignedPredicate(getPredicate());
1251 }
1252
1253 /// Return the unsigned version of the predicate: static variant.
1254 static Predicate getUnsignedPredicate(Predicate Pred);
1255
1256 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ
1257 /// @returns the unsigned version of the signed predicate pred or
1258 /// the signed version of the signed predicate pred.
1259 /// Static variant.
1260 static Predicate getFlippedSignednessPredicate(Predicate Pred);
1261
1262 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ
1263 /// @returns the unsigned version of the signed predicate pred or
1264 /// the signed version of the signed predicate pred.
1265 Predicate getFlippedSignednessPredicate() const {
1266 return getFlippedSignednessPredicate(getPredicate());
1267 }
1268
1269 /// Determine if Pred1 implies Pred2 is true, false, or if nothing can be
1270 /// inferred about the implication, when two compares have matching operands.
1271 static std::optional<bool> isImpliedByMatchingCmp(CmpPredicate Pred1,
1272 CmpPredicate Pred2);
1273
1274 void setSameSign(bool B = true) {
1275 SubclassOptionalData = (SubclassOptionalData & ~SameSign) | (B * SameSign);
1276 }
1277
1278 /// An icmp instruction, which can be marked as "samesign", indicating that
1279 /// the two operands have the same sign. This means that we can convert
1280 /// "slt" to "ult" and vice versa, which enables more optimizations.
1281 bool hasSameSign() const { return SubclassOptionalData & SameSign; }
1282
1283 /// Return true if this predicate is either EQ or NE. This also
1284 /// tests for commutativity.
1285 static bool isEquality(Predicate P) {
1286 return P == ICMP_EQ || P == ICMP_NE;
1287 }
1288
1289 /// Return true if this predicate is either EQ or NE. This also
1290 /// tests for commutativity.
1291 bool isEquality() const {
1292 return isEquality(getPredicate());
1293 }
1294
1295 /// @returns true if the predicate is commutative
1296 /// Determine if this relation is commutative.
1297 static bool isCommutative(Predicate P) { return isEquality(P); }
1298
1299 /// @returns true if the predicate of this ICmpInst is commutative
1300 /// Determine if this relation is commutative.
1301 bool isCommutative() const { return isCommutative(getPredicate()); }
1302
1303 /// Return true if the predicate is relational (not EQ or NE).
1304 ///
1305 bool isRelational() const {
1306 return !isEquality();
1307 }
1308
1309 /// Return true if the predicate is relational (not EQ or NE).
1310 ///
1311 static bool isRelational(Predicate P) {
1312 return !isEquality(P);
1313 }
1314
1315 /// Return true if the predicate is SGT or UGT.
1316 ///
1317 static bool isGT(Predicate P) {
1318 return P == ICMP_SGT || P == ICMP_UGT;
1319 }
1320
1321 /// Return true if the predicate is SLT or ULT.
1322 ///
1323 static bool isLT(Predicate P) {
1324 return P == ICMP_SLT || P == ICMP_ULT;
1325 }
1326
1327 /// Return true if the predicate is SGE or UGE.
1328 ///
1329 static bool isGE(Predicate P) {
1330 return P == ICMP_SGE || P == ICMP_UGE;
1331 }
1332
1333 /// Return true if the predicate is SLE or ULE.
1334 ///
1335 static bool isLE(Predicate P) {
1336 return P == ICMP_SLE || P == ICMP_ULE;
1337 }
1338
1339 /// Returns the sequence of all ICmp predicates.
1340 ///
1341 static auto predicates() { return ICmpPredicates(); }
1342
1343 /// Exchange the two operands to this instruction in such a way that it does
1344 /// not modify the semantics of the instruction. The predicate value may be
1345 /// changed to retain the same result if the predicate is order dependent
1346 /// (e.g. ult).
1347 /// Swap operands and adjust predicate.
1348 void swapOperands() {
1349 setPredicate(getSwappedPredicate());
1350 Op<0>().swap(Op<1>());
1351 }
1352
1353 /// Return result of `LHS Pred RHS` comparison.
1354 static bool compare(const APInt &LHS, const APInt &RHS,
1355 ICmpInst::Predicate Pred);
1356
1357 /// Return result of `LHS Pred RHS`, if it can be determined from the
1358 /// KnownBits. Otherwise return nullopt.
1359 static std::optional<bool> compare(const KnownBits &LHS, const KnownBits &RHS,
1360 ICmpInst::Predicate Pred);
1361
1362 // Methods for support type inquiry through isa, cast, and dyn_cast:
1363 static bool classof(const Instruction *I) {
1364 return I->getOpcode() == Instruction::ICmp;
1365 }
1366 static bool classof(const Value *V) {
1367 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1368 }
1369};
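// Editor's note: icmp works on integers, integer vectors and pointers. A small
// sketch using the constructor and static compare() helper above (not part of
// this header); `A` and `B` are assumed i32 Values:
//
//   ICmpInst *Cmp = new ICmpInst(ICmpInst::ICMP_SLT, A, B, "cmp");
//   Cmp->setSameSign();                 // both operands known to share a sign
//   CmpInst::Predicate P = Cmp->getUnsignedPredicate();  // ICMP_SLT -> ICMP_ULT
//
//   // Constant-folding helper: 5 <s 7 evaluates to true.
//   bool R = ICmpInst::compare(APInt(32, 5), APInt(32, 7), ICmpInst::ICMP_SLT);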
1370
1371//===----------------------------------------------------------------------===//
1372// FCmpInst Class
1373//===----------------------------------------------------------------------===//
1374
1375/// This instruction compares its operands according to the predicate given
1376/// to the constructor. It only operates on floating point values or packed
1377/// vectors of floating point values. The operands must be identical types.
1378/// Represents a floating point comparison operator.
1379class FCmpInst: public CmpInst {
1380 void AssertOK() {
1381 assert(isFPPredicate() && "Invalid FCmp predicate value");
1382 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1383 "Both operands to FCmp instruction are not of the same type!");
1384 // Check that the operands are the right type
1385 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1386 "Invalid operand types for FCmp instruction");
1387 }
1388
1389protected:
1390 // Note: Instruction needs to be a friend here to call cloneImpl.
1391 friend class Instruction;
1392
1393 /// Clone an identical FCmpInst
1394 FCmpInst *cloneImpl() const;
1395
1396public:
1397 /// Constructor with insertion semantics.
1398 FCmpInst(InsertPosition InsertBefore, ///< Where to insert
1399 Predicate pred, ///< The predicate to use for the comparison
1400 Value *LHS, ///< The left-hand-side of the expression
1401 Value *RHS, ///< The right-hand-side of the expression
1402 const Twine &NameStr = "" ///< Name of the instruction
1403 )
1404 : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, pred, LHS,
1405 RHS, NameStr, InsertBefore) {
1406 AssertOK();
1407 }
1408
1409 /// Constructor with no-insertion semantics
1410 FCmpInst(Predicate Pred, ///< The predicate to use for the comparison
1411 Value *LHS, ///< The left-hand-side of the expression
1412 Value *RHS, ///< The right-hand-side of the expression
1413 const Twine &NameStr = "", ///< Name of the instruction
1414 Instruction *FlagsSource = nullptr)
1415 : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1416 RHS, NameStr, nullptr, FlagsSource) {
1417 AssertOK();
1418 }
1419
1420 /// @returns true if the predicate is EQ or NE.
1421 /// Determine if this is an equality predicate.
1422 static bool isEquality(Predicate Pred) {
1423 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1424 Pred == FCMP_UNE;
1425 }
1426
1427 /// @returns true if the predicate of this instruction is EQ or NE.
1428 /// Determine if this is an equality predicate.
1429 bool isEquality() const { return isEquality(getPredicate()); }
1430
1431 /// @returns true if the predicate is commutative.
1432 /// Determine if this is a commutative predicate.
1433 static bool isCommutative(Predicate Pred) {
1434 return isEquality(Pred) || Pred == FCMP_FALSE || Pred == FCMP_TRUE ||
1435 Pred == FCMP_ORD || Pred == FCMP_UNO;
1436 }
1437
1438 /// @returns true if the predicate of this instruction is commutative.
1439 /// Determine if this is a commutative predicate.
1440 bool isCommutative() const { return isCommutative(getPredicate()); }
1441
1442 /// @returns true if the predicate is relational (not EQ or NE).
1443 /// Determine if this a relational predicate.
1444 bool isRelational() const { return !isEquality(); }
1445
1446 /// Exchange the two operands to this instruction in such a way that it does
1447 /// not modify the semantics of the instruction. The predicate value may be
1448 /// changed to retain the same result if the predicate is order dependent
1449 /// (e.g. ult).
1450 /// Swap operands and adjust predicate.
1451 void swapOperands() {
1452 setPredicate(getSwappedPredicate());
1453 Op<0>().swap(Op<1>());
1454 }
1455
1456 /// Returns the sequence of all FCmp predicates.
1457 ///
1458 static auto predicates() { return FCmpPredicates(); }
1459
1460 /// Return result of `LHS Pred RHS` comparison.
1461 static bool compare(const APFloat &LHS, const APFloat &RHS,
1462 FCmpInst::Predicate Pred);
1463
1464 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1465 static bool classof(const Instruction *I) {
1466 return I->getOpcode() == Instruction::FCmp;
1467 }
1468 static bool classof(const Value *V) {
1469 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1470 }
1471};
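// Editor's note: the fcmp analogue of the sketch above (not part of this
// header); `X` and `Y` are assumed float Values:
//
//   FCmpInst *Cmp = new FCmpInst(FCmpInst::FCMP_OLT, X, Y, "fcmp");
//   bool Commutes = Cmp->isCommutative();           // false for FCMP_OLT
//   bool R = FCmpInst::compare(APFloat(1.0f), APFloat(2.0f),
//                              FCmpInst::FCMP_OLT); // true, ordered less-than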
1472
1473//===----------------------------------------------------------------------===//
1474/// This class represents a function call, abstracting a target
1475/// machine's calling convention. This class uses low bit of the SubClassData
1476/// field to indicate whether or not this is a tail call. The rest of the bits
1477/// hold the calling convention of the call.
1478///
1479class CallInst : public CallBase {
1481
1482 /// Construct a CallInst from a range of arguments
1483 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1484 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1485 AllocInfo AllocInfo, InsertPosition InsertBefore);
1486
1487 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1488 const Twine &NameStr, AllocInfo AllocInfo,
1489 InsertPosition InsertBefore)
1490 : CallInst(Ty, Func, Args, {}, NameStr, AllocInfo, InsertBefore) {}
1491
1492 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1493 AllocInfo AllocInfo, InsertPosition InsertBefore);
1494
1495 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1496 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1497 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1498
1499 /// Compute the number of operands to allocate.
1500 static unsigned ComputeNumOperands(unsigned NumArgs,
1501 unsigned NumBundleInputs = 0) {
1502 // We need one operand for the called function, plus the input operand
1503 // counts provided.
1504 return 1 + NumArgs + NumBundleInputs;
1505 }
1506
1507protected:
1508 // Note: Instruction needs to be a friend here to call cloneImpl.
1509 friend class Instruction;
1510
1511 CallInst *cloneImpl() const;
1512
1513public:
1514 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1515 InsertPosition InsertBefore = nullptr) {
1516 IntrusiveOperandsAllocMarker AllocMarker{ComputeNumOperands(0)};
1517 return new (AllocMarker)
1518 CallInst(Ty, F, NameStr, AllocMarker, InsertBefore);
1519 }
1520
1521 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1522 const Twine &NameStr,
1523 InsertPosition InsertBefore = nullptr) {
1524 IntrusiveOperandsAllocMarker AllocMarker{ComputeNumOperands(Args.size())};
1525 return new (AllocMarker)
1526 CallInst(Ty, Func, Args, {}, NameStr, AllocMarker, InsertBefore);
1527 }
1528
1529 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1530 ArrayRef<OperandBundleDef> Bundles = {},
1531 const Twine &NameStr = "",
1532 InsertPosition InsertBefore = nullptr) {
1533 IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
1534 ComputeNumOperands(unsigned(Args.size()), CountBundleInputs(Bundles)),
1535 unsigned(Bundles.size() * sizeof(BundleOpInfo))};
1536
1537 return new (AllocMarker)
1538 CallInst(Ty, Func, Args, Bundles, NameStr, AllocMarker, InsertBefore);
1539 }
1540
1541 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1542 InsertPosition InsertBefore = nullptr) {
1543 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1544 InsertBefore);
1545 }
1546
1547 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1548 ArrayRef<OperandBundleDef> Bundles = {},
1549 const Twine &NameStr = "",
1550 InsertPosition InsertBefore = nullptr) {
1551 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1552 NameStr, InsertBefore);
1553 }
1554
1555 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1556 const Twine &NameStr,
1557 InsertPosition InsertBefore = nullptr) {
1558 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1559 InsertBefore);
1560 }
1561
1562 /// Create a clone of \p CI with a different set of operand bundles and
1563 /// insert it before \p InsertBefore.
1564 ///
1565 /// The returned call instruction is identical \p CI in every way except that
1566 /// the operand bundles for the new instruction are set to the operand bundles
1567 /// in \p Bundles.
1568 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1569 InsertPosition InsertPt = nullptr);
1570
1571 // Note that 'musttail' implies 'tail'.
1572 enum TailCallKind : unsigned {
1573 TCK_None = 0,
1574 TCK_Tail = 1,
1575 TCK_MustTail = 2,
1576 TCK_NoTail = 3,
1577 TCK_LAST = TCK_NoTail
1578 };
1579
1580 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1581 static_assert(
1582 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1583 "Bitfields must be contiguous");
1584
1585 TailCallKind getTailCallKind() const {
1586 return getSubclassData<TailCallKindField>();
1587 }
1588
1589 bool isTailCall() const {
1590 TailCallKind Kind = getTailCallKind();
1591 return Kind == TCK_Tail || Kind == TCK_MustTail;
1592 }
1593
1594 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1595
1596 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1597
1598 void setTailCallKind(TailCallKind TCK) {
1599 setSubclassData<TailCallKindField>(TCK);
1600 }
1601
1602 void setTailCall(bool IsTc = true) {
1603 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1604 }
1605
1606 /// Return true if the call can return twice
1607 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1608 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1609
1610 /// Return true if the call is for a noreturn trap intrinsic.
1611 bool isNonContinuableTrap() const {
1612 switch (getIntrinsicID()) {
1613 case Intrinsic::trap:
1614 case Intrinsic::ubsantrap:
1615 return !hasFnAttr("trap-func-name");
1616 default:
1617 return false;
1618 }
1619 }
1620
1621 // Methods for support type inquiry through isa, cast, and dyn_cast:
1622 static bool classof(const Instruction *I) {
1623 return I->getOpcode() == Instruction::Call;
1624 }
1625 static bool classof(const Value *V) {
1626 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1627 }
1628
1629 /// Updates profile metadata by scaling it by \p S / \p T.
1630 void updateProfWeight(uint64_t S, uint64_t T);
1631
1632private:
1633 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1634 // method so that subclasses cannot accidentally use it.
1635 template <typename Bitfield>
1636 void setSubclassData(typename Bitfield::Type Value) {
1637 Instruction::setSubclassData<Bitfield>(Value);
1638 }
1639};
1640
1641CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1642 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1643 AllocInfo AllocInfo, InsertPosition InsertBefore)
1644 : CallBase(Ty->getReturnType(), Instruction::Call, AllocInfo,
1645 InsertBefore) {
1646 assert(AllocInfo.NumOps ==
1647 unsigned(Args.size() + CountBundleInputs(Bundles) + 1));
1648 init(Ty, Func, Args, Bundles, NameStr);
1649}
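// Editor's note: a hedged sketch of creating a direct call with the Create
// overloads above (not part of this header); assumes a Module `M`, `Ctx`, an
// i32 argument `Arg`, and a BasicBlock* `BB`. The callee name "callee" is
// purely illustrative.
//
//   FunctionType *FnTy =
//       FunctionType::get(Type::getInt32Ty(Ctx), {Type::getInt32Ty(Ctx)},
//                         /*isVarArg=*/false);
//   FunctionCallee Callee = M.getOrInsertFunction("callee", FnTy);
//   CallInst *Call = CallInst::Create(Callee, {Arg}, "ret", BB);
//   Call->setTailCallKind(CallInst::TCK_Tail);
//   Call->setCallingConv(CallingConv::Fast);   // inherited from CallBase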
1650
1651//===----------------------------------------------------------------------===//
1652// SelectInst Class
1653//===----------------------------------------------------------------------===//
1654
1655/// This class represents the LLVM 'select' instruction.
1656///
1657class SelectInst : public Instruction {
1658 constexpr static IntrusiveOperandsAllocMarker AllocMarker{3};
1659
1660 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1661 InsertPosition InsertBefore)
1662 : Instruction(S1->getType(), Instruction::Select, AllocMarker,
1663 InsertBefore) {
1664 init(C, S1, S2);
1665 setName(NameStr);
1666 }
1667
1668 void init(Value *C, Value *S1, Value *S2) {
1669 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1670 Op<0>() = C;
1671 Op<1>() = S1;
1672 Op<2>() = S2;
1673 }
1674
1675protected:
1676 // Note: Instruction needs to be a friend here to call cloneImpl.
1677 friend class Instruction;
1678
1679 SelectInst *cloneImpl() const;
1680
1681public:
1683 const Twine &NameStr = "",
1684 InsertPosition InsertBefore = nullptr,
1685 Instruction *MDFrom = nullptr) {
1686 SelectInst *Sel =
1687 new (AllocMarker) SelectInst(C, S1, S2, NameStr, InsertBefore);
1688 if (MDFrom)
1689 Sel->copyMetadata(*MDFrom);
1690 return Sel;
1691 }
1692
1693 const Value *getCondition() const { return Op<0>(); }
1694 const Value *getTrueValue() const { return Op<1>(); }
1695 const Value *getFalseValue() const { return Op<2>(); }
1696 Value *getCondition() { return Op<0>(); }
1697 Value *getTrueValue() { return Op<1>(); }
1698 Value *getFalseValue() { return Op<2>(); }
1699
1700 void setCondition(Value *V) { Op<0>() = V; }
1701 void setTrueValue(Value *V) { Op<1>() = V; }
1702 void setFalseValue(Value *V) { Op<2>() = V; }
1703
1704 /// Swap the true and false values of the select instruction.
1705 /// This doesn't swap prof metadata.
1706 void swapValues() { Op<1>().swap(Op<2>()); }
1707
1708 /// Return a string if the specified operands are invalid
1709 /// for a select operation, otherwise return null.
1710 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1711
1712 /// Transparently provide more efficient getOperand methods.
1713 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1714
1715 OtherOps getOpcode() const {
1716 return static_cast<OtherOps>(Instruction::getOpcode());
1717 }
1718
1719 // Methods for support type inquiry through isa, cast, and dyn_cast:
1720 static bool classof(const Instruction *I) {
1721 return I->getOpcode() == Instruction::Select;
1722 }
1723 static bool classof(const Value *V) {
1724 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1725 }
1726};
1727
1728template <>
1729struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1730};
1731
1733
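// Editor's note: select picks one of two values based on an i1 (or i1 vector)
// condition. A sketch using SelectInst::Create (not part of this header), with
// `Cond` (i1), `A`, `B` (same type) and a BasicBlock* `BB` assumed:
//
//   assert(!SelectInst::areInvalidOperands(Cond, A, B));
//   SelectInst *Sel = SelectInst::Create(Cond, A, B, "min", BB);
//   Sel->swapValues();                 // now selects B when Cond is true
//   Sel->setCondition(Cond);           // operands can be rewritten in place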
1734//===----------------------------------------------------------------------===//
1735// VAArgInst Class
1736//===----------------------------------------------------------------------===//
1737
1738/// This class represents the va_arg llvm instruction, which returns
1739/// an argument of the specified type given a va_list and increments that list
1740///
1741class VAArgInst : public UnaryInstruction {
1742protected:
1743 // Note: Instruction needs to be a friend here to call cloneImpl.
1744 friend class Instruction;
1745
1746 VAArgInst *cloneImpl() const;
1747
1748public:
1749 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1750 InsertPosition InsertBefore = nullptr)
1751 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1752 setName(NameStr);
1753 }
1754
1755 Value *getPointerOperand() { return getOperand(0); }
1756 const Value *getPointerOperand() const { return getOperand(0); }
1757 static unsigned getPointerOperandIndex() { return 0U; }
1758
1759 // Methods for support type inquiry through isa, cast, and dyn_cast:
1760 static bool classof(const Instruction *I) {
1761 return I->getOpcode() == VAArg;
1762 }
1763 static bool classof(const Value *V) {
1764 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1765 }
1766};
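// Editor's note: va_arg reads the next argument from a va_list. A sketch (not
// part of this header), assuming `Ctx`, a pointer `VAListPtr` to the va_list,
// and an insertion block `BB`:
//
//   VAArgInst *Next = new VAArgInst(VAListPtr, Type::getInt32Ty(Ctx),
//                                   "next.arg", BB);
//   Value *List = Next->getPointerOperand();   // the va_list operand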
1767
1768//===----------------------------------------------------------------------===//
1769// ExtractElementInst Class
1770//===----------------------------------------------------------------------===//
1771
1772/// This instruction extracts a single (scalar)
1773/// element from a VectorType value
1774///
1775class ExtractElementInst : public Instruction {
1776 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
1777
1778 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1779 InsertPosition InsertBefore = nullptr);
1780
1781protected:
1782 // Note: Instruction needs to be a friend here to call cloneImpl.
1783 friend class Instruction;
1784
1785 ExtractElementInst *cloneImpl() const;
1786
1787public:
1788 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1789 const Twine &NameStr = "",
1790 InsertPosition InsertBefore = nullptr) {
1791 return new (AllocMarker)
1792 ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1793 }
1794
1795 /// Return true if an extractelement instruction can be
1796 /// formed with the specified operands.
1797 static bool isValidOperands(const Value *Vec, const Value *Idx);
1798
1799 Value *getVectorOperand() { return Op<0>(); }
1800 Value *getIndexOperand() { return Op<1>(); }
1801 const Value *getVectorOperand() const { return Op<0>(); }
1802 const Value *getIndexOperand() const { return Op<1>(); }
1803
1804 VectorType *getVectorOperandType() const {
1805 return cast<VectorType>(getVectorOperand()->getType());
1806 }
1807
1808 /// Transparently provide more efficient getOperand methods.
1809 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1810
1811 // Methods for support type inquiry through isa, cast, and dyn_cast:
1812 static bool classof(const Instruction *I) {
1813 return I->getOpcode() == Instruction::ExtractElement;
1814 }
1815 static bool classof(const Value *V) {
1816 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1817 }
1818};
1819
1820template <>
1821struct OperandTraits<ExtractElementInst> :
1822 public FixedNumOperandTraits<ExtractElementInst, 2> {
1823};
1824
1825DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
1826
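// A minimal usage sketch for ExtractElementInst, assuming a vector value and
// an integer index; the helper name is hypothetical.
inline Value *extractLaneExample(Value *Vec, Value *Idx,
                                 InsertPosition InsertPt) {
  // Check validity first; Create() asserts the same property internally.
  if (!ExtractElementInst::isValidOperands(Vec, Idx))
    return nullptr;
  // %lane = extractelement <N x ty> %Vec, iM %Idx
  return ExtractElementInst::Create(Vec, Idx, "lane", InsertPt);
}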
1827//===----------------------------------------------------------------------===//
1828// InsertElementInst Class
1829//===----------------------------------------------------------------------===//
1830
1831/// This instruction inserts a single (scalar)
1832/// element into a VectorType value
1833///
1834class InsertElementInst : public Instruction {
1835 constexpr static IntrusiveOperandsAllocMarker AllocMarker{3};
1836
1837 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1838 const Twine &NameStr = "",
1839 InsertPosition InsertBefore = nullptr);
1840
1841protected:
1842 // Note: Instruction needs to be a friend here to call cloneImpl.
1843 friend class Instruction;
1844
1845 InsertElementInst *cloneImpl() const;
1846
1847public:
1848 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1849 const Twine &NameStr = "",
1850 InsertPosition InsertBefore = nullptr) {
1851 return new (AllocMarker)
1852 InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1853 }
1854
1855 /// Return true if an insertelement instruction can be
1856 /// formed with the specified operands.
1857 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1858 const Value *Idx);
1859
1860 /// Overload to return most specific vector type.
1861 ///
1862 VectorType *getType() const {
1863 return cast<VectorType>(Instruction::getType());
1864 }
1865
1866 /// Transparently provide more efficient getOperand methods.
1867 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1868
1869 // Methods for support type inquiry through isa, cast, and dyn_cast:
1870 static bool classof(const Instruction *I) {
1871 return I->getOpcode() == Instruction::InsertElement;
1872 }
1873 static bool classof(const Value *V) {
1874 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1875 }
1876};
1877
1878template <>
1879struct OperandTraits<InsertElementInst> :
1880 public FixedNumOperandTraits<InsertElementInst, 3> {
1881};
1882
1883DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
1884
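// The matching sketch for InsertElementInst: writing a scalar back into one
// lane of a vector. Operand values are assumed; the helper name is
// hypothetical.
inline Value *insertLaneExample(Value *Vec, Value *Scalar, Value *Idx,
                                InsertPosition InsertPt) {
  if (!InsertElementInst::isValidOperands(Vec, Scalar, Idx))
    return nullptr;
  // %vec.upd = insertelement <N x ty> %Vec, ty %Scalar, iM %Idx
  return InsertElementInst::Create(Vec, Scalar, Idx, "vec.upd", InsertPt);
}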
1885//===----------------------------------------------------------------------===//
1886// ShuffleVectorInst Class
1887//===----------------------------------------------------------------------===//
1888
1889constexpr int PoisonMaskElem = -1;
1890
1891/// This instruction constructs a fixed permutation of two
1892/// input vectors.
1893///
1894/// For each element of the result vector, the shuffle mask selects an element
1895/// from one of the input vectors to copy to the result. Non-negative elements
1896/// in the mask represent an index into the concatenated pair of input vectors.
1897/// PoisonMaskElem (-1) specifies that the result element is poison.
1898///
1899/// For scalable vectors, all the elements of the mask must be 0 or -1. This
1900/// requirement may be relaxed in the future.
1901class ShuffleVectorInst : public Instruction {
1902 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
1903
1904 SmallVector<int, 4> ShuffleMask;
1905 Constant *ShuffleMaskForBitcode;
1906
1907protected:
1908 // Note: Instruction needs to be a friend here to call cloneImpl.
1909 friend class Instruction;
1910
1911 ShuffleVectorInst *cloneImpl() const;
1912
1913public:
1914 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
1915 InsertPosition InsertBefore = nullptr);
1916 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
1917 InsertPosition InsertBefore = nullptr);
1918 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1919 const Twine &NameStr = "",
1920 InsertPosition InsertBefore = nullptr);
1921 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
1922 const Twine &NameStr = "",
1923 InsertPosition InsertBefore = nullptr);
1924
1925 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
1926 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
1927
1928 /// Swap the operands and adjust the mask to preserve the semantics
1929 /// of the instruction.
1930 void commute();
1931
1932 /// Return true if a shufflevector instruction can be
1933 /// formed with the specified operands.
1934 static bool isValidOperands(const Value *V1, const Value *V2,
1935 const Value *Mask);
1936 static bool isValidOperands(const Value *V1, const Value *V2,
1937 ArrayRef<int> Mask);
1938
1939 /// Overload to return most specific vector type.
1940 ///
1941 VectorType *getType() const {
1942 return cast<VectorType>(Instruction::getType());
1943 }
1944
1945 /// Transparently provide more efficient getOperand methods.
1946 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1947
1948 /// Return the shuffle mask value of this instruction for the given element
1949 /// index. Return PoisonMaskElem if the element is undef.
1950 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
1951
1952 /// Convert the input shuffle mask operand to a vector of integers. Undefined
1953 /// elements of the mask are returned as PoisonMaskElem.
1954 static void getShuffleMask(const Constant *Mask,
1955 SmallVectorImpl<int> &Result);
1956
1957 /// Return the mask for this instruction as a vector of integers. Undefined
1958 /// elements of the mask are returned as PoisonMaskElem.
1959 void getShuffleMask(SmallVectorImpl<int> &Result) const {
1960 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
1961 }
1962
1963 /// Return the mask for this instruction, for use in bitcode.
1964 ///
1965 /// TODO: This is temporary until we decide a new bitcode encoding for
1966 /// shufflevector.
1967 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
1968
1969 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
1970 Type *ResultTy);
1971
1972 void setShuffleMask(ArrayRef<int> Mask);
1973
1974 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
1975
1976 /// Return true if this shuffle returns a vector with a different number of
1977 /// elements than its source vectors.
1978 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
1979 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
1980 bool changesLength() const {
1981 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
1982 ->getElementCount()
1983 .getKnownMinValue();
1984 unsigned NumMaskElts = ShuffleMask.size();
1985 return NumSourceElts != NumMaskElts;
1986 }
1987
1988 /// Return true if this shuffle returns a vector with a greater number of
1989 /// elements than its source vectors.
1990 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
1991 bool increasesLength() const {
1992 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
1993 ->getElementCount()
1994 .getKnownMinValue();
1995 unsigned NumMaskElts = ShuffleMask.size();
1996 return NumSourceElts < NumMaskElts;
1997 }
1998
1999 /// Return true if this shuffle mask chooses elements from exactly one source
2000 /// vector.
2001 /// Example: <7,5,undef,7>
2002 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2003 /// length as the mask.
2004 static bool isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts);
2005 static bool isSingleSourceMask(const Constant *Mask, int NumSrcElts) {
2006 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2007 SmallVector<int, 16> MaskAsInts;
2008 getShuffleMask(Mask, MaskAsInts);
2009 return isSingleSourceMask(MaskAsInts, NumSrcElts);
2010 }
2011
2012 /// Return true if this shuffle chooses elements from exactly one source
2013 /// vector without changing the length of that vector.
2014 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2015 /// TODO: Optionally allow length-changing shuffles.
2016 bool isSingleSource() const {
2017 return !changesLength() &&
2018 isSingleSourceMask(ShuffleMask, ShuffleMask.size());
2019 }
2020
2021 /// Return true if this shuffle mask chooses elements from exactly one source
2022 /// vector without lane crossings. A shuffle using this mask is not
2023 /// necessarily a no-op because it may change the number of elements from its
2024 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2025 /// Example: <undef,undef,2,3>
2026 static bool isIdentityMask(ArrayRef<int> Mask, int NumSrcElts);
2027 static bool isIdentityMask(const Constant *Mask, int NumSrcElts) {
2028 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2029
2030 // Not possible to express a shuffle mask for a scalable vector for this
2031 // case.
2032 if (isa<ScalableVectorType>(Mask->getType()))
2033 return false;
2034
2035 SmallVector<int, 16> MaskAsInts;
2036 getShuffleMask(Mask, MaskAsInts);
2037 return isIdentityMask(MaskAsInts, NumSrcElts);
2038 }
2039
2040 /// Return true if this shuffle chooses elements from exactly one source
2041 /// vector without lane crossings and does not change the number of elements
2042 /// from its input vectors.
2043 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2044 bool isIdentity() const {
2045 // Not possible to express a shuffle mask for a scalable vector for this
2046 // case.
2047 if (isa<ScalableVectorType>(getType()))
2048 return false;
2049
2050 return !changesLength() && isIdentityMask(ShuffleMask, ShuffleMask.size());
2051 }
2052
2053 /// Return true if this shuffle lengthens exactly one source vector with
2054 /// undefs in the high elements.
2055 bool isIdentityWithPadding() const;
2056
2057 /// Return true if this shuffle extracts the first N elements of exactly one
2058 /// source vector.
2059 bool isIdentityWithExtract() const;
2060
2061 /// Return true if this shuffle concatenates its 2 source vectors. This
2062 /// returns false if either input is undefined. In that case, the shuffle
2063 /// is better classified as an identity with padding operation.
2064 bool isConcat() const;
2065
2066 /// Return true if this shuffle mask chooses elements from its source vectors
2067 /// without lane crossings. A shuffle using this mask would be
2068 /// equivalent to a vector select with a constant condition operand.
2069 /// Example: <4,1,6,undef>
2070 /// This returns false if the mask does not choose from both input vectors.
2071 /// In that case, the shuffle is better classified as an identity shuffle.
2072 /// This assumes that vector operands are the same length as the mask
2073 /// (a length-changing shuffle can never be equivalent to a vector select).
2074 static bool isSelectMask(ArrayRef<int> Mask, int NumSrcElts);
2075 static bool isSelectMask(const Constant *Mask, int NumSrcElts) {
2076 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2077 SmallVector<int, 16> MaskAsInts;
2078 getShuffleMask(Mask, MaskAsInts);
2079 return isSelectMask(MaskAsInts, NumSrcElts);
2080 }
2081
2082 /// Return true if this shuffle chooses elements from its source vectors
2083 /// without lane crossings and all operands have the same number of elements.
2084 /// In other words, this shuffle is equivalent to a vector select with a
2085 /// constant condition operand.
2086 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2087 /// This returns false if the mask does not choose from both input vectors.
2088 /// In that case, the shuffle is better classified as an identity shuffle.
2089 /// TODO: Optionally allow length-changing shuffles.
2090 bool isSelect() const {
2091 return !changesLength() && isSelectMask(ShuffleMask, ShuffleMask.size());
2092 }
2093
2094 /// Return true if this shuffle mask swaps the order of elements from exactly
2095 /// one source vector.
2096 /// Example: <7,6,undef,4>
2097 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2098 /// length as the mask.
2099 static bool isReverseMask(ArrayRef<int> Mask, int NumSrcElts);
2100 static bool isReverseMask(const Constant *Mask, int NumSrcElts) {
2101 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2102 SmallVector<int, 16> MaskAsInts;
2103 getShuffleMask(Mask, MaskAsInts);
2104 return isReverseMask(MaskAsInts, NumSrcElts);
2105 }
2106
2107 /// Return true if this shuffle swaps the order of elements from exactly
2108 /// one source vector.
2109 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2110 /// TODO: Optionally allow length-changing shuffles.
2111 bool isReverse() const {
2112 return !changesLength() && isReverseMask(ShuffleMask, ShuffleMask.size());
2113 }
2114
2115 /// Return true if this shuffle mask chooses all elements with the same value
2116 /// as the first element of exactly one source vector.
2117 /// Example: <4,undef,undef,4>
2118 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2119 /// length as the mask.
2120 static bool isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts);
2121 static bool isZeroEltSplatMask(const Constant *Mask, int NumSrcElts) {
2122 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2123 SmallVector<int, 16> MaskAsInts;
2124 getShuffleMask(Mask, MaskAsInts);
2125 return isZeroEltSplatMask(MaskAsInts, NumSrcElts);
2126 }
2127
2128 /// Return true if all elements of this shuffle are the same value as the
2129 /// first element of exactly one source vector without changing the length
2130 /// of that vector.
2131 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2132 /// TODO: Optionally allow length-changing shuffles.
2133 /// TODO: Optionally allow splats from other elements.
2134 bool isZeroEltSplat() const {
2135 return !changesLength() &&
2136 isZeroEltSplatMask(ShuffleMask, ShuffleMask.size());
2137 }
2138
2139 /// Return true if this shuffle mask is a transpose mask.
2140 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2141 /// even- or odd-numbered vector elements from two n-dimensional source
2142 /// vectors and write each result into consecutive elements of an
2143 /// n-dimensional destination vector. Two shuffles are necessary to complete
2144 /// the transpose, one for the even elements and another for the odd elements.
2145 /// This description closely follows how the TRN1 and TRN2 AArch64
2146 /// instructions operate.
2147 ///
2148 /// For example, a simple 2x2 matrix can be transposed with:
2149 ///
2150 /// ; Original matrix
2151 /// m0 = < a, b >
2152 /// m1 = < c, d >
2153 ///
2154 /// ; Transposed matrix
2155 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2156 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2157 ///
2158 /// For matrices having greater than n columns, the resulting nx2 transposed
2159 /// matrix is stored in two result vectors such that one vector contains
2160 /// interleaved elements from all the even-numbered rows and the other vector
2161 /// contains interleaved elements from all the odd-numbered rows. For example,
2162 /// a 2x4 matrix can be transposed with:
2163 ///
2164 /// ; Original matrix
2165 /// m0 = < a, b, c, d >
2166 /// m1 = < e, f, g, h >
2167 ///
2168 /// ; Transposed matrix
2169 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2170 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2171 static bool isTransposeMask(ArrayRef<int> Mask, int NumSrcElts);
2172 static bool isTransposeMask(const Constant *Mask, int NumSrcElts) {
2173 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2174 SmallVector<int, 16> MaskAsInts;
2175 getShuffleMask(Mask, MaskAsInts);
2176 return isTransposeMask(MaskAsInts, NumSrcElts);
2177 }
2178
2179 /// Return true if this shuffle transposes the elements of its inputs without
2180 /// changing the length of the vectors. This operation may also be known as a
2181 /// merge or interleave. See the description for isTransposeMask() for the
2182 /// exact specification.
2183 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2184 bool isTranspose() const {
2185 return !changesLength() && isTransposeMask(ShuffleMask, ShuffleMask.size());
2186 }
2187
2188 /// Return true if this shuffle mask is a splice mask, concatenating the two
2189 /// inputs together and then extracting an original-width vector starting from
2190 /// the splice index.
2191 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2192 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2193 /// length as the mask.
2194 static bool isSpliceMask(ArrayRef<int> Mask, int NumSrcElts, int &Index);
2195 static bool isSpliceMask(const Constant *Mask, int NumSrcElts, int &Index) {
2196 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2197 SmallVector<int, 16> MaskAsInts;
2198 getShuffleMask(Mask, MaskAsInts);
2199 return isSpliceMask(MaskAsInts, NumSrcElts, Index);
2200 }
2201
2202 /// Return true if this shuffle splices two inputs without changing the length
2203 /// of the vectors. This operation concatenates the two inputs together and
2204 /// then extracts an original width vector starting from the splice index.
2205 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2206 bool isSplice(int &Index) const {
2207 return !changesLength() &&
2208 isSpliceMask(ShuffleMask, ShuffleMask.size(), Index);
2209 }
2210
2211 /// Return true if this shuffle mask is an extract subvector mask.
2212 /// A valid extract subvector mask returns a smaller vector from a single
2213 /// source operand. The base extraction index is returned as well.
2214 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2215 int &Index);
2216 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2217 int &Index) {
2218 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2219 // Not possible to express a shuffle mask for a scalable vector for this
2220 // case.
2221 if (isa<ScalableVectorType>(Mask->getType()))
2222 return false;
2223 SmallVector<int, 16> MaskAsInts;
2224 getShuffleMask(Mask, MaskAsInts);
2225 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2226 }
2227
2228 /// Return true if this shuffle mask is an extract subvector mask.
2229 bool isExtractSubvectorMask(int &Index) const {
2230 // Not possible to express a shuffle mask for a scalable vector for this
2231 // case.
2232 if (isa<ScalableVectorType>(getType()))
2233 return false;
2234
2235 int NumSrcElts =
2236 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2237 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2238 }
2239
2240 /// Return true if this shuffle mask is an insert subvector mask.
2241 /// A valid insert subvector mask inserts the lowest elements of a second
2242 /// source operand into an in-place first source operand.
2243 /// Both the subvector width and the insertion index are returned.
2244 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2245 int &NumSubElts, int &Index);
2246 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2247 int &NumSubElts, int &Index) {
2248 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2249 // Not possible to express a shuffle mask for a scalable vector for this
2250 // case.
2251 if (isa<ScalableVectorType>(Mask->getType()))
2252 return false;
2253 SmallVector<int, 16> MaskAsInts;
2254 getShuffleMask(Mask, MaskAsInts);
2255 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2256 }
2257
2258 /// Return true if this shuffle mask is an insert subvector mask.
2259 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2260 // Not possible to express a shuffle mask for a scalable vector for this
2261 // case.
2262 if (isa<ScalableVectorType>(getType()))
2263 return false;
2264
2265 int NumSrcElts =
2266 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2267 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2268 }
2269
2270 /// Return true if this shuffle mask replicates each of the \p VF elements
2271 /// in a vector \p ReplicationFactor times.
2272 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2273 /// <0,0,0,1,1,1,2,2,2,3,3,3>
2274 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2275 int &VF);
2276 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2277 int &VF) {
2278 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2279 // Not possible to express a shuffle mask for a scalable vector for this
2280 // case.
2281 if (isa<ScalableVectorType>(Mask->getType()))
2282 return false;
2283 SmallVector<int, 16> MaskAsInts;
2284 getShuffleMask(Mask, MaskAsInts);
2285 return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2286 }
2287
2288 /// Return true if this shuffle mask is a replication mask.
2289 bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2290
2291 /// Return true if this shuffle mask represents a "clustered" mask of size VF,
2292 /// i.e. each index between [0..VF) is used exactly once in each submask of
2293 /// size VF.
2294 /// For example, the mask for \p VF=4 is:
2295 /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4
2296 /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time.
2297 /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because
2298 /// element 3 is used twice in the second submask
2299 /// (3,3,1,0) and index 2 is not used at all.
2300 static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF);
2301
2302 /// Return true if this shuffle mask is a one-use-single-source("clustered")
2303 /// mask.
2304 bool isOneUseSingleSourceMask(int VF) const;
2305
2306 /// Change values in a shuffle permute mask assuming the two vector operands
2307 /// of length InVecNumElts have swapped position.
2308 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2309 unsigned InVecNumElts) {
2310 for (int &Idx : Mask) {
2311 if (Idx == -1)
2312 continue;
2313 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2314 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2315 "shufflevector mask index out of range");
2316 }
2317 }
2318
2319 /// Return true if this shuffle interleaves its two input vectors together.
2320 bool isInterleave(unsigned Factor);
2321
2322 /// Return true if the mask interleaves one or more input vectors together.
2323 ///
2324 /// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...>
2325 /// E.g. For a Factor of 2 (LaneLen=4):
2326 /// <0, 4, 1, 5, 2, 6, 3, 7>
2327 /// E.g. For a Factor of 3 (LaneLen=4):
2328 /// <4, 0, 9, 5, 1, 10, 6, 2, 11, 7, 3, 12>
2329 /// E.g. For a Factor of 4 (LaneLen=2):
2330 /// <0, 2, 6, 4, 1, 3, 7, 5>
2331 ///
2332 /// NumInputElts is the total number of elements in the input vectors.
2333 ///
2334 /// StartIndexes are the first indexes of each vector being interleaved,
2335 /// substituting any indexes that were undef
2336 /// E.g. <4, -1, 2, 5, 1, 3> (Factor=3): StartIndexes=<4, 0, 2>
2337 ///
2338 /// Note that this does not check if the input vectors are consecutive:
2339 /// It will return true for masks such as
2340 /// <0, 4, 6, 1, 5, 7> (Factor=3, LaneLen=2)
2341 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
2342 unsigned NumInputElts,
2343 SmallVectorImpl<unsigned> &StartIndexes);
2344 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
2345 unsigned NumInputElts) {
2346 SmallVector<unsigned, 8> StartIndexes;
2347 return isInterleaveMask(Mask, Factor, NumInputElts, StartIndexes);
2348 }
2349
2350 /// Check if the mask is a DE-interleave mask of the given factor
2351 /// \p Factor like:
2352 /// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
2353 static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor,
2354 unsigned &Index);
2355 static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor) {
2356 unsigned Unused;
2357 return isDeInterleaveMaskOfFactor(Mask, Factor, Unused);
2358 }
2359
2360 /// Checks if the shuffle is a bit rotation of the first operand across
2361 /// multiple subelements, e.g:
2362 ///
2363 /// shuffle <8 x i8> %a, <8 x i8> poison, <8 x i32> <1, 0, 3, 2, 5, 4, 7, 6>
2364 ///
2365 /// could be expressed as
2366 ///
2367 /// rotl <4 x i16> %a, 8
2368 ///
2369 /// If it can be expressed as a rotation, returns the number of subelements to
2370 /// group by in NumSubElts and the number of bits to rotate left in RotateAmt.
2371 static bool isBitRotateMask(ArrayRef<int> Mask, unsigned EltSizeInBits,
2372 unsigned MinSubElts, unsigned MaxSubElts,
2373 unsigned &NumSubElts, unsigned &RotateAmt);
2374
2375 // Methods for support type inquiry through isa, cast, and dyn_cast:
2376 static bool classof(const Instruction *I) {
2377 return I->getOpcode() == Instruction::ShuffleVector;
2378 }
2379 static bool classof(const Value *V) {
2380 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2381 }
2382};
2383
2384template <>
2385struct OperandTraits<ShuffleVectorInst>
2386 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2387
2388DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
2389
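// A minimal usage sketch for ShuffleVectorInst, assuming two defined
// <4 x i32> operands; the helper name is hypothetical. The mask <0..7> indexes
// into the concatenation of both sources, so the mask predicates above report
// changesLength() and isConcat() for the resulting instruction.
inline Value *concatVectorsExample(Value *A, Value *B,
                                   InsertPosition InsertPt) {
  int ConcatMask[] = {0, 1, 2, 3, 4, 5, 6, 7};
  // %concat = shufflevector <4 x i32> %A, <4 x i32> %B,
  //                         <8 x i32> <i32 0, i32 1, ..., i32 7>
  return new ShuffleVectorInst(A, B, ConcatMask, "concat", InsertPt);
}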
2390//===----------------------------------------------------------------------===//
2391// ExtractValueInst Class
2392//===----------------------------------------------------------------------===//
2393
2394/// This instruction extracts a struct member or array
2395/// element value from an aggregate value.
2396///
2397class ExtractValueInst : public UnaryInstruction {
2398 SmallVector<unsigned, 4> Indices;
2399
2400 ExtractValueInst(const ExtractValueInst &EVI);
2401
2402 /// Constructors - Create an extractvalue instruction with a base aggregate
2403 /// value and a list of indices. The first and second ctor can optionally
2404 /// insert before an existing instruction, the third appends the new
2405 /// instruction to the specified BasicBlock.
2406 inline ExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
2407 const Twine &NameStr, InsertPosition InsertBefore);
2408
2409 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2410
2411protected:
2412 // Note: Instruction needs to be a friend here to call cloneImpl.
2413 friend class Instruction;
2414
2415 ExtractValueInst *cloneImpl() const;
2416
2417public:
2418 static ExtractValueInst *Create(Value *Agg, ArrayRef<unsigned> Idxs,
2419 const Twine &NameStr = "",
2420 InsertPosition InsertBefore = nullptr) {
2421 return new
2422 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2423 }
2424
2425 /// Returns the type of the element that would be extracted
2426 /// with an extractvalue instruction with the specified parameters.
2427 ///
2428 /// Null is returned if the indices are invalid for the specified type.
2429 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2430
2431 using idx_iterator = const unsigned*;
2432
2433 inline idx_iterator idx_begin() const { return Indices.begin(); }
2434 inline idx_iterator idx_end() const { return Indices.end(); }
2435 inline iterator_range<idx_iterator> indices() const {
2436 return make_range(idx_begin(), idx_end());
2437 }
2438
2439 Value *getAggregateOperand() {
2440 return getOperand(0);
2441 }
2442 const Value *getAggregateOperand() const {
2443 return getOperand(0);
2444 }
2445 static unsigned getAggregateOperandIndex() {
2446 return 0U; // get index for modifying correct operand
2447 }
2448
2449 ArrayRef<unsigned> getIndices() const {
2450 return Indices;
2451 }
2452
2453 unsigned getNumIndices() const {
2454 return (unsigned)Indices.size();
2455 }
2456
2457 bool hasIndices() const {
2458 return true;
2459 }
2460
2461 // Methods for support type inquiry through isa, cast, and dyn_cast:
2462 static bool classof(const Instruction *I) {
2463 return I->getOpcode() == Instruction::ExtractValue;
2464 }
2465 static bool classof(const Value *V) {
2466 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2467 }
2468};
2469
2470ExtractValueInst::ExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
2471 const Twine &NameStr,
2472 InsertPosition InsertBefore)
2473 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2474 ExtractValue, Agg, InsertBefore) {
2475 init(Idxs, NameStr);
2476}
2477
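// A minimal usage sketch for ExtractValueInst, assuming an aggregate such as
// {i32, float}; the helper name is hypothetical.
inline Value *extractFieldExample(Value *Agg, InsertPosition InsertPt) {
  unsigned Idxs[] = {1};
  // getIndexedType() returns null when the index list is invalid for the type.
  if (!ExtractValueInst::getIndexedType(Agg->getType(), Idxs))
    return nullptr;
  // %field1 = extractvalue {i32, float} %Agg, 1
  return ExtractValueInst::Create(Agg, Idxs, "field1", InsertPt);
}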
2478//===----------------------------------------------------------------------===//
2479// InsertValueInst Class
2480//===----------------------------------------------------------------------===//
2481
2482/// This instruction inserts a struct field or array element
2483/// value into an aggregate value.
2484///
2485class InsertValueInst : public Instruction {
2486 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
2487
2488 SmallVector<unsigned, 4> Indices;
2490 InsertValueInst(const InsertValueInst &IVI);
2491
2492 /// Constructors - Create an insertvalue instruction with a base aggregate
2493 /// value, a value to insert, and a list of indices. The first and second ctor
2494 /// can optionally insert before an existing instruction, the third appends
2495 /// the new instruction to the specified BasicBlock.
2496 inline InsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2497 const Twine &NameStr, InsertPosition InsertBefore);
2498
2499 /// Constructors - These three constructors are convenience methods because
2500 /// one and two index insertvalue instructions are so common.
2501 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2502 const Twine &NameStr = "",
2503 InsertPosition InsertBefore = nullptr);
2504
2505 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2506 const Twine &NameStr);
2507
2508protected:
2509 // Note: Instruction needs to be a friend here to call cloneImpl.
2510 friend class Instruction;
2511
2512 InsertValueInst *cloneImpl() const;
2513
2514public:
2515 // allocate space for exactly two operands
2516 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
2517 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2518
2519 static InsertValueInst *Create(Value *Agg, Value *Val,
2520 ArrayRef<unsigned> Idxs,
2521 const Twine &NameStr = "",
2522 InsertPosition InsertBefore = nullptr) {
2523 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2524 }
2525
2526 /// Transparently provide more efficient getOperand methods.
2527 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2528
2529 using idx_iterator = const unsigned*;
2530
2531 inline idx_iterator idx_begin() const { return Indices.begin(); }
2532 inline idx_iterator idx_end() const { return Indices.end(); }
2533 inline iterator_range<idx_iterator> indices() const {
2534 return make_range(idx_begin(), idx_end());
2535 }
2536
2537 Value *getAggregateOperand() {
2538 return getOperand(0);
2539 }
2540 const Value *getAggregateOperand() const {
2541 return getOperand(0);
2542 }
2543 static unsigned getAggregateOperandIndex() {
2544 return 0U; // get index for modifying correct operand
2545 }
2546
2547 Value *getInsertedValueOperand() {
2548 return getOperand(1);
2549 }
2550 const Value *getInsertedValueOperand() const {
2551 return getOperand(1);
2552 }
2553 static unsigned getInsertedValueOperandIndex() {
2554 return 1U; // get index for modifying correct operand
2555 }
2556
2557 ArrayRef<unsigned> getIndices() const {
2558 return Indices;
2559 }
2560
2561 unsigned getNumIndices() const {
2562 return (unsigned)Indices.size();
2563 }
2564
2565 bool hasIndices() const {
2566 return true;
2567 }
2568
2569 // Methods for support type inquiry through isa, cast, and dyn_cast:
2570 static bool classof(const Instruction *I) {
2571 return I->getOpcode() == Instruction::InsertValue;
2572 }
2573 static bool classof(const Value *V) {
2574 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2575 }
2576};
2577
2578template <>
2579struct OperandTraits<InsertValueInst> :
2580 public FixedNumOperandTraits<InsertValueInst, 2> {
2581};
2582
2583InsertValueInst::InsertValueInst(Value *Agg, Value *Val,
2584 ArrayRef<unsigned> Idxs, const Twine &NameStr,
2585 InsertPosition InsertBefore)
2586 : Instruction(Agg->getType(), InsertValue, AllocMarker, InsertBefore) {
2587 init(Agg, Val, Idxs, NameStr);
2588}
2589
2590DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
2591
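// The matching sketch for InsertValueInst: overwriting one element of a
// nested aggregate such as {i32, [2 x float]}. Operands are assumed to have
// compatible types; the helper name is hypothetical.
inline Value *insertFieldExample(Value *Agg, Value *Elt,
                                 InsertPosition InsertPt) {
  unsigned Idxs[] = {1, 0};
  // %agg.upd = insertvalue {i32, [2 x float]} %Agg, float %Elt, 1, 0
  return InsertValueInst::Create(Agg, Elt, Idxs, "agg.upd", InsertPt);
}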
2592//===----------------------------------------------------------------------===//
2593// PHINode Class
2594//===----------------------------------------------------------------------===//
2595
2596// PHINode - The PHINode class is used to represent the magical mystical PHI
2597// node, that can not exist in nature, but can be synthesized in a computer
2598// scientist's overactive imagination.
2599//
2600class PHINode : public Instruction {
2601 constexpr static HungOffOperandsAllocMarker AllocMarker{};
2602
2603 /// The number of operands actually allocated. NumOperands is
2604 /// the number actually in use.
2605 unsigned ReservedSpace;
2606
2607 PHINode(const PHINode &PN);
2608
2609 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2610 const Twine &NameStr = "",
2611 InsertPosition InsertBefore = nullptr)
2612 : Instruction(Ty, Instruction::PHI, AllocMarker, InsertBefore),
2613 ReservedSpace(NumReservedValues) {
2614 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2615 setName(NameStr);
2616 allocHungoffUses(ReservedSpace);
2617 }
2618
2619protected:
2620 // Note: Instruction needs to be a friend here to call cloneImpl.
2621 friend class Instruction;
2622
2623 PHINode *cloneImpl() const;
2624
2625 // allocHungoffUses - this is more complicated than the generic
2626 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2627 // values and pointers to the incoming blocks, all in one allocation.
2628 void allocHungoffUses(unsigned N) {
2629 User::allocHungoffUses(N, /* IsPhi */ true);
2630 }
2631
2632public:
2633 /// Constructors - NumReservedValues is a hint for the number of incoming
2634 /// edges that this phi node will have (use 0 if you really have no idea).
2635 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2636 const Twine &NameStr = "",
2637 InsertPosition InsertBefore = nullptr) {
2638 return new (AllocMarker)
2639 PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2640 }
2641
2642 /// Provide fast operand accessors
2643 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2644
2645 // Block iterator interface. This provides access to the list of incoming
2646 // basic blocks, which parallels the list of incoming values.
2647 // Please note that we are not providing non-const iterators for blocks to
2648 // force all updates go through an interface function.
2649
2650 using block_iterator = BasicBlock **;
2651 using const_block_iterator = BasicBlock * const *;
2652
2653 const_block_iterator block_begin() const {
2654 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2655 }
2656
2657 const_block_iterator block_end() const {
2658 return block_begin() + getNumOperands();
2659 }
2660
2661 iterator_range<const_block_iterator> blocks() const {
2662 return make_range(block_begin(), block_end());
2663 }
2664
2665 op_range incoming_values() { return operands(); }
2666
2667 const_op_range incoming_values() const { return operands(); }
2668
2669 /// Return the number of incoming edges
2670 ///
2671 unsigned getNumIncomingValues() const { return getNumOperands(); }
2672
2673 /// Return incoming value number x
2674 ///
2675 Value *getIncomingValue(unsigned i) const {
2676 return getOperand(i);
2677 }
2678 void setIncomingValue(unsigned i, Value *V) {
2679 assert(V && "PHI node got a null value!");
2680 assert(getType() == V->getType() &&
2681 "All operands to PHI node must be the same type as the PHI node!");
2682 setOperand(i, V);
2683 }
2684
2685 static unsigned getOperandNumForIncomingValue(unsigned i) {
2686 return i;
2687 }
2688
2689 static unsigned getIncomingValueNumForOperand(unsigned i) {
2690 return i;
2691 }
2692
2693 /// Return incoming basic block number @p i.
2694 ///
2695 BasicBlock *getIncomingBlock(unsigned i) const {
2696 return block_begin()[i];
2697 }
2698
2699 /// Return incoming basic block corresponding
2700 /// to an operand of the PHI.
2701 ///
2702 BasicBlock *getIncomingBlock(const Use &U) const {
2703 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2704 return getIncomingBlock(unsigned(&U - op_begin()));
2705 }
2706
2707 /// Return incoming basic block corresponding
2708 /// to value use iterator.
2709 ///
2710 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2711 return getIncomingBlock(I.getUse());
2712 }
2713
2714 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2715 const_cast<block_iterator>(block_begin())[i] = BB;
2716 }
2717
2718 /// Copies the basic blocks from \p BBRange to the incoming basic block list
2719 /// of this PHINode, starting at \p ToIdx.
2720 void copyIncomingBlocks(iterator_range<const_block_iterator> BBRange,
2721 uint32_t ToIdx = 0) {
2722 copy(BBRange, const_cast<block_iterator>(block_begin()) + ToIdx);
2723 }
2724
2725 /// Replace every incoming basic block \p Old to basic block \p New.
2726 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2727 assert(New && Old && "PHI node got a null basic block!");
2728 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2729 if (getIncomingBlock(Op) == Old)
2730 setIncomingBlock(Op, New);
2731 }
2732
2733 /// Add an incoming value to the end of the PHI list
2734 ///
2735 void addIncoming(Value *V, BasicBlock *BB) {
2736 if (getNumOperands() == ReservedSpace)
2737 growOperands(); // Get more space!
2738 // Initialize some new operands.
2739 setNumHungOffUseOperands(getNumOperands() + 1);
2740 setIncomingValue(getNumOperands() - 1, V);
2741 setIncomingBlock(getNumOperands() - 1, BB);
2742 }
2743
2744 /// Remove an incoming value. This is useful if a
2745 /// predecessor basic block is deleted. The value removed is returned.
2746 ///
2747 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2748 /// is true), the PHI node is destroyed and any uses of it are replaced with
2749 /// dummy values. The only time there should be zero incoming values to a PHI
2750 /// node is when the block is dead, so this strategy is sound.
2751 ///
2752 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2753
2754 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2755 int Idx = getBasicBlockIndex(BB);
2756 assert(Idx >= 0 && "Invalid basic block argument to remove!");
2757 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2758 }
2759
2760 /// Remove all incoming values for which the predicate returns true.
2761 /// The predicate accepts the incoming value index.
2762 void removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
2763 bool DeletePHIIfEmpty = true);
2764
2765 /// Return the first index of the specified basic
2766 /// block in the value list for this PHI. Returns -1 if no instance.
2767 ///
2768 int getBasicBlockIndex(const BasicBlock *BB) const {
2769 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2770 if (block_begin()[i] == BB)
2771 return i;
2772 return -1;
2773 }
2774
2775 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2776 int Idx = getBasicBlockIndex(BB);
2777 assert(Idx >= 0 && "Invalid basic block argument!");
2778 return getIncomingValue(Idx);
2779 }
2780
2781 /// Set every incoming value(s) for block \p BB to \p V.
2782 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2783 assert(BB && "PHI node got a null basic block!");
2784 bool Found = false;
2785 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2786 if (getIncomingBlock(Op) == BB) {
2787 Found = true;
2788 setIncomingValue(Op, V);
2789 }
2790 (void)Found;
2791 assert(Found && "Invalid basic block argument to set!");
2792 }
2793
2794 /// If the specified PHI node always merges together the
2795 /// same value, return the value, otherwise return null.
2796 Value *hasConstantValue() const;
2797
2798 /// Whether the specified PHI node always merges
2799 /// together the same value, assuming undefs are equal to a unique
2800 /// non-undef value.
2801 bool hasConstantOrUndefValue() const;
2802
2803 /// If the PHI node is complete which means all of its parent's predecessors
2804 /// have incoming value in this PHI, return true, otherwise return false.
2805 bool isComplete() const {
2806 return llvm::all_of(predecessors(getParent()),
2807 [this](const BasicBlock *Pred) {
2808 return getBasicBlockIndex(Pred) >= 0;
2809 });
2810 }
2811
2812 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2813 static bool classof(const Instruction *I) {
2814 return I->getOpcode() == Instruction::PHI;
2815 }
2816 static bool classof(const Value *V) {
2817 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2818 }
2819
2820private:
2821 void growOperands();
2822};
2823
2824template <> struct OperandTraits<PHINode> : public HungoffOperandTraits {};
2825
2827
2828//===----------------------------------------------------------------------===//
2829// LandingPadInst Class
2830//===----------------------------------------------------------------------===//
2831
2832//===---------------------------------------------------------------------------
2833/// The landingpad instruction holds all of the information
2834/// necessary to generate correct exception handling. The landingpad instruction
2835/// cannot be moved from the top of a landing pad block, which itself is
2836/// accessible only from the 'unwind' edge of an invoke. This uses the
2837/// SubclassData field in Value to store whether or not the landingpad is a
2838/// cleanup.
2839///
2840class LandingPadInst : public Instruction {
2841 using CleanupField = BoolBitfieldElementT<0>;
2842
2843 constexpr static HungOffOperandsAllocMarker AllocMarker{};
2844
2845 /// The number of operands actually allocated. NumOperands is
2846 /// the number actually in use.
2847 unsigned ReservedSpace;
2848
2849 LandingPadInst(const LandingPadInst &LP);
2850
2851public:
2852 enum ClauseType { Catch, Filter };
2853
2854private:
2855 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2856 const Twine &NameStr, InsertPosition InsertBefore);
2857
2858 // Allocate space for exactly zero operands.
2859 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
2860
2861 void growOperands(unsigned Size);
2862 void init(unsigned NumReservedValues, const Twine &NameStr);
2863
2864protected:
2865 // Note: Instruction needs to be a friend here to call cloneImpl.
2866 friend class Instruction;
2867
2868 LandingPadInst *cloneImpl() const;
2869
2870public:
2871 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2872
2873 /// Constructors - NumReservedClauses is a hint for the number of incoming
2874 /// clauses that this landingpad will have (use 0 if you really have no idea).
2875 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2876 const Twine &NameStr = "",
2877 InsertPosition InsertBefore = nullptr);
2878
2879 /// Provide fast operand accessors
2880 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2881
2882 /// Return 'true' if this landingpad instruction is a
2883 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2884 /// doesn't catch the exception.
2885 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2886
2887 /// Indicate that this landingpad instruction is a cleanup.
2888 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2889
2890 /// Add a catch or filter clause to the landing pad.
2891 void addClause(Constant *ClauseVal);
2892
2893 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2894 /// determine what type of clause this is.
2895 Constant *getClause(unsigned Idx) const {
2896 return cast<Constant>(getOperandList()[Idx]);
2897 }
2898
2899 /// Return 'true' if the clause and index Idx is a catch clause.
2900 bool isCatch(unsigned Idx) const {
2901 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2902 }
2903
2904 /// Return 'true' if the clause and index Idx is a filter clause.
2905 bool isFilter(unsigned Idx) const {
2906 return isa<ArrayType>(getOperandList()[Idx]->getType());
2907 }
2908
2909 /// Get the number of clauses for this landing pad.
2910 unsigned getNumClauses() const { return getNumOperands(); }
2911
2912 /// Grow the size of the operand list to accommodate the new
2913 /// number of clauses.
2914 void reserveClauses(unsigned Size) { growOperands(Size); }
2915
2916 // Methods for support type inquiry through isa, cast, and dyn_cast:
2917 static bool classof(const Instruction *I) {
2918 return I->getOpcode() == Instruction::LandingPad;
2919 }
2920 static bool classof(const Value *V) {
2921 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2922 }
2923};
2924
2925template <>
2926struct OperandTraits<LandingPadInst> : public HungoffOperandTraits {};
2927
2928DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
2929
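// A minimal usage sketch for LandingPadInst: one catch clause plus a cleanup.
// The exception type and the typeinfo constant are assumed to exist; the
// helper name is hypothetical.
inline LandingPadInst *buildLandingPadExample(Type *ExnTy, Constant *TypeInfo,
                                              InsertPosition InsertPt) {
  LandingPadInst *LP =
      LandingPadInst::Create(ExnTy, /*NumReservedClauses=*/1, "lpad", InsertPt);
  LP->addClause(TypeInfo); // non-array clause type => catch clause
  LP->setCleanup(true);    // also run cleanups when no clause matches
  assert(LP->isCatch(0) && !LP->isFilter(0));
  return LP;
}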
2930//===----------------------------------------------------------------------===//
2931// ReturnInst Class
2932//===----------------------------------------------------------------------===//
2933
2934//===---------------------------------------------------------------------------
2935/// Return a value (possibly void), from a function. Execution
2936/// does not continue in this function any longer.
2937///
2938class ReturnInst : public Instruction {
2939 ReturnInst(const ReturnInst &RI);
2940
2941private:
2942 // ReturnInst constructors:
2943 // ReturnInst() - 'ret void' instruction
2944 // ReturnInst( null) - 'ret void' instruction
2945 // ReturnInst(Value* X) - 'ret X' instruction
2946 // ReturnInst(null, Iterator It) - 'ret void' instruction, insert before I
2947 // ReturnInst(Value* X, Iterator It) - 'ret X' instruction, insert before I
2948 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2949 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2950 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2951 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2952 //
2953 // NOTE: If the Value* passed is of type void then the constructor behaves as
2954 // if it was passed NULL.
2955 explicit ReturnInst(LLVMContext &C, Value *retVal, AllocInfo AllocInfo,
2956 InsertPosition InsertBefore);
2957
2958protected:
2959 // Note: Instruction needs to be a friend here to call cloneImpl.
2960 friend class Instruction;
2961
2962 ReturnInst *cloneImpl() const;
2963
2964public:
2965 static ReturnInst *Create(LLVMContext &C, Value *retVal = nullptr,
2966 InsertPosition InsertBefore = nullptr) {
2967 IntrusiveOperandsAllocMarker AllocMarker{retVal ? 1U : 0U};
2968 return new (AllocMarker) ReturnInst(C, retVal, AllocMarker, InsertBefore);
2969 }
2970
2971 static ReturnInst *Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
2972 IntrusiveOperandsAllocMarker AllocMarker{0};
2973 return new (AllocMarker) ReturnInst(C, nullptr, AllocMarker, InsertAtEnd);
2974 }
2975
2976 /// Provide fast operand accessors
2977 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2978
2979 /// Convenience accessor. Returns null if there is no return value.
2980 Value *getReturnValue() const {
2981 return getNumOperands() != 0 ? getOperand(0) : nullptr;
2982 }
2983
2984 unsigned getNumSuccessors() const { return 0; }
2985
2986 // Methods for support type inquiry through isa, cast, and dyn_cast:
2987 static bool classof(const Instruction *I) {
2988 return (I->getOpcode() == Instruction::Ret);
2989 }
2990 static bool classof(const Value *V) {
2991 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2992 }
2993
2994private:
2995 BasicBlock *getSuccessor(unsigned idx) const {
2996 llvm_unreachable("ReturnInst has no successors!");
2997 }
2998
2999 void setSuccessor(unsigned idx, BasicBlock *B) {
3000 llvm_unreachable("ReturnInst has no successors!");
3001 }
3002};
3003
3004template <>
3005struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {};
3006
3007DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
3008
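// A minimal usage sketch for ReturnInst; passing a null value produces
// `ret void`, a non-null value produces `ret <V>`. The helper name is
// hypothetical.
inline ReturnInst *buildReturnExample(LLVMContext &C, Value *RetVal,
                                      InsertPosition InsertPt) {
  return ReturnInst::Create(C, RetVal, InsertPt);
}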
3009//===----------------------------------------------------------------------===//
3010// BranchInst Class
3011//===----------------------------------------------------------------------===//
3012
3013//===---------------------------------------------------------------------------
3014/// Conditional or Unconditional Branch instruction.
3015///
3016class BranchInst : public Instruction {
3017 /// Ops list - Branches are strange. The operands are ordered:
3018 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3019 /// they don't have to check for cond/uncond branchness. These are mostly
3020 /// accessed relative from op_end().
3021 BranchInst(const BranchInst &BI);
3022 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3023 // BranchInst(BB *B) - 'br B'
3024 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3025 // BranchInst(BB* B, Iter It) - 'br B' insert before I
3026 // BranchInst(BB* T, BB *F, Value *C, Iter It) - 'br C, T, F', insert before I
3027 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3028 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3029 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3030 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3031 explicit BranchInst(BasicBlock *IfTrue, AllocInfo AllocInfo,
3032 InsertPosition InsertBefore);
3033 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3034 AllocInfo AllocInfo, InsertPosition InsertBefore);
3035
3036 void AssertOK();
3037
3038protected:
3039 // Note: Instruction needs to be a friend here to call cloneImpl.
3040 friend class Instruction;
3041
3042 BranchInst *cloneImpl() const;
3043
3044public:
3045 /// Iterator type that casts an operand to a basic block.
3046 ///
3047 /// This only makes sense because the successors are stored as adjacent
3048 /// operands for branch instructions.
3049 struct succ_op_iterator
3050 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3051 std::random_access_iterator_tag, BasicBlock *,
3052 ptrdiff_t, BasicBlock *, BasicBlock *> {
3053 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3054
3055 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3056 BasicBlock *operator->() const { return operator*(); }
3057 };
3058
3059 /// The const version of `succ_op_iterator`.
3060 struct const_succ_op_iterator
3061 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3062 std::random_access_iterator_tag,
3063 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3064 const BasicBlock *> {
3065 explicit const_succ_op_iterator(const_value_op_iterator I)
3066 : iterator_adaptor_base(I) {}
3067
3068 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3069 const BasicBlock *operator->() const { return operator*(); }
3070 };
3071
3072 static BranchInst *Create(BasicBlock *IfTrue,
3073 InsertPosition InsertBefore = nullptr) {
3074 IntrusiveOperandsAllocMarker AllocMarker{1};
3075 return new (AllocMarker) BranchInst(IfTrue, AllocMarker, InsertBefore);
3076 }
3077
3078 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3079 Value *Cond,
3080 InsertPosition InsertBefore = nullptr) {
3081 IntrusiveOperandsAllocMarker AllocMarker{3};
3082 return new (AllocMarker)
3083 BranchInst(IfTrue, IfFalse, Cond, AllocMarker, InsertBefore);
3084 }
3085
3086 /// Transparently provide more efficient getOperand methods.
3087 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3088
3089 bool isUnconditional() const { return getNumOperands() == 1; }
3090 bool isConditional() const { return getNumOperands() == 3; }
3091
3092 Value *getCondition() const {
3093 assert(isConditional() && "Cannot get condition of an uncond branch!");
3094 return Op<-3>();
3095 }
3096
3097 void setCondition(Value *V) {
3098 assert(isConditional() && "Cannot set condition of unconditional branch!");
3099 Op<-3>() = V;
3100 }
3101
3102 unsigned getNumSuccessors() const { return 1+isConditional(); }
3103
3104 BasicBlock *getSuccessor(unsigned i) const {
3105 assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3106 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3107 }
3108
3109 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3110 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3111 *(&Op<-1>() - idx) = NewSucc;
3112 }
3113
3114 /// Swap the successors of this branch instruction.
3115 ///
3116 /// Swaps the successors of the branch instruction. This also swaps any
3117 /// branch weight metadata associated with the instruction so that it
3118 /// continues to map correctly to each operand.
3119 void swapSuccessors();
3120
3121 iterator_range<succ_op_iterator> successors() {
3122 return make_range(
3123 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3124 succ_op_iterator(value_op_end()));
3125 }
3126
3127 iterator_range<const_succ_op_iterator> successors() const {
3128 return make_range(const_succ_op_iterator(
3129 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3130 const_succ_op_iterator(value_op_end()));
3131 }
3132
3133 // Methods for support type inquiry through isa, cast, and dyn_cast:
3134 static bool classof(const Instruction *I) {
3135 return (I->getOpcode() == Instruction::Br);
3136 }
3137 static bool classof(const Value *V) {
3138 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3139 }
3140};
3141
3142template <>
3143struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst> {};
3144
3145DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
3146
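// A minimal usage sketch for BranchInst: a conditional branch and the
// successor accessors declared above. Blocks and condition are assumed to
// exist; the helper name is hypothetical.
inline BranchInst *buildCondBrExample(BasicBlock *Then, BasicBlock *Else,
                                      Value *Cond, InsertPosition InsertPt) {
  BranchInst *Br = BranchInst::Create(Then, Else, Cond, InsertPt);
  // Operands are laid out [Cond, FalseDest, TrueDest], so successor 0 is the
  // taken ("true") destination.
  assert(Br->isConditional() && Br->getNumSuccessors() == 2);
  assert(Br->getSuccessor(0) == Then && Br->getSuccessor(1) == Else);
  return Br;
}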
3147//===----------------------------------------------------------------------===//
3148// SwitchInst Class
3149//===----------------------------------------------------------------------===//
3150
3151//===---------------------------------------------------------------------------
3152/// Multiway switch
3153///
3154class SwitchInst : public Instruction {
3155 constexpr static HungOffOperandsAllocMarker AllocMarker{};
3156
3157 unsigned ReservedSpace;
3158
3159 // Operand[0] = Value to switch on
3160 // Operand[1] = Default basic block destination
3161 // Operand[2n ] = Value to match
3162 // Operand[2n+1] = BasicBlock to go to on match
3163 SwitchInst(const SwitchInst &SI);
3164
3165 /// Create a new switch instruction, specifying a value to switch on and a
3166 /// default destination. The number of additional cases can be specified here
3167 /// to make memory allocation more efficient. This constructor can also
3168 /// auto-insert before another instruction.
3169 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3170 InsertPosition InsertBefore);
3171
3172 // allocate space for exactly zero operands
3173 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
3174
3175 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3176 void growOperands();
3177
3178protected:
3179 // Note: Instruction needs to be a friend here to call cloneImpl.
3180 friend class Instruction;
3181
3182 SwitchInst *cloneImpl() const;
3183
3184public:
3185 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3186
3187 // -2
3188 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3189
3190 template <typename CaseHandleT> class CaseIteratorImpl;
3191
3192 /// A handle to a particular switch case. It exposes a convenient interface
3193 /// to both the case value and the successor block.
3194 ///
3195 /// We define this as a template and instantiate it to form both a const and
3196 /// non-const handle.
3197 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3198 class CaseHandleImpl {
3199 // Directly befriend both const and non-const iterators.
3200 friend class SwitchInst::CaseIteratorImpl<
3201 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3202
3203 protected:
3204 // Expose the switch type we're parameterized with to the iterator.
3205 using SwitchInstType = SwitchInstT;
3206
3207 SwitchInstT *SI;
3208 ptrdiff_t Index;
3209
3210 CaseHandleImpl() = default;
3211 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3212
3213 public:
3214 /// Resolves case value for current case.
3215 ConstantIntT *getCaseValue() const {
3216 assert((unsigned)Index < SI->getNumCases() &&
3217 "Index out the number of cases.");
3218 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3219 }
3220
3221 /// Resolves successor for current case.
3222 BasicBlockT *getCaseSuccessor() const {
3223 assert(((unsigned)Index < SI->getNumCases() ||
3224 (unsigned)Index == DefaultPseudoIndex) &&
3225 "Index out the number of cases.");
3226 return SI->getSuccessor(getSuccessorIndex());
3227 }
3228
3229 /// Returns number of current case.
3230 unsigned getCaseIndex() const { return Index; }
3231
3232 /// Returns successor index for current case successor.
3233 unsigned getSuccessorIndex() const {
3234 assert(((unsigned)Index == DefaultPseudoIndex ||
3235 (unsigned)Index < SI->getNumCases()) &&
3236 "Index out the number of cases.");
3237 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3238 }
3239
3240 bool operator==(const CaseHandleImpl &RHS) const {
3241 assert(SI == RHS.SI && "Incompatible operators.");
3242 return Index == RHS.Index;
3243 }
3244 };
3245
3246 using ConstCaseHandle =
3247 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3248
3249 class CaseHandle
3250 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3251 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3252
3253 public:
3254 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3255
3256 /// Sets the new value for current case.
3257 void setValue(ConstantInt *V) const {
3258 assert((unsigned)Index < SI->getNumCases() &&
3259 "Index out the number of cases.");
3260 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3261 }
3262
3263 /// Sets the new successor for current case.
3264 void setSuccessor(BasicBlock *S) const {
3265 SI->setSuccessor(getSuccessorIndex(), S);
3266 }
3267 };
3268
3269 template <typename CaseHandleT>
3270 class CaseIteratorImpl
3271 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3272 std::random_access_iterator_tag,
3273 const CaseHandleT> {
3274 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3275
3276 CaseHandleT Case;
3277
3278 public:
3279 /// Default constructed iterator is in an invalid state until assigned to
3280 /// a case for a particular switch.
3281 CaseIteratorImpl() = default;
3282
3283 /// Initializes case iterator for given SwitchInst and for given
3284 /// case number.
3285 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3286
3287 /// Initializes case iterator for given SwitchInst and for given
3288 /// successor index.
3289 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3290 unsigned SuccessorIndex) {
3291 assert(SuccessorIndex < SI->getNumSuccessors() &&
3292 "Successor index # out of range!");
3293 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3294 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3295 }
3296
3297 /// Support converting to the const variant. This will be a no-op for const
3298 /// variant.
3299 operator CaseIteratorImpl<ConstCaseHandle>() const {
3300 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3301 }
3302
3303 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3304 // Check index correctness after addition.
3305 // Note: Index == getNumCases() means end().
3306 assert(Case.Index + N >= 0 &&
3307 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3308 "Case.Index out the number of cases.");
3309 Case.Index += N;
3310 return *this;
3311 }
3312 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3313 // Check index correctness after subtraction.
3314 // Note: Case.Index == getNumCases() means end().
3315 assert(Case.Index - N >= 0 &&
3316 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3317 "Case.Index out the number of cases.");
3318 Case.Index -= N;
3319 return *this;
3320 }
3321 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3322 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3323 return Case.Index - RHS.Case.Index;
3324 }
3325 bool operator==(const CaseIteratorImpl &RHS) const {
3326 return Case == RHS.Case;
3327 }
3328 bool operator<(const CaseIteratorImpl &RHS) const {
3329 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3330 return Case.Index < RHS.Case.Index;
3331 }
3332 const CaseHandleT &operator*() const { return Case; }
3333 };
3334
3335 using CaseIt = CaseIteratorImpl<CaseHandle>;
3336 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3337
3338 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3339 unsigned NumCases,
3340 InsertPosition InsertBefore = nullptr) {
3341 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3342 }
3343
3344 /// Provide fast operand accessors
3346
3347 // Accessor Methods for Switch stmt
3348 Value *getCondition() const { return getOperand(0); }
3349 void setCondition(Value *V) { setOperand(0, V); }
3350
3351 BasicBlock *getDefaultDest() const {
3352 return cast<BasicBlock>(getOperand(1));
3353 }
3354
3355 /// Returns true if the default branch must result in immediate undefined
3356 /// behavior, false otherwise.
3357 bool defaultDestUndefined() const {
3358 return isa<UnreachableInst>(getDefaultDest()->getFirstNonPHIOrDbg());
3359 }
3360
3361 void setDefaultDest(BasicBlock *DefaultCase) {
3362 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3363 }
3364
3365 /// Return the number of 'cases' in this switch instruction, excluding the
3366 /// default case.
3367 unsigned getNumCases() const {
3368 return getNumOperands()/2 - 1;
3369 }
3370
3371 /// Returns a read/write iterator that points to the first case in the
3372 /// SwitchInst.
3373 CaseIt case_begin() {
3374 return CaseIt(this, 0);
3375 }
3376
3377 /// Returns a read-only iterator that points to the first case in the
3378 /// SwitchInst.
3379 ConstCaseIt case_begin() const {
3380 return ConstCaseIt(this, 0);
3381 }
3382
3383 /// Returns a read/write iterator that points one past the last in the
3384 /// SwitchInst.
3385 CaseIt case_end() {
3386 return CaseIt(this, getNumCases());
3387 }
3388
3389 /// Returns a read-only iterator that points one past the last in the
3390 /// SwitchInst.
3391 ConstCaseIt case_end() const {
3392 return ConstCaseIt(this, getNumCases());
3393 }
3394
3395 /// Iteration adapter for range-for loops.
3396 iterator_range<CaseIt> cases() {
3397 return make_range(case_begin(), case_end());
3398 }
3399
3400 /// Constant iteration adapter for range-for loops.
3401 iterator_range<ConstCaseIt> cases() const {
3402 return make_range(case_begin(), case_end());
3403 }
3404
3405 /// Returns an iterator that points to the default case.
3406 /// Note: this iterator can only resolve the successor; attempting to
3407 /// resolve the case value causes an assertion.
3408 /// Also note that incrementing or decrementing this iterator causes an
3409 /// assertion and leaves it in an invalid state.
3410 CaseIt case_default() {
3411 return CaseIt(this, DefaultPseudoIndex);
3412 }
3413 ConstCaseIt case_default() const {
3414 return ConstCaseIt(this, DefaultPseudoIndex);
3415 }
3416
3417 /// Search all of the case values for the specified constant. If it is
3418 /// explicitly handled, return its case iterator; otherwise return the
3419 /// default case iterator to indicate that it is handled by the default
3420 /// handler.
3421 CaseIt findCaseValue(const ConstantInt *C) {
3422 return CaseIt(
3423 this,
3424 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3425 }
3426 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3427 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3428 return Case.getCaseValue() == C;
3429 });
3430 if (I != case_end())
3431 return I;
3432
3433 return case_default();
3434 }
3435
3436 /// Finds the unique case value for a given successor. Returns null if the
3437 /// successor is not found, not unique, or is the default case.
3438 ConstantInt *findCaseDest(BasicBlock *BB) {
3439 if (BB == getDefaultDest())
3440 return nullptr;
3441
3442 ConstantInt *CI = nullptr;
3443 for (auto Case : cases()) {
3444 if (Case.getCaseSuccessor() != BB)
3445 continue;
3446
3447 if (CI)
3448 return nullptr; // Multiple cases lead to BB.
3449
3450 CI = Case.getCaseValue();
3451 }
3452
3453 return CI;
3454 }
3455
3456 /// Add an entry to the switch instruction.
3457 /// Note:
3458 /// This action invalidates case_end(). Old case_end() iterator will
3459 /// point to the added case.
3460 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3461
3462 /// This method removes the specified case and its successor from the switch
3463 /// instruction. Note that this operation may reorder the remaining cases at
3464 /// index idx and above.
3465 /// Note:
3466 /// This action invalidates iterators for all cases following the one removed,
3467 /// including the case_end() iterator. It returns an iterator for the next
3468 /// case.
3469 CaseIt removeCase(CaseIt I);
3470
3471 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3472 BasicBlock *getSuccessor(unsigned idx) const {
3473 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3474 return cast<BasicBlock>(getOperand(idx*2+1));
3475 }
3476 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3477 assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3478 setOperand(idx * 2 + 1, NewSucc);
3479 }
3480
3481 // Methods for support type inquiry through isa, cast, and dyn_cast:
3482 static bool classof(const Instruction *I) {
3483 return I->getOpcode() == Instruction::Switch;
3484 }
3485 static bool classof(const Value *V) {
3486 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3487 }
3488};
3489
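// --- Editor's usage sketch (not part of Instructions.h) ---------------------
// A minimal illustration of the case-handle API declared above. `SI` is
// assumed to be an existing SwitchInst and `C` a ConstantInt of the
// condition's type; `describeSwitch` is a hypothetical helper name.
inline void describeSwitch(SwitchInst *SI, ConstantInt *C) {
  for (auto Case : SI->cases()) {
    ConstantInt *Val = Case.getCaseValue();     // value tested by this case
    BasicBlock *Succ = Case.getCaseSuccessor(); // block taken on a match
    (void)Val;
    (void)Succ;
  }
  // findCaseValue() returns the matching case, or case_default() when C is
  // not explicitly handled; the default pseudo-case can still resolve its
  // successor (the default destination).
  BasicBlock *Target = SI->findCaseValue(C)->getCaseSuccessor();
  (void)Target;
}
// -----------------------------------------------------------------------------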
3490/// A wrapper class to simplify modification of SwitchInst cases along with
3491/// their prof branch_weights metadata.
3492 class SwitchInstProfUpdateWrapper {
3493 SwitchInst &SI;
3494 std::optional<SmallVector<uint32_t, 8>> Weights;
3495 bool Changed = false;
3496
3497protected:
3498 MDNode *buildProfBranchWeightsMD();
3499
3500 void init();
3501
3502public:
3503 using CaseWeightOpt = std::optional<uint32_t>;
3504 SwitchInst *operator->() { return &SI; }
3505 SwitchInst &operator*() { return SI; }
3506 operator SwitchInst *() { return &SI; }
3507
3508 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3509
3510 ~SwitchInstProfUpdateWrapper() {
3511 if (Changed)
3512 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3513 }
3514
3515 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3516 /// corresponding branch weight.
3517 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3518
3519 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3520 /// specified branch weight for the added case.
3521 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3522
3523 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3524 /// this object so that its destructor does not touch the underlying SwitchInst.
3525 void eraseFromParent();
3526
3527 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3528 CaseWeightOpt getSuccessorWeight(unsigned idx);
3529
3530 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3531};
3532
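// --- Editor's usage sketch (not part of Instructions.h) ---------------------
// A hedged sketch of the wrapper above: mutate cases through it so the
// branch_weights metadata stays consistent. `SI`, `OnVal`, `Dest`, and
// `Weight` are assumed inputs; `addWeightedCase` is a hypothetical name.
inline void addWeightedCase(SwitchInst &SI, ConstantInt *OnVal,
                            BasicBlock *Dest, uint32_t Weight) {
  SwitchInstProfUpdateWrapper Wrapper(SI);
  // Forwards to SwitchInst::addCase() and records the weight of the new case.
  Wrapper.addCase(OnVal, Dest, Weight);
  // On destruction the wrapper rewrites the prof metadata if anything changed.
}
// -----------------------------------------------------------------------------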
3533template <> struct OperandTraits<SwitchInst> : public HungoffOperandTraits {};
3534
3536
3537//===----------------------------------------------------------------------===//
3538// IndirectBrInst Class
3539//===----------------------------------------------------------------------===//
3540
3541//===---------------------------------------------------------------------------
3542/// Indirect Branch Instruction.
3543///
3544 class IndirectBrInst : public Instruction {
3545 constexpr static HungOffOperandsAllocMarker AllocMarker{};
3546
3547 unsigned ReservedSpace;
3548
3549 // Operand[0] = Address to jump to
3550 // Operand[n+1] = n-th destination
3551 IndirectBrInst(const IndirectBrInst &IBI);
3552
3553 /// Create a new indirectbr instruction, specifying an
3554 /// Address to jump to. The number of expected destinations can be specified
3555 /// here to make memory allocation more efficient. This constructor can also
3556 /// autoinsert before another instruction.
3557 IndirectBrInst(Value *Address, unsigned NumDests,
3558 InsertPosition InsertBefore);
3559
3560 // allocate space for exactly zero operands
3561 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
3562
3563 void init(Value *Address, unsigned NumDests);
3564 void growOperands();
3565
3566protected:
3567 // Note: Instruction needs to be a friend here to call cloneImpl.
3568 friend class Instruction;
3569
3570 IndirectBrInst *cloneImpl() const;
3571
3572public:
3573 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3574
3575 /// Iterator type that casts an operand to a basic block.
3576 ///
3577 /// This only makes sense because the successors are stored as adjacent
3578 /// operands for indirectbr instructions.
3579 struct succ_op_iterator
3580 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3581 std::random_access_iterator_tag, BasicBlock *,
3582 ptrdiff_t, BasicBlock *, BasicBlock *> {
3583 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3584
3585 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3586 BasicBlock *operator->() const { return operator*(); }
3587 };
3588
3589 /// The const version of `succ_op_iterator`.
3590 struct const_succ_op_iterator
3591 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3592 std::random_access_iterator_tag,
3593 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3594 const BasicBlock *> {
3595 explicit const_succ_op_iterator(const_value_op_iterator I)
3596 : iterator_adaptor_base(I) {}
3597
3598 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3599 const BasicBlock *operator->() const { return operator*(); }
3600 };
3601
3602 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3603 InsertPosition InsertBefore = nullptr) {
3604 return new IndirectBrInst(Address, NumDests, InsertBefore);
3605 }
3606
3607 /// Provide fast operand accessors.
3609
3610 // Accessor Methods for IndirectBrInst instruction.
3611 Value *getAddress() { return getOperand(0); }
3612 const Value *getAddress() const { return getOperand(0); }
3613 void setAddress(Value *V) { setOperand(0, V); }
3614
3615 /// return the number of possible destinations in this
3616 /// indirectbr instruction.
3617 unsigned getNumDestinations() const { return getNumOperands()-1; }
3618
3619 /// Return the specified destination.
3620 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3621 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3622
3623 /// Add a destination.
3624 ///
3625 void addDestination(BasicBlock *Dest);
3626
3627 /// This method removes the specified successor from the
3628 /// indirectbr instruction.
3629 void removeDestination(unsigned i);
3630
3631 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3632 BasicBlock *getSuccessor(unsigned i) const {
3633 return cast<BasicBlock>(getOperand(i+1));
3634 }
3635 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3636 setOperand(i + 1, NewSucc);
3637 }
3638
3639 iterator_range<succ_op_iterator> successors() {
3640 return make_range(succ_op_iterator(std::next(value_op_begin())),
3641 succ_op_iterator(value_op_end()));
3642 }
3643
3644 iterator_range<const_succ_op_iterator> successors() const {
3645 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3646 const_succ_op_iterator(value_op_end()));
3647 }
3648
3649 // Methods for support type inquiry through isa, cast, and dyn_cast:
3650 static bool classof(const Instruction *I) {
3651 return I->getOpcode() == Instruction::IndirectBr;
3652 }
3653 static bool classof(const Value *V) {
3654 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3655 }
3656};
3657
3658template <>
3659 struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits {};
3660
3661 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
3662
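// --- Editor's usage sketch (not part of Instructions.h) ---------------------
// A minimal sketch of building an indirectbr with the API above. `Addr` is
// assumed to be a blockaddress-typed value and `Targets` the blocks it may
// name; `emitIndirectBr` is a hypothetical helper name.
inline IndirectBrInst *emitIndirectBr(Value *Addr,
                                      ArrayRef<BasicBlock *> Targets,
                                      BasicBlock *InsertAtEnd) {
  IndirectBrInst *IBI =
      IndirectBrInst::Create(Addr, Targets.size(), InsertAtEnd->end());
  for (BasicBlock *BB : Targets)
    IBI->addDestination(BB); // each destination becomes one more operand
  return IBI;
}
// -----------------------------------------------------------------------------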
3663//===----------------------------------------------------------------------===//
3664// InvokeInst Class
3665//===----------------------------------------------------------------------===//
3666
3667/// Invoke instruction. The SubclassData field is used to hold the
3668/// calling convention of the call.
3669///
3670class InvokeInst : public CallBase {
3671 /// The number of operands for this call beyond the called function,
3672 /// arguments, and operand bundles.
3673 static constexpr int NumExtraOperands = 2;
3674
3675 /// The index from the end of the operand array to the normal destination.
3676 static constexpr int NormalDestOpEndIdx = -3;
3677
3678 /// The index from the end of the operand array to the unwind destination.
3679 static constexpr int UnwindDestOpEndIdx = -2;
3680
3682
3683 /// Construct an InvokeInst given a range of arguments.
3684 ///
3685 /// Construct an InvokeInst from a range of arguments
3686 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3687 BasicBlock *IfException, ArrayRef<Value *> Args,
3688 ArrayRef<OperandBundleDef> Bundles, AllocInfo AllocInfo,
3689 const Twine &NameStr, InsertPosition InsertBefore);
3690
3691 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3692 BasicBlock *IfException, ArrayRef<Value *> Args,
3693 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3694
3695 /// Compute the number of operands to allocate.
3696 static unsigned ComputeNumOperands(unsigned NumArgs,
3697 size_t NumBundleInputs = 0) {
3698 // We need one operand for the called function, plus our extra operands and
3699 // the input operand counts provided.
3700 return 1 + NumExtraOperands + NumArgs + unsigned(NumBundleInputs);
3701 }
3702
3703protected:
3704 // Note: Instruction needs to be a friend here to call cloneImpl.
3705 friend class Instruction;
3706
3707 InvokeInst *cloneImpl() const;
3708
3709public:
3710 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3711 BasicBlock *IfException, ArrayRef<Value *> Args,
3712 const Twine &NameStr,
3713 InsertPosition InsertBefore = nullptr) {
3714 IntrusiveOperandsAllocMarker AllocMarker{
3715 ComputeNumOperands(unsigned(Args.size()))};
3716 return new (AllocMarker) InvokeInst(Ty, Func, IfNormal, IfException, Args,
3717 {}, AllocMarker, NameStr, InsertBefore);
3718 }
3719
3720 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3721 BasicBlock *IfException, ArrayRef<Value *> Args,
3722 ArrayRef<OperandBundleDef> Bundles = {},
3723 const Twine &NameStr = "",
3724 InsertPosition InsertBefore = nullptr) {
3725 IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
3726 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)),
3727 unsigned(Bundles.size() * sizeof(BundleOpInfo))};
3728
3729 return new (AllocMarker)
3730 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, AllocMarker,
3731 NameStr, InsertBefore);
3732 }
3733
3734 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3735 BasicBlock *IfException, ArrayRef<Value *> Args,
3736 const Twine &NameStr,
3737 InsertPosition InsertBefore = nullptr) {
3738 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3739 IfException, Args, {}, NameStr, InsertBefore);
3740 }
3741
3742 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3743 BasicBlock *IfException, ArrayRef<Value *> Args,
3744 ArrayRef<OperandBundleDef> Bundles = {},
3745 const Twine &NameStr = "",
3746 InsertPosition InsertBefore = nullptr) {
3747 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3748 IfException, Args, Bundles, NameStr, InsertBefore);
3749 }
3750
3751 /// Create a clone of \p II with a different set of operand bundles and
3752 /// insert it before \p InsertBefore.
3753 ///
3754 /// The returned invoke instruction is identical to \p II in every way except
3755 /// that the operand bundles for the new instruction are set to the operand
3756 /// bundles in \p Bundles.
3757 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3758 InsertPosition InsertPt = nullptr);
3759
3760 // get*Dest - Return the destination basic blocks...
3761 BasicBlock *getNormalDest() const {
3762 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3763 }
3764 BasicBlock *getUnwindDest() const {
3765 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3766 }
3767 void setNormalDest(BasicBlock *B) {
3768 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3769 }
3770 void setUnwindDest(BasicBlock *B) {
3771 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3772 }
3773
3774 /// Get the landingpad instruction from the landing pad
3775 /// block (the unwind destination).
3776 LandingPadInst *getLandingPadInst() const;
3777
3778 BasicBlock *getSuccessor(unsigned i) const {
3779 assert(i < 2 && "Successor # out of range for invoke!");
3780 return i == 0 ? getNormalDest() : getUnwindDest();
3781 }
3782
3783 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3784 assert(i < 2 && "Successor # out of range for invoke!");
3785 if (i == 0)
3786 setNormalDest(NewSucc);
3787 else
3788 setUnwindDest(NewSucc);
3789 }
3790
3791 unsigned getNumSuccessors() const { return 2; }
3792
3793 /// Updates profile metadata by scaling it by \p S / \p T.
3794 void updateProfWeight(uint64_t S, uint64_t T);
3795
3796 // Methods for support type inquiry through isa, cast, and dyn_cast:
3797 static bool classof(const Instruction *I) {
3798 return (I->getOpcode() == Instruction::Invoke);
3799 }
3800 static bool classof(const Value *V) {
3801 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3802 }
3803
3804private:
3805 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3806 // method so that subclasses cannot accidentally use it.
3807 template <typename Bitfield>
3808 void setSubclassData(typename Bitfield::Type Value) {
3809 Instruction::setSubclassData<Bitfield>(Value);
3810 }
3811};
3812
3813InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3814 BasicBlock *IfException, ArrayRef<Value *> Args,
3815 ArrayRef<OperandBundleDef> Bundles, AllocInfo AllocInfo,
3816 const Twine &NameStr, InsertPosition InsertBefore)
3817 : CallBase(Ty->getReturnType(), Instruction::Invoke, AllocInfo,
3818 InsertBefore) {
3819 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3820}
3821
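// --- Editor's usage sketch (not part of Instructions.h) ---------------------
// A hedged sketch of creating an invoke through the factories above.
// `Callee`, `Args`, and the destination blocks are assumed to exist and to
// match the callee's signature; `emitInvoke` is a hypothetical helper name.
inline InvokeInst *emitInvoke(FunctionCallee Callee, ArrayRef<Value *> Args,
                              BasicBlock *NormalDest, BasicBlock *UnwindDest,
                              Instruction *InsertBefore) {
  InvokeInst *II =
      InvokeInst::Create(Callee, NormalDest, UnwindDest, Args, "", InsertBefore);
  // The two successors can be inspected or retargeted later.
  assert(II->getNormalDest() == NormalDest && II->getUnwindDest() == UnwindDest);
  return II;
}
// -----------------------------------------------------------------------------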
3822//===----------------------------------------------------------------------===//
3823// CallBrInst Class
3824//===----------------------------------------------------------------------===//
3825
3826/// CallBr instruction, tracking function calls that may not return control but
3827/// instead transfer it to a third location. The SubclassData field is used to
3828/// hold the calling convention of the call.
3829///
3830class CallBrInst : public CallBase {
3831
3832 unsigned NumIndirectDests;
3833
3835
3836 /// Construct a CallBrInst given a range of arguments.
3837 ///
3838 /// Construct a CallBrInst from a range of arguments
3839 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3840 ArrayRef<BasicBlock *> IndirectDests,
3841 ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
3842 AllocInfo AllocInfo, const Twine &NameStr,
3843 InsertPosition InsertBefore);
3844
3845 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3846 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
3847 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3848
3849 /// Compute the number of operands to allocate.
3850 static unsigned ComputeNumOperands(int NumArgs, int NumIndirectDests,
3851 int NumBundleInputs = 0) {
3852 // We need one operand for the called function, plus our extra operands and
3853 // the input operand counts provided.
3854 return unsigned(2 + NumIndirectDests + NumArgs + NumBundleInputs);
3855 }
3856
3857protected:
3858 // Note: Instruction needs to be a friend here to call cloneImpl.
3859 friend class Instruction;
3860
3861 CallBrInst *cloneImpl() const;
3862
3863public:
3864 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3865 BasicBlock *DefaultDest,
3866 ArrayRef<BasicBlock *> IndirectDests,
3867 ArrayRef<Value *> Args, const Twine &NameStr,
3868 InsertPosition InsertBefore = nullptr) {
3869 IntrusiveOperandsAllocMarker AllocMarker{
3870 ComputeNumOperands(Args.size(), IndirectDests.size())};
3871 return new (AllocMarker)
3872 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, {}, AllocMarker,
3873 NameStr, InsertBefore);
3874 }
3875
3876 static CallBrInst *
3877 Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3878 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
3879 ArrayRef<OperandBundleDef> Bundles = {}, const Twine &NameStr = "",
3880 InsertPosition InsertBefore = nullptr) {
3881 IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
3882 ComputeNumOperands(Args.size(), IndirectDests.size(),
3883 CountBundleInputs(Bundles)),
3884 unsigned(Bundles.size() * sizeof(BundleOpInfo))};
3885
3886 return new (AllocMarker)
3887 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
3888 AllocMarker, NameStr, InsertBefore);
3889 }
3890
3891 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
3892 ArrayRef<BasicBlock *> IndirectDests,
3893 ArrayRef<Value *> Args, const Twine &NameStr,
3894 InsertPosition InsertBefore = nullptr) {
3895 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
3896 IndirectDests, Args, NameStr, InsertBefore);
3897 }
3898
3899 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
3900 ArrayRef<BasicBlock *> IndirectDests,
3901 ArrayRef<Value *> Args,
3902 ArrayRef<OperandBundleDef> Bundles = {},
3903 const Twine &NameStr = "",
3904 InsertPosition InsertBefore = nullptr) {
3905 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
3906 IndirectDests, Args, Bundles, NameStr, InsertBefore);
3907 }
3908
3909 /// Create a clone of \p CBI with a different set of operand bundles and
3910 /// insert it before \p InsertBefore.
3911 ///
3912 /// The returned callbr instruction is identical to \p CBI in every way
3913 /// except that the operand bundles for the new instruction are set to the
3914 /// operand bundles in \p Bundles.
3915 static CallBrInst *Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> Bundles,
3916 InsertPosition InsertBefore = nullptr);
3917
3918 /// Return the number of callbr indirect dest labels.
3919 ///
3920 unsigned getNumIndirectDests() const { return NumIndirectDests; }
3921
3922 /// getIndirectDestLabel - Return the i-th indirect dest label.
3923 ///
3924 Value *getIndirectDestLabel(unsigned i) const {
3925 assert(i < getNumIndirectDests() && "Out of bounds!");
3926 return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
3927 }
3928
3929 Value *getIndirectDestLabelUse(unsigned i) const {
3930 assert(i < getNumIndirectDests() && "Out of bounds!");
3931 return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
3932 }
3933
3934 // Return the destination basic blocks...
3935 BasicBlock *getDefaultDest() const {
3936 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
3937 }
3938 BasicBlock *getIndirectDest(unsigned i) const {
3939 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
3940 }
3941 SmallVector<BasicBlock *, 16> getIndirectDests() const {
3942 SmallVector<BasicBlock *, 16> IndirectDests;
3943 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
3944 IndirectDests.push_back(getIndirectDest(i));
3945 return IndirectDests;
3946 }
3947 void setDefaultDest(BasicBlock *B) {
3948 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
3949 }
3950 void setIndirectDest(unsigned i, BasicBlock *B) {
3951 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
3952 }
3953
3954 BasicBlock *getSuccessor(unsigned i) const {
3955 assert(i < getNumSuccessors() + 1 &&
3956 "Successor # out of range for callbr!");
3957 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
3958 }
3959
3960 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3961 assert(i < getNumIndirectDests() + 1 &&
3962 "Successor # out of range for callbr!");
3963 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
3964 }
3965
3966 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
3967
3968 // Methods for support type inquiry through isa, cast, and dyn_cast:
3969 static bool classof(const Instruction *I) {
3970 return (I->getOpcode() == Instruction::CallBr);
3971 }
3972 static bool classof(const Value *V) {
3973 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3974 }
3975
3976private:
3977 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3978 // method so that subclasses cannot accidentally use it.
3979 template <typename Bitfield>
3980 void setSubclassData(typename Bitfield::Type Value) {
3981 Instruction::setSubclassData<Bitfield>(Value);
3982 }
3983};
3984
3985CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3986 ArrayRef<BasicBlock *> IndirectDests,
3987 ArrayRef<Value *> Args,
3988 ArrayRef<OperandBundleDef> Bundles, AllocInfo AllocInfo,
3989 const Twine &NameStr, InsertPosition InsertBefore)
3990 : CallBase(Ty->getReturnType(), Instruction::CallBr, AllocInfo,
3991 InsertBefore) {
3992 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
3993}
3994
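// --- Editor's usage sketch (not part of Instructions.h) ---------------------
// A hedged sketch of the factories above; callbr is the IR counterpart of
// `asm goto`, so the usual shape is one fallthrough destination plus the
// labels the inline asm may branch to. `AsmCallee`, `Args`, and the blocks
// are assumed inputs; `emitCallBr` is a hypothetical helper name.
inline CallBrInst *emitCallBr(FunctionCallee AsmCallee, ArrayRef<Value *> Args,
                              BasicBlock *Fallthrough,
                              ArrayRef<BasicBlock *> IndirectTargets,
                              Instruction *InsertBefore) {
  CallBrInst *CBI = CallBrInst::Create(AsmCallee, Fallthrough, IndirectTargets,
                                       Args, "", InsertBefore);
  // Successor 0 is the default destination; the rest are the indirect ones.
  assert(CBI->getNumSuccessors() == IndirectTargets.size() + 1);
  return CBI;
}
// -----------------------------------------------------------------------------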
3995//===----------------------------------------------------------------------===//
3996// ResumeInst Class
3997//===----------------------------------------------------------------------===//
3998
3999//===---------------------------------------------------------------------------
4000/// Resume the propagation of an exception.
4001///
4002class ResumeInst : public Instruction {
4003 constexpr static IntrusiveOperandsAllocMarker AllocMarker{1};
4004
4005 ResumeInst(const ResumeInst &RI);
4006
4007 explicit ResumeInst(Value *Exn, InsertPosition InsertBefore = nullptr);
4008
4009protected:
4010 // Note: Instruction needs to be a friend here to call cloneImpl.
4011 friend class Instruction;
4012
4013 ResumeInst *cloneImpl() const;
4014
4015public:
4016 static ResumeInst *Create(Value *Exn, InsertPosition InsertBefore = nullptr) {
4017 return new (AllocMarker) ResumeInst(Exn, InsertBefore);
4018 }
4019
4020 /// Provide fast operand accessors
4022
4023 /// Convenience accessor.
4024 Value *getValue() const { return Op<0>(); }
4025
4026 unsigned getNumSuccessors() const { return 0; }
4027
4028 // Methods for support type inquiry through isa, cast, and dyn_cast:
4029 static bool classof(const Instruction *I) {
4030 return I->getOpcode() == Instruction::Resume;
4031 }
4032 static bool classof(const Value *V) {
4033 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4034 }
4035
4036private:
4037 BasicBlock *getSuccessor(unsigned idx) const {
4038 llvm_unreachable("ResumeInst has no successors!");
4039 }
4040
4041 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4042 llvm_unreachable("ResumeInst has no successors!");
4043 }
4044};
4045
4046template <>
4047 struct OperandTraits<ResumeInst> :
4048 public FixedNumOperandTraits<ResumeInst, 1> {
4049};
4050
4052
4053//===----------------------------------------------------------------------===//
4054// CatchSwitchInst Class
4055//===----------------------------------------------------------------------===//
4056 class CatchSwitchInst : public Instruction {
4057 using UnwindDestField = BoolBitfieldElementT<0>;
4058
4059 constexpr static HungOffOperandsAllocMarker AllocMarker{};
4060
4061 /// The number of operands actually allocated. NumOperands is
4062 /// the number actually in use.
4063 unsigned ReservedSpace;
4064
4065 // Operand[0] = Outer scope
4066 // Operand[1] = Unwind block destination
4067 // Operand[n] = BasicBlock to go to on match
4068 CatchSwitchInst(const CatchSwitchInst &CSI);
4069
4070 /// Create a new switch instruction, specifying a
4071 /// default destination. The number of additional handlers can be specified
4072 /// here to make memory allocation more efficient.
4073 /// This constructor can also autoinsert before another instruction.
4074 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4075 unsigned NumHandlers, const Twine &NameStr,
4076 InsertPosition InsertBefore);
4077
4078 // allocate space for exactly zero operands
4079 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
4080
4081 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4082 void growOperands(unsigned Size);
4083
4084protected:
4085 // Note: Instruction needs to be a friend here to call cloneImpl.
4086 friend class Instruction;
4087
4088 CatchSwitchInst *cloneImpl() const;
4089
4090public:
4091 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4092
4093 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4094 unsigned NumHandlers,
4095 const Twine &NameStr = "",
4096 InsertPosition InsertBefore = nullptr) {
4097 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4098 InsertBefore);
4099 }
4100
4101 /// Provide fast operand accessors
4103
4104 // Accessor Methods for CatchSwitch stmt
4105 Value *getParentPad() const { return getOperand(0); }
4106 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4107
4108 // Accessor Methods for CatchSwitch stmt
4109 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4110 bool unwindsToCaller() const { return !hasUnwindDest(); }
4111 BasicBlock *getUnwindDest() const {
4112 if (hasUnwindDest())
4113 return cast<BasicBlock>(getOperand(1));
4114 return nullptr;
4115 }
4116 void setUnwindDest(BasicBlock *UnwindDest) {
4117 assert(UnwindDest);
4118 assert(hasUnwindDest());
4119 setOperand(1, UnwindDest);
4120 }
4121
4122 /// Return the number of 'handlers' in this catchswitch
4123 /// instruction, excluding the default handler.
4124 unsigned getNumHandlers() const {
4125 if (hasUnwindDest())
4126 return getNumOperands() - 2;
4127 return getNumOperands() - 1;
4128 }
4129
4130private:
4131 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4132 static const BasicBlock *handler_helper(const Value *V) {
4133 return cast<BasicBlock>(V);
4134 }
4135
4136public:
4137 using DerefFnTy = BasicBlock *(*)(Value *);
4138 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4139 using handler_range = iterator_range<handler_iterator>;
4140 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4141 using const_handler_iterator =
4142 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4143 using const_handler_range = iterator_range<const_handler_iterator>;
4144
4145 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4146 handler_iterator handler_begin() {
4147 op_iterator It = op_begin() + 1;
4148 if (hasUnwindDest())
4149 ++It;
4150 return handler_iterator(It, DerefFnTy(handler_helper));
4151 }
4152
4153 /// Returns an iterator that points to the first handler in the
4154 /// CatchSwitchInst.
4155 const_handler_iterator handler_begin() const {
4156 const_op_iterator It = op_begin() + 1;
4157 if (hasUnwindDest())
4158 ++It;
4159 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4160 }
4161
4162 /// Returns a read/write iterator that points one past the last
4163 /// handler in the CatchSwitchInst.
4164 handler_iterator handler_end() {
4165 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4166 }
4167
4168 /// Returns a read-only iterator that points one past the last handler in
4169 /// the CatchSwitchInst.
4170 const_handler_iterator handler_end() const {
4171 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4172 }
4173
4174 /// Iteration adapter for range-for loops.
4175 handler_range handlers() {
4176 return make_range(handler_begin(), handler_end());
4177 }
4178
4179 /// Iteration adapter for range-for loops.
4180 const_handler_range handlers() const {
4181 return make_range(handler_begin(), handler_end());
4182 }
4183
4184 /// Add an entry to the switch instruction...
4185 /// Note:
4186 /// This action invalidates handler_end(). Old handler_end() iterator will
4187 /// point to the added handler.
4188 void addHandler(BasicBlock *Dest);
4189
4190 void removeHandler(handler_iterator HI);
4191
4192 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4193 BasicBlock *getSuccessor(unsigned Idx) const {
4194 assert(Idx < getNumSuccessors() &&
4195 "Successor # out of range for catchswitch!");
4196 return cast<BasicBlock>(getOperand(Idx + 1));
4197 }
4198 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4199 assert(Idx < getNumSuccessors() &&
4200 "Successor # out of range for catchswitch!");
4201 setOperand(Idx + 1, NewSucc);
4202 }
4203
4204 // Methods for support type inquiry through isa, cast, and dyn_cast:
4205 static bool classof(const Instruction *I) {
4206 return I->getOpcode() == Instruction::CatchSwitch;
4207 }
4208 static bool classof(const Value *V) {
4209 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4210 }
4211};
4212
4213template <>
4214 struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits {};
4215
4216 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
4217
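// --- Editor's usage sketch (not part of Instructions.h) ---------------------
// A minimal sketch of walking a catchswitch with the handler iterators
// declared above. `CSI` is assumed to be an existing CatchSwitchInst;
// `collectHandlers` is a hypothetical helper name.
inline SmallVector<BasicBlock *, 4> collectHandlers(CatchSwitchInst *CSI) {
  SmallVector<BasicBlock *, 4> Blocks;
  // handlers() skips the parent-pad operand and, if present, the unwind dest.
  for (BasicBlock *Handler : CSI->handlers())
    Blocks.push_back(Handler);
  // The unwind destination is only present when it does not unwind to caller.
  if (BasicBlock *UnwindDest = CSI->getUnwindDest())
    Blocks.push_back(UnwindDest);
  return Blocks;
}
// -----------------------------------------------------------------------------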
4218//===----------------------------------------------------------------------===//
4219// CleanupPadInst Class
4220//===----------------------------------------------------------------------===//
4221 class CleanupPadInst : public FuncletPadInst {
4222 private:
4223 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4224 AllocInfo AllocInfo, const Twine &NameStr,
4225 InsertPosition InsertBefore)
4226 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, AllocInfo,
4227 NameStr, InsertBefore) {}
4228
4229public:
4230 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = {},
4231 const Twine &NameStr = "",
4232 InsertPosition InsertBefore = nullptr) {
4233 IntrusiveOperandsAllocMarker AllocMarker{unsigned(1 + Args.size())};
4234 return new (AllocMarker)
4235 CleanupPadInst(ParentPad, Args, AllocMarker, NameStr, InsertBefore);
4236 }
4237
4238 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4239 static bool classof(const Instruction *I) {
4240 return I->getOpcode() == Instruction::CleanupPad;
4241 }
4242 static bool classof(const Value *V) {
4243 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4244 }
4245};
4246
4247//===----------------------------------------------------------------------===//
4248// CatchPadInst Class
4249//===----------------------------------------------------------------------===//
4250 class CatchPadInst : public FuncletPadInst {
4251 private:
4252 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4253 AllocInfo AllocInfo, const Twine &NameStr,
4254 InsertPosition InsertBefore)
4255 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, AllocInfo,
4256 NameStr, InsertBefore) {}
4257
4258public:
4259 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4260 const Twine &NameStr = "",
4261 InsertPosition InsertBefore = nullptr) {
4262 IntrusiveOperandsAllocMarker AllocMarker{unsigned(1 + Args.size())};
4263 return new (AllocMarker)
4264 CatchPadInst(CatchSwitch, Args, AllocMarker, NameStr, InsertBefore);
4265 }
4266
4267 /// Convenience accessors
4268 CatchSwitchInst *getCatchSwitch() const {
4269 return cast<CatchSwitchInst>(Op<-1>());
4270 }
4271 void setCatchSwitch(Value *CatchSwitch) {
4272 assert(CatchSwitch);
4273 Op<-1>() = CatchSwitch;
4274 }
4275
4276 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4277 static bool classof(const Instruction *I) {
4278 return I->getOpcode() == Instruction::CatchPad;
4279 }
4280 static bool classof(const Value *V) {
4281 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4282 }
4283};
4284
4285//===----------------------------------------------------------------------===//
4286// CatchReturnInst Class
4287//===----------------------------------------------------------------------===//
4288
4289 class CatchReturnInst : public Instruction {
4290 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
4291
4293 CatchReturnInst(Value *CatchPad, BasicBlock *BB, InsertPosition InsertBefore);
4294
4295 void init(Value *CatchPad, BasicBlock *BB);
4296
4297protected:
4298 // Note: Instruction needs to be a friend here to call cloneImpl.
4299 friend class Instruction;
4300
4301 CatchReturnInst *cloneImpl() const;
4302
4303public:
4304 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4305 InsertPosition InsertBefore = nullptr) {
4306 assert(CatchPad);
4307 assert(BB);
4308 return new (AllocMarker) CatchReturnInst(CatchPad, BB, InsertBefore);
4309 }
4310
4311 /// Provide fast operand accessors
4313
4314 /// Convenience accessors.
4315 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4316 void setCatchPad(CatchPadInst *CatchPad) {
4317 assert(CatchPad);
4318 Op<0>() = CatchPad;
4319 }
4320
4321 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4322 void setSuccessor(BasicBlock *NewSucc) {
4323 assert(NewSucc);
4324 Op<1>() = NewSucc;
4325 }
4326 unsigned getNumSuccessors() const { return 1; }
4327
4328 /// Get the parentPad of this catchret's catchpad's catchswitch.
4329 /// The successor block is implicitly a member of this funclet.
4330 Value *getCatchSwitchParentPad() const {
4331 return getCatchPad()->getCatchSwitch()->getParentPad();
4332 }
4333
4334 // Methods for support type inquiry through isa, cast, and dyn_cast:
4335 static bool classof(const Instruction *I) {
4336 return (I->getOpcode() == Instruction::CatchRet);
4337 }
4338 static bool classof(const Value *V) {
4339 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4340 }
4341
4342private:
4343 BasicBlock *getSuccessor(unsigned Idx) const {
4344 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4345 return getSuccessor();
4346 }
4347
4348 void setSuccessor(unsigned Idx, BasicBlock *B) {
4349 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4350 setSuccessor(B);
4351 }
4352};
4353
4354template <>
4355 struct OperandTraits<CatchReturnInst>
4356 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4357
4359
4360//===----------------------------------------------------------------------===//
4361// CleanupReturnInst Class
4362//===----------------------------------------------------------------------===//
4363
4364 class CleanupReturnInst : public Instruction {
4365 using UnwindDestField = BoolBitfieldElementT<0>;
4366
4367private:
4369 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
4370 AllocInfo AllocInfo, InsertPosition InsertBefore = nullptr);
4371
4372 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4373
4374protected:
4375 // Note: Instruction needs to be a friend here to call cloneImpl.
4376 friend class Instruction;
4377
4378 CleanupReturnInst *cloneImpl() const;
4379
4380public:
4381 static CleanupReturnInst *Create(Value *CleanupPad,
4382 BasicBlock *UnwindBB = nullptr,
4383 InsertPosition InsertBefore = nullptr) {
4384 assert(CleanupPad);
4385 unsigned Values = 1;
4386 if (UnwindBB)
4387 ++Values;
4388 IntrusiveOperandsAllocMarker AllocMarker{Values};
4389 return new (AllocMarker)
4390 CleanupReturnInst(CleanupPad, UnwindBB, AllocMarker, InsertBefore);
4391 }
4392
4393 /// Provide fast operand accessors
4395
4396 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4397 bool unwindsToCaller() const { return !hasUnwindDest(); }
4398
4399 /// Convenience accessor.
4400 CleanupPadInst *getCleanupPad() const {
4401 return cast<CleanupPadInst>(Op<0>());
4402 }
4403 void setCleanupPad(CleanupPadInst *CleanupPad) {
4404 assert(CleanupPad);
4405 Op<0>() = CleanupPad;
4406 }
4407
4408 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4409
4410 BasicBlock *getUnwindDest() const {
4411 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4412 }
4413 void setUnwindDest(BasicBlock *NewDest) {
4414 assert(NewDest);
4415 assert(hasUnwindDest());
4416 Op<1>() = NewDest;
4417 }
4418
4419 // Methods for support type inquiry through isa, cast, and dyn_cast:
4420 static bool classof(const Instruction *I) {
4421 return (I->getOpcode() == Instruction::CleanupRet);
4422 }
4423 static bool classof(const Value *V) {
4424 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4425 }
4426
4427private:
4428 BasicBlock *getSuccessor(unsigned Idx) const {
4429 assert(Idx == 0);
4430 return getUnwindDest();
4431 }
4432
4433 void setSuccessor(unsigned Idx, BasicBlock *B) {
4434 assert(Idx == 0);
4435 setUnwindDest(B);
4436 }
4437
4438 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4439 // method so that subclasses cannot accidentally use it.
4440 template <typename Bitfield>
4441 void setSubclassData(typename Bitfield::Type Value) {
4442 Instruction::setSubclassData<Bitfield>(Value);
4443 }
4444};
4445
4446template <>
4447 struct OperandTraits<CleanupReturnInst>
4448 : public VariadicOperandTraits<CleanupReturnInst> {};
4449
4451
4452//===----------------------------------------------------------------------===//
4453// UnreachableInst Class
4454//===----------------------------------------------------------------------===//
4455
4456//===---------------------------------------------------------------------------
4457/// This instruction has undefined behavior. In particular, the
4458/// presence of this instruction indicates some higher level knowledge that the
4459/// end of the block cannot be reached.
4460///
4461 class UnreachableInst : public Instruction {
4462 constexpr static IntrusiveOperandsAllocMarker AllocMarker{0};
4463
4464protected:
4465 // Note: Instruction needs to be a friend here to call cloneImpl.
4466 friend class Instruction;
4467
4468 UnreachableInst *cloneImpl() const;
4469
4470public:
4471 explicit UnreachableInst(LLVMContext &C,
4472 InsertPosition InsertBefore = nullptr);
4473
4474 // allocate space for exactly zero operands
4475 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
4476 void operator delete(void *Ptr) { User::operator delete(Ptr); }
4477
4478 unsigned getNumSuccessors() const { return 0; }
4479
4480 // Methods for support type inquiry through isa, cast, and dyn_cast:
4481 static bool classof(const Instruction *I) {
4482 return I->getOpcode() == Instruction::Unreachable;
4483 }
4484 static bool classof(const Value *V) {
4485 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4486 }
4487
4488private:
4489 BasicBlock *getSuccessor(unsigned idx) const {
4490 llvm_unreachable("UnreachableInst has no successors!");
4491 }
4492
4493 void setSuccessor(unsigned idx, BasicBlock *B) {
4494 llvm_unreachable("UnreachableInst has no successors!");
4495 }
4496};
4497
4498//===----------------------------------------------------------------------===//
4499// TruncInst Class
4500//===----------------------------------------------------------------------===//
4501
4502/// This class represents a truncation of integer types.
4503class TruncInst : public CastInst {
4504protected:
4505 // Note: Instruction needs to be a friend here to call cloneImpl.
4506 friend class Instruction;
4507
4508 /// Clone an identical TruncInst
4509 TruncInst *cloneImpl() const;
4510
4511public:
4512 enum { AnyWrap = 0, NoUnsignedWrap = (1 << 0), NoSignedWrap = (1 << 1) };
4513
4514 /// Constructor with insert-before-instruction semantics
4515 TruncInst(Value *S, ///< The value to be truncated
4516 Type *Ty, ///< The (smaller) type to truncate to
4517 const Twine &NameStr = "", ///< A name for the new instruction
4518 InsertPosition InsertBefore =
4519 nullptr ///< Where to insert the new instruction
4520 );
4521
4522 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4523 static bool classof(const Instruction *I) {
4524 return I->getOpcode() == Trunc;
4525 }
4526 static bool classof(const Value *V) {
4527 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4528 }
4529
4530 void setHasNoUnsignedWrap(bool B) {
4531 SubclassOptionalData =
4532 (SubclassOptionalData & ~NoUnsignedWrap) | (B * NoUnsignedWrap);
4533 }
4534 void setHasNoSignedWrap(bool B) {
4535 SubclassOptionalData =
4536 (SubclassOptionalData & ~NoSignedWrap) | (B * NoSignedWrap);
4537 }
4538
4539 /// Test whether this operation is known to never
4540 /// undergo unsigned overflow, aka the nuw property.
4541 bool hasNoUnsignedWrap() const {
4542 return (SubclassOptionalData & NoUnsignedWrap) != 0;
4543 }
4544
4545 /// Test whether this operation is known to never
4546 /// undergo signed overflow, aka the nsw property.
4547 bool hasNoSignedWrap() const {
4548 return (SubclassOptionalData & NoSignedWrap) != 0;
4549 }
4550
4551 /// Returns the no-wrap kind of the operation.
4552 unsigned getNoWrapKind() const {
4553 unsigned NoWrapKind = 0;
4554 if (hasNoUnsignedWrap())
4555 NoWrapKind |= NoUnsignedWrap;
4556
4557 if (hasNoSignedWrap())
4558 NoWrapKind |= NoSignedWrap;
4559
4560 return NoWrapKind;
4561 }
4562};
4563
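// --- Editor's usage sketch (not part of Instructions.h) ---------------------
// A hedged sketch of the trunc no-wrap flags above. `V` is assumed to be an
// integer value wider than 32 bits; `emitNUWTrunc` is a hypothetical name.
inline TruncInst *emitNUWTrunc(Value *V, LLVMContext &Ctx,
                               Instruction *InsertBefore) {
  TruncInst *TI =
      new TruncInst(V, Type::getInt32Ty(Ctx), "trunc.nuw", InsertBefore);
  // nuw promises the truncated-away bits are zero; nsw would promise they
  // match the sign bit of the result.
  TI->setHasNoUnsignedWrap(true);
  assert(TI->getNoWrapKind() == TruncInst::NoUnsignedWrap);
  return TI;
}
// -----------------------------------------------------------------------------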
4564//===----------------------------------------------------------------------===//
4565// ZExtInst Class
4566//===----------------------------------------------------------------------===//
4567
4568/// This class represents zero extension of integer types.
4569class ZExtInst : public CastInst {
4570protected:
4571 // Note: Instruction needs to be a friend here to call cloneImpl.
4572 friend class Instruction;
4573
4574 /// Clone an identical ZExtInst
4575 ZExtInst *cloneImpl() const;
4576
4577public:
4578 /// Constructor with insert-before-instruction semantics
4579 ZExtInst(Value *S, ///< The value to be zero extended
4580 Type *Ty, ///< The type to zero extend to
4581 const Twine &NameStr = "", ///< A name for the new instruction
4582 InsertPosition InsertBefore =
4583 nullptr ///< Where to insert the new instruction
4584 );
4585
4586 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4587 static bool classof(const Instruction *I) {
4588 return I->getOpcode() == ZExt;
4589 }
4590 static bool classof(const Value *V) {
4591 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4592 }
4593};
4594
4595//===----------------------------------------------------------------------===//
4596// SExtInst Class
4597//===----------------------------------------------------------------------===//
4598
4599/// This class represents a sign extension of integer types.
4600class SExtInst : public CastInst {
4601protected:
4602 // Note: Instruction needs to be a friend here to call cloneImpl.
4603 friend class Instruction;
4604
4605 /// Clone an identical SExtInst
4606 SExtInst *cloneImpl() const;
4607
4608public:
4609 /// Constructor with insert-before-instruction semantics
4610 SExtInst(Value *S, ///< The value to be sign extended
4611 Type *Ty, ///< The type to sign extend to
4612 const Twine &NameStr = "", ///< A name for the new instruction
4613 InsertPosition InsertBefore =
4614 nullptr ///< Where to insert the new instruction
4615 );
4616
4617 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4618 static bool classof(const Instruction *I) {
4619 return I->getOpcode() == SExt;
4620 }
4621 static bool classof(const Value *V) {
4622 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4623 }
4624};
4625
4626//===----------------------------------------------------------------------===//
4627// FPTruncInst Class
4628//===----------------------------------------------------------------------===//
4629
4630/// This class represents a truncation of floating point types.
4631class FPTruncInst : public CastInst {
4632protected:
4633 // Note: Instruction needs to be a friend here to call cloneImpl.
4634 friend class Instruction;
4635
4636 /// Clone an identical FPTruncInst
4637 FPTruncInst *cloneImpl() const;
4638
4639public: /// Constructor with insert-before-instruction semantics
4640 FPTruncInst(Value *S, ///< The value to be truncated
4641 Type *Ty, ///< The type to truncate to
4642 const Twine &NameStr = "", ///< A name for the new instruction
4643 InsertPosition InsertBefore =
4644 nullptr ///< Where to insert the new instruction
4645 );
4646
4647 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4648 static bool classof(const Instruction *I) {
4649 return I->getOpcode() == FPTrunc;
4650 }
4651 static bool classof(const Value *V) {
4652 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4653 }
4654};
4655
4656//===----------------------------------------------------------------------===//
4657// FPExtInst Class
4658//===----------------------------------------------------------------------===//
4659
4660/// This class represents an extension of floating point types.
4661class FPExtInst : public CastInst {
4662protected:
4663 // Note: Instruction needs to be a friend here to call cloneImpl.
4664 friend class Instruction;
4665
4666 /// Clone an identical FPExtInst
4667 FPExtInst *cloneImpl() const;
4668
4669public:
4670 /// Constructor with insert-before-instruction semantics
4671 FPExtInst(Value *S, ///< The value to be extended
4672 Type *Ty, ///< The type to extend to
4673 const Twine &NameStr = "", ///< A name for the new instruction
4674 InsertPosition InsertBefore =
4675 nullptr ///< Where to insert the new instruction
4676 );
4677
4678 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4679 static bool classof(const Instruction *I) {
4680 return I->getOpcode() == FPExt;
4681 }
4682 static bool classof(const Value *V) {
4683 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4684 }
4685};
4686
4687//===----------------------------------------------------------------------===//
4688// UIToFPInst Class
4689//===----------------------------------------------------------------------===//
4690
4691/// This class represents a cast from unsigned integer to floating point.
4692class UIToFPInst : public CastInst {
4693protected:
4694 // Note: Instruction needs to be a friend here to call cloneImpl.
4695 friend class Instruction;
4696
4697 /// Clone an identical UIToFPInst
4698 UIToFPInst *cloneImpl() const;
4699
4700public:
4701 /// Constructor with insert-before-instruction semantics
4702 UIToFPInst(Value *S, ///< The value to be converted
4703 Type *Ty, ///< The type to convert to
4704 const Twine &NameStr = "", ///< A name for the new instruction
4705 InsertPosition InsertBefore =
4706 nullptr ///< Where to insert the new instruction
4707 );
4708
4709 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4710 static bool classof(const Instruction *I) {
4711 return I->getOpcode() == UIToFP;
4712 }
4713 static bool classof(const Value *V) {
4714 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4715 }
4716};
4717
4718//===----------------------------------------------------------------------===//
4719// SIToFPInst Class
4720//===----------------------------------------------------------------------===//
4721
4722/// This class represents a cast from signed integer to floating point.
4723class SIToFPInst : public CastInst {
4724protected:
4725 // Note: Instruction needs to be a friend here to call cloneImpl.
4726 friend class Instruction;
4727
4728 /// Clone an identical SIToFPInst
4729 SIToFPInst *cloneImpl() const;
4730
4731public:
4732 /// Constructor with insert-before-instruction semantics
4733 SIToFPInst(Value *S, ///< The value to be converted
4734 Type *Ty, ///< The type to convert to
4735 const Twine &NameStr = "", ///< A name for the new instruction
4736 InsertPosition InsertBefore =
4737 nullptr ///< Where to insert the new instruction
4738 );
4739
4740 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4741 static bool classof(const Instruction *I) {
4742 return I->getOpcode() == SIToFP;
4743 }
4744 static bool classof(const Value *V) {
4745 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4746 }
4747};
4748
4749//===----------------------------------------------------------------------===//
4750// FPToUIInst Class
4751//===----------------------------------------------------------------------===//
4752
4753/// This class represents a cast from floating point to unsigned integer
4754class FPToUIInst : public CastInst {
4755protected:
4756 // Note: Instruction needs to be a friend here to call cloneImpl.
4757 friend class Instruction;
4758
4759 /// Clone an identical FPToUIInst
4760 FPToUIInst *cloneImpl() const;
4761
4762public:
4763 /// Constructor with insert-before-instruction semantics
4764 FPToUIInst(Value *S, ///< The value to be converted
4765 Type *Ty, ///< The type to convert to
4766 const Twine &NameStr = "", ///< A name for the new instruction
4767 InsertPosition InsertBefore =
4768 nullptr ///< Where to insert the new instruction
4769 );
4770
4771 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4772 static bool classof(const Instruction *I) {
4773 return I->getOpcode() == FPToUI;
4774 }
4775 static bool classof(const Value *V) {
4776 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4777 }
4778};
4779
4780//===----------------------------------------------------------------------===//
4781// FPToSIInst Class
4782//===----------------------------------------------------------------------===//
4783
4784/// This class represents a cast from floating point to signed integer.
4785class FPToSIInst : public CastInst {
4786protected:
4787 // Note: Instruction needs to be a friend here to call cloneImpl.
4788 friend class Instruction;
4789
4790 /// Clone an identical FPToSIInst
4791 FPToSIInst *cloneImpl() const;
4792
4793public:
4794 /// Constructor with insert-before-instruction semantics
4795 FPToSIInst(Value *S, ///< The value to be converted
4796 Type *Ty, ///< The type to convert to
4797 const Twine &NameStr = "", ///< A name for the new instruction
4798 InsertPosition InsertBefore =
4799 nullptr ///< Where to insert the new instruction
4800 );
4801
4802 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4803 static bool classof(const Instruction *I) {
4804 return I->getOpcode() == FPToSI;
4805 }
4806 static bool classof(const Value *V) {
4807 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4808 }
4809};
4810
4811//===----------------------------------------------------------------------===//
4812// IntToPtrInst Class
4813//===----------------------------------------------------------------------===//
4814
4815/// This class represents a cast from an integer to a pointer.
4816class IntToPtrInst : public CastInst {
4817public:
4818 // Note: Instruction needs to be a friend here to call cloneImpl.
4819 friend class Instruction;
4820
4821 /// Constructor with insert-before-instruction semantics
4822 IntToPtrInst(Value *S, ///< The value to be converted
4823 Type *Ty, ///< The type to convert to
4824 const Twine &NameStr = "", ///< A name for the new instruction
4825 InsertPosition InsertBefore =
4826 nullptr ///< Where to insert the new instruction
4827 );
4828
4829 /// Clone an identical IntToPtrInst.
4830 IntToPtrInst *cloneImpl() const;
4831
4832 /// Returns the address space of this instruction's pointer type.
4833 unsigned getAddressSpace() const {
4834 return getType()->getPointerAddressSpace();
4835 }
4836
4837 // Methods for support type inquiry through isa, cast, and dyn_cast:
4838 static bool classof(const Instruction *I) {
4839 return I->getOpcode() == IntToPtr;
4840 }
4841 static bool classof(const Value *V) {
4842 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4843 }
4844};
4845
4846//===----------------------------------------------------------------------===//
4847// PtrToIntInst Class
4848//===----------------------------------------------------------------------===//
4849
4850/// This class represents a cast from a pointer to an integer.
4851class PtrToIntInst : public CastInst {
4852protected:
4853 // Note: Instruction needs to be a friend here to call cloneImpl.
4854 friend class Instruction;
4855
4856 /// Clone an identical PtrToIntInst.
4857 PtrToIntInst *cloneImpl() const;
4858
4859public:
4860 /// Constructor with insert-before-instruction semantics
4861 PtrToIntInst(Value *S, ///< The value to be converted
4862 Type *Ty, ///< The type to convert to
4863 const Twine &NameStr = "", ///< A name for the new instruction
4864 InsertPosition InsertBefore =
4865 nullptr ///< Where to insert the new instruction
4866 );
4867
4868 /// Gets the pointer operand.
4869 Value *getPointerOperand() { return getOperand(0); }
4870 /// Gets the pointer operand.
4871 const Value *getPointerOperand() const { return getOperand(0); }
4872 /// Gets the operand index of the pointer operand.
4873 static unsigned getPointerOperandIndex() { return 0U; }
4874
4875 /// Returns the address space of the pointer operand.
4876 unsigned getPointerAddressSpace() const {
4877 return getPointerOperand()->getType()->getPointerAddressSpace();
4878 }
4879
4880 // Methods for support type inquiry through isa, cast, and dyn_cast:
4881 static bool classof(const Instruction *I) {
4882 return I->getOpcode() == PtrToInt;
4883 }
4884 static bool classof(const Value *V) {
4885 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4886 }
4887};
4888
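// --- Editor's usage sketch (not part of Instructions.h) ---------------------
// A hedged sketch pairing the two casts above: ptrtoint to an integer wide
// enough for the pointer, then inttoptr back into the same address space.
// `DL` and `Ptr` are assumed inputs; `roundTripPointer` is hypothetical.
inline Value *roundTripPointer(Value *Ptr, const DataLayout &DL,
                               Instruction *InsertBefore) {
  Type *IntTy = DL.getIntPtrType(Ptr->getType());
  PtrToIntInst *AsInt = new PtrToIntInst(Ptr, IntTy, "p2i", InsertBefore);
  // getPointerAddressSpace() reads the address space off the source pointer.
  unsigned AS = AsInt->getPointerAddressSpace();
  Type *PtrTy = PointerType::get(InsertBefore->getContext(), AS);
  return new IntToPtrInst(AsInt, PtrTy, "i2p", InsertBefore);
}
// -----------------------------------------------------------------------------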
4889//===----------------------------------------------------------------------===//
4890// BitCastInst Class
4891//===----------------------------------------------------------------------===//
4892
4893/// This class represents a no-op cast from one type to another.
4894class BitCastInst : public CastInst {
4895protected:
4896 // Note: Instruction needs to be a friend here to call cloneImpl.
4897 friend class Instruction;
4898
4899 /// Clone an identical BitCastInst.
4900 BitCastInst *cloneImpl() const;
4901
4902public:
4903 /// Constructor with insert-before-instruction semantics
4904 BitCastInst(Value *S, ///< The value to be cast
4905 Type *Ty, ///< The type to cast to
4906 const Twine &NameStr = "", ///< A name for the new instruction
4907 InsertPosition InsertBefore =
4908 nullptr ///< Where to insert the new instruction
4909 );
4910
4911 // Methods for support type inquiry through isa, cast, and dyn_cast:
4912 static bool classof(const Instruction *I) {
4913 return I->getOpcode() == BitCast;
4914 }
4915 static bool classof(const Value *V) {
4916 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4917 }
4918};
4919
4920//===----------------------------------------------------------------------===//
4921// AddrSpaceCastInst Class
4922//===----------------------------------------------------------------------===//
4923
4924/// This class represents a conversion between pointers from one address space
4925/// to another.
4926 class AddrSpaceCastInst : public CastInst {
4927 protected:
4928 // Note: Instruction needs to be a friend here to call cloneImpl.
4929 friend class Instruction;
4930
4931 /// Clone an identical AddrSpaceCastInst.
4932 AddrSpaceCastInst *cloneImpl() const;
4933
4934public:
4935 /// Constructor with insert-before-instruction semantics
4936 AddrSpaceCastInst(
4937 Value *S, ///< The value to be cast
4938 Type *Ty, ///< The type to cast to
4939 const Twine &NameStr = "", ///< A name for the new instruction
4940 InsertPosition InsertBefore =
4941 nullptr ///< Where to insert the new instruction
4942 );
4943
4944 // Methods for support type inquiry through isa, cast, and dyn_cast:
4945 static bool classof(const Instruction *I) {
4946 return I->getOpcode() == AddrSpaceCast;
4947 }
4948 static bool classof(const Value *V) {
4949 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4950 }
4951
4952 /// Gets the pointer operand.
4953 Value *getPointerOperand() {
4954 return getOperand(0);
4955 }
4956
4957 /// Gets the pointer operand.
4958 const Value *getPointerOperand() const {
4959 return getOperand(0);
4960 }
4961
4962 /// Gets the operand index of the pointer operand.
4963 static unsigned getPointerOperandIndex() {
4964 return 0U;
4965 }
4966
4967 /// Returns the address space of the pointer operand.
4968 unsigned getSrcAddressSpace() const {
4969 return getPointerOperand()->getType()->getPointerAddressSpace();
4970 }
4971
4972 /// Returns the address space of the result.
4973 unsigned getDestAddressSpace() const {
4974 return getType()->getPointerAddressSpace();
4975 }
4976};
4977
4978//===----------------------------------------------------------------------===//
4979// Helper functions
4980//===----------------------------------------------------------------------===//
4981
4982/// A helper function that returns the pointer operand of a load or store
4983/// instruction. Returns nullptr if it is not a load or store.
4984inline const Value *getLoadStorePointerOperand(const Value *V) {
4985 if (auto *Load = dyn_cast<LoadInst>(V))
4986 return Load->getPointerOperand();
4987 if (auto *Store = dyn_cast<StoreInst>(V))
4988 return Store->getPointerOperand();
4989 return nullptr;
4990}
4991inline Value *getLoadStorePointerOperand(Value *V) {
4992 return const_cast<Value *>(
4993 getLoadStorePointerOperand(static_cast<const Value *>(V)));
4994}
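
A small sketch of the typical use of these overloads (illustrative; assumes an Instruction &I):

  if (Value *Ptr = getLoadStorePointerOperand(&I)) {
    // I is a load or a store and Ptr is its address operand;
    // for any other instruction the helper returns nullptr.
  }
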
4995
4996/// A helper function that returns the pointer operand of a load, store
4997/// or GEP instruction. Returns nullptr if it is not a load, store, or GEP.
4998inline const Value *getPointerOperand(const Value *V) {
4999 if (auto *Ptr = getLoadStorePointerOperand(V))
5000 return Ptr;
5001 if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
5002 return Gep->getPointerOperand();
5003 return nullptr;
5004}
5005inline Value *getPointerOperand(Value *V) {
5006 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
5007}
5008
5009/// A helper function that returns the alignment of a load or store instruction.
5010inline Align getLoadStoreAlignment(const Value *I) {
5011 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5012 "Expected Load or Store instruction");
5013 if (auto *LI = dyn_cast<LoadInst>(I))
5014 return LI->getAlign();
5015 return cast<StoreInst>(I)->getAlign();
5016}
5017
5018/// A helper function that sets the alignment of a load or store instruction.
5019inline void setLoadStoreAlignment(Value *I, Align NewAlign) {
5020 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5021 "Expected Load or Store instruction");
5022 if (auto *LI = dyn_cast<LoadInst>(I))
5023 LI->setAlignment(NewAlign);
5024 else
5025 cast<StoreInst>(I)->setAlignment(NewAlign);
5026}
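
A sketch combining the two alignment helpers, here raising a too-small access alignment (illustrative; the 16-byte target is an arbitrary assumption):

  if (isa<LoadInst>(&I) || isa<StoreInst>(&I))
    if (getLoadStoreAlignment(&I) < Align(16))
      setLoadStoreAlignment(&I, Align(16));
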
5027
5028/// A helper function that returns the address space of the pointer operand of
5029/// a load or store instruction.
5030inline unsigned getLoadStoreAddressSpace(const Value *I) {
5031 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5032 "Expected Load or Store instruction");
5033 if (auto *LI = dyn_cast<LoadInst>(I))
5034 return LI->getPointerAddressSpace();
5035 return cast<StoreInst>(I)->getPointerAddressSpace();
5036}
5037
5038/// A helper function that returns the type of a load or store instruction.
5039inline Type *getLoadStoreType(const Value *I) {
5040 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5041 "Expected Load or Store instruction");
5042 if (auto *LI = dyn_cast<LoadInst>(I))
5043 return LI->getType();
5044 return cast<StoreInst>(I)->getValueOperand()->getType();
5045}
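
A sketch of using the accessed type, e.g. to compute how many bytes an access touches (illustrative; assumes a DataLayout DL is in scope):

  Type *AccessTy = getLoadStoreType(&I);          // loaded type, or type of the stored value
  TypeSize Bytes = DL.getTypeStoreSize(AccessTy); // may be a scalable size
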
5046
5047/// A helper function that returns an atomic operation's sync scope; returns
5048/// std::nullopt if it is not an atomic operation.
5049inline std::optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) {
5050 if (!I->isAtomic())
5051 return std::nullopt;
5052 if (auto *AI = dyn_cast<LoadInst>(I))
5053 return AI->getSyncScopeID();
5054 if (auto *AI = dyn_cast<StoreInst>(I))
5055 return AI->getSyncScopeID();
5056 if (auto *AI = dyn_cast<FenceInst>(I))
5057 return AI->getSyncScopeID();
5058 if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
5059 return AI->getSyncScopeID();
5060 if (auto *AI = dyn_cast<AtomicRMWInst>(I))
5061 return AI->getSyncScopeID();
5062 llvm_unreachable("unhandled atomic operation");
5063}
5064
5065/// A helper function that sets an atomic operation's sync scope.
5066inline void setAtomicSyncScopeID(Instruction *I, SyncScope::ID SSID) {
5067 assert(I->isAtomic());
5068 if (auto *AI = dyn_cast<LoadInst>(I))
5069 AI->setSyncScopeID(SSID);
5070 else if (auto *AI = dyn_cast<StoreInst>(I))
5071 AI->setSyncScopeID(SSID);
5072 else if (auto *AI = dyn_cast<FenceInst>(I))
5073 AI->setSyncScopeID(SSID);
5074 else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
5075 AI->setSyncScopeID(SSID);
5076 else if (auto *AI = dyn_cast<AtomicRMWInst>(I))
5077 AI->setSyncScopeID(SSID);
5078 else
5079 llvm_unreachable("unhandled atomic operation");
5080}
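
A sketch of the two sync-scope helpers used together (illustrative; widening a single-thread scope to system scope is just an example transformation):

  if (std::optional<SyncScope::ID> SSID = getAtomicSyncScopeID(&I))
    if (*SSID == SyncScope::SingleThread)
      setAtomicSyncScopeID(&I, SyncScope::System);
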
5081
5082//===----------------------------------------------------------------------===//
5083// FreezeInst Class
5084//===----------------------------------------------------------------------===//
5085
5086/// This class represents a freeze instruction, which returns an arbitrary but
5087/// fixed concrete value if its operand is either a poison value or an undef value.
5088class FreezeInst : public UnaryInstruction {
5089protected:
5090 // Note: Instruction needs to be a friend here to call cloneImpl.
5091 friend class Instruction;
5092
5093 /// Clone an identical FreezeInst
5094 FreezeInst *cloneImpl() const;
5095
5096public:
5097 explicit FreezeInst(Value *S, const Twine &NameStr = "",
5098 InsertPosition InsertBefore = nullptr);
5099
5100 // Methods to support type inquiry through isa, cast, and dyn_cast:
5101 static inline bool classof(const Instruction *I) {
5102 return I->getOpcode() == Freeze;
5103 }
5104 static inline bool classof(const Value *V) {
5105 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5106 }
5107};
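
A sketch of inserting a freeze, e.g. to stop poison/undef from propagating into a branch condition (illustrative; Cond and the branch Br are assumed to exist in the caller):

  auto *Frozen = new FreezeInst(Cond, "cond.fr", Br); // insert just before the branch
  Br->setCondition(Frozen);                           // branch now tests the frozen value
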
5108
5109} // end namespace llvm
5110
5111#endif // LLVM_IR_INSTRUCTIONS_H
static const LLT S1
static bool isReverseMask(ArrayRef< int > M, EVT VT)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
always inline
Atomic ordering constants.
static const Function * getParent(const Value *V)
This file implements methods to test, set and extract typed bits from packed unsigned integers.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
RelocType Type
Definition: COFFYAML.cpp:410
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Align
std::string Name
uint32_t Index
uint64_t Size
Hexagon Common GEP
hexagon gen pred
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file implements a map that provides insertion order iteration.
uint64_t IntrinsicInst * II
#define DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CLASS, VALUECLASS)
Macro for generating out-of-class operand accessor definitions.
#define P(N)
PowerPC Reduce CR logical Operation
StandardInstrumentations SI(Mod->getContext(), Debug, VerifyEach)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:78
This class represents a conversion between pointers from one address space to another.
const Value * getPointerOperand() const
Gets the pointer operand.
AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
Value * getPointerOperand()
Gets the pointer operand.
static bool classof(const Instruction *I)
static bool classof(const Value *V)
unsigned getSrcAddressSpace() const
Returns the address space of the pointer operand.
unsigned getDestAddressSpace() const
Returns the address space of the result.
static unsigned getPointerOperandIndex()
Gets the operand index of the pointer operand.
an instruction to allocate memory on the stack
Definition: Instructions.h:63
std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
static bool classof(const Value *V)
Definition: Instructions.h:157
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:149
void setSwiftError(bool V)
Specify whether this alloca is used to represent a swifterror.
Definition: Instructions.h:151
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:124
void setAllocatedType(Type *Ty)
for use only in special circumstances that need to generically transform a whole instruction (eg: IR ...
Definition: Instructions.h:120
static bool classof(const Instruction *I)
Definition: Instructions.h:154
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:99
void setUsedWithInAlloca(bool V)
Specify whether this alloca is used to represent the arguments to a call.
Definition: Instructions.h:144
AllocaInst * cloneImpl() const
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:117
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:139
Value * getArraySize()
Definition: Instructions.h:96
unsigned getAddressSpace() const
Return the address space for the allocation.
Definition: Instructions.h:104
std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
Definition: Instructions.h:128
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:95
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:501
BoolBitfieldElementT< 0 > VolatileField
Definition: Instructions.h:529
const Value * getCompareOperand() const
Definition: Instructions.h:634
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:625
AtomicOrdering getMergedOrdering() const
Returns a single ordering which is at least as strong as both the success and failure orderings for t...
Definition: Instructions.h:607
void setWeak(bool IsWeak)
Definition: Instructions.h:564
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
Definition: Instructions.h:555
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:640
BoolBitfieldElementT< VolatileField::NextBit > WeakField
Definition: Instructions.h:530
AtomicOrderingBitfieldElementT< SuccessOrderingField::NextBit > FailureOrderingField
Definition: Instructions.h:534
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:599
static bool isValidFailureOrdering(AtomicOrdering Ordering)
Definition: Instructions.h:574
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:594
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:587
AlignmentBitfieldElementT< FailureOrderingField::NextBit > AlignmentField
Definition: Instructions.h:536
static AtomicOrdering getStrongestFailureOrdering(AtomicOrdering SuccessOrdering)
Returns the strongest permitted ordering on failure, given the desired ordering on success.
Definition: Instructions.h:652
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:544
const Value * getPointerOperand() const
Definition: Instructions.h:630
static bool classof(const Value *V)
Definition: Instructions.h:671
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
Definition: Instructions.h:562
void setAlignment(Align Align)
Definition: Instructions.h:548
void setVolatile(bool V)
Specify whether this is a volatile cmpxchg.
Definition: Instructions.h:559
static bool isValidSuccessOrdering(AtomicOrdering Ordering)
Definition: Instructions.h:569
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:582
AtomicOrderingBitfieldElementT< WeakField::NextBit > SuccessOrderingField
Definition: Instructions.h:532
static unsigned getPointerOperandIndex()
Definition: Instructions.h:631
const Value * getNewValOperand() const
Definition: Instructions.h:637
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:620
static bool classof(const Instruction *I)
Definition: Instructions.h:668
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:704
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:827
static bool isFPOperation(BinOp Op)
Definition: Instructions.h:809
static unsigned getPointerOperandIndex()
Definition: Instructions.h:872
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
Definition: Instructions.h:837
void setVolatile(bool V)
Specify whether this is a volatile RMW or not.
Definition: Instructions.h:841
BinOpBitfieldElement< AtomicOrderingField::NextBit > OperationField
Definition: Instructions.h:799
BinOp
This enumeration lists the possible modifications atomicrmw can make.
Definition: Instructions.h:716
@ Add
*p = old + v
Definition: Instructions.h:720
@ FAdd
*p = old + v
Definition: Instructions.h:741
@ USubCond
Subtract only if no unsigned overflow.
Definition: Instructions.h:764
@ Min
*p = old <signed v ? old : v
Definition: Instructions.h:734
@ Or
*p = old | v
Definition: Instructions.h:728
@ Sub
*p = old - v
Definition: Instructions.h:722
@ And
*p = old & v
Definition: Instructions.h:724
@ Xor
*p = old ^ v
Definition: Instructions.h:730
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
Definition: Instructions.h:768
@ FSub
*p = old - v
Definition: Instructions.h:744
@ UIncWrap
Increment one up to a maximum value.
Definition: Instructions.h:756
@ Max
*p = old >signed v ? old : v
Definition: Instructions.h:732
@ UMin
*p = old <unsigned v ? old : v
Definition: Instructions.h:738
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
Definition: Instructions.h:752
@ UMax
*p = old >unsigned v ? old : v
Definition: Instructions.h:736
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
Definition: Instructions.h:748
@ UDecWrap
Decrement one until a minimum value or zero.
Definition: Instructions.h:760
@ Nand
*p = ~(old & v)
Definition: Instructions.h:726
AtomicOrderingBitfieldElementT< VolatileField::NextBit > AtomicOrderingField
Definition: Instructions.h:798
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:866
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
Value * getPointerOperand()
Definition: Instructions.h:870
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
Definition: Instructions.h:852
bool isFloatingPointOperation() const
Definition: Instructions.h:882
static bool classof(const Instruction *I)
Definition: Instructions.h:887
const Value * getPointerOperand() const
Definition: Instructions.h:871
void setOperation(BinOp Operation)
Definition: Instructions.h:821
static bool classof(const Value *V)
Definition: Instructions.h:890
BinOp getOperation() const
Definition: Instructions.h:805
const Value * getValOperand() const
Definition: Instructions.h:875
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:861
void setAlignment(Align Align)
Definition: Instructions.h:831
Value * getValOperand()
Definition: Instructions.h:874
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:847
AlignmentBitfieldElementT< OperationField::NextBit > AlignmentField
Definition: Instructions.h:800
BoolBitfieldElementT< 0 > VolatileField
Definition: Instructions.h:796
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:878
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
This class represents a no-op cast from one type to another.
static bool classof(const Instruction *I)
static bool classof(const Value *V)
BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
Conditional or Unconditional Branch instruction.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
iterator_range< succ_op_iterator > successors()
static BranchInst * Create(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, InsertPosition InsertBefore=nullptr)
void setCondition(Value *V)
static bool classof(const Instruction *I)
bool isConditional() const
unsigned getNumSuccessors() const
static bool classof(const Value *V)
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
Value * getCondition() const
iterator_range< const_succ_op_iterator > successors() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1112
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1474
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
Definition: InstrTypes.h:1451
FunctionType * FTy
Definition: InstrTypes.h:1127
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
Definition: InstrTypes.h:2277
unsigned arg_size() const
Definition: InstrTypes.h:1284
unsigned getNumTotalBundleOperands() const
Return the total number operands (not operand bundles) used by every operand bundle in this OperandBu...
Definition: InstrTypes.h:2005
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
static bool classof(const Value *V)
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool classof(const Instruction *I)
static CallBrInst * Create(FunctionCallee Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
SmallVector< BasicBlock *, 16 > getIndirectDests() const
static CallBrInst * Create(FunctionCallee Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
void setSuccessor(unsigned i, BasicBlock *NewSucc)
BasicBlock * getSuccessor(unsigned i) const
Value * getIndirectDestLabelUse(unsigned i) const
BasicBlock * getIndirectDest(unsigned i) const
void setDefaultDest(BasicBlock *B)
unsigned getNumSuccessors() const
void setIndirectDest(unsigned i, BasicBlock *B)
Value * getIndirectDestLabel(unsigned i) const
getIndirectDestLabel - Return the i-th indirect dest label.
BasicBlock * getDefaultDest() const
unsigned getNumIndirectDests() const
Return the number of callbr indirect dest labels.
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
bool isNoTailCall() const
void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
static bool classof(const Value *V)
bool isTailCall() const
void setCanReturnTwice()
void setTailCallKind(TailCallKind TCK)
static CallInst * Create(FunctionType *Ty, Value *Func, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CallInst * Create(FunctionType *Ty, Value *Func, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
bool canReturnTwice() const
Return true if the call can return twice.
TailCallKind getTailCallKind() const
CallInst * cloneImpl() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
void setTailCall(bool IsTc=true)
bool isMustTailCall() const
static CallInst * Create(FunctionCallee Func, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
static bool classof(const Instruction *I)
bool isNonContinuableTrap() const
Return true if the call is for a noreturn trap intrinsic.
static CallInst * Create(FunctionCallee Func, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CallInst * Create(FunctionCallee Func, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:444
CatchSwitchInst * getCatchSwitch() const
Convenience accessors.
void setCatchSwitch(Value *CatchSwitch)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static CatchPadInst * Create(Value *CatchSwitch, ArrayRef< Value * > Args, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool classof(const Value *V)
static bool classof(const Instruction *I)
BasicBlock * getSuccessor() const
CatchPadInst * getCatchPad() const
Convenience accessors.
void setSuccessor(BasicBlock *NewSucc)
static bool classof(const Value *V)
static CatchReturnInst * Create(Value *CatchPad, BasicBlock *BB, InsertPosition InsertBefore=nullptr)
unsigned getNumSuccessors() const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
void setCatchPad(CatchPadInst *CatchPad)
CatchReturnInst * cloneImpl() const
Value * getCatchSwitchParentPad() const
Get the parentPad of this catchret's catchpad's catchswitch.
void setUnwindDest(BasicBlock *UnwindDest)
static bool classof(const Instruction *I)
BasicBlock *(*)(Value *) DerefFnTy
const BasicBlock *(*)(const Value *) ConstDerefFnTy
unsigned getNumSuccessors() const
const_handler_iterator handler_begin() const
Returns an iterator that points to the first handler in the CatchSwitchInst.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
void setSuccessor(unsigned Idx, BasicBlock *NewSucc)
Value * getParentPad() const
void setParentPad(Value *ParentPad)
bool unwindsToCaller() const
static bool classof(const Value *V)
handler_iterator handler_end()
Returns a read-only iterator that points one past the last handler in the CatchSwitchInst.
BasicBlock * getUnwindDest() const
BasicBlock * getSuccessor(unsigned Idx) const
const_handler_iterator handler_end() const
Returns an iterator that points one past the last handler in the CatchSwitchInst.
bool hasUnwindDest() const
handler_iterator handler_begin()
Returns an iterator that points to the first handler in CatchSwitchInst.
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
handler_range handlers()
iteration adapter for range-for loops.
const_handler_range handlers() const
iteration adapter for range-for loops.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
static bool classof(const Value *V)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static CleanupPadInst * Create(Value *ParentPad, ArrayRef< Value * > Args={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool classof(const Instruction *I)
CleanupPadInst * getCleanupPad() const
Convenience accessor.
unsigned getNumSuccessors() const
BasicBlock * getUnwindDest() const
bool unwindsToCaller() const
void setCleanupPad(CleanupPadInst *CleanupPad)
static bool classof(const Value *V)
void setUnwindDest(BasicBlock *NewDest)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
bool hasUnwindDest() const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:661
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition: InstrTypes.h:980
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition: InstrTypes.h:766
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:673
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:676
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:681
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:684
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:682
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:689
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:675
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:683
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:825
static auto FCmpPredicates()
Returns the sequence of all FCmp predicates.
Definition: InstrTypes.h:712
bool isFPPredicate() const
Definition: InstrTypes.h:780
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:763
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
Definition: CmpPredicate.h:22
bool hasSameSign() const
Query samesign information, for optimizations.
Definition: CmpPredicate.h:42
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
This is an important base class in LLVM.
Definition: Constant.h:42
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
This instruction extracts a single (scalar) element from a VectorType value.
const Value * getVectorOperand() const
ExtractElementInst * cloneImpl() const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
static bool classof(const Value *V)
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
const Value * getIndexOperand() const
static bool classof(const Instruction *I)
VectorType * getVectorOperandType() const
static bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
unsigned getNumIndices() const
static bool classof(const Value *V)
static bool classof(const Instruction *I)
iterator_range< idx_iterator > indices() const
idx_iterator idx_end() const
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
const Value * getAggregateOperand() const
static unsigned getAggregateOperandIndex()
idx_iterator idx_begin() const
This instruction compares its operands according to the predicate given to the constructor.
bool isRelational() const
FCmpInst(Predicate Pred, Value *LHS, Value *RHS, const Twine &NameStr="", Instruction *FlagsSource=nullptr)
Constructor with no-insertion semantics.
bool isEquality() const
static bool classof(const Value *V)
bool isCommutative() const
static bool isCommutative(Predicate Pred)
static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isEquality(Predicate Pred)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static auto predicates()
Returns the sequence of all FCmp predicates.
FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
void swapOperands()
Exchange the two operands to this instruction in such a way that it does not modify the semantics of ...
FCmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
This class represents an extension of floating point types.
static bool classof(const Value *V)
FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class represents a cast from floating point to signed integer.
static bool classof(const Value *V)
FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class represents a cast from floating point to unsigned integer.
static bool classof(const Value *V)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
This class represents a truncation of floating point types.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V)
FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
An instruction for ordering other memory operations.
Definition: Instructions.h:424
static bool classof(const Value *V)
Definition: Instructions.h:473
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
Definition: Instructions.h:460
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
Definition: Instructions.h:465
static bool classof(const Instruction *I)
Definition: Instructions.h:470
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
Definition: Instructions.h:455
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Definition: Instructions.h:449
This class represents a freeze function that returns random concrete value if an operand is either a ...
static bool classof(const Value *V)
FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
static bool classof(const Instruction *I)
friend class CatchPadInst
Definition: InstrTypes.h:2335
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:170
Class to represent function types.
Definition: DerivedTypes.h:105
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:933
bool isInBounds() const
Determine whether the GEP has the inbounds flag.
bool hasNoUnsignedSignedWrap() const
Determine whether the GEP has the nusw flag.
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
static Type * getGEPReturnType(Value *Ptr, ArrayRef< Value * > IdxList)
Returns the pointer type returned by the GEP instruction, which may be a vector of pointers.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
void setResultElementType(Type *Ty)
Definition: Instructions.h:993
bool hasNoUnsignedWrap() const
Determine whether the GEP has the nuw flag.
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
unsigned getAddressSpace() const
Returns the address space of this instruction's pointer type.
iterator_range< const_op_iterator > indices() const
Type * getResultElementType() const
Definition: Instructions.h:995
static bool classof(const Instruction *I)
static bool classof(const Value *V)
iterator_range< op_iterator > indices()
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Definition: Instructions.h:956
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
void setSourceElementType(Type *Ty)
Definition: Instructions.h:992
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
Type * getSourceElementType() const
Definition: Instructions.h:990
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Definition: Instructions.h:980
Type * getPointerOperandType() const
Method to return the pointer operand as a PointerType.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, GEPNoWrapFlags NW, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Definition: Instructions.h:967
static unsigned getPointerOperandIndex()
bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
const_op_iterator idx_begin() const
GetElementPtrInst * cloneImpl() const
bool collectOffset(const DataLayout &DL, unsigned BitWidth, SmallMapVector< Value *, APInt, 4 > &VariableOffsets, APInt &ConstantOffset) const
void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
unsigned getNumIndices() const
GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
const_op_iterator idx_end() const
const Value * getPointerOperand() const
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
This instruction compares its operands according to the predicate given to the constructor.
bool hasSameSign() const
An icmp instruction, which can be marked as "samesign", indicating that the two operands have the sam...
static bool classof(const Value *V)
void setSameSign(bool B=true)
ICmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
static bool isCommutative(Predicate P)
static CmpPredicate getSwappedCmpPredicate(CmpPredicate Pred)
CmpPredicate getCmpPredicate() const
bool isCommutative() const
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
CmpPredicate getSwappedCmpPredicate() const
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
CmpPredicate getInverseCmpPredicate() const
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
static bool classof(const Instruction *I)
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
bool isEquality() const
Return true if this predicate is either EQ or NE.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
static bool isRelational(Predicate P)
Return true if the predicate is relational (not EQ or NE).
void swapOperands()
Exchange the two operands to this instruction in such a way that it does not modify the semantics of ...
static auto predicates()
Returns the sequence of all ICmp predicates.
ICmpInst(Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with no-insertion semantics.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
Indirect Branch Instruction.
static IndirectBrInst * Create(Value *Address, unsigned NumDests, InsertPosition InsertBefore=nullptr)
BasicBlock * getDestination(unsigned i)
Return the specified destination.
static bool classof(const Value *V)
const Value * getAddress() const
static bool classof(const Instruction *I)
BasicBlock * getSuccessor(unsigned i) const
iterator_range< const_succ_op_iterator > successors() const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
const BasicBlock * getDestination(unsigned i) const
void setSuccessor(unsigned i, BasicBlock *NewSucc)
void setAddress(Value *V)
unsigned getNumSuccessors() const
iterator_range< succ_op_iterator > successors()
This instruction inserts a single (scalar) element into a VectorType value.
static bool classof(const Value *V)
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
VectorType * getType() const
Overload to return most specific vector type.
static bool classof(const Instruction *I)
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
This instruction inserts a struct field of array element value into an aggregate value.
Value * getInsertedValueOperand()
static bool classof(const Instruction *I)
static unsigned getAggregateOperandIndex()
Value * getAggregateOperand()
static bool classof(const Value *V)
unsigned getNumIndices() const
ArrayRef< unsigned > getIndices() const
iterator_range< idx_iterator > indices() const
static unsigned getInsertedValueOperandIndex()
InsertValueInst * cloneImpl() const
idx_iterator idx_end() const
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
const Value * getAggregateOperand() const
bool hasIndices() const
const Value * getInsertedValueOperand() const
idx_iterator idx_begin() const
typename Bitfield::Element< AtomicOrdering, Offset, 3, AtomicOrdering::LAST > AtomicOrderingBitfieldElementT
Definition: Instruction.h:153
typename Bitfield::Element< bool, Offset, 1 > BoolBitfieldElementT
Definition: Instruction.h:148
bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
typename Bitfield::Element< unsigned, Offset, 6, Value::MaxAlignmentExponent > AlignmentBitfieldElementT
Definition: Instruction.h:145
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:274
void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
friend class BasicBlock
Various leaf nodes.
Definition: Instruction.h:1010
This class represents a cast from an integer to a pointer.
static bool classof(const Instruction *I)
IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
unsigned getAddressSpace() const
Returns the address space of this instruction's pointer type.
static bool classof(const Value *V)
Invoke instruction.
static bool classof(const Instruction *I)
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
static bool classof(const Value *V)
static InvokeInst * Create(FunctionCallee Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
void setSuccessor(unsigned i, BasicBlock *NewSucc)
static InvokeInst * Create(FunctionCallee Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
void setUnwindDest(BasicBlock *B)
BasicBlock * getNormalDest() const
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
unsigned getNumSuccessors() const
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
static bool classof(const Value *V)
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
void reserveClauses(unsigned Size)
Grow the size of the operand list to accommodate the new number of clauses.
static bool classof(const Instruction *I)
An instruction for reading from memory.
Definition: Instructions.h:176
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:261
const Value * getPointerOperand() const
Definition: Instructions.h:256
void setAlignment(Align Align)
Definition: Instructions.h:215
Value * getPointerOperand()
Definition: Instructions.h:255
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:205
static bool classof(const Instruction *I)
Definition: Instructions.h:266
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this load instruction.
Definition: Instructions.h:225
static bool classof(const Value *V)
Definition: Instructions.h:269
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this load instruction.
Definition: Instructions.h:235
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
Definition: Instructions.h:241
LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:220
Type * getPointerOperandType() const
Definition: Instructions.h:258
static unsigned getPointerOperandIndex()
Definition: Instructions.h:257
bool isUnordered() const
Definition: Instructions.h:249
void setVolatile(bool V)
Specify whether this is a volatile load or not.
Definition: Instructions.h:208
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:230
bool isSimple() const
Definition: Instructions.h:247
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:211
Metadata node.
Definition: Metadata.h:1069
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Definition: ArrayRef.h:310
BasicBlock * getIncomingBlock(Value::const_user_iterator I) const
Return incoming basic block corresponding to value use iterator.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
bool isComplete() const
If the PHI node is complete which means all of its parent's predecessors have incoming value in this ...
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
static bool classof(const Value *V)
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
void setIncomingBlock(unsigned i, BasicBlock *BB)
BasicBlock *const * const_block_iterator
void setIncomingValue(unsigned i, Value *V)
static unsigned getOperandNumForIncomingValue(unsigned i)
void copyIncomingBlocks(iterator_range< const_block_iterator > BBRange, uint32_t ToIdx=0)
Copies the basic blocks from BBRange to the incoming basic block list of this PHINode,...
const_block_iterator block_end() const
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
static unsigned getIncomingValueNumForOperand(unsigned i)
const_op_range incoming_values() const
Value * removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true)
void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New)
Replace every incoming basic block Old to basic block New.
BasicBlock * getIncomingBlock(const Use &U) const
Return incoming basic block corresponding to an operand of the PHI.
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Class to represent pointers.
Definition: DerivedTypes.h:670
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:703
This class represents a cast from a pointer to an integer.
Value * getPointerOperand()
Gets the pointer operand.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
static bool classof(const Value *V)
const Value * getPointerOperand() const
Gets the pointer operand.
static unsigned getPointerOperandIndex()
Gets the operand index of the pointer operand.
static bool classof(const Instruction *I)
PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
static ResumeInst * Create(Value *Exn, InsertPosition InsertBefore=nullptr)
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
Value * getValue() const
Convenience accessor.
static bool classof(const Value *V)
unsigned getNumSuccessors() const
ResumeInst * cloneImpl() const
static bool classof(const Instruction *I)
Return a value (possibly void), from a function.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
unsigned getNumSuccessors() const
static bool classof(const Value *V)
static bool classof(const Instruction *I)
static ReturnInst * Create(LLVMContext &C, BasicBlock *InsertAtEnd)
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents a sign extension of integer types.
static bool classof(const Value *V)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
SExtInst * cloneImpl() const
Clone an identical SExtInst.
This class represents a cast from signed integer to floating point.
SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V)
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
void setFalseValue(Value *V)
const Value * getFalseValue() const
void setTrueValue(Value *V)
OtherOps getOpcode() const
Value * getCondition()
Value * getTrueValue()
void swapValues()
Swap the true and false values of the select instruction.
Value * getFalseValue()
const Value * getCondition() const
SelectInst * cloneImpl() const
friend class Instruction
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
static const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
static bool classof(const Value *V)
void setCondition(Value *V)
const Value * getTrueValue() const
static bool classof(const Instruction *I)
This instruction constructs a fixed permutation of two input vectors.
static bool classof(const Value *V)
static bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts)
Constant * getShuffleMaskForBitcode() const
Return the mask for this instruction, for use in bitcode.
bool isSingleSource() const
Return true if this shuffle chooses elements from exactly one source vector without changing the leng...
bool changesLength() const
Return true if this shuffle returns a vector with a different number of elements than its source vect...
bool isExtractSubvectorMask(int &Index) const
Return true if this shuffle mask is an extract subvector mask.
ArrayRef< int > getShuffleMask() const
static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts, int &NumSubElts, int &Index)
static bool isSingleSourceMask(const Constant *Mask, int NumSrcElts)
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
void getShuffleMask(SmallVectorImpl< int > &Result) const
Return the mask for this instruction as a vector of integers.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
static bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor)
VectorType * getType() const
Overload to return most specific vector type.
bool isInsertSubvectorMask(int &NumSubElts, int &Index) const
Return true if this shuffle mask is an insert subvector mask.
bool increasesLength() const
Return true if this shuffle returns a vector with a greater number of elements than its source vector...
bool isZeroEltSplat() const
Return true if all elements of this shuffle are the same value as the first element of exactly one so...
static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts, int &Index)
bool isSelect() const
Return true if this shuffle chooses elements from its source vectors without lane crossings and all o...
static bool isSpliceMask(const Constant *Mask, int NumSrcElts, int &Index)
bool isTranspose() const
Return true if this shuffle transposes the elements of its inputs without changing the length of the ...
static void commuteShuffleMask(MutableArrayRef< int > Mask, unsigned InVecNumElts)
Change values in a shuffle permute mask assuming the two vector operands of length InVecNumElts have ...
bool isSplice(int &Index) const
Return true if this shuffle splices two inputs without changing the length of the vectors.
static bool isReverseMask(const Constant *Mask, int NumSrcElts)
static bool isSelectMask(const Constant *Mask, int NumSrcElts)
static bool classof(const Instruction *I)
static bool isZeroEltSplatMask(const Constant *Mask, int NumSrcElts)
bool isIdentity() const
Return true if this shuffle chooses elements from exactly one source vector without lane crossings an...
static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor, int &VF)
static bool isIdentityMask(const Constant *Mask, int NumSrcElts)
static bool isTransposeMask(const Constant *Mask, int NumSrcElts)
bool isReverse() const
Return true if this shuffle swaps the order of elements from exactly one source vector.
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
An instruction for storing to memory.
Definition: Instructions.h:292
static bool classof(const Instruction *I)
Definition: Instructions.h:392
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Definition: Instructions.h:342
const Value * getPointerOperand() const
Definition: Instructions.h:382
Align getAlign() const
Definition: Instructions.h:333
Type * getPointerOperandType() const
Definition: Instructions.h:384
void setVolatile(bool V)
Specify whether this is a volatile store or not.
Definition: Instructions.h:328
void setAlignment(Align Align)
Definition: Instructions.h:337
bool isSimple() const
Definition: Instructions.h:370
const Value * getValueOperand() const
Definition: Instructions.h:379
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this store instruction.
Definition: Instructions.h:348
Value * getValueOperand()
Definition: Instructions.h:378
static bool classof(const Value *V)
Definition: Instructions.h:395
bool isUnordered() const
Definition: Instructions.h:372
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this store instruction.
Definition: Instructions.h:358
StoreInst * cloneImpl() const
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:387
static unsigned getPointerOperandIndex()
Definition: Instructions.h:383
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
Definition: Instructions.h:353
bool isVolatile() const
Return true if this is a store to a volatile memory location.
Definition: Instructions.h:325
Value * getPointerOperand()
Definition: Instructions.h:381
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
Definition: Instructions.h:364
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
A wrapper class to simplify modification of SwitchInst cases along with their prof branch_weights met...
void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
SwitchInstProfUpdateWrapper(SwitchInst &SI)
CaseWeightOpt getSuccessorWeight(unsigned idx)
std::optional< uint32_t > CaseWeightOpt
SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
A handle to a particular switch case.
unsigned getCaseIndex() const
Returns the number of the current case.
unsigned getSuccessorIndex() const
Returns the successor index for the current case successor.
BasicBlockT * getCaseSuccessor() const
Resolves successor for current case.
CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index)
bool operator==(const CaseHandleImpl &RHS) const
ConstantIntT * getCaseValue() const
Resolves case value for current case.
CaseHandle(SwitchInst *SI, ptrdiff_t Index)
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
const CaseHandleT & operator*() const
CaseIteratorImpl()=default
Default constructed iterator is in an invalid state until assigned to a case for a particular switch.
CaseIteratorImpl & operator-=(ptrdiff_t N)
bool operator==(const CaseIteratorImpl &RHS) const
CaseIteratorImpl & operator+=(ptrdiff_t N)
ptrdiff_t operator-(const CaseIteratorImpl &RHS) const
bool operator<(const CaseIteratorImpl &RHS) const
CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum)
Initializes case iterator for given SwitchInst and for given case number.
static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI, unsigned SuccessorIndex)
Initializes case iterator for given SwitchInst and for given successor index.
Multiway switch.
BasicBlock * getDefaultDest() const
CaseIt case_end()
Returns a read/write iterator that points one past the last case in the SwitchInst.
BasicBlock * getSuccessor(unsigned idx) const
ConstCaseIt findCaseValue(const ConstantInt *C) const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
static SwitchInst * Create(Value *Value, BasicBlock *Default, unsigned NumCases, InsertPosition InsertBefore=nullptr)
void setCondition(Value *V)
ConstCaseIt case_begin() const
Returns a read-only iterator that points to the first case in the SwitchInst.
bool defaultDestUndefined() const
Returns true if the default branch must result in immediate undefined behavior, false otherwise.
iterator_range< ConstCaseIt > cases() const
Constant iteration adapter for range-for loops.
ConstantInt * findCaseDest(BasicBlock *BB)
Finds the unique case value for a given successor.
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
static bool classof(const Value *V)
unsigned getNumSuccessors() const
CaseIt case_default()
Returns an iterator that points to the default case.
void setDefaultDest(BasicBlock *DefaultCase)
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
CaseIt findCaseValue(const ConstantInt *C)
Search all of the case values for the specified constant.
Value * getCondition() const
ConstCaseIt case_default() const
CaseIt case_begin()
Returns a read/write iterator that points to the first case in the SwitchInst.
static bool classof(const Instruction *I)
iterator_range< CaseIt > cases()
Iteration adapter for range-for loops.
ConstCaseIt case_end() const
Returns a read-only iterator that points one past the last case in the SwitchInst.
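A short sketch using the case iteration adapters above; countCasesTo and its parameters are hypothetical names.

  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  unsigned countCasesTo(SwitchInst &SI, BasicBlock *Target) {
    unsigned Hits = 0;
    for (const auto &Case : SI.cases())          // excludes the default case
      if (Case.getCaseSuccessor() == Target)
        ++Hits;
    return Hits;
  }

findCaseValue() complements this loop: it returns the matching case iterator for a given ConstantInt, or the default-case iterator when the value is not explicitly handled.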
This class represents a truncation of integer types.
void setHasNoSignedWrap(bool B)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
TruncInst * cloneImpl() const
Clone an identical TruncInst.
void setHasNoUnsignedWrap(bool B)
unsigned getNoWrapKind() const
Returns the no-wrap kind of the operation.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
static bool classof(const Value *V)
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
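A sketch showing the no-wrap flags above on a freshly created truncation; truncNoWrap, the incoming value V, and the insertion point are hypothetical.

  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  Value *truncNoWrap(Value *V, Type *Int32Ty, Instruction *InsertBefore) {
    TruncInst *TI = new TruncInst(V, Int32Ty, "lo", InsertBefore);
    TI->setHasNoUnsignedWrap(true);   // truncation is known not to drop set bits
    return TI;
  }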
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:270
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:234
This class represents a cast from unsigned integer to floating point.
static bool classof(const Value *V)
UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
This function has undefined behavior.
unsigned getNumSuccessors() const
static bool classof(const Value *V)
static bool classof(const Instruction *I)
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
void allocHungoffUses(unsigned N, bool IsPhi=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition: User.cpp:50
op_iterator op_begin()
Definition: User.h:280
const Use & getOperandUse(unsigned i) const
Definition: User.h:241
Value * getOperand(unsigned i) const
Definition: User.h:228
unsigned getNumOperands() const
Definition: User.h:250
op_iterator op_end()
Definition: User.h:282
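A small sketch of the operand accessors above; visitOperands is a hypothetical helper that walks every operand of a User (for example, an Instruction).

  #include "llvm/IR/User.h"
  using namespace llvm;

  void visitOperands(const User &U) {
    for (unsigned I = 0, E = U.getNumOperands(); I != E; ++I) {
      Value *Op = U.getOperand(I);          // the operand Value itself
      const Use &Edge = U.getOperandUse(I); // the def-use edge for that operand
      (void)Op; (void)Edge;
    }
  }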
This class represents the va_arg llvm instruction, which returns an argument of the specified type given a va_list and increments it.
static bool classof(const Instruction *I)
Value * getPointerOperand()
VAArgInst(Value *List, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
const Value * getPointerOperand() const
static bool classof(const Value *V)
static unsigned getPointerOperandIndex()
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
user_iterator_impl< const User > const_user_iterator
Definition: Value.h:391
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition: Value.h:84
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
Base class of all SIMD vector types.
Definition: DerivedTypes.h:427
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
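A sketch of the factory method above, building both a fixed and a scalable vector type; the function name and context parameter are illustrative.

  #include "llvm/IR/DerivedTypes.h"
  using namespace llvm;

  void makeVectorTypes(LLVMContext &Ctx) {
    VectorType *V4I32 =                      // <4 x i32>
        VectorType::get(Type::getInt32Ty(Ctx), ElementCount::getFixed(4));
    VectorType *NxV2F64 =                    // <vscale x 2 x double>
        VectorType::get(Type::getDoubleTy(Ctx), ElementCount::getScalable(2));
    (void)V4I32; (void)NxV2F64;
  }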
This class represents zero extension of integer types.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V)
ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
An efficient, type-erasing, non-owning reference to a callable.
base_list_type::iterator iterator
Definition: ilist.h:121
CRTP base class for adapting an iterator to a different type.
Definition: iterator.h:237
CRTP base class which implements the entire standard iterator facade in terms of a minimal subset of the interface.
Definition: iterator.h:80
A range adaptor for a pair of iterators.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition: ISDOpcodes.h:71
@ System
Synchronized with respect to all concurrently executing threads.
Definition: LLVMContext.h:57
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
Type * checkGEPType(Type *Ty)
Definition: Instructions.h:925
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1739
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of a load or store instruction.
APInt operator*(APInt a, uint64_t RHS)
Definition: APInt.h:2204
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void setAtomicSyncScopeID(Instruction *I, SyncScope::ID SSID)
A helper function that sets an atomic operation's sync scope.
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of a load or store instruction.
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
std::optional< SyncScope::ID > getAtomicSyncScopeID(const Instruction *I)
A helper function that returns an atomic operation's sync scope; returns std::nullopt if it is not an atomic operation.
constexpr int PoisonMaskElem
AtomicOrdering
Atomic ordering for LLVM's memory model.
DWARFExpression::Operation Op
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1841
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:217
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1766
auto predecessors(const MachineBasicBlock *BB)
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
void setLoadStoreAlignment(Value *I, Align NewAlign)
A helper function that sets the alignment of a load or store instruction.
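A sketch combining the load/store helper functions listed in this namespace; describeAccess is a hypothetical function and I is assumed to be either a LoadInst or a StoreInst.

  #include "llvm/IR/Instructions.h"
  #include <optional>
  using namespace llvm;

  void describeAccess(Instruction *I) {
    Value *Ptr     = getLoadStorePointerOperand(I);   // address operand
    Type *AccessTy = getLoadStoreType(I);             // loaded or stored type
    unsigned AS    = getLoadStoreAddressSpace(I);     // pointer address space
    Align A        = getLoadStoreAlignment(I);
    if (A.value() < 16)
      setLoadStoreAlignment(I, Align(16));            // raise the recorded alignment
    if (std::optional<SyncScope::ID> SSID = getAtomicSyncScopeID(I))
      (void)*SSID;                                    // present only for atomic accesses
    (void)Ptr; (void)AccessTy; (void)AS;
  }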
unsigned Log2(Align A)
Returns the log2 of the alignment.
Definition: Alignment.h:208
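As a worked example, Log2(Align(1)) is 0, Log2(Align(16)) is 4, and Log2(Align(4096)) is 12: an Align is always a non-zero power of two, and Log2 returns its exponent.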
@ Default
The result values are uniform if and only if all operands are uniform.
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Summary of memprof metadata on allocations.
Describes an element of a Bitfield.
Definition: Bitfields.h:223
static constexpr bool areContiguous()
Definition: Bitfields.h:280
The const version of succ_op_iterator.
const BasicBlock * operator->() const
const_succ_op_iterator(const_value_op_iterator I)
const BasicBlock * operator*() const
Iterator type that casts an operand to a basic block.
succ_op_iterator(value_op_iterator I)
FixedNumOperandTraits - determine the allocation regime of the Use array when it is a prefix to the U...
Definition: OperandTraits.h:30
HungoffOperandTraits - determine the allocation regime of the Use array when it is not a prefix to th...
Definition: OperandTraits.h:93
The const version of succ_op_iterator.
const_succ_op_iterator(const_value_op_iterator I)
Iterator type that casts an operand to a basic block.
Compile-time customization of User operands.
Definition: User.h:42
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:254
Information about how a User object was allocated, to be passed into the User constructor.
Definition: User.h:79
Indicates this User has operands "hung off" in another allocation.
Definition: User.h:57
Indicates this User has operands co-allocated.
Definition: User.h:60
Iterator for directly iterating over the operand Values.
Definition: User.h:299
VariadicOperandTraits - determine the allocation regime of the Use array when it is a prefix to the U...
Definition: OperandTraits.h:67