1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/Twine.h"
24#include "llvm/ADT/iterator.h"
26#include "llvm/IR/CFG.h"
28#include "llvm/IR/Constant.h"
31#include "llvm/IR/InstrTypes.h"
32#include "llvm/IR/Instruction.h"
33#include "llvm/IR/Intrinsics.h"
35#include "llvm/IR/Use.h"
36#include "llvm/IR/User.h"
39#include <cassert>
40#include <cstddef>
41#include <cstdint>
42#include <iterator>
43#include <optional>
44
45namespace llvm {
46
47class APFloat;
48class APInt;
49class BasicBlock;
50class ConstantInt;
51class DataLayout;
52struct KnownBits;
53class StringRef;
54class Type;
55class Value;
56class UnreachableInst;
57
58//===----------------------------------------------------------------------===//
59// AllocaInst Class
60//===----------------------------------------------------------------------===//
61
62/// an instruction to allocate memory on the stack
63class AllocaInst : public UnaryInstruction {
64 Type *AllocatedType;
65
66 using AlignmentField = AlignmentBitfieldElementT<0>;
67 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
68 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
69 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
70 SwiftErrorField>(),
71 "Bitfields must be contiguous");
72
73protected:
74 // Note: Instruction needs to be a friend here to call cloneImpl.
75 friend class Instruction;
76
77 AllocaInst *cloneImpl() const;
78
79public:
80 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
81 const Twine &Name, InsertPosition InsertBefore);
82
83 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
84 InsertPosition InsertBefore);
85
86 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
87 const Twine &Name = "", InsertPosition InsertBefore = nullptr);
88
89 /// Return true if there is an allocation size parameter to the allocation
90 /// instruction that is not 1.
91 bool isArrayAllocation() const;
92
93 /// Get the number of elements allocated. For a simple allocation of a single
94 /// element, this will return a constant 1 value.
95 const Value *getArraySize() const { return getOperand(0); }
96 Value *getArraySize() { return getOperand(0); }
97
98 /// Overload to return most specific pointer type.
99 PointerType *getType() const {
100 return cast<PointerType>(Instruction::getType());
101 }
102
103 /// Return the address space for the allocation.
104 unsigned getAddressSpace() const {
105 return getType()->getAddressSpace();
106 }
107
108 /// Get allocation size in bytes. Returns std::nullopt if size can't be
109 /// determined, e.g. in case of a VLA.
110 std::optional<TypeSize> getAllocationSize(const DataLayout &DL) const;
111
112 /// Get allocation size in bits. Returns std::nullopt if size can't be
113 /// determined, e.g. in case of a VLA.
114 std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
115
116 /// Return the type that is being allocated by the instruction.
117 Type *getAllocatedType() const { return AllocatedType; }
118 /// for use only in special circumstances that need to generically
119 /// transform a whole instruction (eg: IR linking and vectorization).
120 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
121
122 /// Return the alignment of the memory that is being allocated by the
123 /// instruction.
124 Align getAlign() const {
125 return Align(1ULL << getSubclassData<AlignmentField>());
126 }
127
128 void setAlignment(Align Align) {
129 setSubclassData<AlignmentField>(Log2(Align));
130 }
131
132 /// Return true if this alloca is in the entry block of the function and is a
133 /// constant size. If so, the code generator will fold it into the
134 /// prolog/epilog code, so it is basically free.
135 bool isStaticAlloca() const;
136
137 /// Return true if this alloca is used as an inalloca argument to a call. Such
138 /// allocas are never considered static even if they are in the entry block.
139 bool isUsedWithInAlloca() const {
140 return getSubclassData<UsedWithInAllocaField>();
141 }
142
143 /// Specify whether this alloca is used to represent the arguments to a call.
144 void setUsedWithInAlloca(bool V) {
145 setSubclassData<UsedWithInAllocaField>(V);
146 }
147
148 /// Return true if this alloca is used as a swifterror argument to a call.
149 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
150 /// Specify whether this alloca is used to represent a swifterror.
151 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
152
153 // Methods for support type inquiry through isa, cast, and dyn_cast:
154 static bool classof(const Instruction *I) {
155 return (I->getOpcode() == Instruction::Alloca);
156 }
157 static bool classof(const Value *V) {
158 return isa<Instruction>(V) && classof(cast<Instruction>(V));
159 }
160
161private:
162 // Shadow Instruction::setInstructionSubclassData with a private forwarding
163 // method so that subclasses cannot accidentally use it.
164 template <typename Bitfield>
165 void setSubclassData(typename Bitfield::Type Value) {
166 Instruction::setSubclassData<Bitfield>(Value);
167 }
168};
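// Usage sketch (illustrative, not part of this header), assuming an IRBuilder<>
// `B` positioned in a function's entry block and an LLVMContext `Ctx`:
// \code
//   AllocaInst *Slot = B.CreateAlloca(Type::getInt32Ty(Ctx), /*ArraySize=*/nullptr, "slot");
//   Slot->setAlignment(Align(8));
//   bool Foldable = Slot->isStaticAlloca(); // constant size in the entry block
// \endcode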
169
170//===----------------------------------------------------------------------===//
171// LoadInst Class
172//===----------------------------------------------------------------------===//
173
174/// An instruction for reading from memory. This uses the SubclassData field in
175/// Value to store whether or not the load is volatile.
176class LoadInst : public UnaryInstruction {
177 using VolatileField = BoolBitfieldElementT<0>;
178 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
179 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
180 static_assert(
181 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
182 "Bitfields must be contiguous");
183
184 void AssertOK();
185
186protected:
187 // Note: Instruction needs to be a friend here to call cloneImpl.
188 friend class Instruction;
189
190 LoadInst *cloneImpl() const;
191
192public:
193 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
194 InsertPosition InsertBefore);
195 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
196 InsertPosition InsertBefore);
197 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
198 Align Align, InsertPosition InsertBefore = nullptr);
199 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
200 Align Align, AtomicOrdering Order,
201 SyncScope::ID SSID = SyncScope::System,
202 InsertPosition InsertBefore = nullptr);
203
204 /// Return true if this is a load from a volatile memory location.
205 bool isVolatile() const { return getSubclassData<VolatileField>(); }
206
207 /// Specify whether this is a volatile load or not.
208 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
209
210 /// Return the alignment of the access that is being performed.
211 Align getAlign() const {
212 return Align(1ULL << (getSubclassData<AlignmentField>()));
213 }
214
215 void setAlignment(Align Align) {
216 setSubclassData<AlignmentField>(Log2(Align));
217 }
218
219 /// Returns the ordering constraint of this load instruction.
220 AtomicOrdering getOrdering() const {
221 return getSubclassData<OrderingField>();
222 }
223 /// Sets the ordering constraint of this load instruction. May not be Release
224 /// or AcquireRelease.
225 void setOrdering(AtomicOrdering Ordering) {
226 setSubclassData<OrderingField>(Ordering);
227 }
228
229 /// Returns the synchronization scope ID of this load instruction.
230 SyncScope::ID getSyncScopeID() const {
231 return SSID;
232 }
233
234 /// Sets the synchronization scope ID of this load instruction.
235 void setSyncScopeID(SyncScope::ID SSID) {
236 this->SSID = SSID;
237 }
238
239 /// Sets the ordering constraint and the synchronization scope ID of this load
240 /// instruction.
241 void setAtomic(AtomicOrdering Ordering,
242 SyncScope::ID SSID = SyncScope::System) {
243 setOrdering(Ordering);
244 setSyncScopeID(SSID);
245 }
246
247 bool isSimple() const { return !isAtomic() && !isVolatile(); }
248
249 bool isUnordered() const {
250 return (getOrdering() == AtomicOrdering::NotAtomic ||
251 getOrdering() == AtomicOrdering::Unordered) &&
252 !isVolatile();
253 }
254
255 Value *getPointerOperand() { return getOperand(0); }
256 const Value *getPointerOperand() const { return getOperand(0); }
257 static unsigned getPointerOperandIndex() { return 0U; }
258 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
259
260 /// Returns the address space of the pointer operand.
261 unsigned getPointerAddressSpace() const {
262 return getPointerOperandType()->getPointerAddressSpace();
263 }
264
265 // Methods for support type inquiry through isa, cast, and dyn_cast:
266 static bool classof(const Instruction *I) {
267 return I->getOpcode() == Instruction::Load;
268 }
269 static bool classof(const Value *V) {
270 return isa<Instruction>(V) && classof(cast<Instruction>(V));
271 }
272
273private:
274 // Shadow Instruction::setInstructionSubclassData with a private forwarding
275 // method so that subclasses cannot accidentally use it.
276 template <typename Bitfield>
277 void setSubclassData(typename Bitfield::Type Value) {
278 Instruction::setSubclassData<Bitfield>(Value);
279 }
280
281 /// The synchronization scope ID of this load instruction. Not quite enough
282 /// room in SubClassData for everything, so synchronization scope ID gets its
283 /// own field.
284 SyncScope::ID SSID;
285};
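// Usage sketch (illustrative, not part of this header), assuming an IRBuilder<>
// `B`, an LLVMContext `Ctx`, and a pointer Value `Ptr`:
// \code
//   LoadInst *LI = B.CreateAlignedLoad(Type::getInt32Ty(Ctx), Ptr, Align(4), "val");
//   LI->setAtomic(AtomicOrdering::Acquire); // must not be Release or AcquireRelease
//   bool Simple = LI->isSimple();           // false once the load is atomic or volatile
// \endcode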
286
287//===----------------------------------------------------------------------===//
288// StoreInst Class
289//===----------------------------------------------------------------------===//
290
291/// An instruction for storing to memory.
292class StoreInst : public Instruction {
293 using VolatileField = BoolBitfieldElementT<0>;
294 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
295 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
296 static_assert(
297 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
298 "Bitfields must be contiguous");
299
300 void AssertOK();
301
302 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
303
304protected:
305 // Note: Instruction needs to be a friend here to call cloneImpl.
306 friend class Instruction;
307
308 StoreInst *cloneImpl() const;
309
310public:
311 StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore);
312 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
313 InsertPosition InsertBefore);
314 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
315 InsertPosition InsertBefore = nullptr);
316 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
317 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
318 InsertPosition InsertBefore = nullptr);
319
320 // allocate space for exactly two operands
321 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
322 void operator delete(void *Ptr) { User::operator delete(Ptr); }
323
324 /// Return true if this is a store to a volatile memory location.
325 bool isVolatile() const { return getSubclassData<VolatileField>(); }
326
327 /// Specify whether this is a volatile store or not.
328 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
329
330 /// Transparently provide more efficient getOperand methods.
331 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
332
333 Align getAlign() const {
334 return Align(1ULL << (getSubclassData<AlignmentField>()));
335 }
336
337 void setAlignment(Align Align) {
338 setSubclassData<AlignmentField>(Log2(Align));
339 }
340
341 /// Returns the ordering constraint of this store instruction.
342 AtomicOrdering getOrdering() const {
343 return getSubclassData<OrderingField>();
344 }
345
346 /// Sets the ordering constraint of this store instruction. May not be
347 /// Acquire or AcquireRelease.
348 void setOrdering(AtomicOrdering Ordering) {
349 setSubclassData<OrderingField>(Ordering);
350 }
351
352 /// Returns the synchronization scope ID of this store instruction.
353 SyncScope::ID getSyncScopeID() const {
354 return SSID;
355 }
356
357 /// Sets the synchronization scope ID of this store instruction.
358 void setSyncScopeID(SyncScope::ID SSID) {
359 this->SSID = SSID;
360 }
361
362 /// Sets the ordering constraint and the synchronization scope ID of this
363 /// store instruction.
364 void setAtomic(AtomicOrdering Ordering,
365 SyncScope::ID SSID = SyncScope::System) {
366 setOrdering(Ordering);
367 setSyncScopeID(SSID);
368 }
369
370 bool isSimple() const { return !isAtomic() && !isVolatile(); }
371
372 bool isUnordered() const {
373 return (getOrdering() == AtomicOrdering::NotAtomic ||
374 getOrdering() == AtomicOrdering::Unordered) &&
375 !isVolatile();
376 }
377
378 Value *getValueOperand() { return getOperand(0); }
379 const Value *getValueOperand() const { return getOperand(0); }
380
381 Value *getPointerOperand() { return getOperand(1); }
382 const Value *getPointerOperand() const { return getOperand(1); }
383 static unsigned getPointerOperandIndex() { return 1U; }
384 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
385
386 /// Returns the address space of the pointer operand.
387 unsigned getPointerAddressSpace() const {
388 return getPointerOperandType()->getPointerAddressSpace();
389 }
390
391 // Methods for support type inquiry through isa, cast, and dyn_cast:
392 static bool classof(const Instruction *I) {
393 return I->getOpcode() == Instruction::Store;
394 }
395 static bool classof(const Value *V) {
396 return isa<Instruction>(V) && classof(cast<Instruction>(V));
397 }
398
399private:
400 // Shadow Instruction::setInstructionSubclassData with a private forwarding
401 // method so that subclasses cannot accidentally use it.
402 template <typename Bitfield>
403 void setSubclassData(typename Bitfield::Type Value) {
404 Instruction::setSubclassData<Bitfield>(Value);
405 }
406
407 /// The synchronization scope ID of this store instruction. Not quite enough
408 /// room in SubClassData for everything, so synchronization scope ID gets its
409 /// own field.
410 SyncScope::ID SSID;
411};
412
413template <>
414struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
415};
416
417DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
418
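// Usage sketch (illustrative, not part of this header), assuming an IRBuilder<>
// `B` plus Values `Val` and `Ptr`:
// \code
//   StoreInst *SI = B.CreateAlignedStore(Val, Ptr, Align(4));
//   SI->setAtomic(AtomicOrdering::Release, SyncScope::System);
//   unsigned AS = SI->getPointerAddressSpace();
// \endcode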
419//===----------------------------------------------------------------------===//
420// FenceInst Class
421//===----------------------------------------------------------------------===//
422
423/// An instruction for ordering other memory operations.
424class FenceInst : public Instruction {
425 using OrderingField = AtomicOrderingBitfieldElementT<0>;
426
427 constexpr static IntrusiveOperandsAllocMarker AllocMarker{0};
428
429 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
430
431protected:
432 // Note: Instruction needs to be a friend here to call cloneImpl.
433 friend class Instruction;
434
435 FenceInst *cloneImpl() const;
436
437public:
438 // Ordering may only be Acquire, Release, AcquireRelease, or
439 // SequentiallyConsistent.
440 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
441 SyncScope::ID SSID = SyncScope::System,
442 InsertPosition InsertBefore = nullptr);
443
444 // allocate space for exactly zero operands
445 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
446 void operator delete(void *Ptr) { User::operator delete(Ptr); }
447
448 /// Returns the ordering constraint of this fence instruction.
449 AtomicOrdering getOrdering() const {
450 return getSubclassData<OrderingField>();
451 }
452
453 /// Sets the ordering constraint of this fence instruction. May only be
454 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
455 void setOrdering(AtomicOrdering Ordering) {
456 setSubclassData<OrderingField>(Ordering);
457 }
458
459 /// Returns the synchronization scope ID of this fence instruction.
460 SyncScope::ID getSyncScopeID() const {
461 return SSID;
462 }
463
464 /// Sets the synchronization scope ID of this fence instruction.
465 void setSyncScopeID(SyncScope::ID SSID) {
466 this->SSID = SSID;
467 }
468
469 // Methods for support type inquiry through isa, cast, and dyn_cast:
470 static bool classof(const Instruction *I) {
471 return I->getOpcode() == Instruction::Fence;
472 }
473 static bool classof(const Value *V) {
474 return isa<Instruction>(V) && classof(cast<Instruction>(V));
475 }
476
477private:
478 // Shadow Instruction::setInstructionSubclassData with a private forwarding
479 // method so that subclasses cannot accidentally use it.
480 template <typename Bitfield>
481 void setSubclassData(typename Bitfield::Type Value) {
482 Instruction::setSubclassData<Bitfield>(Value);
483 }
484
485 /// The synchronization scope ID of this fence instruction. Not quite enough
486 /// room in SubClassData for everything, so synchronization scope ID gets its
487 /// own field.
488 SyncScope::ID SSID;
489};
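// Usage sketch (illustrative, not part of this header), assuming an IRBuilder<> `B`:
// \code
//   FenceInst *FI = B.CreateFence(AtomicOrdering::SequentiallyConsistent);
//   FI->setSyncScopeID(SyncScope::SingleThread); // only orders within one thread
// \endcode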
490
491//===----------------------------------------------------------------------===//
492// AtomicCmpXchgInst Class
493//===----------------------------------------------------------------------===//
494
495/// An instruction that atomically checks whether a
496/// specified value is in a memory location, and, if it is, stores a new value
497/// there. The value returned by this instruction is a pair containing the
498/// original value as first element, and an i1 indicating success (true) or
499/// failure (false) as second element.
500///
501class AtomicCmpXchgInst : public Instruction {
502 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
503 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
504 SyncScope::ID SSID);
505
506 template <unsigned Offset>
507 using AtomicOrderingBitfieldElement =
508 typename Bitfield::Element<AtomicOrdering, Offset, 3,
509 AtomicOrdering::LAST>;
510
511 constexpr static IntrusiveOperandsAllocMarker AllocMarker{3};
512
513protected:
514 // Note: Instruction needs to be a friend here to call cloneImpl.
515 friend class Instruction;
516
517 AtomicCmpXchgInst *cloneImpl() const;
518
519public:
520 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
521 AtomicOrdering SuccessOrdering,
522 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
523 InsertPosition InsertBefore = nullptr);
524
525 // allocate space for exactly three operands
526 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
527 void operator delete(void *Ptr) { User::operator delete(Ptr); }
528
528
529 using VolatileField = BoolBitfieldElementT<0>;
530 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
531 using SuccessOrderingField =
532 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
533 using FailureOrderingField =
534 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
535 using AlignmentField =
536 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
537 static_assert(
538 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
539 FailureOrderingField, AlignmentField>(),
540 "Bitfields must be contiguous");
541
542 /// Return the alignment of the memory that is being allocated by the
543 /// instruction.
544 Align getAlign() const {
545 return Align(1ULL << getSubclassData<AlignmentField>());
546 }
547
548 void setAlignment(Align Align) {
549 setSubclassData<AlignmentField>(Log2(Align));
550 }
551
552 /// Return true if this is a cmpxchg from a volatile memory
553 /// location.
554 ///
555 bool isVolatile() const { return getSubclassData<VolatileField>(); }
556
557 /// Specify whether this is a volatile cmpxchg.
558 ///
559 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
560
561 /// Return true if this cmpxchg may spuriously fail.
562 bool isWeak() const { return getSubclassData<WeakField>(); }
563
564 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
565
566 /// Transparently provide more efficient getOperand methods.
567 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
568
569 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
570 return Ordering != AtomicOrdering::NotAtomic &&
571 Ordering != AtomicOrdering::Unordered;
572 }
573
574 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
575 return Ordering != AtomicOrdering::NotAtomic &&
576 Ordering != AtomicOrdering::Unordered &&
577 Ordering != AtomicOrdering::AcquireRelease &&
578 Ordering != AtomicOrdering::Release;
579 }
580
581 /// Returns the success ordering constraint of this cmpxchg instruction.
582 AtomicOrdering getSuccessOrdering() const {
583 return getSubclassData<SuccessOrderingField>();
584 }
585
586 /// Sets the success ordering constraint of this cmpxchg instruction.
587 void setSuccessOrdering(AtomicOrdering Ordering) {
588 assert(isValidSuccessOrdering(Ordering) &&
589 "invalid CmpXchg success ordering");
590 setSubclassData<SuccessOrderingField>(Ordering);
591 }
592
593 /// Returns the failure ordering constraint of this cmpxchg instruction.
594 AtomicOrdering getFailureOrdering() const {
595 return getSubclassData<FailureOrderingField>();
596 }
597
598 /// Sets the failure ordering constraint of this cmpxchg instruction.
599 void setFailureOrdering(AtomicOrdering Ordering) {
600 assert(isValidFailureOrdering(Ordering) &&
601 "invalid CmpXchg failure ordering");
602 setSubclassData<FailureOrderingField>(Ordering);
603 }
604
605 /// Returns a single ordering which is at least as strong as both the
606 /// success and failure orderings for this cmpxchg.
607 AtomicOrdering getMergedOrdering() const {
608 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
609 return AtomicOrdering::SequentiallyConsistent;
610 if (getFailureOrdering() == AtomicOrdering::Acquire) {
611 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
612 return AtomicOrdering::Acquire;
613 if (getSuccessOrdering() == AtomicOrdering::Release)
614 return AtomicOrdering::AcquireRelease;
615 }
616 return getSuccessOrdering();
617 }
618
619 /// Returns the synchronization scope ID of this cmpxchg instruction.
620 SyncScope::ID getSyncScopeID() const {
621 return SSID;
622 }
623
624 /// Sets the synchronization scope ID of this cmpxchg instruction.
625 void setSyncScopeID(SyncScope::ID SSID) {
626 this->SSID = SSID;
627 }
628
629 Value *getPointerOperand() { return getOperand(0); }
630 const Value *getPointerOperand() const { return getOperand(0); }
631 static unsigned getPointerOperandIndex() { return 0U; }
632
633 Value *getCompareOperand() { return getOperand(1); }
634 const Value *getCompareOperand() const { return getOperand(1); }
635
636 Value *getNewValOperand() { return getOperand(2); }
637 const Value *getNewValOperand() const { return getOperand(2); }
638
639 /// Returns the address space of the pointer operand.
640 unsigned getPointerAddressSpace() const {
641 return getPointerOperand()->getType()->getPointerAddressSpace();
642 }
643
644 /// Returns the strongest permitted ordering on failure, given the
645 /// desired ordering on success.
646 ///
647 /// If the comparison in a cmpxchg operation fails, there is no atomic store
648 /// so release semantics cannot be provided. So this function drops explicit
649 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
650 /// operation would remain SequentiallyConsistent.
651 static AtomicOrdering
652 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
653 switch (SuccessOrdering) {
654 default:
655 llvm_unreachable("invalid cmpxchg success ordering");
656 case AtomicOrdering::Release:
657 case AtomicOrdering::Monotonic:
658 return AtomicOrdering::Monotonic;
659 case AtomicOrdering::AcquireRelease:
660 case AtomicOrdering::Acquire:
661 return AtomicOrdering::Acquire;
662 case AtomicOrdering::SequentiallyConsistent:
663 return AtomicOrdering::SequentiallyConsistent;
664 }
665 }
666
667 // Methods for support type inquiry through isa, cast, and dyn_cast:
668 static bool classof(const Instruction *I) {
669 return I->getOpcode() == Instruction::AtomicCmpXchg;
670 }
671 static bool classof(const Value *V) {
672 return isa<Instruction>(V) && classof(cast<Instruction>(V));
673 }
674
675private:
676 // Shadow Instruction::setInstructionSubclassData with a private forwarding
677 // method so that subclasses cannot accidentally use it.
678 template <typename Bitfield>
679 void setSubclassData(typename Bitfield::Type Value) {
680 Instruction::setSubclassData<Bitfield>(Value);
681 }
682
683 /// The synchronization scope ID of this cmpxchg instruction. Not quite
684 /// enough room in SubClassData for everything, so synchronization scope ID
685 /// gets its own field.
686 SyncScope::ID SSID;
687};
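// Usage sketch (illustrative, not part of this header), assuming an IRBuilder<>
// `B` plus Values `Ptr`, `Expected` and `Desired`:
// \code
//   AtomicCmpXchgInst *CX = B.CreateAtomicCmpXchg(
//       Ptr, Expected, Desired, Align(4), AtomicOrdering::SequentiallyConsistent,
//       AtomicOrdering::Monotonic);
//   Value *Old = B.CreateExtractValue(CX, 0); // previous memory contents
//   Value *Ok  = B.CreateExtractValue(CX, 1); // i1 success flag
// \endcode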
688
689template <>
690struct OperandTraits<AtomicCmpXchgInst> :
691 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
692};
693
694DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
695
696//===----------------------------------------------------------------------===//
697// AtomicRMWInst Class
698//===----------------------------------------------------------------------===//
699
700/// an instruction that atomically reads a memory location,
701/// combines it with another value, and then stores the result back. Returns
702/// the old value.
703///
704class AtomicRMWInst : public Instruction {
705protected:
706 // Note: Instruction needs to be a friend here to call cloneImpl.
707 friend class Instruction;
708
709 AtomicRMWInst *cloneImpl() const;
710
711public:
712 /// This enumeration lists the possible modifications atomicrmw can make. In
713 /// the descriptions, 'p' is the pointer to the instruction's memory location,
714 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
715 /// instruction. These instructions always return 'old'.
716 enum BinOp : unsigned {
717 /// *p = v
718 Xchg,
719 /// *p = old + v
720 Add,
721 /// *p = old - v
722 Sub,
723 /// *p = old & v
724 And,
725 /// *p = ~(old & v)
726 Nand,
727 /// *p = old | v
728 Or,
729 /// *p = old ^ v
730 Xor,
731 /// *p = old >signed v ? old : v
732 Max,
733 /// *p = old <signed v ? old : v
734 Min,
735 /// *p = old >unsigned v ? old : v
736 UMax,
737 /// *p = old <unsigned v ? old : v
738 UMin,
739
740 /// *p = old + v
741 FAdd,
742
743 /// *p = old - v
744 FSub,
745
746 /// *p = maxnum(old, v)
747 /// \p maxnum matches the behavior of \p llvm.maxnum.*.
748 FMax,
749
750 /// *p = minnum(old, v)
751 /// \p minnum matches the behavior of \p llvm.minnum.*.
752 FMin,
753
754 /// Increment one up to a maximum value.
755 /// *p = (old u>= v) ? 0 : (old + 1)
756 UIncWrap,
757
758 /// Decrement one until a minimum value or zero.
759 /// *p = ((old == 0) || (old u> v)) ? v : (old - 1)
760 UDecWrap,
761
762 /// Subtract only if no unsigned overflow.
763 /// *p = (old u>= v) ? old - v : old
764 USubCond,
765
766 /// *p = usub.sat(old, v)
767 /// \p usub.sat matches the behavior of \p llvm.usub.sat.*.
768 USubSat,
769
770 FIRST_BINOP = Xchg,
771 LAST_BINOP = USubSat,
772 BAD_BINOP
773 };
774
775private:
776 template <unsigned Offset>
777 using AtomicOrderingBitfieldElement =
778 typename Bitfield::Element<AtomicOrdering, Offset, 3,
779 AtomicOrdering::LAST>;
780
781 template <unsigned Offset>
782 using BinOpBitfieldElement =
783 typename Bitfield::Element<BinOp, Offset, 5, BinOp::LAST_BINOP>;
784
785 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
786
787public:
788 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
789 AtomicOrdering Ordering, SyncScope::ID SSID,
790 InsertPosition InsertBefore = nullptr);
791
792 // allocate space for exactly two operands
793 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
794 void operator delete(void *Ptr) { User::operator delete(Ptr); }
795
796 using VolatileField = BoolBitfieldElementT<0>;
797 using AtomicOrderingField =
798 AtomicOrderingBitfieldElement<VolatileField::NextBit>;
799 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
800 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
801 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
802 OperationField, AlignmentField>(),
803 "Bitfields must be contiguous");
804
805 BinOp getOperation() const { return getSubclassData<OperationField>(); }
806
807 static StringRef getOperationName(BinOp Op);
808
809 static bool isFPOperation(BinOp Op) {
810 switch (Op) {
811 case AtomicRMWInst::FAdd:
812 case AtomicRMWInst::FSub:
813 case AtomicRMWInst::FMax:
814 case AtomicRMWInst::FMin:
815 return true;
816 default:
817 return false;
818 }
819 }
820
821 void setOperation(BinOp Operation) {
822 setSubclassData<OperationField>(Operation);
823 }
824
825 /// Return the alignment of the memory that is being allocated by the
826 /// instruction.
827 Align getAlign() const {
828 return Align(1ULL << getSubclassData<AlignmentField>());
829 }
830
831 void setAlignment(Align Align) {
832 setSubclassData<AlignmentField>(Log2(Align));
833 }
834
835 /// Return true if this is a RMW on a volatile memory location.
836 ///
837 bool isVolatile() const { return getSubclassData<VolatileField>(); }
838
839 /// Specify whether this is a volatile RMW or not.
840 ///
841 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
842
843 /// Transparently provide more efficient getOperand methods.
844 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
845
846 /// Returns the ordering constraint of this rmw instruction.
847 AtomicOrdering getOrdering() const {
848 return getSubclassData<AtomicOrderingField>();
849 }
850
851 /// Sets the ordering constraint of this rmw instruction.
852 void setOrdering(AtomicOrdering Ordering) {
853 assert(Ordering != AtomicOrdering::NotAtomic &&
854 "atomicrmw instructions can only be atomic.");
855 assert(Ordering != AtomicOrdering::Unordered &&
856 "atomicrmw instructions cannot be unordered.");
857 setSubclassData<AtomicOrderingField>(Ordering);
858 }
859
860 /// Returns the synchronization scope ID of this rmw instruction.
861 SyncScope::ID getSyncScopeID() const {
862 return SSID;
863 }
864
865 /// Sets the synchronization scope ID of this rmw instruction.
866 void setSyncScopeID(SyncScope::ID SSID) {
867 this->SSID = SSID;
868 }
869
870 Value *getPointerOperand() { return getOperand(0); }
871 const Value *getPointerOperand() const { return getOperand(0); }
872 static unsigned getPointerOperandIndex() { return 0U; }
873
874 Value *getValOperand() { return getOperand(1); }
875 const Value *getValOperand() const { return getOperand(1); }
876
877 /// Returns the address space of the pointer operand.
878 unsigned getPointerAddressSpace() const {
879 return getPointerOperand()->getType()->getPointerAddressSpace();
880 }
881
882 bool isFloatingPointOperation() const {
883 return isFPOperation(getOperation());
884 }
885
886 // Methods for support type inquiry through isa, cast, and dyn_cast:
887 static bool classof(const Instruction *I) {
888 return I->getOpcode() == Instruction::AtomicRMW;
889 }
890 static bool classof(const Value *V) {
891 return isa<Instruction>(V) && classof(cast<Instruction>(V));
892 }
893
894private:
895 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
896 AtomicOrdering Ordering, SyncScope::ID SSID);
897
898 // Shadow Instruction::setInstructionSubclassData with a private forwarding
899 // method so that subclasses cannot accidentally use it.
900 template <typename Bitfield>
901 void setSubclassData(typename Bitfield::Type Value) {
902 Instruction::setSubclassData<Bitfield>(Value);
903 }
904
905 /// The synchronization scope ID of this rmw instruction. Not quite enough
906 /// room in SubClassData for everything, so synchronization scope ID gets its
907 /// own field.
908 SyncScope::ID SSID;
909};
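// Usage sketch (illustrative, not part of this header), assuming an IRBuilder<>
// `B` plus Values `Ptr` and `Delta`:
// \code
//   AtomicRMWInst *RMW = B.CreateAtomicRMW(AtomicRMWInst::Add, Ptr, Delta, Align(4),
//                                          AtomicOrdering::Monotonic);
//   // The instruction itself is the 'old' value that was read from memory.
// \endcode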
910
911template <>
912struct OperandTraits<AtomicRMWInst>
913 : public FixedNumOperandTraits<AtomicRMWInst,2> {
914};
915
916DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
917
918//===----------------------------------------------------------------------===//
919// GetElementPtrInst Class
920//===----------------------------------------------------------------------===//
921
922// checkGEPType - Simple wrapper function to give a better assertion failure
923// message on bad indexes for a gep instruction.
924//
925inline Type *checkGEPType(Type *Ty) {
926 assert(Ty && "Invalid GetElementPtrInst indices for type!");
927 return Ty;
928}
929
930/// an instruction for type-safe pointer arithmetic to
931/// access elements of arrays and structs
932///
933class GetElementPtrInst : public Instruction {
934 Type *SourceElementType;
935 Type *ResultElementType;
936
938
939 /// Constructors - Create a getelementptr instruction with a base pointer and
940 /// a list of indices. The first and second ctor can optionally insert before an
941 /// existing instruction, the third appends the new instruction to the
942 /// specified BasicBlock.
943 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
944 ArrayRef<Value *> IdxList, AllocInfo AllocInfo,
945 const Twine &NameStr, InsertPosition InsertBefore);
946
947 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
948
949protected:
950 // Note: Instruction needs to be a friend here to call cloneImpl.
951 friend class Instruction;
952
953 GetElementPtrInst *cloneImpl() const;
954
955public:
956 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
957 ArrayRef<Value *> IdxList,
958 const Twine &NameStr = "",
959 InsertPosition InsertBefore = nullptr) {
960 unsigned Values = 1 + unsigned(IdxList.size());
961 assert(PointeeType && "Must specify element type");
962 IntrusiveOperandsAllocMarker AllocMarker{Values};
963 return new (AllocMarker) GetElementPtrInst(
964 PointeeType, Ptr, IdxList, AllocMarker, NameStr, InsertBefore);
965 }
966
967 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
968 ArrayRef<Value *> IdxList, GEPNoWrapFlags NW,
969 const Twine &NameStr = "",
970 InsertPosition InsertBefore = nullptr) {
971 GetElementPtrInst *GEP =
972 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
973 GEP->setNoWrapFlags(NW);
974 return GEP;
975 }
976
977 /// Create an "inbounds" getelementptr. See the documentation for the
978 /// "inbounds" flag in LangRef.html for details.
979 static GetElementPtrInst *
980 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
981 const Twine &NameStr = "",
982 InsertPosition InsertBefore = nullptr) {
983 return Create(PointeeType, Ptr, IdxList, GEPNoWrapFlags::inBounds(),
984 NameStr, InsertBefore);
985 }
986
987 /// Transparently provide more efficient getOperand methods.
988 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
989
990 Type *getSourceElementType() const { return SourceElementType; }
991
992 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
993 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
994
995 Type *getResultElementType() const {
996 return ResultElementType;
997 }
998
999 /// Returns the address space of this instruction's pointer type.
1000 unsigned getAddressSpace() const {
1001 // Note that this is always the same as the pointer operand's address space
1002 // and that is cheaper to compute, so cheat here.
1003 return getPointerAddressSpace();
1004 }
1005
1006 /// Returns the result type of a getelementptr with the given source
1007 /// element type and indexes.
1008 ///
1009 /// Null is returned if the indices are invalid for the specified
1010 /// source element type.
1011 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1012 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1013 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1014
1015 /// Return the type of the element at the given index of an indexable
1016 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1017 ///
1018 /// Returns null if the type can't be indexed, or the given index is not
1019 /// legal for the given type.
1020 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1021 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1022
1023 inline op_iterator idx_begin() { return op_begin()+1; }
1024 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1025 inline op_iterator idx_end() { return op_end(); }
1026 inline const_op_iterator idx_end() const { return op_end(); }
1027
1028 iterator_range<op_iterator> indices() {
1029 return make_range(idx_begin(), idx_end());
1030 }
1031
1032 iterator_range<const_op_iterator> indices() const {
1033 return make_range(idx_begin(), idx_end());
1034 }
1035
1036 Value *getPointerOperand() {
1037 return getOperand(0);
1038 }
1039 const Value *getPointerOperand() const {
1040 return getOperand(0);
1041 }
1042 static unsigned getPointerOperandIndex() {
1043 return 0U; // get index for modifying correct operand.
1044 }
1045
1046 /// Method to return the pointer operand as a
1047 /// PointerType.
1048 Type *getPointerOperandType() const {
1049 return getPointerOperand()->getType();
1050 }
1051
1052 /// Returns the address space of the pointer operand.
1053 unsigned getPointerAddressSpace() const {
1054 return getPointerOperandType()->getPointerAddressSpace();
1055 }
1056
1057 /// Returns the pointer type returned by the GEP
1058 /// instruction, which may be a vector of pointers.
1059 static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
1060 // Vector GEP
1061 Type *Ty = Ptr->getType();
1062 if (Ty->isVectorTy())
1063 return Ty;
1064
1065 for (Value *Index : IdxList)
1066 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1067 ElementCount EltCount = IndexVTy->getElementCount();
1068 return VectorType::get(Ty, EltCount);
1069 }
1070 // Scalar GEP
1071 return Ty;
1072 }
1073
1074 unsigned getNumIndices() const { // Note: always non-negative
1075 return getNumOperands() - 1;
1076 }
1077
1078 bool hasIndices() const {
1079 return getNumOperands() > 1;
1080 }
1081
1082 /// Return true if all of the indices of this GEP are
1083 /// zeros. If so, the result pointer and the first operand have the same
1084 /// value, just potentially different types.
1085 bool hasAllZeroIndices() const;
1086
1087 /// Return true if all of the indices of this GEP are
1088 /// constant integers. If so, the result pointer and the first operand have
1089 /// a constant offset between them.
1090 bool hasAllConstantIndices() const;
1091
1092 /// Set nowrap flags for GEP instruction.
1093 void setNoWrapFlags(GEPNoWrapFlags NW);
1094
1095 /// Set or clear the inbounds flag on this GEP instruction.
1096 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1097 /// TODO: Remove this method in favor of setNoWrapFlags().
1098 void setIsInBounds(bool b = true);
1099
1100 /// Get the nowrap flags for the GEP instruction.
1101 GEPNoWrapFlags getNoWrapFlags() const;
1102
1103 /// Determine whether the GEP has the inbounds flag.
1104 bool isInBounds() const;
1105
1106 /// Determine whether the GEP has the nusw flag.
1107 bool hasNoUnsignedSignedWrap() const;
1108
1109 /// Determine whether the GEP has the nuw flag.
1110 bool hasNoUnsignedWrap() const;
1111
1112 /// Accumulate the constant address offset of this GEP if possible.
1113 ///
1114 /// This routine accepts an APInt into which it will accumulate the constant
1115 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1116 /// all-constant, it returns false and the value of the offset APInt is
1117 /// undefined (it is *not* preserved!). The APInt passed into this routine
1118 /// must be at least as wide as the IntPtr type for the address space of
1119 /// the base GEP pointer.
1120 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1121 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1122 SmallMapVector<Value *, APInt, 4> &VariableOffsets,
1123 APInt &ConstantOffset) const;
1124 // Methods for support type inquiry through isa, cast, and dyn_cast:
1125 static bool classof(const Instruction *I) {
1126 return (I->getOpcode() == Instruction::GetElementPtr);
1127 }
1128 static bool classof(const Value *V) {
1129 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1130 }
1131};
1132
1133template <>
1134struct OperandTraits<GetElementPtrInst>
1135 : public VariadicOperandTraits<GetElementPtrInst> {};
1136
1137GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1138 ArrayRef<Value *> IdxList,
1139 AllocInfo AllocInfo, const Twine &NameStr,
1140 InsertPosition InsertBefore)
1141 : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr, AllocInfo,
1142 InsertBefore),
1143 SourceElementType(PointeeType),
1144 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1145 init(Ptr, IdxList, NameStr);
1146}
1147
1148DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1149
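// Usage sketch (illustrative, not part of this header), assuming an IRBuilder<>
// `B`, a struct type `StructTy`, a pointer `BasePtr`, a DataLayout `DL`, and an
// insertion point `InsertPt`:
// \code
//   Value *Idx[] = {B.getInt64(0), B.getInt32(2)};
//   GetElementPtrInst *GEP = GetElementPtrInst::CreateInBounds(
//       StructTy, BasePtr, Idx, "field.addr", InsertPt);
//   APInt Off(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
//   bool HasConstOffset = GEP->accumulateConstantOffset(DL, Off);
// \endcode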
1150//===----------------------------------------------------------------------===//
1151// ICmpInst Class
1152//===----------------------------------------------------------------------===//
1153
1154/// This instruction compares its operands according to the predicate given
1155/// to the constructor. It only operates on integers or pointers. The operands
1156/// must be identical types.
1157/// Represent an integer comparison operator.
1158class ICmpInst: public CmpInst {
1159 void AssertOK() {
1160 assert(isIntPredicate() &&
1161 "Invalid ICmp predicate value");
1162 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1163 "Both operands to ICmp instruction are not of the same type!");
1164 // Check that the operands are the right type
1165 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1166 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1167 "Invalid operand types for ICmp instruction");
1168 }
1169
1170 enum { SameSign = (1 << 0) };
1171
1172protected:
1173 // Note: Instruction needs to be a friend here to call cloneImpl.
1174 friend class Instruction;
1175
1176 /// Clone an identical ICmpInst
1177 ICmpInst *cloneImpl() const;
1178
1179public:
1180 /// Constructor with insertion semantics.
1181 ICmpInst(InsertPosition InsertBefore, ///< Where to insert
1182 Predicate pred, ///< The predicate to use for the comparison
1183 Value *LHS, ///< The left-hand-side of the expression
1184 Value *RHS, ///< The right-hand-side of the expression
1185 const Twine &NameStr = "" ///< Name of the instruction
1186 )
1187 : CmpInst(makeCmpResultType(LHS->getType()), Instruction::ICmp, pred, LHS,
1188 RHS, NameStr, InsertBefore) {
1189#ifndef NDEBUG
1190 AssertOK();
1191#endif
1192 }
1193
1194 /// Constructor with no-insertion semantics
1195 ICmpInst(
1196 Predicate pred, ///< The predicate to use for the comparison
1197 Value *LHS, ///< The left-hand-side of the expression
1198 Value *RHS, ///< The right-hand-side of the expression
1199 const Twine &NameStr = "" ///< Name of the instruction
1200 ) : CmpInst(makeCmpResultType(LHS->getType()),
1201 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1202#ifndef NDEBUG
1203 AssertOK();
1204#endif
1205 }
1206
1207 /// @returns the predicate along with samesign information.
1208 CmpPredicate getCmpPredicate() const {
1209 return {getPredicate(), hasSameSign()};
1210 }
1211
1212 /// @returns the inverse predicate along with samesign information: static
1213 /// variant.
1214 static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred) {
1215 return {getInversePredicate(Pred), Pred.hasSameSign()};
1216 }
1217
1218 /// @returns the inverse predicate along with samesign information.
1219 CmpPredicate getInverseCmpPredicate() const {
1220 return getInverseCmpPredicate(getCmpPredicate());
1221 }
1222
1223 /// @returns the swapped predicate along with samesign information: static
1224 /// variant.
1225 static CmpPredicate getSwappedCmpPredicate(CmpPredicate Pred) {
1226 return {getSwappedPredicate(Pred), Pred.hasSameSign()};
1227 }
1228
1229 /// @returns the swapped predicate along with samesign information.
1230 CmpPredicate getSwappedCmpPredicate() const {
1231 return getSwappedCmpPredicate(getCmpPredicate());
1232 }
1233
1234 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1235 /// @returns the predicate that would be the result if the operand were
1236 /// regarded as signed.
1237 /// Return the signed version of the predicate.
1238 Predicate getSignedPredicate() const {
1239 return getSignedPredicate(getPredicate());
1240 }
1241
1242 /// Return the signed version of the predicate: static variant.
1243 static Predicate getSignedPredicate(Predicate Pred);
1244
1245 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1246 /// @returns the predicate that would be the result if the operand were
1247 /// regarded as unsigned.
1248 /// Return the unsigned version of the predicate.
1249 Predicate getUnsignedPredicate() const {
1250 return getUnsignedPredicate(getPredicate());
1251 }
1252
1253 /// Return the unsigned version of the predicate: static variant.
1254 static Predicate getUnsignedPredicate(Predicate Pred);
1255
1256 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ
1257 /// @returns the unsigned version of the signed predicate pred or
1258 /// the signed version of the signed predicate pred.
1259 /// Static variant.
1260 static Predicate getFlippedSignednessPredicate(Predicate Pred);
1261
1262 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ
1263 /// @returns the unsigned version of the signed predicate pred or
1264 /// the signed version of the signed predicate pred.
1265 Predicate getFlippedSignednessPredicate() const {
1266 return getFlippedSignednessPredicate(getPredicate());
1267 }
1268
1269 void setSameSign(bool B = true) {
1270 SubclassOptionalData = (SubclassOptionalData & ~SameSign) | (B * SameSign);
1271 }
1272
1273 /// An icmp instruction, which can be marked as "samesign", indicating that
1274 /// the two operands have the same sign. This means that we can convert
1275 /// "slt" to "ult" and vice versa, which enables more optimizations.
1276 bool hasSameSign() const { return SubclassOptionalData & SameSign; }
1277
1278 /// Return true if this predicate is either EQ or NE. This also
1279 /// tests for commutativity.
1280 static bool isEquality(Predicate P) {
1281 return P == ICMP_EQ || P == ICMP_NE;
1282 }
1283
1284 /// Return true if this predicate is either EQ or NE. This also
1285 /// tests for commutativity.
1286 bool isEquality() const {
1287 return isEquality(getPredicate());
1288 }
1289
1290 /// @returns true if the predicate is commutative
1291 /// Determine if this relation is commutative.
1292 static bool isCommutative(Predicate P) { return isEquality(P); }
1293
1294 /// @returns true if the predicate of this ICmpInst is commutative
1295 /// Determine if this relation is commutative.
1296 bool isCommutative() const { return isCommutative(getPredicate()); }
1297
1298 /// Return true if the predicate is relational (not EQ or NE).
1299 ///
1300 bool isRelational() const {
1301 return !isEquality();
1302 }
1303
1304 /// Return true if the predicate is relational (not EQ or NE).
1305 ///
1306 static bool isRelational(Predicate P) {
1307 return !isEquality(P);
1308 }
1309
1310 /// Return true if the predicate is SGT or UGT.
1311 ///
1312 static bool isGT(Predicate P) {
1313 return P == ICMP_SGT || P == ICMP_UGT;
1314 }
1315
1316 /// Return true if the predicate is SLT or ULT.
1317 ///
1318 static bool isLT(Predicate P) {
1319 return P == ICMP_SLT || P == ICMP_ULT;
1320 }
1321
1322 /// Return true if the predicate is SGE or UGE.
1323 ///
1324 static bool isGE(Predicate P) {
1325 return P == ICMP_SGE || P == ICMP_UGE;
1326 }
1327
1328 /// Return true if the predicate is SLE or ULE.
1329 ///
1330 static bool isLE(Predicate P) {
1331 return P == ICMP_SLE || P == ICMP_ULE;
1332 }
1333
1334 /// Returns the sequence of all ICmp predicates.
1335 ///
1336 static auto predicates() { return ICmpPredicates(); }
1337
1338 /// Exchange the two operands to this instruction in such a way that it does
1339 /// not modify the semantics of the instruction. The predicate value may be
1340 /// changed to retain the same result if the predicate is order dependent
1341 /// (e.g. ult).
1342 /// Swap operands and adjust predicate.
1343 void swapOperands() {
1344 setPredicate(getSwappedPredicate());
1345 Op<0>().swap(Op<1>());
1346 }
1347
1348 /// Return result of `LHS Pred RHS` comparison.
1349 static bool compare(const APInt &LHS, const APInt &RHS,
1350 ICmpInst::Predicate Pred);
1351
1352 /// Return result of `LHS Pred RHS`, if it can be determined from the
1353 /// KnownBits. Otherwise return nullopt.
1354 static std::optional<bool> compare(const KnownBits &LHS, const KnownBits &RHS,
1355 ICmpInst::Predicate Pred);
1356
1357 // Methods for support type inquiry through isa, cast, and dyn_cast:
1358 static bool classof(const Instruction *I) {
1359 return I->getOpcode() == Instruction::ICmp;
1360 }
1361 static bool classof(const Value *V) {
1362 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1363 }
1364};
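// Usage sketch (illustrative, not part of this header), assuming two i32 Values
// `A` and `Bv`:
// \code
//   ICmpInst *Cmp = new ICmpInst(ICmpInst::ICMP_SLT, A, Bv, "cmp"); // no-insert form
//   Cmp->setSameSign();  // operands are known to share the same sign
//   bool ULt = ICmpInst::compare(APInt(32, 3), APInt(32, 7), ICmpInst::ICMP_ULT); // true
// \endcode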
1365
1366//===----------------------------------------------------------------------===//
1367// FCmpInst Class
1368//===----------------------------------------------------------------------===//
1369
1370/// This instruction compares its operands according to the predicate given
1371/// to the constructor. It only operates on floating point values or packed
1372/// vectors of floating point values. The operands must be identical types.
1373/// Represents a floating point comparison operator.
1374class FCmpInst: public CmpInst {
1375 void AssertOK() {
1376 assert(isFPPredicate() && "Invalid FCmp predicate value");
1377 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1378 "Both operands to FCmp instruction are not of the same type!");
1379 // Check that the operands are the right type
1380 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1381 "Invalid operand types for FCmp instruction");
1382 }
1383
1384protected:
1385 // Note: Instruction needs to be a friend here to call cloneImpl.
1386 friend class Instruction;
1387
1388 /// Clone an identical FCmpInst
1389 FCmpInst *cloneImpl() const;
1390
1391public:
1392 /// Constructor with insertion semantics.
1393 FCmpInst(InsertPosition InsertBefore, ///< Where to insert
1394 Predicate pred, ///< The predicate to use for the comparison
1395 Value *LHS, ///< The left-hand-side of the expression
1396 Value *RHS, ///< The right-hand-side of the expression
1397 const Twine &NameStr = "" ///< Name of the instruction
1398 )
1399 : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, pred, LHS,
1400 RHS, NameStr, InsertBefore) {
1401 AssertOK();
1402 }
1403
1404 /// Constructor with no-insertion semantics
1405 FCmpInst(Predicate Pred, ///< The predicate to use for the comparison
1406 Value *LHS, ///< The left-hand-side of the expression
1407 Value *RHS, ///< The right-hand-side of the expression
1408 const Twine &NameStr = "", ///< Name of the instruction
1409 Instruction *FlagsSource = nullptr)
1410 : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1411 RHS, NameStr, nullptr, FlagsSource) {
1412 AssertOK();
1413 }
1414
1415 /// @returns true if the predicate is EQ or NE.
1416 /// Determine if this is an equality predicate.
1417 static bool isEquality(Predicate Pred) {
1418 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1419 Pred == FCMP_UNE;
1420 }
1421
1422 /// @returns true if the predicate of this instruction is EQ or NE.
1423 /// Determine if this is an equality predicate.
1424 bool isEquality() const { return isEquality(getPredicate()); }
1425
1426 /// @returns true if the predicate is commutative.
1427 /// Determine if this is a commutative predicate.
1428 static bool isCommutative(Predicate Pred) {
1429 return isEquality(Pred) || Pred == FCMP_FALSE || Pred == FCMP_TRUE ||
1430 Pred == FCMP_ORD || Pred == FCMP_UNO;
1431 }
1432
1433 /// @returns true if the predicate of this instruction is commutative.
1434 /// Determine if this is a commutative predicate.
1435 bool isCommutative() const { return isCommutative(getPredicate()); }
1436
1437 /// @returns true if the predicate is relational (not EQ or NE).
1438 /// Determine if this a relational predicate.
1439 bool isRelational() const { return !isEquality(); }
1440
1441 /// Exchange the two operands to this instruction in such a way that it does
1442 /// not modify the semantics of the instruction. The predicate value may be
1443 /// changed to retain the same result if the predicate is order dependent
1444 /// (e.g. ult).
1445 /// Swap operands and adjust predicate.
1446 void swapOperands() {
1447 setPredicate(getSwappedPredicate());
1448 Op<0>().swap(Op<1>());
1449 }
1450
1451 /// Returns the sequence of all FCmp predicates.
1452 ///
1453 static auto predicates() { return FCmpPredicates(); }
1454
1455 /// Return result of `LHS Pred RHS` comparison.
1456 static bool compare(const APFloat &LHS, const APFloat &RHS,
1457 FCmpInst::Predicate Pred);
1458
1459 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1460 static bool classof(const Instruction *I) {
1461 return I->getOpcode() == Instruction::FCmp;
1462 }
1463 static bool classof(const Value *V) {
1464 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1465 }
1466};
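// Usage sketch (illustrative, not part of this header), assuming float Values
// `X` and `Y`:
// \code
//   FCmpInst *FC = new FCmpInst(FCmpInst::FCMP_OLT, X, Y, "fcmp");
//   bool Lt = FCmpInst::compare(APFloat(1.0), APFloat(2.0), FCmpInst::FCMP_OLT); // true
// \endcode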
1467
1468//===----------------------------------------------------------------------===//
1469/// This class represents a function call, abstracting a target
1470/// machine's calling convention. This class uses low bit of the SubClassData
1471/// field to indicate whether or not this is a tail call. The rest of the bits
1472/// hold the calling convention of the call.
1473///
1474class CallInst : public CallBase {
1476
1477 /// Construct a CallInst from a range of arguments
1478 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1479 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1480 AllocInfo AllocInfo, InsertPosition InsertBefore);
1481
1482 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1483 const Twine &NameStr, AllocInfo AllocInfo,
1484 InsertPosition InsertBefore)
1485 : CallInst(Ty, Func, Args, {}, NameStr, AllocInfo, InsertBefore) {}
1486
1487 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1488 AllocInfo AllocInfo, InsertPosition InsertBefore);
1489
1490 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1491 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1492 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1493
1494 /// Compute the number of operands to allocate.
1495 static unsigned ComputeNumOperands(unsigned NumArgs,
1496 unsigned NumBundleInputs = 0) {
1497 // We need one operand for the called function, plus the input operand
1498 // counts provided.
1499 return 1 + NumArgs + NumBundleInputs;
1500 }
1501
1502protected:
1503 // Note: Instruction needs to be a friend here to call cloneImpl.
1504 friend class Instruction;
1505
1506 CallInst *cloneImpl() const;
1507
1508public:
1509 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1510 InsertPosition InsertBefore = nullptr) {
1511 IntrusiveOperandsAllocMarker AllocMarker{ComputeNumOperands(0)};
1512 return new (AllocMarker)
1513 CallInst(Ty, F, NameStr, AllocMarker, InsertBefore);
1514 }
1515
1516 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1517 const Twine &NameStr,
1518 InsertPosition InsertBefore = nullptr) {
1519 IntrusiveOperandsAllocMarker AllocMarker{ComputeNumOperands(Args.size())};
1520 return new (AllocMarker)
1521 CallInst(Ty, Func, Args, {}, NameStr, AllocMarker, InsertBefore);
1522 }
1523
1524 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1525 ArrayRef<OperandBundleDef> Bundles = {},
1526 const Twine &NameStr = "",
1527 InsertPosition InsertBefore = nullptr) {
1528 IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
1529 ComputeNumOperands(unsigned(Args.size()), CountBundleInputs(Bundles)),
1530 unsigned(Bundles.size() * sizeof(BundleOpInfo))};
1531
1532 return new (AllocMarker)
1533 CallInst(Ty, Func, Args, Bundles, NameStr, AllocMarker, InsertBefore);
1534 }
1535
1536 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1537 InsertPosition InsertBefore = nullptr) {
1538 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1539 InsertBefore);
1540 }
1541
1542 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1543 ArrayRef<OperandBundleDef> Bundles = {},
1544 const Twine &NameStr = "",
1545 InsertPosition InsertBefore = nullptr) {
1546 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1547 NameStr, InsertBefore);
1548 }
1549
1550 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1551 const Twine &NameStr,
1552 InsertPosition InsertBefore = nullptr) {
1553 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1554 InsertBefore);
1555 }
1556
1557 /// Create a clone of \p CI with a different set of operand bundles and
1558 /// insert it before \p InsertBefore.
1559 ///
1560 /// The returned call instruction is identical \p CI in every way except that
1561 /// the operand bundles for the new instruction are set to the operand bundles
1562 /// in \p Bundles.
1563 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1564 InsertPosition InsertPt = nullptr);
1565
1566 // Note that 'musttail' implies 'tail'.
1567 enum TailCallKind : unsigned {
1568 TCK_None = 0,
1569 TCK_Tail = 1,
1570 TCK_MustTail = 2,
1571 TCK_NoTail = 3,
1572 TCK_LAST = TCK_NoTail
1573 };
1574
1575 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1576 static_assert(
1577 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1578 "Bitfields must be contiguous");
1579
1580 TailCallKind getTailCallKind() const {
1581 return getSubclassData<TailCallKindField>();
1582 }
1583
1584 bool isTailCall() const {
1585 TailCallKind Kind = getTailCallKind();
1586 return Kind == TCK_Tail || Kind == TCK_MustTail;
1587 }
1588
1589 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1590
1591 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1592
1593 void setTailCallKind(TailCallKind TCK) {
1594 setSubclassData<TailCallKindField>(TCK);
1595 }
1596
1597 void setTailCall(bool IsTc = true) {
1598 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1599 }
1600
1601 /// Return true if the call can return twice
1602 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1603 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1604
1605 /// Return true if the call is for a noreturn trap intrinsic.
1606 bool isNonContinuableTrap() const {
1607 switch (getIntrinsicID()) {
1608 case Intrinsic::trap:
1609 case Intrinsic::ubsantrap:
1610 return !hasFnAttr("trap-func-name");
1611 default:
1612 return false;
1613 }
1614 }
1615
1616 // Methods for support type inquiry through isa, cast, and dyn_cast:
1617 static bool classof(const Instruction *I) {
1618 return I->getOpcode() == Instruction::Call;
1619 }
1620 static bool classof(const Value *V) {
1621 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1622 }
1623
1624 /// Updates profile metadata by scaling it by \p S / \p T.
1625 void updateProfWeight(uint64_t S, uint64_t T);
1626
1627private:
1628 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1629 // method so that subclasses cannot accidentally use it.
1630 template <typename Bitfield>
1631 void setSubclassData(typename Bitfield::Type Value) {
1632 Instruction::setSubclassData<Bitfield>(Value);
1633 }
1634};
1635
1636CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1637 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1638 AllocInfo AllocInfo, InsertPosition InsertBefore)
1639 : CallBase(Ty->getReturnType(), Instruction::Call, AllocInfo,
1640 InsertBefore) {
1641 assert(AllocInfo.NumOps ==
1642 unsigned(Args.size() + CountBundleInputs(Bundles) + 1));
1643 init(Ty, Func, Args, Bundles, NameStr);
1644}
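// Usage sketch (illustrative, not part of this header), assuming a Module `M`,
// a float type `FltTy`, argument Values `X` and `Y`, and an insertion point
// `InsertPt`:
// \code
//   FunctionCallee Callee = M.getOrInsertFunction("hypotf", FltTy, FltTy, FltTy);
//   CallInst *CI = CallInst::Create(Callee, {X, Y}, "h", InsertPt);
//   CI->setTailCallKind(CallInst::TCK_Tail);
// \endcode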
1645
1646//===----------------------------------------------------------------------===//
1647// SelectInst Class
1648//===----------------------------------------------------------------------===//
1649
1650/// This class represents the LLVM 'select' instruction.
1651///
1652class SelectInst : public Instruction {
1653 constexpr static IntrusiveOperandsAllocMarker AllocMarker{3};
1654
1655 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1656 InsertPosition InsertBefore)
1657 : Instruction(S1->getType(), Instruction::Select, AllocMarker,
1658 InsertBefore) {
1659 init(C, S1, S2);
1660 setName(NameStr);
1661 }
1662
1663 void init(Value *C, Value *S1, Value *S2) {
1664 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1665 Op<0>() = C;
1666 Op<1>() = S1;
1667 Op<2>() = S2;
1668 }
1669
1670protected:
1671 // Note: Instruction needs to be a friend here to call cloneImpl.
1672 friend class Instruction;
1673
1674 SelectInst *cloneImpl() const;
1675
1676public:
1677 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1678 const Twine &NameStr = "",
1679 InsertPosition InsertBefore = nullptr,
1680 Instruction *MDFrom = nullptr) {
1681 SelectInst *Sel =
1682 new (AllocMarker) SelectInst(C, S1, S2, NameStr, InsertBefore);
1683 if (MDFrom)
1684 Sel->copyMetadata(*MDFrom);
1685 return Sel;
1686 }
1687
1688 const Value *getCondition() const { return Op<0>(); }
1689 const Value *getTrueValue() const { return Op<1>(); }
1690 const Value *getFalseValue() const { return Op<2>(); }
1691 Value *getCondition() { return Op<0>(); }
1692 Value *getTrueValue() { return Op<1>(); }
1693 Value *getFalseValue() { return Op<2>(); }
1694
1695 void setCondition(Value *V) { Op<0>() = V; }
1696 void setTrueValue(Value *V) { Op<1>() = V; }
1697 void setFalseValue(Value *V) { Op<2>() = V; }
1698
1699 /// Swap the true and false values of the select instruction.
1700 /// This doesn't swap prof metadata.
1701 void swapValues() { Op<1>().swap(Op<2>()); }
1702
1703 /// Return a string if the specified operands are invalid
1704 /// for a select operation, otherwise return null.
1705 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1706
1707 /// Transparently provide more efficient getOperand methods.
1708 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1709
1710 OtherOps getOpcode() const {
1711 return static_cast<OtherOps>(Instruction::getOpcode());
1712 }
1713
1714 // Methods for support type inquiry through isa, cast, and dyn_cast:
1715 static bool classof(const Instruction *I) {
1716 return I->getOpcode() == Instruction::Select;
1717 }
1718 static bool classof(const Value *V) {
1719 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1720 }
1721};
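// Usage sketch (illustrative, not part of this header), assuming an IRBuilder<>
// `B`, an i1 `Cond`, Values `A` and `Bv`, and an insertion point `InsertPt`:
// \code
//   SelectInst *Sel = SelectInst::Create(Cond, A, Bv, "sel", InsertPt);
//   Sel->swapValues();                    // exchange the true/false operands
//   Sel->setCondition(B.CreateNot(Cond)); // re-negate to preserve semantics
// \endcode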
1722
1723template <>
1724struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1725};
1726
1727DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
1728
1729//===----------------------------------------------------------------------===//
1730// VAArgInst Class
1731//===----------------------------------------------------------------------===//
1732
1733/// This class represents the va_arg llvm instruction, which returns
1734/// an argument of the specified type given a va_list and increments that list
1735///
1736class VAArgInst : public UnaryInstruction {
1737protected:
1738 // Note: Instruction needs to be a friend here to call cloneImpl.
1739 friend class Instruction;
1740
1741 VAArgInst *cloneImpl() const;
1742
1743public:
1744 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1745 InsertPosition InsertBefore = nullptr)
1746 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1747 setName(NameStr);
1748 }
1749
1750 Value *getPointerOperand() { return getOperand(0); }
1751 const Value *getPointerOperand() const { return getOperand(0); }
1752 static unsigned getPointerOperandIndex() { return 0U; }
1753
1754 // Methods for support type inquiry through isa, cast, and dyn_cast:
1755 static bool classof(const Instruction *I) {
1756 return I->getOpcode() == VAArg;
1757 }
1758 static bool classof(const Value *V) {
1759 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1760 }
1761};
1762
1763//===----------------------------------------------------------------------===//
1764// ExtractElementInst Class
1765//===----------------------------------------------------------------------===//
1766
1767/// This instruction extracts a single (scalar)
1768/// element from a VectorType value
1769///
1770class ExtractElementInst : public Instruction {
1771 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
1772
1773 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1774 InsertPosition InsertBefore = nullptr);
1775
1776protected:
1777 // Note: Instruction needs to be a friend here to call cloneImpl.
1778 friend class Instruction;
1779
1780 ExtractElementInst *cloneImpl() const;
1781
1782public:
1783 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1784 const Twine &NameStr = "",
1785 InsertPosition InsertBefore = nullptr) {
1786 return new (AllocMarker)
1787 ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1788 }
1789
1790 /// Return true if an extractelement instruction can be
1791 /// formed with the specified operands.
1792 static bool isValidOperands(const Value *Vec, const Value *Idx);
1793
1794 Value *getVectorOperand() { return Op<0>(); }
1795 Value *getIndexOperand() { return Op<1>(); }
1796 const Value *getVectorOperand() const { return Op<0>(); }
1797 const Value *getIndexOperand() const { return Op<1>(); }
1798
1799 VectorType *getVectorOperandType() const {
1800 return cast<VectorType>(getVectorOperand()->getType());
1801 }
1802
1803 /// Transparently provide more efficient getOperand methods.
1804 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1805
1806 // Methods for support type inquiry through isa, cast, and dyn_cast:
1807 static bool classof(const Instruction *I) {
1808 return I->getOpcode() == Instruction::ExtractElement;
1809 }
1810 static bool classof(const Value *V) {
1811 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1812 }
1813};
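// Editorial usage sketch (not part of the original header): extracting lane 0
// of a vector value `Vec`; `Ctx` and `InsertPt` are assumed.
// \code
//   Value *Idx = ConstantInt::get(Type::getInt64Ty(Ctx), 0);
//   assert(ExtractElementInst::isValidOperands(Vec, Idx));
//   Value *Elt = ExtractElementInst::Create(Vec, Idx, "elt", InsertPt);
// \endcode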
1814
1815template <>
1816struct OperandTraits<ExtractElementInst> :
1817 public FixedNumOperandTraits<ExtractElementInst, 2> {
1818};
1819
1820DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
1821
1822//===----------------------------------------------------------------------===//
1823// InsertElementInst Class
1824//===----------------------------------------------------------------------===//
1825
1826/// This instruction inserts a single (scalar)
1827/// element into a VectorType value
1828///
1829class InsertElementInst : public Instruction {
1830 constexpr static IntrusiveOperandsAllocMarker AllocMarker{3};
1831
1832 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1833 const Twine &NameStr = "",
1834 InsertPosition InsertBefore = nullptr);
1835
1836protected:
1837 // Note: Instruction needs to be a friend here to call cloneImpl.
1838 friend class Instruction;
1839
1840 InsertElementInst *cloneImpl() const;
1841
1842public:
1843 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1844 const Twine &NameStr = "",
1845 InsertPosition InsertBefore = nullptr) {
1846 return new (AllocMarker)
1847 InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1848 }
1849
1850 /// Return true if an insertelement instruction can be
1851 /// formed with the specified operands.
1852 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1853 const Value *Idx);
1854
1855 /// Overload to return most specific vector type.
1856 ///
1857 VectorType *getType() const {
1858 return cast<VectorType>(Instruction::getType());
1859 }
1860
1861 /// Transparently provide more efficient getOperand methods.
1862 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1863
1864 // Methods for support type inquiry through isa, cast, and dyn_cast:
1865 static bool classof(const Instruction *I) {
1866 return I->getOpcode() == Instruction::InsertElement;
1867 }
1868 static bool classof(const Value *V) {
1869 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1870 }
1871};
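// Editorial usage sketch (not part of the original header): overwriting lane 2
// of a vector `Vec` with a scalar `Scalar` of the element type; `Ctx` and
// `InsertPt` are assumed.
// \code
//   Value *Lane = ConstantInt::get(Type::getInt64Ty(Ctx), 2);
//   assert(InsertElementInst::isValidOperands(Vec, Scalar, Lane));
//   Value *NewVec = InsertElementInst::Create(Vec, Scalar, Lane, "vec.upd", InsertPt);
// \endcode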
1872
1873template <>
1874struct OperandTraits<InsertElementInst> :
1875 public FixedNumOperandTraits<InsertElementInst, 3> {
1876};
1877
1878DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
1879
1880//===----------------------------------------------------------------------===//
1881// ShuffleVectorInst Class
1882//===----------------------------------------------------------------------===//
1883
1884constexpr int PoisonMaskElem = -1;
1885
1886/// This instruction constructs a fixed permutation of two
1887/// input vectors.
1888///
1889/// For each element of the result vector, the shuffle mask selects an element
1890/// from one of the input vectors to copy to the result. Non-negative elements
1891/// in the mask represent an index into the concatenated pair of input vectors.
1892/// PoisonMaskElem (-1) specifies that the result element is poison.
1893///
1894/// For scalable vectors, all the elements of the mask must be 0 or -1. This
1895/// requirement may be relaxed in the future.
1896class ShuffleVectorInst : public Instruction {
1897 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
1898
1899 SmallVector<int, 4> ShuffleMask;
1900 Constant *ShuffleMaskForBitcode;
1901
1902protected:
1903 // Note: Instruction needs to be a friend here to call cloneImpl.
1904 friend class Instruction;
1905
1906 ShuffleVectorInst *cloneImpl() const;
1907
1908public:
1909 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
1910 InsertPosition InsertBefore = nullptr);
1911 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
1912 InsertPosition InsertBefore = nullptr);
1913 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1914 const Twine &NameStr = "",
1915 InsertPosition InsertBefore = nullptr);
1916 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
1917 const Twine &NameStr = "",
1918 InsertPosition InsertBefore = nullptr);
1919
1920 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
1921 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
1922
1923 /// Swap the operands and adjust the mask to preserve the semantics
1924 /// of the instruction.
1925 void commute();
1926
1927 /// Return true if a shufflevector instruction can be
1928 /// formed with the specified operands.
1929 static bool isValidOperands(const Value *V1, const Value *V2,
1930 const Value *Mask);
1931 static bool isValidOperands(const Value *V1, const Value *V2,
1932 ArrayRef<int> Mask);
1933
1934 /// Overload to return most specific vector type.
1935 ///
1936 VectorType *getType() const {
1937 return cast<VectorType>(Instruction::getType());
1938 }
1939
1940 /// Transparently provide more efficient getOperand methods.
1941 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1942
1943 /// Return the shuffle mask value of this instruction for the given element
1944 /// index. Return PoisonMaskElem if the element is undef.
1945 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
1946
1947 /// Convert the input shuffle mask operand to a vector of integers. Undefined
1948 /// elements of the mask are returned as PoisonMaskElem.
1949 static void getShuffleMask(const Constant *Mask,
1950 SmallVectorImpl<int> &Result);
1951
1952 /// Return the mask for this instruction as a vector of integers. Undefined
1953 /// elements of the mask are returned as PoisonMaskElem.
1954 void getShuffleMask(SmallVectorImpl<int> &Result) const {
1955 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
1956 }
1957
1958 /// Return the mask for this instruction, for use in bitcode.
1959 ///
1960 /// TODO: This is temporary until we decide a new bitcode encoding for
1961 /// shufflevector.
1962 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
1963
1964 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
1965 Type *ResultTy);
1966
1967 void setShuffleMask(ArrayRef<int> Mask);
1968
1969 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
1970
1971 /// Return true if this shuffle returns a vector with a different number of
1972 /// elements than its source vectors.
1973 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
1974 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
1975 bool changesLength() const {
1976 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
1977 ->getElementCount()
1978 .getKnownMinValue();
1979 unsigned NumMaskElts = ShuffleMask.size();
1980 return NumSourceElts != NumMaskElts;
1981 }
1982
1983 /// Return true if this shuffle returns a vector with a greater number of
1984 /// elements than its source vectors.
1985 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
1986 bool increasesLength() const {
1987 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
1988 ->getElementCount()
1989 .getKnownMinValue();
1990 unsigned NumMaskElts = ShuffleMask.size();
1991 return NumSourceElts < NumMaskElts;
1992 }
1993
1994 /// Return true if this shuffle mask chooses elements from exactly one source
1995 /// vector.
1996 /// Example: <7,5,undef,7>
1997 /// This assumes that vector operands (of length \p NumSrcElts) are the same
1998 /// length as the mask.
1999 static bool isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts);
2000 static bool isSingleSourceMask(const Constant *Mask, int NumSrcElts) {
2001 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2002 SmallVector<int, 16> MaskAsInts;
2003 getShuffleMask(Mask, MaskAsInts);
2004 return isSingleSourceMask(MaskAsInts, NumSrcElts);
2005 }
2006
2007 /// Return true if this shuffle chooses elements from exactly one source
2008 /// vector without changing the length of that vector.
2009 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2010 /// TODO: Optionally allow length-changing shuffles.
2011 bool isSingleSource() const {
2012 return !changesLength() &&
2013 isSingleSourceMask(ShuffleMask, ShuffleMask.size());
2014 }
2015
2016 /// Return true if this shuffle mask chooses elements from exactly one source
2017 /// vector without lane crossings. A shuffle using this mask is not
2018 /// necessarily a no-op because it may change the number of elements from its
2019 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2020 /// Example: <undef,undef,2,3>
2021 static bool isIdentityMask(ArrayRef<int> Mask, int NumSrcElts);
2022 static bool isIdentityMask(const Constant *Mask, int NumSrcElts) {
2023 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2024
2025 // Not possible to express a shuffle mask for a scalable vector for this
2026 // case.
2027 if (isa<ScalableVectorType>(Mask->getType()))
2028 return false;
2029
2030 SmallVector<int, 16> MaskAsInts;
2031 getShuffleMask(Mask, MaskAsInts);
2032 return isIdentityMask(MaskAsInts, NumSrcElts);
2033 }
2034
2035 /// Return true if this shuffle chooses elements from exactly one source
2036 /// vector without lane crossings and does not change the number of elements
2037 /// from its input vectors.
2038 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2039 bool isIdentity() const {
2040 // Not possible to express a shuffle mask for a scalable vector for this
2041 // case.
2042 if (isa<ScalableVectorType>(getType()))
2043 return false;
2044
2045 return !changesLength() && isIdentityMask(ShuffleMask, ShuffleMask.size());
2046 }
2047
2048 /// Return true if this shuffle lengthens exactly one source vector with
2049 /// undefs in the high elements.
2050 bool isIdentityWithPadding() const;
2051
2052 /// Return true if this shuffle extracts the first N elements of exactly one
2053 /// source vector.
2054 bool isIdentityWithExtract() const;
2055
2056 /// Return true if this shuffle concatenates its 2 source vectors. This
2057 /// returns false if either input is undefined. In that case, the shuffle
2058 /// is better classified as an identity with padding operation.
2059 bool isConcat() const;
2060
2061 /// Return true if this shuffle mask chooses elements from its source vectors
2062 /// without lane crossings. A shuffle using this mask would be
2063 /// equivalent to a vector select with a constant condition operand.
2064 /// Example: <4,1,6,undef>
2065 /// This returns false if the mask does not choose from both input vectors.
2066 /// In that case, the shuffle is better classified as an identity shuffle.
2067 /// This assumes that vector operands are the same length as the mask
2068 /// (a length-changing shuffle can never be equivalent to a vector select).
2069 static bool isSelectMask(ArrayRef<int> Mask, int NumSrcElts);
2070 static bool isSelectMask(const Constant *Mask, int NumSrcElts) {
2071 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2072 SmallVector<int, 16> MaskAsInts;
2073 getShuffleMask(Mask, MaskAsInts);
2074 return isSelectMask(MaskAsInts, NumSrcElts);
2075 }
2076
2077 /// Return true if this shuffle chooses elements from its source vectors
2078 /// without lane crossings and all operands have the same number of elements.
2079 /// In other words, this shuffle is equivalent to a vector select with a
2080 /// constant condition operand.
2081 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2082 /// This returns false if the mask does not choose from both input vectors.
2083 /// In that case, the shuffle is better classified as an identity shuffle.
2084 /// TODO: Optionally allow length-changing shuffles.
2085 bool isSelect() const {
2086 return !changesLength() && isSelectMask(ShuffleMask, ShuffleMask.size());
2087 }
2088
2089 /// Return true if this shuffle mask swaps the order of elements from exactly
2090 /// one source vector.
2091 /// Example: <7,6,undef,4>
2092 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2093 /// length as the mask.
2094 static bool isReverseMask(ArrayRef<int> Mask, int NumSrcElts);
2095 static bool isReverseMask(const Constant *Mask, int NumSrcElts) {
2096 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2097 SmallVector<int, 16> MaskAsInts;
2098 getShuffleMask(Mask, MaskAsInts);
2099 return isReverseMask(MaskAsInts, NumSrcElts);
2100 }
2101
2102 /// Return true if this shuffle swaps the order of elements from exactly
2103 /// one source vector.
2104 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2105 /// TODO: Optionally allow length-changing shuffles.
2106 bool isReverse() const {
2107 return !changesLength() && isReverseMask(ShuffleMask, ShuffleMask.size());
2108 }
2109
2110 /// Return true if this shuffle mask chooses all elements with the same value
2111 /// as the first element of exactly one source vector.
2112 /// Example: <4,undef,undef,4>
2113 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2114 /// length as the mask.
2115 static bool isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts);
2116 static bool isZeroEltSplatMask(const Constant *Mask, int NumSrcElts) {
2117 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2118 SmallVector<int, 16> MaskAsInts;
2119 getShuffleMask(Mask, MaskAsInts);
2120 return isZeroEltSplatMask(MaskAsInts, NumSrcElts);
2121 }
2122
2123 /// Return true if all elements of this shuffle are the same value as the
2124 /// first element of exactly one source vector without changing the length
2125 /// of that vector.
2126 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2127 /// TODO: Optionally allow length-changing shuffles.
2128 /// TODO: Optionally allow splats from other elements.
2129 bool isZeroEltSplat() const {
2130 return !changesLength() &&
2131 isZeroEltSplatMask(ShuffleMask, ShuffleMask.size());
2132 }
2133
2134 /// Return true if this shuffle mask is a transpose mask.
2135 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2136 /// even- or odd-numbered vector elements from two n-dimensional source
2137 /// vectors and write each result into consecutive elements of an
2138 /// n-dimensional destination vector. Two shuffles are necessary to complete
2139 /// the transpose, one for the even elements and another for the odd elements.
2140 /// This description closely follows how the TRN1 and TRN2 AArch64
2141 /// instructions operate.
2142 ///
2143 /// For example, a simple 2x2 matrix can be transposed with:
2144 ///
2145 /// ; Original matrix
2146 /// m0 = < a, b >
2147 /// m1 = < c, d >
2148 ///
2149 /// ; Transposed matrix
2150 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2151 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2152 ///
2153 /// For matrices having greater than n columns, the resulting nx2 transposed
2154 /// matrix is stored in two result vectors such that one vector contains
2155 /// interleaved elements from all the even-numbered rows and the other vector
2156 /// contains interleaved elements from all the odd-numbered rows. For example,
2157 /// a 2x4 matrix can be transposed with:
2158 ///
2159 /// ; Original matrix
2160 /// m0 = < a, b, c, d >
2161 /// m1 = < e, f, g, h >
2162 ///
2163 /// ; Transposed matrix
2164 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2165 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2166 static bool isTransposeMask(ArrayRef<int> Mask, int NumSrcElts);
2167 static bool isTransposeMask(const Constant *Mask, int NumSrcElts) {
2168 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2169 SmallVector<int, 16> MaskAsInts;
2170 getShuffleMask(Mask, MaskAsInts);
2171 return isTransposeMask(MaskAsInts, NumSrcElts);
2172 }
2173
2174 /// Return true if this shuffle transposes the elements of its inputs without
2175 /// changing the length of the vectors. This operation may also be known as a
2176 /// merge or interleave. See the description for isTransposeMask() for the
2177 /// exact specification.
2178 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2179 bool isTranspose() const {
2180 return !changesLength() && isTransposeMask(ShuffleMask, ShuffleMask.size());
2181 }
2182
2183 /// Return true if this shuffle mask is a splice mask, concatenating the two
2184 /// inputs together and then extracting an original-width vector starting from
2185 /// the splice index.
2186 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2187 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2188 /// length as the mask.
2189 static bool isSpliceMask(ArrayRef<int> Mask, int NumSrcElts, int &Index);
2190 static bool isSpliceMask(const Constant *Mask, int NumSrcElts, int &Index) {
2191 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2192 SmallVector<int, 16> MaskAsInts;
2193 getShuffleMask(Mask, MaskAsInts);
2194 return isSpliceMask(MaskAsInts, NumSrcElts, Index);
2195 }
2196
2197 /// Return true if this shuffle splices two inputs without changing the length
2198 /// of the vectors. This operation concatenates the two inputs together and
2199 /// then extracts an original width vector starting from the splice index.
2200 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2201 bool isSplice(int &Index) const {
2202 return !changesLength() &&
2203 isSpliceMask(ShuffleMask, ShuffleMask.size(), Index);
2204 }
2205
2206 /// Return true if this shuffle mask is an extract subvector mask.
2207 /// A valid extract subvector mask returns a smaller vector from a single
2208 /// source operand. The base extraction index is returned as well.
2209 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2210 int &Index);
2211 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2212 int &Index) {
2213 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2214 // Not possible to express a shuffle mask for a scalable vector for this
2215 // case.
2216 if (isa<ScalableVectorType>(Mask->getType()))
2217 return false;
2218 SmallVector<int, 16> MaskAsInts;
2219 getShuffleMask(Mask, MaskAsInts);
2220 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2221 }
2222
2223 /// Return true if this shuffle mask is an extract subvector mask.
2224 bool isExtractSubvectorMask(int &Index) const {
2225 // Not possible to express a shuffle mask for a scalable vector for this
2226 // case.
2227 if (isa<ScalableVectorType>(getType()))
2228 return false;
2229
2230 int NumSrcElts =
2231 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2232 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2233 }
2234
2235 /// Return true if this shuffle mask is an insert subvector mask.
2236 /// A valid insert subvector mask inserts the lowest elements of a second
2237 /// source operand into an in-place first source operand.
2238 /// Both the subvector width and the insertion index are returned.
2239 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2240 int &NumSubElts, int &Index);
2241 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2242 int &NumSubElts, int &Index) {
2243 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2244 // Not possible to express a shuffle mask for a scalable vector for this
2245 // case.
2246 if (isa<ScalableVectorType>(Mask->getType()))
2247 return false;
2248 SmallVector<int, 16> MaskAsInts;
2249 getShuffleMask(Mask, MaskAsInts);
2250 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2251 }
2252
2253 /// Return true if this shuffle mask is an insert subvector mask.
2254 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2255 // Not possible to express a shuffle mask for a scalable vector for this
2256 // case.
2257 if (isa<ScalableVectorType>(getType()))
2258 return false;
2259
2260 int NumSrcElts =
2261 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2262 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2263 }
2264
2265 /// Return true if this shuffle mask replicates each of the \p VF elements
2266 /// in a vector \p ReplicationFactor times.
2267 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2268 /// <0,0,0,1,1,1,2,2,2,3,3,3>
2269 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2270 int &VF);
2271 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2272 int &VF) {
2273 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2274 // Not possible to express a shuffle mask for a scalable vector for this
2275 // case.
2276 if (isa<ScalableVectorType>(Mask->getType()))
2277 return false;
2278 SmallVector<int, 16> MaskAsInts;
2279 getShuffleMask(Mask, MaskAsInts);
2280 return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2281 }
2282
2283 /// Return true if this shuffle mask is a replication mask.
2284 bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2285
2286 /// Return true if this shuffle mask represents a "clustered" mask of size VF,
2287 /// i.e. each index between [0..VF) is used exactly once in each submask of
2288 /// size VF.
2289 /// For example, the mask for \p VF=4 is:
2290 /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4
2291 /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time.
2292 /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because
2293 /// element 3 is used twice in the second submask
2294 /// (3,3,1,0) and index 2 is not used at all.
2295 static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF);
2296
2297 /// Return true if this shuffle mask is a one-use-single-source("clustered")
2298 /// mask.
2299 bool isOneUseSingleSourceMask(int VF) const;
2300
2301 /// Change values in a shuffle permute mask assuming the two vector operands
2302 /// of length InVecNumElts have swapped position.
2303 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2304 unsigned InVecNumElts) {
2305 for (int &Idx : Mask) {
2306 if (Idx == -1)
2307 continue;
2308 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2309 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2310 "shufflevector mask index out of range");
2311 }
2312 }
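 // Editorial worked example (not part of the original header): with
 // InVecNumElts == 4, commuteShuffleMask rewrites the mask <0,5,2,7> into
 // <4,1,6,3>; every index is retargeted at the other (now swapped) input
 // vector, while -1 (poison) entries are left untouched.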
2313
2314 /// Return if this shuffle interleaves its two input vectors together.
2315 bool isInterleave(unsigned Factor);
2316
2317 /// Return true if the mask interleaves one or more input vectors together.
2318 ///
2319 /// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...>
2320 /// E.g. For a Factor of 2 (LaneLen=4):
2321 /// <0, 4, 1, 5, 2, 6, 3, 7>
2322 /// E.g. For a Factor of 3 (LaneLen=4):
2323 /// <4, 0, 9, 5, 1, 10, 6, 2, 11, 7, 3, 12>
2324 /// E.g. For a Factor of 4 (LaneLen=2):
2325 /// <0, 2, 6, 4, 1, 3, 7, 5>
2326 ///
2327 /// NumInputElts is the total number of elements in the input vectors.
2328 ///
2329 /// StartIndexes are the first indexes of each vector being interleaved,
2330 /// substituting any indexes that were undef
2331 /// E.g. <4, -1, 2, 5, 1, 3> (Factor=3): StartIndexes=<4, 0, 2>
2332 ///
2333 /// Note that this does not check if the input vectors are consecutive:
2334 /// It will return true for masks such as
2335 /// <0, 4, 6, 1, 5, 7> (Factor=3, LaneLen=2)
2336 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
2337 unsigned NumInputElts,
2338 SmallVectorImpl<unsigned> &StartIndexes);
2339 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
2340 unsigned NumInputElts) {
2341 SmallVector<unsigned, 8> StartIndexes;
2342 return isInterleaveMask(Mask, Factor, NumInputElts, StartIndexes);
2343 }
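 // Editorial worked example (not part of the original header): for the
 // Factor=2 mask <0,4,1,5,2,6,3,7> over two 4-element inputs
 // (NumInputElts == 8), isInterleaveMask returns true and StartIndexes is
 // expected to come back as <0,4>, the first index used from each input.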
2344
2345 /// Check if the mask is a DE-interleave mask of the given factor
2346 /// \p Factor like:
2347 /// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
2348 static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor,
2349 unsigned &Index);
2350 static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor) {
2351 unsigned Unused;
2352 return isDeInterleaveMaskOfFactor(Mask, Factor, Unused);
2353 }
2354
2355 /// Checks if the shuffle is a bit rotation of the first operand across
2356 /// multiple subelements, e.g:
2357 ///
2358 /// shuffle <8 x i8> %a, <8 x i8> poison, <8 x i32> <1, 0, 3, 2, 5, 4, 7, 6>
2359 ///
2360 /// could be expressed as
2361 ///
2362 /// rotl <4 x i16> %a, 8
2363 ///
2364 /// If it can be expressed as a rotation, returns the number of subelements to
2365 /// group by in NumSubElts and the number of bits to rotate left in RotateAmt.
2366 static bool isBitRotateMask(ArrayRef<int> Mask, unsigned EltSizeInBits,
2367 unsigned MinSubElts, unsigned MaxSubElts,
2368 unsigned &NumSubElts, unsigned &RotateAmt);
2369
2370 // Methods for support type inquiry through isa, cast, and dyn_cast:
2371 static bool classof(const Instruction *I) {
2372 return I->getOpcode() == Instruction::ShuffleVector;
2373 }
2374 static bool classof(const Value *V) {
2375 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2376 }
2377};
2378
2379template <>
2380struct OperandTraits<ShuffleVectorInst>
2381 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2382
2383DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
2384
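// Editorial usage sketch (not part of the original header): interleaving the
// low halves of two <4 x i32> values `A` and `B`, then querying the mask;
// `InsertPt` is assumed.
// \code
//   int Mask[] = {0, 4, 1, 5};
//   assert(ShuffleVectorInst::isValidOperands(A, B, Mask));
//   auto *Shuf = new ShuffleVectorInst(A, B, Mask, "interleave", InsertPt);
//   assert(!Shuf->changesLength() && !Shuf->isSingleSource());
//   assert(ShuffleVectorInst::isInterleaveMask(Shuf->getShuffleMask(), 2, 8));
// \endcode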
2385//===----------------------------------------------------------------------===//
2386// ExtractValueInst Class
2387//===----------------------------------------------------------------------===//
2388
2389/// This instruction extracts a struct member or array
2390/// element value from an aggregate value.
2391///
2392class ExtractValueInst : public UnaryInstruction {
2393 SmallVector<unsigned, 4> Indices;
2394
2395 ExtractValueInst(const ExtractValueInst &EVI);
2396
2397 /// Constructors - Create an extractvalue instruction with a base aggregate
2398 /// value and a list of indices. The first and second ctor can optionally
2399 /// insert before an existing instruction, the third appends the new
2400 /// instruction to the specified BasicBlock.
2401 inline ExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
2402 const Twine &NameStr, InsertPosition InsertBefore);
2403
2404 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2405
2406protected:
2407 // Note: Instruction needs to be a friend here to call cloneImpl.
2408 friend class Instruction;
2409
2410 ExtractValueInst *cloneImpl() const;
2411
2412public:
2413 static ExtractValueInst *Create(Value *Agg, ArrayRef<unsigned> Idxs,
2414 const Twine &NameStr = "",
2415 InsertPosition InsertBefore = nullptr) {
2416 return new
2417 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2418 }
2419
2420 /// Returns the type of the element that would be extracted
2421 /// with an extractvalue instruction with the specified parameters.
2422 ///
2423 /// Null is returned if the indices are invalid for the specified type.
2424 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2425
2426 using idx_iterator = const unsigned*;
2427
2428 inline idx_iterator idx_begin() const { return Indices.begin(); }
2429 inline idx_iterator idx_end() const { return Indices.end(); }
2430 inline iterator_range<idx_iterator> indices() const {
2431 return make_range(idx_begin(), idx_end());
2432 }
2433
2434 Value *getAggregateOperand() {
2435 return getOperand(0);
2436 }
2437 const Value *getAggregateOperand() const {
2438 return getOperand(0);
2439 }
2440 static unsigned getAggregateOperandIndex() {
2441 return 0U; // get index for modifying correct operand
2442 }
2443
2444 ArrayRef<unsigned> getIndices() const {
2445 return Indices;
2446 }
2447
2448 unsigned getNumIndices() const {
2449 return (unsigned)Indices.size();
2450 }
2451
2452 bool hasIndices() const {
2453 return true;
2454 }
2455
2456 // Methods for support type inquiry through isa, cast, and dyn_cast:
2457 static bool classof(const Instruction *I) {
2458 return I->getOpcode() == Instruction::ExtractValue;
2459 }
2460 static bool classof(const Value *V) {
2461 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2462 }
2463};
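// Editorial usage sketch (not part of the original header): reading field 1
// out of a value `Agg` of type {i32, float}; `InsertPt` is assumed.
// \code
//   unsigned Idxs[] = {1};
//   assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs));
//   Value *Field = ExtractValueInst::Create(Agg, Idxs, "field", InsertPt);
// \endcode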
2464
2465ExtractValueInst::ExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
2466 const Twine &NameStr,
2467 InsertPosition InsertBefore)
2468 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2469 ExtractValue, Agg, InsertBefore) {
2470 init(Idxs, NameStr);
2471}
2472
2473//===----------------------------------------------------------------------===//
2474// InsertValueInst Class
2475//===----------------------------------------------------------------------===//
2476
2477 /// This instruction inserts a struct member or array element
2478/// value into an aggregate value.
2479///
2480class InsertValueInst : public Instruction {
2481 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
2482
2483 SmallVector<unsigned, 4> Indices;
2484
2485 InsertValueInst(const InsertValueInst &IVI);
2486
2487 /// Constructors - Create an insertvalue instruction with a base aggregate
2488 /// value, a value to insert, and a list of indices. The first and second ctor
2489 /// can optionally insert before an existing instruction, the third appends
2490 /// the new instruction to the specified BasicBlock.
2491 inline InsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2492 const Twine &NameStr, InsertPosition InsertBefore);
2493
2494 /// Constructors - These three constructors are convenience methods because
2495 /// one and two index insertvalue instructions are so common.
2496 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2497 const Twine &NameStr = "",
2498 InsertPosition InsertBefore = nullptr);
2499
2500 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2501 const Twine &NameStr);
2502
2503protected:
2504 // Note: Instruction needs to be a friend here to call cloneImpl.
2505 friend class Instruction;
2506
2507 InsertValueInst *cloneImpl() const;
2508
2509public:
2510 // allocate space for exactly two operands
2511 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
2512 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2513
2514 static InsertValueInst *Create(Value *Agg, Value *Val,
2515 ArrayRef<unsigned> Idxs,
2516 const Twine &NameStr = "",
2517 InsertPosition InsertBefore = nullptr) {
2518 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2519 }
2520
2521 /// Transparently provide more efficient getOperand methods.
2522 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2523
2524 using idx_iterator = const unsigned*;
2525
2526 inline idx_iterator idx_begin() const { return Indices.begin(); }
2527 inline idx_iterator idx_end() const { return Indices.end(); }
2528 inline iterator_range<idx_iterator> indices() const {
2529 return make_range(idx_begin(), idx_end());
2530 }
2531
2532 Value *getAggregateOperand() {
2533 return getOperand(0);
2534 }
2535 const Value *getAggregateOperand() const {
2536 return getOperand(0);
2537 }
2538 static unsigned getAggregateOperandIndex() {
2539 return 0U; // get index for modifying correct operand
2540 }
2541
2542 Value *getInsertedValueOperand() {
2543 return getOperand(1);
2544 }
2545 const Value *getInsertedValueOperand() const {
2546 return getOperand(1);
2547 }
2548 static unsigned getInsertedValueOperandIndex() {
2549 return 1U; // get index for modifying correct operand
2550 }
2551
2552 ArrayRef<unsigned> getIndices() const {
2553 return Indices;
2554 }
2555
2556 unsigned getNumIndices() const {
2557 return (unsigned)Indices.size();
2558 }
2559
2560 bool hasIndices() const {
2561 return true;
2562 }
2563
2564 // Methods for support type inquiry through isa, cast, and dyn_cast:
2565 static bool classof(const Instruction *I) {
2566 return I->getOpcode() == Instruction::InsertValue;
2567 }
2568 static bool classof(const Value *V) {
2569 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2570 }
2571};
2572
2573template <>
2574struct OperandTraits<InsertValueInst> :
2575 public FixedNumOperandTraits<InsertValueInst, 2> {
2576};
2577
2578InsertValueInst::InsertValueInst(Value *Agg, Value *Val,
2579 ArrayRef<unsigned> Idxs, const Twine &NameStr,
2580 InsertPosition InsertBefore)
2581 : Instruction(Agg->getType(), InsertValue, AllocMarker, InsertBefore) {
2582 init(Agg, Val, Idxs, NameStr);
2583}
2584
2585DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
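// Editorial usage sketch (not part of the original header): populating a
// two-field aggregate; `StructTy` ({i32, float}), `I32Val`, `FloatVal`, and
// `InsertPt` are assumed.
// \code
//   Value *Agg = PoisonValue::get(StructTy);
//   Agg = InsertValueInst::Create(Agg, I32Val, {0}, "agg.0", InsertPt);
//   Agg = InsertValueInst::Create(Agg, FloatVal, {1}, "agg.1", InsertPt);
// \endcode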
2586
2587//===----------------------------------------------------------------------===//
2588// PHINode Class
2589//===----------------------------------------------------------------------===//
2590
2591// PHINode - The PHINode class is used to represent the magical mystical PHI
2592// node, that can not exist in nature, but can be synthesized in a computer
2593// scientist's overactive imagination.
2594//
2595class PHINode : public Instruction {
2596 constexpr static HungOffOperandsAllocMarker AllocMarker{};
2597
2598 /// The number of operands actually allocated. NumOperands is
2599 /// the number actually in use.
2600 unsigned ReservedSpace;
2601
2602 PHINode(const PHINode &PN);
2603
2604 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2605 const Twine &NameStr = "",
2606 InsertPosition InsertBefore = nullptr)
2607 : Instruction(Ty, Instruction::PHI, AllocMarker, InsertBefore),
2608 ReservedSpace(NumReservedValues) {
2609 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2610 setName(NameStr);
2611 allocHungoffUses(ReservedSpace);
2612 }
2613
2614protected:
2615 // Note: Instruction needs to be a friend here to call cloneImpl.
2616 friend class Instruction;
2617
2618 PHINode *cloneImpl() const;
2619
2620 // allocHungoffUses - this is more complicated than the generic
2621 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2622 // values and pointers to the incoming blocks, all in one allocation.
2623 void allocHungoffUses(unsigned N) {
2624 User::allocHungoffUses(N, /* IsPhi */ true);
2625 }
2626
2627public:
2628 /// Constructors - NumReservedValues is a hint for the number of incoming
2629 /// edges that this phi node will have (use 0 if you really have no idea).
2630 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2631 const Twine &NameStr = "",
2632 InsertPosition InsertBefore = nullptr) {
2633 return new (AllocMarker)
2634 PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2635 }
2636
2637 /// Provide fast operand accessors
2638 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2639
2640 // Block iterator interface. This provides access to the list of incoming
2641 // basic blocks, which parallels the list of incoming values.
2642 // Please note that we are not providing non-const iterators for blocks to
2643 // force all updates go through an interface function.
2644
2645 using block_iterator = BasicBlock **;
2646 using const_block_iterator = BasicBlock * const *;
2647
2648 const_block_iterator block_begin() const {
2649 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2650 }
2651
2652 const_block_iterator block_end() const {
2653 return block_begin() + getNumOperands();
2654 }
2655
2656 iterator_range<const_block_iterator> blocks() const {
2657 return make_range(block_begin(), block_end());
2658 }
2659
2660 op_range incoming_values() { return operands(); }
2661
2662 const_op_range incoming_values() const { return operands(); }
2663
2664 /// Return the number of incoming edges
2665 ///
2666 unsigned getNumIncomingValues() const { return getNumOperands(); }
2667
2668 /// Return incoming value number x
2669 ///
2670 Value *getIncomingValue(unsigned i) const {
2671 return getOperand(i);
2672 }
2673 void setIncomingValue(unsigned i, Value *V) {
2674 assert(V && "PHI node got a null value!");
2675 assert(getType() == V->getType() &&
2676 "All operands to PHI node must be the same type as the PHI node!");
2677 setOperand(i, V);
2678 }
2679
2680 static unsigned getOperandNumForIncomingValue(unsigned i) {
2681 return i;
2682 }
2683
2684 static unsigned getIncomingValueNumForOperand(unsigned i) {
2685 return i;
2686 }
2687
2688 /// Return incoming basic block number @p i.
2689 ///
2690 BasicBlock *getIncomingBlock(unsigned i) const {
2691 return block_begin()[i];
2692 }
2693
2694 /// Return incoming basic block corresponding
2695 /// to an operand of the PHI.
2696 ///
2697 BasicBlock *getIncomingBlock(const Use &U) const {
2698 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2699 return getIncomingBlock(unsigned(&U - op_begin()));
2700 }
2701
2702 /// Return incoming basic block corresponding
2703 /// to value use iterator.
2704 ///
2705 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2706 return getIncomingBlock(I.getUse());
2707 }
2708
2709 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2710 const_cast<block_iterator>(block_begin())[i] = BB;
2711 }
2712
2713 /// Copies the basic blocks from \p BBRange to the incoming basic block list
2714 /// of this PHINode, starting at \p ToIdx.
2715 void copyIncomingBlocks(iterator_range<const_block_iterator> BBRange,
2716 uint32_t ToIdx = 0) {
2717 copy(BBRange, const_cast<block_iterator>(block_begin()) + ToIdx);
2718 }
2719
2720 /// Replace every incoming basic block \p Old to basic block \p New.
2721 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2722 assert(New && Old && "PHI node got a null basic block!");
2723 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2724 if (getIncomingBlock(Op) == Old)
2725 setIncomingBlock(Op, New);
2726 }
2727
2728 /// Add an incoming value to the end of the PHI list
2729 ///
2730 void addIncoming(Value *V, BasicBlock *BB) {
2731 if (getNumOperands() == ReservedSpace)
2732 growOperands(); // Get more space!
2733 // Initialize some new operands.
2734 setNumHungOffUseOperands(getNumOperands() + 1);
2735 setIncomingValue(getNumOperands() - 1, V);
2736 setIncomingBlock(getNumOperands() - 1, BB);
2737 }
2738
2739 /// Remove an incoming value. This is useful if a
2740 /// predecessor basic block is deleted. The value removed is returned.
2741 ///
2742 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2743 /// is true), the PHI node is destroyed and any uses of it are replaced with
2744 /// dummy values. The only time there should be zero incoming values to a PHI
2745 /// node is when the block is dead, so this strategy is sound.
2746 ///
2747 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2748
2749 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2750 int Idx = getBasicBlockIndex(BB);
2751 assert(Idx >= 0 && "Invalid basic block argument to remove!");
2752 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2753 }
2754
2755 /// Remove all incoming values for which the predicate returns true.
2756 /// The predicate accepts the incoming value index.
2757 void removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
2758 bool DeletePHIIfEmpty = true);
2759
2760 /// Return the first index of the specified basic
2761 /// block in the value list for this PHI. Returns -1 if no instance.
2762 ///
2763 int getBasicBlockIndex(const BasicBlock *BB) const {
2764 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2765 if (block_begin()[i] == BB)
2766 return i;
2767 return -1;
2768 }
2769
2770 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2771 int Idx = getBasicBlockIndex(BB);
2772 assert(Idx >= 0 && "Invalid basic block argument!");
2773 return getIncomingValue(Idx);
2774 }
2775
2776 /// Set every incoming value(s) for block \p BB to \p V.
2777 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2778 assert(BB && "PHI node got a null basic block!");
2779 bool Found = false;
2780 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2781 if (getIncomingBlock(Op) == BB) {
2782 Found = true;
2783 setIncomingValue(Op, V);
2784 }
2785 (void)Found;
2786 assert(Found && "Invalid basic block argument to set!");
2787 }
2788
2789 /// If the specified PHI node always merges together the
2790 /// same value, return the value, otherwise return null.
2791 Value *hasConstantValue() const;
2792
2793 /// Whether the specified PHI node always merges
2794 /// together the same value, assuming undefs are equal to a unique
2795 /// non-undef value.
2796 bool hasConstantOrUndefValue() const;
2797
2798 /// If the PHI node is complete which means all of its parent's predecessors
2799 /// have incoming value in this PHI, return true, otherwise return false.
2800 bool isComplete() const {
2801 return llvm::all_of(predecessors(getParent()),
2802 [this](const BasicBlock *Pred) {
2803 return getBasicBlockIndex(Pred) >= 0;
2804 });
2805 }
2806
2807 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2808 static bool classof(const Instruction *I) {
2809 return I->getOpcode() == Instruction::PHI;
2810 }
2811 static bool classof(const Value *V) {
2812 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2813 }
2814
2815private:
2816 void growOperands();
2817};
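// Editorial usage sketch (not part of the original header): merging an i32
// value from two predecessors; `ThenV`/`ThenBB`, `ElseV`/`ElseBB`, `Ctx`, and
// `MergePt` (a position in the join block) are assumed.
// \code
//   PHINode *PN = PHINode::Create(Type::getInt32Ty(Ctx), /*NumReservedValues=*/2,
//                                 "merge", MergePt);
//   PN->addIncoming(ThenV, ThenBB);
//   PN->addIncoming(ElseV, ElseBB);
//   assert(PN->getNumIncomingValues() == 2 && PN->isComplete());
// \endcode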
2818
2819template <> struct OperandTraits<PHINode> : public HungoffOperandTraits {};
2820
2821DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
2822
2823//===----------------------------------------------------------------------===//
2824// LandingPadInst Class
2825//===----------------------------------------------------------------------===//
2826
2827//===---------------------------------------------------------------------------
2828/// The landingpad instruction holds all of the information
2829/// necessary to generate correct exception handling. The landingpad instruction
2830/// cannot be moved from the top of a landing pad block, which itself is
2831/// accessible only from the 'unwind' edge of an invoke. This uses the
2832/// SubclassData field in Value to store whether or not the landingpad is a
2833/// cleanup.
2834///
2835class LandingPadInst : public Instruction {
2836 using CleanupField = BoolBitfieldElementT<0>;
2837
2838 constexpr static HungOffOperandsAllocMarker AllocMarker{};
2839
2840 /// The number of operands actually allocated. NumOperands is
2841 /// the number actually in use.
2842 unsigned ReservedSpace;
2843
2844 LandingPadInst(const LandingPadInst &LP);
2845
2846public:
2847 enum ClauseType { Catch, Filter };
2848
2849private:
2850 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2851 const Twine &NameStr, InsertPosition InsertBefore);
2852
2853 // Allocate space for exactly zero operands.
2854 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
2855
2856 void growOperands(unsigned Size);
2857 void init(unsigned NumReservedValues, const Twine &NameStr);
2858
2859protected:
2860 // Note: Instruction needs to be a friend here to call cloneImpl.
2861 friend class Instruction;
2862
2863 LandingPadInst *cloneImpl() const;
2864
2865public:
2866 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2867
2868 /// Constructors - NumReservedClauses is a hint for the number of incoming
2869 /// clauses that this landingpad will have (use 0 if you really have no idea).
2870 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2871 const Twine &NameStr = "",
2872 InsertPosition InsertBefore = nullptr);
2873
2874 /// Provide fast operand accessors
2875 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2876
2877 /// Return 'true' if this landingpad instruction is a
2878 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2879 /// doesn't catch the exception.
2880 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2881
2882 /// Indicate that this landingpad instruction is a cleanup.
2883 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2884
2885 /// Add a catch or filter clause to the landing pad.
2886 void addClause(Constant *ClauseVal);
2887
2888 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2889 /// determine what type of clause this is.
2890 Constant *getClause(unsigned Idx) const {
2891 return cast<Constant>(getOperandList()[Idx]);
2892 }
2893
2894 /// Return 'true' if the clause at index Idx is a catch clause.
2895 bool isCatch(unsigned Idx) const {
2896 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2897 }
2898
2899 /// Return 'true' if the clause at index Idx is a filter clause.
2900 bool isFilter(unsigned Idx) const {
2901 return isa<ArrayType>(getOperandList()[Idx]->getType());
2902 }
2903
2904 /// Get the number of clauses for this landing pad.
2905 unsigned getNumClauses() const { return getNumOperands(); }
2906
2907 /// Grow the size of the operand list to accommodate the new
2908 /// number of clauses.
2909 void reserveClauses(unsigned Size) { growOperands(Size); }
2910
2911 // Methods for support type inquiry through isa, cast, and dyn_cast:
2912 static bool classof(const Instruction *I) {
2913 return I->getOpcode() == Instruction::LandingPad;
2914 }
2915 static bool classof(const Value *V) {
2916 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2917 }
2918};
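// Editorial usage sketch (not part of the original header): a landingpad that
// catches one type and also runs cleanups. `ExnTy` (the personality's result
// type, e.g. {ptr, i32}), `TypeInfo` (a Constant describing the caught type),
// and `PadPt` (top of the landing pad block) are assumed.
// \code
//   LandingPadInst *LP = LandingPadInst::Create(ExnTy, /*NumReservedClauses=*/1,
//                                               "lpad", PadPt);
//   LP->addClause(TypeInfo); // non-array operand => catch clause
//   LP->setCleanup(true);    // also run cleanup code while unwinding
//   assert(LP->isCatch(0) && LP->isCleanup());
// \endcode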
2919
2920template <>
2921struct OperandTraits<LandingPadInst> : public HungoffOperandTraits {};
2922
2923DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
2924
2925//===----------------------------------------------------------------------===//
2926// ReturnInst Class
2927//===----------------------------------------------------------------------===//
2928
2929//===---------------------------------------------------------------------------
2930/// Return a value (possibly void) from a function. Execution
2931/// does not continue in this function any longer.
2932///
2933class ReturnInst : public Instruction {
2935
2936private:
2937 // ReturnInst constructors:
2938 // ReturnInst() - 'ret void' instruction
2939 // ReturnInst( null) - 'ret void' instruction
2940 // ReturnInst(Value* X) - 'ret X' instruction
2941 // ReturnInst(null, Iterator It) - 'ret void' instruction, insert before I
2942 // ReturnInst(Value* X, Iterator It) - 'ret X' instruction, insert before I
2943 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2944 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2945 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2946 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2947 //
2948 // NOTE: If the Value* passed is of type void then the constructor behaves as
2949 // if it was passed NULL.
2950 explicit ReturnInst(LLVMContext &C, Value *retVal, AllocInfo AllocInfo,
2951 InsertPosition InsertBefore);
2952
2953protected:
2954 // Note: Instruction needs to be a friend here to call cloneImpl.
2955 friend class Instruction;
2956
2957 ReturnInst *cloneImpl() const;
2958
2959public:
2960 static ReturnInst *Create(LLVMContext &C, Value *retVal = nullptr,
2961 InsertPosition InsertBefore = nullptr) {
2962 IntrusiveOperandsAllocMarker AllocMarker{retVal ? 1U : 0U};
2963 return new (AllocMarker) ReturnInst(C, retVal, AllocMarker, InsertBefore);
2964 }
2965
2966 static ReturnInst *Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
2967 IntrusiveOperandsAllocMarker AllocMarker{0};
2968 return new (AllocMarker) ReturnInst(C, nullptr, AllocMarker, InsertAtEnd);
2969 }
2970
2971 /// Provide fast operand accessors
2972 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2973
2974 /// Convenience accessor. Returns null if there is no return value.
2975 Value *getReturnValue() const {
2976 return getNumOperands() != 0 ? getOperand(0) : nullptr;
2977 }
2978
2979 unsigned getNumSuccessors() const { return 0; }
2980
2981 // Methods for support type inquiry through isa, cast, and dyn_cast:
2982 static bool classof(const Instruction *I) {
2983 return (I->getOpcode() == Instruction::Ret);
2984 }
2985 static bool classof(const Value *V) {
2986 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2987 }
2988
2989private:
2990 BasicBlock *getSuccessor(unsigned idx) const {
2991 llvm_unreachable("ReturnInst has no successors!");
2992 }
2993
2994 void setSuccessor(unsigned idx, BasicBlock *B) {
2995 llvm_unreachable("ReturnInst has no successors!");
2996 }
2997};
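// Editorial usage sketch (not part of the original header): terminating blocks
// with `ret`; `Ctx`, `BB`, `RetVal`, and `EndPt` are assumed.
// \code
//   ReturnInst::Create(Ctx, BB);            // 'ret void', appended to BB
//   ReturnInst::Create(Ctx, RetVal, EndPt); // 'ret RetVal', inserted at EndPt
// \endcode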
2998
2999template <>
3000struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {};
3001
3002DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
3003
3004//===----------------------------------------------------------------------===//
3005// BranchInst Class
3006//===----------------------------------------------------------------------===//
3007
3008//===---------------------------------------------------------------------------
3009/// Conditional or Unconditional Branch instruction.
3010///
3011class BranchInst : public Instruction {
3012 /// Ops list - Branches are strange. The operands are ordered:
3013 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3014 /// they don't have to check for cond/uncond branchness. These are mostly
3015 /// accessed relative from op_end().
3017 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3018 // BranchInst(BB *B) - 'br B'
3019 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3020 // BranchInst(BB* B, Iter It) - 'br B' insert before I
3021 // BranchInst(BB* T, BB *F, Value *C, Iter It) - 'br C, T, F', insert before I
3022 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3023 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3024 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3025 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3026 explicit BranchInst(BasicBlock *IfTrue, AllocInfo AllocInfo,
3027 InsertPosition InsertBefore);
3028 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3029 AllocInfo AllocInfo, InsertPosition InsertBefore);
3030
3031 void AssertOK();
3032
3033protected:
3034 // Note: Instruction needs to be a friend here to call cloneImpl.
3035 friend class Instruction;
3036
3037 BranchInst *cloneImpl() const;
3038
3039public:
3040 /// Iterator type that casts an operand to a basic block.
3041 ///
3042 /// This only makes sense because the successors are stored as adjacent
3043 /// operands for branch instructions.
3044 struct succ_op_iterator
3045 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3046 std::random_access_iterator_tag, BasicBlock *,
3047 ptrdiff_t, BasicBlock *, BasicBlock *> {
3049
3050 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3051 BasicBlock *operator->() const { return operator*(); }
3052 };
3053
3054 /// The const version of `succ_op_iterator`.
3055 struct const_succ_op_iterator
3056 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3057 std::random_access_iterator_tag,
3058 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3059 const BasicBlock *> {
3060 explicit const_succ_op_iterator(const_value_op_iterator I)
3061 : iterator_adaptor_base(I) {}
3062
3063 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3064 const BasicBlock *operator->() const { return operator*(); }
3065 };
3066
3067 static BranchInst *Create(BasicBlock *IfTrue,
3068 InsertPosition InsertBefore = nullptr) {
3069 IntrusiveOperandsAllocMarker AllocMarker{1};
3070 return new (AllocMarker) BranchInst(IfTrue, AllocMarker, InsertBefore);
3071 }
3072
3073 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3074 Value *Cond,
3075 InsertPosition InsertBefore = nullptr) {
3076 IntrusiveOperandsAllocMarker AllocMarker{3};
3077 return new (AllocMarker)
3078 BranchInst(IfTrue, IfFalse, Cond, AllocMarker, InsertBefore);
3079 }
3080
3081 /// Transparently provide more efficient getOperand methods.
3082 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3083
3084 bool isUnconditional() const { return getNumOperands() == 1; }
3085 bool isConditional() const { return getNumOperands() == 3; }
3086
3087 Value *getCondition() const {
3088 assert(isConditional() && "Cannot get condition of an uncond branch!");
3089 return Op<-3>();
3090 }
3091
3092 void setCondition(Value *V) {
3093 assert(isConditional() && "Cannot set condition of unconditional branch!");
3094 Op<-3>() = V;
3095 }
3096
3097 unsigned getNumSuccessors() const { return 1+isConditional(); }
3098
3099 BasicBlock *getSuccessor(unsigned i) const {
3100 assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3101 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3102 }
3103
3104 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3105 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3106 *(&Op<-1>() - idx) = NewSucc;
3107 }
3108
3109 /// Swap the successors of this branch instruction.
3110 ///
3111 /// Swaps the successors of the branch instruction. This also swaps any
3112 /// branch weight metadata associated with the instruction so that it
3113 /// continues to map correctly to each operand.
3114 void swapSuccessors();
3115
3116 iterator_range<succ_op_iterator> successors() {
3117 return make_range(
3118 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3119 succ_op_iterator(value_op_end()));
3120 }
3121
3122 iterator_range<const_succ_op_iterator> successors() const {
3123 return make_range(const_succ_op_iterator(
3124 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3125 const_succ_op_iterator(value_op_end()));
3126 }
3127
3128 // Methods for support type inquiry through isa, cast, and dyn_cast:
3129 static bool classof(const Instruction *I) {
3130 return (I->getOpcode() == Instruction::Br);
3131 }
3132 static bool classof(const Value *V) {
3133 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3134 }
3135};
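// Editorial usage sketch (not part of the original header): emitting branches
// and swapping successors; `Cond`, `ThenBB`, `ElseBB`, `DestBB`, `InsertPt`,
// and `OtherPt` are assumed to come from surrounding code.
// \code
//   BranchInst::Create(DestBB, InsertPt);                  // br label %DestBB
//   BranchInst *BI = BranchInst::Create(ThenBB, ElseBB, Cond, OtherPt);
//   BI->swapSuccessors(); // also swaps any branch-weight metadata
//   assert(BI->isConditional() && BI->getSuccessor(0) == ElseBB);
// \endcode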
3136
3137template <>
3138struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst> {};
3139
3141
3142//===----------------------------------------------------------------------===//
3143// SwitchInst Class
3144//===----------------------------------------------------------------------===//
3145
3146//===---------------------------------------------------------------------------
3147/// Multiway switch
3148///
3149class SwitchInst : public Instruction {
3150 constexpr static HungOffOperandsAllocMarker AllocMarker{};
3151
3152 unsigned ReservedSpace;
3153
3154 // Operand[0] = Value to switch on
3155 // Operand[1] = Default basic block destination
3156 // Operand[2n ] = Value to match
3157 // Operand[2n+1] = BasicBlock to go to on match
3158 SwitchInst(const SwitchInst &SI);
3159
3160 /// Create a new switch instruction, specifying a value to switch on and a
3161 /// default destination. The number of additional cases can be specified here
3162 /// to make memory allocation more efficient. This constructor can also
3163 /// auto-insert before another instruction.
3164 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3165 InsertPosition InsertBefore);
3166
3167 // allocate space for exactly zero operands
3168 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
3169
3170 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3171 void growOperands();
3172
3173protected:
3174 // Note: Instruction needs to be a friend here to call cloneImpl.
3175 friend class Instruction;
3176
3177 SwitchInst *cloneImpl() const;
3178
3179public:
3180 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3181
3182 // -2
3183 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3184
3185 template <typename CaseHandleT> class CaseIteratorImpl;
3186
3187 /// A handle to a particular switch case. It exposes a convenient interface
3188 /// to both the case value and the successor block.
3189 ///
3190 /// We define this as a template and instantiate it to form both a const and
3191 /// non-const handle.
3192 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3194 // Directly befriend both const and non-const iterators.
3195 friend class SwitchInst::CaseIteratorImpl<
3196 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3197
3198 protected:
3199 // Expose the switch type we're parameterized with to the iterator.
3200 using SwitchInstType = SwitchInstT;
3201
3202 SwitchInstT *SI;
3203 ptrdiff_t Index;
3204
3205 CaseHandleImpl() = default;
3206 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3207
3208 public:
3209 /// Resolves case value for current case.
3210 ConstantIntT *getCaseValue() const {
3211 assert((unsigned)Index < SI->getNumCases() &&
3212 "Index out the number of cases.");
3213 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3214 }
3215
3216 /// Resolves successor for current case.
3217 BasicBlockT *getCaseSuccessor() const {
3218 assert(((unsigned)Index < SI->getNumCases() ||
3219 (unsigned)Index == DefaultPseudoIndex) &&
3220 "Index out the number of cases.");
3221 return SI->getSuccessor(getSuccessorIndex());
3222 }
3223
3224 /// Returns number of current case.
3225 unsigned getCaseIndex() const { return Index; }
3226
3227 /// Returns successor index for current case successor.
3228 unsigned getSuccessorIndex() const {
3229 assert(((unsigned)Index == DefaultPseudoIndex ||
3230 (unsigned)Index < SI->getNumCases()) &&
3231 "Index out the number of cases.");
3232 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3233 }
3234
3235 bool operator==(const CaseHandleImpl &RHS) const {
3236 assert(SI == RHS.SI && "Incompatible operators.");
3237 return Index == RHS.Index;
3238 }
3239 };
3240
3241 using ConstCaseHandle =
3242 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3243
3244 class CaseHandle
3245 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3246 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3247
3248 public:
3249 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3250
3251 /// Sets the new value for current case.
3252 void setValue(ConstantInt *V) const {
3253 assert((unsigned)Index < SI->getNumCases() &&
3254 "Index out the number of cases.");
3255 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3256 }
3257
3258 /// Sets the new successor for current case.
3259 void setSuccessor(BasicBlock *S) const {
3260 SI->setSuccessor(getSuccessorIndex(), S);
3261 }
3262 };
3263
3264 template <typename CaseHandleT>
3266 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3267 std::random_access_iterator_tag,
3268 const CaseHandleT> {
3269 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3270
3271 CaseHandleT Case;
3272
3273 public:
3274 /// Default constructed iterator is in an invalid state until assigned to
3275 /// a case for a particular switch.
3276 CaseIteratorImpl() = default;
3277
3278 /// Initializes case iterator for given SwitchInst and for given
3279 /// case number.
3280 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3281
3282 /// Initializes case iterator for given SwitchInst and for given
3283 /// successor index.
3284 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3285 unsigned SuccessorIndex) {
3286 assert(SuccessorIndex < SI->getNumSuccessors() &&
3287 "Successor index # out of range!");
3288 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3289 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3290 }
3291
3292 /// Support converting to the const variant. This will be a no-op for const
3293 /// variant.
3294 operator CaseIteratorImpl<ConstCaseHandle>() const {
3295 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3296 }
3297
3298 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3299 // Check index correctness after addition.
3300 // Note: Index == getNumCases() means end().
3301 assert(Case.Index + N >= 0 &&
3302 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3303 "Case.Index out the number of cases.");
3304 Case.Index += N;
3305 return *this;
3306 }
3307 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3308 // Check index correctness after subtraction.
3309 // Note: Case.Index == getNumCases() means end().
3310 assert(Case.Index - N >= 0 &&
3311 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3312 "Case.Index out the number of cases.");
3313 Case.Index -= N;
3314 return *this;
3315 }
3316 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3317 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3318 return Case.Index - RHS.Case.Index;
3319 }
3320 bool operator==(const CaseIteratorImpl &RHS) const {
3321 return Case == RHS.Case;
3322 }
3323 bool operator<(const CaseIteratorImpl &RHS) const {
3324 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3325 return Case.Index < RHS.Case.Index;
3326 }
3327 const CaseHandleT &operator*() const { return Case; }
3328 };
3329
3330 using CaseIt = CaseIteratorImpl<CaseHandle>;
3331 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3332
3333 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3334 unsigned NumCases,
3335 InsertPosition InsertBefore = nullptr) {
3336 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3337 }
3338
3339 /// Provide fast operand accessors
3340 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3341
3342 // Accessor Methods for Switch stmt
3343 Value *getCondition() const { return getOperand(0); }
3344 void setCondition(Value *V) { setOperand(0, V); }
3345
3346 BasicBlock *getDefaultDest() const {
3347 return cast<BasicBlock>(getOperand(1));
3348 }
3349
3350 /// Returns true if the default branch must result in immediate undefined
3351 /// behavior, false otherwise.
3352 bool defaultDestUndefined() const {
3353 return isa<UnreachableInst>(getDefaultDest()->getFirstNonPHIOrDbg());
3354 }
3355
3356 void setDefaultDest(BasicBlock *DefaultCase) {
3357 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3358 }
3359
3360 /// Return the number of 'cases' in this switch instruction, excluding the
3361 /// default case.
3362 unsigned getNumCases() const {
3363 return getNumOperands()/2 - 1;
3364 }
3365
3366 /// Returns a read/write iterator that points to the first case in the
3367 /// SwitchInst.
3368 CaseIt case_begin() {
3369 return CaseIt(this, 0);
3370 }
3371
3372 /// Returns a read-only iterator that points to the first case in the
3373 /// SwitchInst.
3374 ConstCaseIt case_begin() const {
3375 return ConstCaseIt(this, 0);
3376 }
3377
3378 /// Returns a read/write iterator that points one past the last in the
3379 /// SwitchInst.
3380 CaseIt case_end() {
3381 return CaseIt(this, getNumCases());
3382 }
3383
3384 /// Returns a read-only iterator that points one past the last in the
3385 /// SwitchInst.
3386 ConstCaseIt case_end() const {
3387 return ConstCaseIt(this, getNumCases());
3388 }
3389
3390 /// Iteration adapter for range-for loops.
3391 iterator_range<CaseIt> cases() {
3392 return make_range(case_begin(), case_end());
3393 }
3394
3395 /// Constant iteration adapter for range-for loops.
3396 iterator_range<ConstCaseIt> cases() const {
3397 return make_range(case_begin(), case_end());
3398 }
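 // Editor's note: a minimal usage sketch of the case adapters above (not part
 // of the original header). It assumes `SI` is an existing `SwitchInst *`:
 //
 //   for (auto Case : SI->cases()) {
 //     ConstantInt *CaseVal = Case.getCaseValue();    // the matched constant
 //     BasicBlock *CaseBB = Case.getCaseSuccessor();  // the branch target
 //     (void)CaseVal; (void)CaseBB;
 //   }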
3399
3400 /// Returns an iterator that points to the default case.
3401 /// Note: this iterator only allows resolving the successor; attempting to
3402 /// resolve the case value triggers an assertion.
3403 /// Also note that incrementing or decrementing this iterator triggers an
3404 /// assertion and leaves it invalid.
3405 CaseIt case_default() {
3406 return CaseIt(this, DefaultPseudoIndex);
3407 }
3408 ConstCaseIt case_default() const {
3409 return ConstCaseIt(this, DefaultPseudoIndex);
3410 }
3411
3412 /// Search all of the case values for the specified constant. If it is
3413 /// explicitly handled, return the case iterator of it, otherwise return
3414 /// default case iterator to indicate that it is handled by the default
3415 /// handler.
3416 CaseIt findCaseValue(const ConstantInt *C) {
3417 return CaseIt(
3418 this,
3419 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3420 }
3421 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3422 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3423 return Case.getCaseValue() == C;
3424 });
3425 if (I != case_end())
3426 return I;
3427
3428 return case_default();
3429 }
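 // Editor's note: hedged usage sketch for findCaseValue() (not part of the
 // original header), assuming `SI` is a SwitchInst * and `C` is a ConstantInt *
 // of the condition's type:
 //
 //   SwitchInst::CaseIt It = SI->findCaseValue(C);
 //   BasicBlock *Target = (It != SI->case_default())
 //                            ? It->getCaseSuccessor()   // explicitly handled
 //                            : SI->getDefaultDest();    // handled by default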
3430
3431 /// Finds the unique case value for a given successor. Returns null if the
3432 /// successor is not found, not unique, or is the default case.
3433 ConstantInt *findCaseDest(BasicBlock *BB) {
3434 if (BB == getDefaultDest())
3435 return nullptr;
3436
3437 ConstantInt *CI = nullptr;
3438 for (auto Case : cases()) {
3439 if (Case.getCaseSuccessor() != BB)
3440 continue;
3441
3442 if (CI)
3443 return nullptr; // Multiple cases lead to BB.
3444
3445 CI = Case.getCaseValue();
3446 }
3447
3448 return CI;
3449 }
3450
3451 /// Add an entry to the switch instruction.
3452 /// Note:
3453 /// This action invalidates case_end(). Old case_end() iterator will
3454 /// point to the added case.
3455 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3456
3457 /// This method removes the specified case and its successor from the switch
3458 /// instruction. Note that this operation may reorder the remaining cases at
3459 /// index idx and above.
3460 /// Note:
3461 /// This action invalidates iterators for all cases following the one removed,
3462 /// including the case_end() iterator. It returns an iterator for the next
3463 /// case.
3464 CaseIt removeCase(CaseIt I);
3465
3466 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3467 BasicBlock *getSuccessor(unsigned idx) const {
3468 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3469 return cast<BasicBlock>(getOperand(idx*2+1));
3470 }
3471 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3472 assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3473 setOperand(idx * 2 + 1, NewSucc);
3474 }
3475
3476 // Methods for support type inquiry through isa, cast, and dyn_cast:
3477 static bool classof(const Instruction *I) {
3478 return I->getOpcode() == Instruction::Switch;
3479 }
3480 static bool classof(const Value *V) {
3481 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3482 }
3483};
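// Editor's note: a minimal creation sketch (not part of the original header).
// It assumes `Cond` is an i32 Value*, `DefaultBB`/`ZeroBB`/`OneBB` are basic
// blocks, `Ctx` is an LLVMContext, and `InsertPt` is a valid insertion point:
//
//   SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/2, InsertPt);
//   SI->addCase(ConstantInt::get(Type::getInt32Ty(Ctx), 0), ZeroBB);
//   SI->addCase(ConstantInt::get(Type::getInt32Ty(Ctx), 1), OneBB);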
3484
3485/// A wrapper class to simplify modification of SwitchInst cases along with
3486/// their prof branch_weights metadata.
3487class SwitchInstProfUpdateWrapper {
3488 SwitchInst &SI;
3489 std::optional<SmallVector<uint32_t, 8>> Weights;
3490 bool Changed = false;
3491
3492protected:
3493 MDNode *buildProfBranchWeightsMD();
3494
3495 void init();
3496
3497public:
3498 using CaseWeightOpt = std::optional<uint32_t>;
3499 SwitchInst *operator->() { return &SI; }
3500 SwitchInst &operator*() { return SI; }
3501 operator SwitchInst *() { return &SI; }
3502
3503 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3504
3505 ~SwitchInstProfUpdateWrapper() {
3506 if (Changed)
3507 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3508 }
3509
3510 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3511 /// the corresponding branch weight.
3512 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3513
3514 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3515 /// specified branch weight for the added case.
3516 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3517
3518 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3519 /// this object so that it does not touch the underlying SwitchInst in its destructor.
3520 void eraseFromParent();
3521
3522 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3523 CaseWeightOpt getSuccessorWeight(unsigned idx);
3524
3525 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3526};
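// Editor's note: hedged sketch of how this wrapper is intended to be used (not
// part of the original header), assuming `SI` refers to an existing SwitchInst
// and `CaseVal`/`CaseBB` are valid:
//
//   {
//     SwitchInstProfUpdateWrapper SIW(*SI);
//     SIW.addCase(CaseVal, CaseBB, /*W=*/10);  // case weight recorded alongside the case
//     SIW.setSuccessorWeight(0, /*W=*/90);     // successor 0 is the default destination
//   } // destructor re-emits !prof branch_weights if anything changed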
3527
3528template <> struct OperandTraits<SwitchInst> : public HungoffOperandTraits {};
3529
3530DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
3531
3532//===----------------------------------------------------------------------===//
3533// IndirectBrInst Class
3534//===----------------------------------------------------------------------===//
3535
3536//===---------------------------------------------------------------------------
3537/// Indirect Branch Instruction.
3538///
3539class IndirectBrInst : public Instruction {
3540 constexpr static HungOffOperandsAllocMarker AllocMarker{};
3541
3542 unsigned ReservedSpace;
3543
3544 // Operand[0] = Address to jump to
3545 // Operand[n+1] = n-th destination
3546 IndirectBrInst(const IndirectBrInst &IBI);
3547
3548 /// Create a new indirectbr instruction, specifying an
3549 /// Address to jump to. The number of expected destinations can be specified
3550 /// here to make memory allocation more efficient. This constructor can also
3551 /// autoinsert before another instruction.
3552 IndirectBrInst(Value *Address, unsigned NumDests,
3553 InsertPosition InsertBefore);
3554
3555 // allocate space for exactly zero operands
3556 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
3557
3558 void init(Value *Address, unsigned NumDests);
3559 void growOperands();
3560
3561protected:
3562 // Note: Instruction needs to be a friend here to call cloneImpl.
3563 friend class Instruction;
3564
3565 IndirectBrInst *cloneImpl() const;
3566
3567public:
3568 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3569
3570 /// Iterator type that casts an operand to a basic block.
3571 ///
3572 /// This only makes sense because the successors are stored as adjacent
3573 /// operands for indirectbr instructions.
3574 struct succ_op_iterator
3575 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3576 std::random_access_iterator_tag, BasicBlock *,
3577 ptrdiff_t, BasicBlock *, BasicBlock *> {
3578 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3579
3580 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3581 BasicBlock *operator->() const { return operator*(); }
3582 };
3583
3584 /// The const version of `succ_op_iterator`.
3585 struct const_succ_op_iterator
3586 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3587 std::random_access_iterator_tag,
3588 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3589 const BasicBlock *> {
3590 explicit const_succ_op_iterator(const_value_op_iterator I)
3591 : iterator_adaptor_base(I) {}
3592
3593 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3594 const BasicBlock *operator->() const { return operator*(); }
3595 };
3596
3597 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3598 InsertPosition InsertBefore = nullptr) {
3599 return new IndirectBrInst(Address, NumDests, InsertBefore);
3600 }
3601
3602 /// Provide fast operand accessors.
3603 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3604
3605 // Accessor Methods for IndirectBrInst instruction.
3606 Value *getAddress() { return getOperand(0); }
3607 const Value *getAddress() const { return getOperand(0); }
3608 void setAddress(Value *V) { setOperand(0, V); }
3609
3610 /// return the number of possible destinations in this
3611 /// indirectbr instruction.
3612 unsigned getNumDestinations() const { return getNumOperands()-1; }
3613
3614 /// Return the specified destination.
3615 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3616 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3617
3618 /// Add a destination.
3619 ///
3620 void addDestination(BasicBlock *Dest);
3621
3622 /// This method removes the specified successor from the
3623 /// indirectbr instruction.
3624 void removeDestination(unsigned i);
3625
3626 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3627 BasicBlock *getSuccessor(unsigned i) const {
3628 return cast<BasicBlock>(getOperand(i+1));
3629 }
3630 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3631 setOperand(i + 1, NewSucc);
3632 }
3633
3634 iterator_range<succ_op_iterator> successors() {
3635 return make_range(succ_op_iterator(std::next(value_op_begin())),
3636 succ_op_iterator(value_op_end()));
3637 }
3638
3639 iterator_range<const_succ_op_iterator> successors() const {
3640 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3641 const_succ_op_iterator(value_op_end()));
3642 }
3643
3644 // Methods for support type inquiry through isa, cast, and dyn_cast:
3645 static bool classof(const Instruction *I) {
3646 return I->getOpcode() == Instruction::IndirectBr;
3647 }
3648 static bool classof(const Value *V) {
3649 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3650 }
3651};
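// Editor's note: a minimal usage sketch (not in the original header), assuming
// `TargetBB` is a BasicBlock* whose address has been taken and `InsertPt` is a
// valid insertion position:
//
//   Value *Addr = BlockAddress::get(TargetBB);
//   IndirectBrInst *IBI = IndirectBrInst::Create(Addr, /*NumDests=*/1, InsertPt);
//   IBI->addDestination(TargetBB);   // every possible target must be listed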
3652
3653template <>
3654struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits {};
3655
3656DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
3657
3658//===----------------------------------------------------------------------===//
3659// InvokeInst Class
3660//===----------------------------------------------------------------------===//
3661
3662/// Invoke instruction. The SubclassData field is used to hold the
3663/// calling convention of the call.
3664///
3665class InvokeInst : public CallBase {
3666 /// The number of operands for this call beyond the called function,
3667 /// arguments, and operand bundles.
3668 static constexpr int NumExtraOperands = 2;
3669
3670 /// The index from the end of the operand array to the normal destination.
3671 static constexpr int NormalDestOpEndIdx = -3;
3672
3673 /// The index from the end of the operand array to the unwind destination.
3674 static constexpr int UnwindDestOpEndIdx = -2;
3675
3676 InvokeInst(const InvokeInst &BI, AllocInfo AllocInfo);
3677
3678 /// Construct an InvokeInst given a range of arguments.
3679 ///
3680 /// Construct an InvokeInst from a range of arguments
3681 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3682 BasicBlock *IfException, ArrayRef<Value *> Args,
3683 ArrayRef<OperandBundleDef> Bundles, AllocInfo AllocInfo,
3684 const Twine &NameStr, InsertPosition InsertBefore);
3685
3686 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3687 BasicBlock *IfException, ArrayRef<Value *> Args,
3688 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3689
3690 /// Compute the number of operands to allocate.
3691 static unsigned ComputeNumOperands(unsigned NumArgs,
3692 size_t NumBundleInputs = 0) {
3693 // We need one operand for the called function, plus our extra operands and
3694 // the input operand counts provided.
3695 return 1 + NumExtraOperands + NumArgs + unsigned(NumBundleInputs);
3696 }
3697
3698protected:
3699 // Note: Instruction needs to be a friend here to call cloneImpl.
3700 friend class Instruction;
3701
3702 InvokeInst *cloneImpl() const;
3703
3704public:
3705 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3706 BasicBlock *IfException, ArrayRef<Value *> Args,
3707 const Twine &NameStr,
3708 InsertPosition InsertBefore = nullptr) {
3709 IntrusiveOperandsAllocMarker AllocMarker{
3710 ComputeNumOperands(unsigned(Args.size()))};
3711 return new (AllocMarker) InvokeInst(Ty, Func, IfNormal, IfException, Args,
3712 {}, AllocMarker, NameStr, InsertBefore);
3713 }
3714
3715 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3716 BasicBlock *IfException, ArrayRef<Value *> Args,
3717 ArrayRef<OperandBundleDef> Bundles = {},
3718 const Twine &NameStr = "",
3719 InsertPosition InsertBefore = nullptr) {
3720 IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
3721 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)),
3722 unsigned(Bundles.size() * sizeof(BundleOpInfo))};
3723
3724 return new (AllocMarker)
3725 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, AllocMarker,
3726 NameStr, InsertBefore);
3727 }
3728
3730 BasicBlock *IfException, ArrayRef<Value *> Args,
3731 const Twine &NameStr,
3732 InsertPosition InsertBefore = nullptr) {
3733 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3734 IfException, Args, {}, NameStr, InsertBefore);
3735 }
3736
3737 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3738 BasicBlock *IfException, ArrayRef<Value *> Args,
3739 ArrayRef<OperandBundleDef> Bundles = {},
3740 const Twine &NameStr = "",
3741 InsertPosition InsertBefore = nullptr) {
3742 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3743 IfException, Args, Bundles, NameStr, InsertBefore);
3744 }
3745
3746 /// Create a clone of \p II with a different set of operand bundles and
3747 /// insert it before \p InsertBefore.
3748 ///
3749 /// The returned invoke instruction is identical to \p II in every way except
3750 /// that the operand bundles for the new instruction are set to the operand
3751 /// bundles in \p Bundles.
3752 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3753 InsertPosition InsertPt = nullptr);
3754
3755 // get*Dest - Return the destination basic blocks...
3756 BasicBlock *getNormalDest() const {
3757 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3758 }
3759 BasicBlock *getUnwindDest() const {
3760 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3761 }
3762 void setNormalDest(BasicBlock *B) {
3763 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3764 }
3765 void setUnwindDest(BasicBlock *B) {
3766 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3767 }
3768
3769 /// Get the landingpad instruction from the landing pad
3770 /// block (the unwind destination).
3771 LandingPadInst *getLandingPadInst() const;
3772
3773 BasicBlock *getSuccessor(unsigned i) const {
3774 assert(i < 2 && "Successor # out of range for invoke!");
3775 return i == 0 ? getNormalDest() : getUnwindDest();
3776 }
3777
3778 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3779 assert(i < 2 && "Successor # out of range for invoke!");
3780 if (i == 0)
3781 setNormalDest(NewSucc);
3782 else
3783 setUnwindDest(NewSucc);
3784 }
3785
3786 unsigned getNumSuccessors() const { return 2; }
3787
3788 /// Updates profile metadata by scaling it by \p S / \p T.
3789 void updateProfWeight(uint64_t S, uint64_t T);
3790
3791 // Methods for support type inquiry through isa, cast, and dyn_cast:
3792 static bool classof(const Instruction *I) {
3793 return (I->getOpcode() == Instruction::Invoke);
3794 }
3795 static bool classof(const Value *V) {
3796 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3797 }
3798
3799private:
3800 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3801 // method so that subclasses cannot accidentally use it.
3802 template <typename Bitfield>
3803 void setSubclassData(typename Bitfield::Type Value) {
3804 Instruction::setSubclassData<Bitfield>(Value);
3805 }
3806};
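// Editor's note: hedged creation sketch (not part of the original header),
// assuming `Callee` is a FunctionCallee, `Args` an ArrayRef<Value *>, and
// `NormalBB`/`UnwindBB`/`InsertPt` are valid:
//
//   InvokeInst *II =
//       InvokeInst::Create(Callee, NormalBB, UnwindBB, Args, "call", InsertPt);
//   assert(II->getNormalDest() == NormalBB && II->getUnwindDest() == UnwindBB);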
3807
3808InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3809 BasicBlock *IfException, ArrayRef<Value *> Args,
3810 ArrayRef<OperandBundleDef> Bundles, AllocInfo AllocInfo,
3811 const Twine &NameStr, InsertPosition InsertBefore)
3812 : CallBase(Ty->getReturnType(), Instruction::Invoke, AllocInfo,
3813 InsertBefore) {
3814 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3815}
3816
3817//===----------------------------------------------------------------------===//
3818// CallBrInst Class
3819//===----------------------------------------------------------------------===//
3820
3821/// CallBr instruction, tracking function calls that may not return control but
3822/// instead transfer it to a third location. The SubclassData field is used to
3823/// hold the calling convention of the call.
3824///
3825class CallBrInst : public CallBase {
3826
3827 unsigned NumIndirectDests;
3828
3829 CallBrInst(const CallBrInst &BI, AllocInfo AllocInfo);
3830
3831 /// Construct a CallBrInst given a range of arguments.
3832 ///
3833 /// Construct a CallBrInst from a range of arguments
3834 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3835 ArrayRef<BasicBlock *> IndirectDests,
3836 ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
3837 AllocInfo AllocInfo, const Twine &NameStr,
3838 InsertPosition InsertBefore);
3839
3840 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3841 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
3842 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3843
3844 /// Compute the number of operands to allocate.
3845 static unsigned ComputeNumOperands(int NumArgs, int NumIndirectDests,
3846 int NumBundleInputs = 0) {
3847 // We need one operand for the called function, plus our extra operands and
3848 // the input operand counts provided.
3849 return unsigned(2 + NumIndirectDests + NumArgs + NumBundleInputs);
3850 }
3851
3852protected:
3853 // Note: Instruction needs to be a friend here to call cloneImpl.
3854 friend class Instruction;
3855
3856 CallBrInst *cloneImpl() const;
3857
3858public:
3859 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3860 BasicBlock *DefaultDest,
3861 ArrayRef<BasicBlock *> IndirectDests,
3862 ArrayRef<Value *> Args, const Twine &NameStr,
3863 InsertPosition InsertBefore = nullptr) {
3864 IntrusiveOperandsAllocMarker AllocMarker{
3865 ComputeNumOperands(Args.size(), IndirectDests.size())};
3866 return new (AllocMarker)
3867 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, {}, AllocMarker,
3868 NameStr, InsertBefore);
3869 }
3870
3871 static CallBrInst *
3872 Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3873 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
3874 ArrayRef<OperandBundleDef> Bundles = {}, const Twine &NameStr = "",
3875 InsertPosition InsertBefore = nullptr) {
3876 IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
3877 ComputeNumOperands(Args.size(), IndirectDests.size(),
3878 CountBundleInputs(Bundles)),
3879 unsigned(Bundles.size() * sizeof(BundleOpInfo))};
3880
3881 return new (AllocMarker)
3882 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
3883 AllocMarker, NameStr, InsertBefore);
3884 }
3885
3886 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
3887 ArrayRef<BasicBlock *> IndirectDests,
3888 ArrayRef<Value *> Args, const Twine &NameStr,
3889 InsertPosition InsertBefore = nullptr) {
3890 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
3891 IndirectDests, Args, NameStr, InsertBefore);
3892 }
3893
3894 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
3895 ArrayRef<BasicBlock *> IndirectDests,
3896 ArrayRef<Value *> Args,
3897 ArrayRef<OperandBundleDef> Bundles = {},
3898 const Twine &NameStr = "",
3899 InsertPosition InsertBefore = nullptr) {
3900 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
3901 IndirectDests, Args, Bundles, NameStr, InsertBefore);
3902 }
3903
3904 /// Create a clone of \p CBI with a different set of operand bundles and
3905 /// insert it before \p InsertBefore.
3906 ///
3907 /// The returned callbr instruction is identical to \p CBI in every way
3908 /// except that the operand bundles for the new instruction are set to the
3909 /// operand bundles in \p Bundles.
3910 static CallBrInst *Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> Bundles,
3911 InsertPosition InsertBefore = nullptr);
3912
3913 /// Return the number of callbr indirect dest labels.
3914 ///
3915 unsigned getNumIndirectDests() const { return NumIndirectDests; }
3916
3917 /// getIndirectDestLabel - Return the i-th indirect dest label.
3918 ///
3919 Value *getIndirectDestLabel(unsigned i) const {
3920 assert(i < getNumIndirectDests() && "Out of bounds!");
3921 return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
3922 }
3923
3924 Value *getIndirectDestLabelUse(unsigned i) const {
3925 assert(i < getNumIndirectDests() && "Out of bounds!");
3926 return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
3927 }
3928
3929 // Return the destination basic blocks...
3930 BasicBlock *getDefaultDest() const {
3931 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
3932 }
3933 BasicBlock *getIndirectDest(unsigned i) const {
3934 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
3935 }
3936 SmallVector<BasicBlock *, 16> getIndirectDests() const {
3937 SmallVector<BasicBlock *, 16> IndirectDests;
3938 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
3939 IndirectDests.push_back(getIndirectDest(i));
3940 return IndirectDests;
3941 }
3942 void setDefaultDest(BasicBlock *B) {
3943 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
3944 }
3945 void setIndirectDest(unsigned i, BasicBlock *B) {
3946 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
3947 }
3948
3949 BasicBlock *getSuccessor(unsigned i) const {
3950 assert(i < getNumSuccessors() + 1 &&
3951 "Successor # out of range for callbr!");
3952 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
3953 }
3954
3955 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3956 assert(i < getNumIndirectDests() + 1 &&
3957 "Successor # out of range for callbr!");
3958 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
3959 }
3960
3961 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
3962
3963 // Methods for support type inquiry through isa, cast, and dyn_cast:
3964 static bool classof(const Instruction *I) {
3965 return (I->getOpcode() == Instruction::CallBr);
3966 }
3967 static bool classof(const Value *V) {
3968 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3969 }
3970
3971private:
3972 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3973 // method so that subclasses cannot accidentally use it.
3974 template <typename Bitfield>
3975 void setSubclassData(typename Bitfield::Type Value) {
3976 Instruction::setSubclassData<Bitfield>(Value);
3977 }
3978};
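// Editor's note: hedged sketch (not in the original header) of creating a
// callbr, e.g. for "asm goto"; it assumes `AsmCallee` is a FunctionCallee whose
// callee is an InlineAsm, and that the blocks, `Args`, and `InsertPt` exist:
//
//   CallBrInst *CBI = CallBrInst::Create(AsmCallee, FallthroughBB,
//                                        {IndirectBB}, Args, "", InsertPt);
//   // CBI->getDefaultDest() == FallthroughBB, CBI->getIndirectDest(0) == IndirectBB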
3979
3980CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3981 ArrayRef<BasicBlock *> IndirectDests,
3982 ArrayRef<Value *> Args,
3983 ArrayRef<OperandBundleDef> Bundles, AllocInfo AllocInfo,
3984 const Twine &NameStr, InsertPosition InsertBefore)
3985 : CallBase(Ty->getReturnType(), Instruction::CallBr, AllocInfo,
3986 InsertBefore) {
3987 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
3988}
3989
3990//===----------------------------------------------------------------------===//
3991// ResumeInst Class
3992//===----------------------------------------------------------------------===//
3993
3994//===---------------------------------------------------------------------------
3995/// Resume the propagation of an exception.
3996///
3997class ResumeInst : public Instruction {
3998 constexpr static IntrusiveOperandsAllocMarker AllocMarker{1};
3999
4000 ResumeInst(const ResumeInst &RI);
4001
4002 explicit ResumeInst(Value *Exn, InsertPosition InsertBefore = nullptr);
4003
4004protected:
4005 // Note: Instruction needs to be a friend here to call cloneImpl.
4006 friend class Instruction;
4007
4008 ResumeInst *cloneImpl() const;
4009
4010public:
4011 static ResumeInst *Create(Value *Exn, InsertPosition InsertBefore = nullptr) {
4012 return new (AllocMarker) ResumeInst(Exn, InsertBefore);
4013 }
4014
4015 /// Provide fast operand accessors
4016 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4017
4018 /// Convenience accessor.
4019 Value *getValue() const { return Op<0>(); }
4020
4021 unsigned getNumSuccessors() const { return 0; }
4022
4023 // Methods for support type inquiry through isa, cast, and dyn_cast:
4024 static bool classof(const Instruction *I) {
4025 return I->getOpcode() == Instruction::Resume;
4026 }
4027 static bool classof(const Value *V) {
4028 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4029 }
4030
4031private:
4032 BasicBlock *getSuccessor(unsigned idx) const {
4033 llvm_unreachable("ResumeInst has no successors!");
4034 }
4035
4036 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4037 llvm_unreachable("ResumeInst has no successors!");
4038 }
4039};
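// Editor's note: minimal usage sketch (not in the original header), assuming
// `Exn` is the exception aggregate produced by a landingpad and `CleanupBB` is
// the block being terminated:
//
//   ResumeInst::Create(Exn, CleanupBB);   // re-raise the in-flight exception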
4040
4041template <>
4042struct OperandTraits<ResumeInst> :
4043 public FixedNumOperandTraits<ResumeInst, 1> {
4044};
4045
4046DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
4047
4048//===----------------------------------------------------------------------===//
4049// CatchSwitchInst Class
4050//===----------------------------------------------------------------------===//
4051class CatchSwitchInst : public Instruction {
4052 using UnwindDestField = BoolBitfieldElementT<0>;
4053
4054 constexpr static HungOffOperandsAllocMarker AllocMarker{};
4055
4056 /// The number of operands actually allocated. NumOperands is
4057 /// the number actually in use.
4058 unsigned ReservedSpace;
4059
4060 // Operand[0] = Outer scope
4061 // Operand[1] = Unwind block destination
4062 // Operand[n] = BasicBlock to go to on match
4063 CatchSwitchInst(const CatchSwitchInst &CSI);
4064
4065 /// Create a new switch instruction, specifying a
4066 /// default destination. The number of additional handlers can be specified
4067 /// here to make memory allocation more efficient.
4068 /// This constructor can also autoinsert before another instruction.
4069 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4070 unsigned NumHandlers, const Twine &NameStr,
4071 InsertPosition InsertBefore);
4072
4073 // allocate space for exactly zero operands
4074 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
4075
4076 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4077 void growOperands(unsigned Size);
4078
4079protected:
4080 // Note: Instruction needs to be a friend here to call cloneImpl.
4081 friend class Instruction;
4082
4083 CatchSwitchInst *cloneImpl() const;
4084
4085public:
4086 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4087
4088 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4089 unsigned NumHandlers,
4090 const Twine &NameStr = "",
4091 InsertPosition InsertBefore = nullptr) {
4092 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4093 InsertBefore);
4094 }
4095
4096 /// Provide fast operand accessors
4097 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4098
4099 // Accessor Methods for CatchSwitch stmt
4100 Value *getParentPad() const { return getOperand(0); }
4101 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4102
4103 // Accessor Methods for CatchSwitch stmt
4104 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4105 bool unwindsToCaller() const { return !hasUnwindDest(); }
4106 BasicBlock *getUnwindDest() const {
4107 if (hasUnwindDest())
4108 return cast<BasicBlock>(getOperand(1));
4109 return nullptr;
4110 }
4111 void setUnwindDest(BasicBlock *UnwindDest) {
4112 assert(UnwindDest);
4113 assert(hasUnwindDest());
4114 setOperand(1, UnwindDest);
4115 }
4116
4117 /// return the number of 'handlers' in this catchswitch
4118 /// instruction, except the default handler
4119 unsigned getNumHandlers() const {
4120 if (hasUnwindDest())
4121 return getNumOperands() - 2;
4122 return getNumOperands() - 1;
4123 }
4124
4125private:
4126 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4127 static const BasicBlock *handler_helper(const Value *V) {
4128 return cast<BasicBlock>(V);
4129 }
4130
4131public:
4132 using DerefFnTy = BasicBlock *(*)(Value *);
4133 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4134 using handler_range = iterator_range<handler_iterator>;
4135 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4136 using const_handler_iterator =
4137 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4138 using const_handler_range = iterator_range<const_handler_iterator>;
4139
4140 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4141 handler_iterator handler_begin() {
4142 op_iterator It = op_begin() + 1;
4143 if (hasUnwindDest())
4144 ++It;
4145 return handler_iterator(It, DerefFnTy(handler_helper));
4146 }
4147
4148 /// Returns an iterator that points to the first handler in the
4149 /// CatchSwitchInst.
4150 const_handler_iterator handler_begin() const {
4151 const_op_iterator It = op_begin() + 1;
4152 if (hasUnwindDest())
4153 ++It;
4154 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4155 }
4156
4157 /// Returns a read/write iterator that points one past the last
4158 /// handler in the CatchSwitchInst.
4159 handler_iterator handler_end() {
4160 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4161 }
4162
4163 /// Returns a read-only iterator that points one past the last handler in
4164 /// the CatchSwitchInst.
4165 const_handler_iterator handler_end() const {
4166 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4167 }
4168
4169 /// iteration adapter for range-for loops.
4170 handler_range handlers() {
4171 return make_range(handler_begin(), handler_end());
4172 }
4173
4174 /// iteration adapter for range-for loops.
4175 const_handler_range handlers() const {
4176 return make_range(handler_begin(), handler_end());
4177 }
4178
4179 /// Add an entry to the switch instruction...
4180 /// Note:
4181 /// This action invalidates handler_end(). Old handler_end() iterator will
4182 /// point to the added handler.
4183 void addHandler(BasicBlock *Dest);
4184
4185 void removeHandler(handler_iterator HI);
4186
4187 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4188 BasicBlock *getSuccessor(unsigned Idx) const {
4189 assert(Idx < getNumSuccessors() &&
4190 "Successor # out of range for catchswitch!");
4191 return cast<BasicBlock>(getOperand(Idx + 1));
4192 }
4193 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4194 assert(Idx < getNumSuccessors() &&
4195 "Successor # out of range for catchswitch!");
4196 setOperand(Idx + 1, NewSucc);
4197 }
4198
4199 // Methods for support type inquiry through isa, cast, and dyn_cast:
4200 static bool classof(const Instruction *I) {
4201 return I->getOpcode() == Instruction::CatchSwitch;
4202 }
4203 static bool classof(const Value *V) {
4204 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4205 }
4206};
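// Editor's note: hedged sketch (not in the original header) of building a
// catchswitch that unwinds to the caller; assumes `Ctx`, a handler block
// `HandlerBB` (which must begin with a catchpad), and an insertion point:
//
//   auto *CS = CatchSwitchInst::Create(ConstantTokenNone::get(Ctx),
//                                      /*UnwindDest=*/nullptr,
//                                      /*NumHandlers=*/1, "cs", InsertPt);
//   CS->addHandler(HandlerBB);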
4207
4208template <>
4209struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits {};
4210
4211DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
4212
4213//===----------------------------------------------------------------------===//
4214// CleanupPadInst Class
4215//===----------------------------------------------------------------------===//
4216class CleanupPadInst : public FuncletPadInst {
4217private:
4218 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4219 AllocInfo AllocInfo, const Twine &NameStr,
4220 InsertPosition InsertBefore)
4221 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, AllocInfo,
4222 NameStr, InsertBefore) {}
4223
4224public:
4225 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = {},
4226 const Twine &NameStr = "",
4227 InsertPosition InsertBefore = nullptr) {
4228 IntrusiveOperandsAllocMarker AllocMarker{unsigned(1 + Args.size())};
4229 return new (AllocMarker)
4230 CleanupPadInst(ParentPad, Args, AllocMarker, NameStr, InsertBefore);
4231 }
4232
4233 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4234 static bool classof(const Instruction *I) {
4235 return I->getOpcode() == Instruction::CleanupPad;
4236 }
4237 static bool classof(const Value *V) {
4238 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4239 }
4240};
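// Editor's note: minimal sketch (not in the original header); a function-level
// cleanuppad uses the "none" token as its parent pad. Assumes `Ctx` and
// `InsertPt`:
//
//   auto *CP = CleanupPadInst::Create(ConstantTokenNone::get(Ctx), /*Args=*/{},
//                                     "cleanup", InsertPt);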
4241
4242//===----------------------------------------------------------------------===//
4243// CatchPadInst Class
4244//===----------------------------------------------------------------------===//
4245class CatchPadInst : public FuncletPadInst {
4246private:
4247 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4248 AllocInfo AllocInfo, const Twine &NameStr,
4249 InsertPosition InsertBefore)
4250 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, AllocInfo,
4251 NameStr, InsertBefore) {}
4252
4253public:
4254 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4255 const Twine &NameStr = "",
4256 InsertPosition InsertBefore = nullptr) {
4257 IntrusiveOperandsAllocMarker AllocMarker{unsigned(1 + Args.size())};
4258 return new (AllocMarker)
4259 CatchPadInst(CatchSwitch, Args, AllocMarker, NameStr, InsertBefore);
4260 }
4261
4262 /// Convenience accessors
4263 CatchSwitchInst *getCatchSwitch() const {
4264 return cast<CatchSwitchInst>(Op<-1>());
4265 }
4266 void setCatchSwitch(Value *CatchSwitch) {
4267 assert(CatchSwitch);
4268 Op<-1>() = CatchSwitch;
4269 }
4270
4271 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4272 static bool classof(const Instruction *I) {
4273 return I->getOpcode() == Instruction::CatchPad;
4274 }
4275 static bool classof(const Value *V) {
4276 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4277 }
4278};
4279
4280//===----------------------------------------------------------------------===//
4281// CatchReturnInst Class
4282//===----------------------------------------------------------------------===//
4283
4284class CatchReturnInst : public Instruction {
4285 constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};
4286
4288 CatchReturnInst(Value *CatchPad, BasicBlock *BB, InsertPosition InsertBefore);
4289
4290 void init(Value *CatchPad, BasicBlock *BB);
4291
4292protected:
4293 // Note: Instruction needs to be a friend here to call cloneImpl.
4294 friend class Instruction;
4295
4296 CatchReturnInst *cloneImpl() const;
4297
4298public:
4299 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4300 InsertPosition InsertBefore = nullptr) {
4301 assert(CatchPad);
4302 assert(BB);
4303 return new (AllocMarker) CatchReturnInst(CatchPad, BB, InsertBefore);
4304 }
4305
4306 /// Provide fast operand accessors
4307 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4308
4309 /// Convenience accessors.
4310 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4311 void setCatchPad(CatchPadInst *CatchPad) {
4312 assert(CatchPad);
4313 Op<0>() = CatchPad;
4314 }
4315
4316 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4317 void setSuccessor(BasicBlock *NewSucc) {
4318 assert(NewSucc);
4319 Op<1>() = NewSucc;
4320 }
4321 unsigned getNumSuccessors() const { return 1; }
4322
4323 /// Get the parentPad of this catchret's catchpad's catchswitch.
4324 /// The successor block is implicitly a member of this funclet.
4325 Value *getCatchSwitchParentPad() const {
4326 return getCatchPad()->getCatchSwitch()->getParentPad();
4327 }
4328
4329 // Methods for support type inquiry through isa, cast, and dyn_cast:
4330 static bool classof(const Instruction *I) {
4331 return (I->getOpcode() == Instruction::CatchRet);
4332 }
4333 static bool classof(const Value *V) {
4334 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4335 }
4336
4337private:
4338 BasicBlock *getSuccessor(unsigned Idx) const {
4339 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4340 return getSuccessor();
4341 }
4342
4343 void setSuccessor(unsigned Idx, BasicBlock *B) {
4344 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4345 setSuccessor(B);
4346 }
4347};
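// Editor's note: minimal sketch (not in the original header), assuming `CPI`
// is the CatchPadInst for this handler, `ContinueBB` is where control resumes
// after the catch, and `InsertPt` is a valid insertion position:
//
//   CatchReturnInst::Create(CPI, ContinueBB, InsertPt);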
4348
4349template <>
4350struct OperandTraits<CatchReturnInst>
4351 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4352
4354
4355//===----------------------------------------------------------------------===//
4356// CleanupReturnInst Class
4357//===----------------------------------------------------------------------===//
4358
4360 using UnwindDestField = BoolBitfieldElementT<0>;
4361
4362private:
4364 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
4365 AllocInfo AllocInfo, InsertPosition InsertBefore = nullptr);
4366
4367 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4368
4369protected:
4370 // Note: Instruction needs to be a friend here to call cloneImpl.
4371 friend class Instruction;
4372
4373 CleanupReturnInst *cloneImpl() const;
4374
4375public:
4376 static CleanupReturnInst *Create(Value *CleanupPad,
4377 BasicBlock *UnwindBB = nullptr,
4378 InsertPosition InsertBefore = nullptr) {
4379 assert(CleanupPad);
4380 unsigned Values = 1;
4381 if (UnwindBB)
4382 ++Values;
4383 IntrusiveOperandsAllocMarker AllocMarker{Values};
4384 return new (AllocMarker)
4385 CleanupReturnInst(CleanupPad, UnwindBB, AllocMarker, InsertBefore);
4386 }
4387
4388 /// Provide fast operand accessors
4389 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4390
4391 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4392 bool unwindsToCaller() const { return !hasUnwindDest(); }
4393
4394 /// Convenience accessor.
4395 CleanupPadInst *getCleanupPad() const {
4396 return cast<CleanupPadInst>(Op<0>());
4397 }
4398 void setCleanupPad(CleanupPadInst *CleanupPad) {
4399 assert(CleanupPad);
4400 Op<0>() = CleanupPad;
4401 }
4402
4403 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4404
4405 BasicBlock *getUnwindDest() const {
4406 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4407 }
4408 void setUnwindDest(BasicBlock *NewDest) {
4409 assert(NewDest);
4410 assert(hasUnwindDest());
4411 Op<1>() = NewDest;
4412 }
4413
4414 // Methods for support type inquiry through isa, cast, and dyn_cast:
4415 static bool classof(const Instruction *I) {
4416 return (I->getOpcode() == Instruction::CleanupRet);
4417 }
4418 static bool classof(const Value *V) {
4419 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4420 }
4421
4422private:
4423 BasicBlock *getSuccessor(unsigned Idx) const {
4424 assert(Idx == 0);
4425 return getUnwindDest();
4426 }
4427
4428 void setSuccessor(unsigned Idx, BasicBlock *B) {
4429 assert(Idx == 0);
4430 setUnwindDest(B);
4431 }
4432
4433 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4434 // method so that subclasses cannot accidentally use it.
4435 template <typename Bitfield>
4436 void setSubclassData(typename Bitfield::Type Value) {
4437 Instruction::setSubclassData<Bitfield>(Value);
4438 }
4439};
4440
4441template <>
4442struct OperandTraits<CleanupReturnInst>
4443 : public VariadicOperandTraits<CleanupReturnInst> {};
4444
4445DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
4446
4447//===----------------------------------------------------------------------===//
4448// UnreachableInst Class
4449//===----------------------------------------------------------------------===//
4450
4451//===---------------------------------------------------------------------------
4452/// This function has undefined behavior. In particular, the
4453/// presence of this instruction indicates some higher level knowledge that the
4454/// end of the block cannot be reached.
4455///
4456class UnreachableInst : public Instruction {
4457 constexpr static IntrusiveOperandsAllocMarker AllocMarker{0};
4458
4459protected:
4460 // Note: Instruction needs to be a friend here to call cloneImpl.
4461 friend class Instruction;
4462
4463 UnreachableInst *cloneImpl() const;
4464
4465public:
4466 explicit UnreachableInst(LLVMContext &C,
4467 InsertPosition InsertBefore = nullptr);
4468
4469 // allocate space for exactly zero operands
4470 void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
4471 void operator delete(void *Ptr) { User::operator delete(Ptr); }
4472
4473 unsigned getNumSuccessors() const { return 0; }
4474
4475 // Methods for support type inquiry through isa, cast, and dyn_cast:
4476 static bool classof(const Instruction *I) {
4477 return I->getOpcode() == Instruction::Unreachable;
4478 }
4479 static bool classof(const Value *V) {
4480 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4481 }
4482
4483private:
4484 BasicBlock *getSuccessor(unsigned idx) const {
4485 llvm_unreachable("UnreachableInst has no successors!");
4486 }
4487
4488 void setSuccessor(unsigned idx, BasicBlock *B) {
4489 llvm_unreachable("UnreachableInst has no successors!");
4490 }
4491};
4492
4493//===----------------------------------------------------------------------===//
4494// TruncInst Class
4495//===----------------------------------------------------------------------===//
4496
4497/// This class represents a truncation of integer types.
4498class TruncInst : public CastInst {
4499protected:
4500 // Note: Instruction needs to be a friend here to call cloneImpl.
4501 friend class Instruction;
4502
4503 /// Clone an identical TruncInst
4504 TruncInst *cloneImpl() const;
4505
4506public:
4507 enum { AnyWrap = 0, NoUnsignedWrap = (1 << 0), NoSignedWrap = (1 << 1) };
4508
4509 /// Constructor with insert-before-instruction semantics
4510 TruncInst(Value *S, ///< The value to be truncated
4511 Type *Ty, ///< The (smaller) type to truncate to
4512 const Twine &NameStr = "", ///< A name for the new instruction
4513 InsertPosition InsertBefore =
4514 nullptr ///< Where to insert the new instruction
4515 );
4516
4517 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4518 static bool classof(const Instruction *I) {
4519 return I->getOpcode() == Trunc;
4520 }
4521 static bool classof(const Value *V) {
4522 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4523 }
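 // Editor's note: hedged sketch (not in the original header) of creating a
 // trunc and tagging it with the no-wrap flags defined by the setters below;
 // assumes `Wide` is an i32 Value*, `Ctx` an LLVMContext, `InsertPt` a valid
 // insertion point, and that the value is known to fit in 8 bits:
 //
 //   auto *TI = new TruncInst(Wide, Type::getInt8Ty(Ctx), "lo8", InsertPt);
 //   TI->setHasNoUnsignedWrap(true);  // truncation provably drops no set bits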
4524
4525 void setHasNoUnsignedWrap(bool B) {
4526 SubclassOptionalData =
4527 (SubclassOptionalData & ~NoUnsignedWrap) | (B * NoUnsignedWrap);
4528 }
4529 void setHasNoSignedWrap(bool B) {
4530 SubclassOptionalData =
4531 (SubclassOptionalData & ~NoSignedWrap) | (B * NoSignedWrap);
4532 }
4533
4534 /// Test whether this operation is known to never
4535 /// undergo unsigned overflow, aka the nuw property.
4536 bool hasNoUnsignedWrap() const {
4537 return (SubclassOptionalData & NoUnsignedWrap) != 0;
4538 }
4539
4540 /// Test whether this operation is known to never
4541 /// undergo signed overflow, aka the nsw property.
4542 bool hasNoSignedWrap() const {
4543 return (SubclassOptionalData & NoSignedWrap) != 0;
4544 }
4545
4546 /// Returns the no-wrap kind of the operation.
4547 unsigned getNoWrapKind() const {
4548 unsigned NoWrapKind = 0;
4549 if (hasNoUnsignedWrap())
4550 NoWrapKind |= NoUnsignedWrap;
4551
4552 if (hasNoSignedWrap())
4553 NoWrapKind |= NoSignedWrap;
4554
4555 return NoWrapKind;
4556 }
4557};
4558
4559//===----------------------------------------------------------------------===//
4560// ZExtInst Class
4561//===----------------------------------------------------------------------===//
4562
4563/// This class represents zero extension of integer types.
4564class ZExtInst : public CastInst {
4565protected:
4566 // Note: Instruction needs to be a friend here to call cloneImpl.
4567 friend class Instruction;
4568
4569 /// Clone an identical ZExtInst
4570 ZExtInst *cloneImpl() const;
4571
4572public:
4573 /// Constructor with insert-before-instruction semantics
4574 ZExtInst(Value *S, ///< The value to be zero extended
4575 Type *Ty, ///< The type to zero extend to
4576 const Twine &NameStr = "", ///< A name for the new instruction
4577 InsertPosition InsertBefore =
4578 nullptr ///< Where to insert the new instruction
4579 );
4580
4581 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4582 static bool classof(const Instruction *I) {
4583 return I->getOpcode() == ZExt;
4584 }
4585 static bool classof(const Value *V) {
4586 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4587 }
4588};
4589
4590//===----------------------------------------------------------------------===//
4591// SExtInst Class
4592//===----------------------------------------------------------------------===//
4593
4594/// This class represents a sign extension of integer types.
4595class SExtInst : public CastInst {
4596protected:
4597 // Note: Instruction needs to be a friend here to call cloneImpl.
4598 friend class Instruction;
4599
4600 /// Clone an identical SExtInst
4601 SExtInst *cloneImpl() const;
4602
4603public:
4604 /// Constructor with insert-before-instruction semantics
4605 SExtInst(Value *S, ///< The value to be sign extended
4606 Type *Ty, ///< The type to sign extend to
4607 const Twine &NameStr = "", ///< A name for the new instruction
4608 InsertPosition InsertBefore =
4609 nullptr ///< Where to insert the new instruction
4610 );
4611
4612 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4613 static bool classof(const Instruction *I) {
4614 return I->getOpcode() == SExt;
4615 }
4616 static bool classof(const Value *V) {
4617 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4618 }
4619};
4620
4621//===----------------------------------------------------------------------===//
4622// FPTruncInst Class
4623//===----------------------------------------------------------------------===//
4624
4625/// This class represents a truncation of floating point types.
4626class FPTruncInst : public CastInst {
4627protected:
4628 // Note: Instruction needs to be a friend here to call cloneImpl.
4629 friend class Instruction;
4630
4631 /// Clone an identical FPTruncInst
4632 FPTruncInst *cloneImpl() const;
4633
4634public: /// Constructor with insert-before-instruction semantics
4635 FPTruncInst(Value *S, ///< The value to be truncated
4636 Type *Ty, ///< The type to truncate to
4637 const Twine &NameStr = "", ///< A name for the new instruction
4638 InsertPosition InsertBefore =
4639 nullptr ///< Where to insert the new instruction
4640 );
4641
4642 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4643 static bool classof(const Instruction *I) {
4644 return I->getOpcode() == FPTrunc;
4645 }
4646 static bool classof(const Value *V) {
4647 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4648 }
4649};
4650
4651//===----------------------------------------------------------------------===//
4652// FPExtInst Class
4653//===----------------------------------------------------------------------===//
4654
4655/// This class represents an extension of floating point types.
4656class FPExtInst : public CastInst {
4657protected:
4658 // Note: Instruction needs to be a friend here to call cloneImpl.
4659 friend class Instruction;
4660
4661 /// Clone an identical FPExtInst
4662 FPExtInst *cloneImpl() const;
4663
4664public:
4665 /// Constructor with insert-before-instruction semantics
4666 FPExtInst(Value *S, ///< The value to be extended
4667 Type *Ty, ///< The type to extend to
4668 const Twine &NameStr = "", ///< A name for the new instruction
4669 InsertPosition InsertBefore =
4670 nullptr ///< Where to insert the new instruction
4671 );
4672
4673 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4674 static bool classof(const Instruction *I) {
4675 return I->getOpcode() == FPExt;
4676 }
4677 static bool classof(const Value *V) {
4678 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4679 }
4680};
4681
4682//===----------------------------------------------------------------------===//
4683// UIToFPInst Class
4684//===----------------------------------------------------------------------===//
4685
4686/// This class represents a cast unsigned integer to floating point.
4687class UIToFPInst : public CastInst {
4688protected:
4689 // Note: Instruction needs to be a friend here to call cloneImpl.
4690 friend class Instruction;
4691
4692 /// Clone an identical UIToFPInst
4693 UIToFPInst *cloneImpl() const;
4694
4695public:
4696 /// Constructor with insert-before-instruction semantics
4697 UIToFPInst(Value *S, ///< The value to be converted
4698 Type *Ty, ///< The type to convert to
4699 const Twine &NameStr = "", ///< A name for the new instruction
4700 InsertPosition InsertBefore =
4701 nullptr ///< Where to insert the new instruction
4702 );
4703
4704 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4705 static bool classof(const Instruction *I) {
4706 return I->getOpcode() == UIToFP;
4707 }
4708 static bool classof(const Value *V) {
4709 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4710 }
4711};
4712
4713//===----------------------------------------------------------------------===//
4714// SIToFPInst Class
4715//===----------------------------------------------------------------------===//
4716
4717/// This class represents a cast from signed integer to floating point.
4718class SIToFPInst : public CastInst {
4719protected:
4720 // Note: Instruction needs to be a friend here to call cloneImpl.
4721 friend class Instruction;
4722
4723 /// Clone an identical SIToFPInst
4724 SIToFPInst *cloneImpl() const;
4725
4726public:
4727 /// Constructor with insert-before-instruction semantics
4728 SIToFPInst(Value *S, ///< The value to be converted
4729 Type *Ty, ///< The type to convert to
4730 const Twine &NameStr = "", ///< A name for the new instruction
4731 InsertPosition InsertBefore =
4732 nullptr ///< Where to insert the new instruction
4733 );
4734
4735 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4736 static bool classof(const Instruction *I) {
4737 return I->getOpcode() == SIToFP;
4738 }
4739 static bool classof(const Value *V) {
4740 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4741 }
4742};
4743
4744//===----------------------------------------------------------------------===//
4745// FPToUIInst Class
4746//===----------------------------------------------------------------------===//
4747
4748/// This class represents a cast from floating point to unsigned integer
4749class FPToUIInst : public CastInst {
4750protected:
4751 // Note: Instruction needs to be a friend here to call cloneImpl.
4752 friend class Instruction;
4753
4754 /// Clone an identical FPToUIInst
4755 FPToUIInst *cloneImpl() const;
4756
4757public:
4758 /// Constructor with insert-before-instruction semantics
4759 FPToUIInst(Value *S, ///< The value to be converted
4760 Type *Ty, ///< The type to convert to
4761 const Twine &NameStr = "", ///< A name for the new instruction
4762 InsertPosition InsertBefore =
4763 nullptr ///< Where to insert the new instruction
4764 );
4765
4766 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4767 static bool classof(const Instruction *I) {
4768 return I->getOpcode() == FPToUI;
4769 }
4770 static bool classof(const Value *V) {
4771 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4772 }
4773};
4774
4775//===----------------------------------------------------------------------===//
4776// FPToSIInst Class
4777//===----------------------------------------------------------------------===//
4778
4779/// This class represents a cast from floating point to signed integer.
4780class FPToSIInst : public CastInst {
4781protected:
4782 // Note: Instruction needs to be a friend here to call cloneImpl.
4783 friend class Instruction;
4784
4785 /// Clone an identical FPToSIInst
4786 FPToSIInst *cloneImpl() const;
4787
4788public:
4789 /// Constructor with insert-before-instruction semantics
4790 FPToSIInst(Value *S, ///< The value to be converted
4791 Type *Ty, ///< The type to convert to
4792 const Twine &NameStr = "", ///< A name for the new instruction
4793 InsertPosition InsertBefore =
4794 nullptr ///< Where to insert the new instruction
4795 );
4796
4797 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4798 static bool classof(const Instruction *I) {
4799 return I->getOpcode() == FPToSI;
4800 }
4801 static bool classof(const Value *V) {
4802 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4803 }
4804};
4805
4806//===----------------------------------------------------------------------===//
4807// IntToPtrInst Class
4808//===----------------------------------------------------------------------===//
4809
4810/// This class represents a cast from an integer to a pointer.
4811class IntToPtrInst : public CastInst {
4812public:
4813 // Note: Instruction needs to be a friend here to call cloneImpl.
4814 friend class Instruction;
4815
4816 /// Constructor with insert-before-instruction semantics
4817 IntToPtrInst(Value *S, ///< The value to be converted
4818 Type *Ty, ///< The type to convert to
4819 const Twine &NameStr = "", ///< A name for the new instruction
4820 InsertPosition InsertBefore =
4821 nullptr ///< Where to insert the new instruction
4822 );
4823
4824 /// Clone an identical IntToPtrInst.
4825 IntToPtrInst *cloneImpl() const;
4826
4827 /// Returns the address space of this instruction's pointer type.
4828 unsigned getAddressSpace() const {
4829 return getType()->getPointerAddressSpace();
4830 }
4831
4832 // Methods for support type inquiry through isa, cast, and dyn_cast:
4833 static bool classof(const Instruction *I) {
4834 return I->getOpcode() == IntToPtr;
4835 }
4836 static bool classof(const Value *V) {
4837 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4838 }
4839};
4840
4841//===----------------------------------------------------------------------===//
4842// PtrToIntInst Class
4843//===----------------------------------------------------------------------===//
4844
4845/// This class represents a cast from a pointer to an integer.
4846class PtrToIntInst : public CastInst {
4847protected:
4848 // Note: Instruction needs to be a friend here to call cloneImpl.
4849 friend class Instruction;
4850
4851 /// Clone an identical PtrToIntInst.
4852 PtrToIntInst *cloneImpl() const;
4853
4854public:
4855 /// Constructor with insert-before-instruction semantics
4856 PtrToIntInst(Value *S, ///< The value to be converted
4857 Type *Ty, ///< The type to convert to
4858 const Twine &NameStr = "", ///< A name for the new instruction
4859 InsertPosition InsertBefore =
4860 nullptr ///< Where to insert the new instruction
4861 );
4862
4863 /// Gets the pointer operand.
4864 Value *getPointerOperand() { return getOperand(0); }
4865 /// Gets the pointer operand.
4866 const Value *getPointerOperand() const { return getOperand(0); }
4867 /// Gets the operand index of the pointer operand.
4868 static unsigned getPointerOperandIndex() { return 0U; }
4869
4870 /// Returns the address space of the pointer operand.
4871 unsigned getPointerAddressSpace() const {
4872 return getPointerOperand()->getType()->getPointerAddressSpace();
4873 }
4874
4875 // Methods for support type inquiry through isa, cast, and dyn_cast:
4876 static bool classof(const Instruction *I) {
4877 return I->getOpcode() == PtrToInt;
4878 }
4879 static bool classof(const Value *V) {
4880 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4881 }
4882};
4883
4884//===----------------------------------------------------------------------===//
4885// BitCastInst Class
4886//===----------------------------------------------------------------------===//
4887
4888/// This class represents a no-op cast from one type to another.
4889class BitCastInst : public CastInst {
4890protected:
4891 // Note: Instruction needs to be a friend here to call cloneImpl.
4892 friend class Instruction;
4893
4894 /// Clone an identical BitCastInst.
4895 BitCastInst *cloneImpl() const;
4896
4897public:
4898 /// Constructor with insert-before-instruction semantics
4899 BitCastInst(Value *S, ///< The value to be casted
4900 Type *Ty, ///< The type to casted to
4901 const Twine &NameStr = "", ///< A name for the new instruction
4902 InsertPosition InsertBefore =
4903 nullptr ///< Where to insert the new instruction
4904 );
4905
4906 // Methods for support type inquiry through isa, cast, and dyn_cast:
4907 static bool classof(const Instruction *I) {
4908 return I->getOpcode() == BitCast;
4909 }
4910 static bool classof(const Value *V) {
4911 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4912 }
4913};
4914
4915//===----------------------------------------------------------------------===//
4916// AddrSpaceCastInst Class
4917//===----------------------------------------------------------------------===//
4918
4919/// This class represents a conversion between pointers from one address space
4920/// to another.
4921class AddrSpaceCastInst : public CastInst {
4922protected:
4923 // Note: Instruction needs to be a friend here to call cloneImpl.
4924 friend class Instruction;
4925
4926 /// Clone an identical AddrSpaceCastInst.
4927 AddrSpaceCastInst *cloneImpl() const;
4928
4929public:
4930 /// Constructor with insert-before-instruction semantics
4931 AddrSpaceCastInst(
4932 Value *S, ///< The value to be casted
4933 Type *Ty, ///< The type to casted to
4934 const Twine &NameStr = "", ///< A name for the new instruction
4935 InsertPosition InsertBefore =
4936 nullptr ///< Where to insert the new instruction
4937 );
4938
4939 // Methods for support type inquiry through isa, cast, and dyn_cast:
4940 static bool classof(const Instruction *I) {
4941 return I->getOpcode() == AddrSpaceCast;
4942 }
4943 static bool classof(const Value *V) {
4944 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4945 }
4946
4947 /// Gets the pointer operand.
4948 Value *getPointerOperand() {
4949 return getOperand(0);
4950 }
4951
4952 /// Gets the pointer operand.
4953 const Value *getPointerOperand() const {
4954 return getOperand(0);
4955 }
4956
4957 /// Gets the operand index of the pointer operand.
4958 static unsigned getPointerOperandIndex() {
4959 return 0U;
4960 }
4961
4962 /// Returns the address space of the pointer operand.
4963 unsigned getSrcAddressSpace() const {
4964 return getPointerOperand()->getType()->getPointerAddressSpace();
4965 }
4966
4967 /// Returns the address space of the result.
4968 unsigned getDestAddressSpace() const {
4969 return getType()->getPointerAddressSpace();
4970 }
4971};
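// Illustrative sketch (editorial addition, not part of Instructions.h):
// using the address-space accessors above to spot casts into address
// space 0. The meaning of address-space numbers is target specific; the
// helper name and the choice of 0 are assumptions for the example.
#include "llvm/IR/Instructions.h"
using namespace llvm;

static bool castsIntoAddrSpaceZero(const Value *V) {
  if (auto *ASC = dyn_cast<AddrSpaceCastInst>(V))
    return ASC->getSrcAddressSpace() != 0 && ASC->getDestAddressSpace() == 0;
  return false;
}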
4972
4973//===----------------------------------------------------------------------===//
4974// Helper functions
4975//===----------------------------------------------------------------------===//
4976
4977/// A helper function that returns the pointer operand of a load or store
4978/// instruction. Returns nullptr if not load or store.
4979inline const Value *getLoadStorePointerOperand(const Value *V) {
4980 if (auto *Load = dyn_cast<LoadInst>(V))
4981 return Load->getPointerOperand();
4982 if (auto *Store = dyn_cast<StoreInst>(V))
4983 return Store->getPointerOperand();
4984 return nullptr;
4985}
4986 inline Value *getLoadStorePointerOperand(Value *V) {
4987 return const_cast<Value *>(
4988 getLoadStorePointerOperand(static_cast<const Value *>(V)));
4989}
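// Illustrative sketch (editorial addition, not part of Instructions.h):
// collecting the addresses touched by the loads and stores of a block with
// getLoadStorePointerOperand. `BB` is an assumed existing basic block.
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static SmallVector<const Value *, 8>
collectAccessedPointers(const BasicBlock &BB) {
  SmallVector<const Value *, 8> Ptrs;
  for (const Instruction &I : BB)
    if (const Value *Ptr = getLoadStorePointerOperand(&I))
      Ptrs.push_back(Ptr);
  return Ptrs;
}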
4990
4991/// A helper function that returns the pointer operand of a load, store
4992/// or GEP instruction. Returns nullptr if not load, store, or GEP.
4993inline const Value *getPointerOperand(const Value *V) {
4994 if (auto *Ptr = getLoadStorePointerOperand(V))
4995 return Ptr;
4996 if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
4997 return Gep->getPointerOperand();
4998 return nullptr;
4999}
5000 inline Value *getPointerOperand(Value *V) {
5001 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
5002}
5003
5004 /// A helper function that returns the alignment of a load or store instruction.
5005 inline Align getLoadStoreAlignment(const Value *I) {
5006 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5007 "Expected Load or Store instruction");
5008 if (auto *LI = dyn_cast<LoadInst>(I))
5009 return LI->getAlign();
5010 return cast<StoreInst>(I)->getAlign();
5011}
5012
5013 /// A helper function that sets the alignment of a load or store instruction.
5014inline void setLoadStoreAlignment(Value *I, Align NewAlign) {
5015 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5016 "Expected Load or Store instruction");
5017 if (auto *LI = dyn_cast<LoadInst>(I))
5018 LI->setAlignment(NewAlign);
5019 else
5020 cast<StoreInst>(I)->setAlignment(NewAlign);
5021}
5022
5023 /// A helper function that returns the address space of the pointer operand of
5024 /// a load or store instruction.
5025inline unsigned getLoadStoreAddressSpace(const Value *I) {
5026 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5027 "Expected Load or Store instruction");
5028 if (auto *LI = dyn_cast<LoadInst>(I))
5029 return LI->getPointerAddressSpace();
5030 return cast<StoreInst>(I)->getPointerAddressSpace();
5031}
5032
5033/// A helper function that returns the type of a load or store instruction.
5034inline Type *getLoadStoreType(const Value *I) {
5035 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5036 "Expected Load or Store instruction");
5037 if (auto *LI = dyn_cast<LoadInst>(I))
5038 return LI->getType();
5039 return cast<StoreInst>(I)->getValueOperand()->getType();
5040}
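// Illustrative sketch (editorial addition, not part of Instructions.h):
// combining the helpers above to raise a memory access to a minimum
// alignment. `I` must be a LoadInst or StoreInst (the helpers assert it);
// the function name and the alignment policy are hypothetical.
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;

static void ensureMinimumAlignment(Instruction *I, Align MinAlign) {
  if (getLoadStoreAlignment(I) < MinAlign)
    setLoadStoreAlignment(I, MinAlign);
}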
5041
5042/// A helper function that returns an atomic operation's sync scope; returns
5043/// std::nullopt if it is not an atomic operation.
5044inline std::optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) {
5045 if (!I->isAtomic())
5046 return std::nullopt;
5047 if (auto *AI = dyn_cast<LoadInst>(I))
5048 return AI->getSyncScopeID();
5049 if (auto *AI = dyn_cast<StoreInst>(I))
5050 return AI->getSyncScopeID();
5051 if (auto *AI = dyn_cast<FenceInst>(I))
5052 return AI->getSyncScopeID();
5053 if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
5054 return AI->getSyncScopeID();
5055 if (auto *AI = dyn_cast<AtomicRMWInst>(I))
5056 return AI->getSyncScopeID();
5057 llvm_unreachable("unhandled atomic operation");
5058}
5059
5060/// A helper function that sets an atomic operation's sync scope.
5061 inline void setAtomicSyncScopeID(Instruction *I, SyncScope::ID SSID) {
5062 assert(I->isAtomic());
5063 if (auto *AI = dyn_cast<LoadInst>(I))
5064 AI->setSyncScopeID(SSID);
5065 else if (auto *AI = dyn_cast<StoreInst>(I))
5066 AI->setSyncScopeID(SSID);
5067 else if (auto *AI = dyn_cast<FenceInst>(I))
5068 AI->setSyncScopeID(SSID);
5069 else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
5070 AI->setSyncScopeID(SSID);
5071 else if (auto *AI = dyn_cast<AtomicRMWInst>(I))
5072 AI->setSyncScopeID(SSID);
5073 else
5074 llvm_unreachable("unhandled atomic operation");
5075}
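// Illustrative sketch (editorial addition, not part of Instructions.h):
// narrowing an atomic operation to single-thread scope once it is known to
// act on thread-local memory, using the two helpers above. The function
// name and the surrounding analysis are hypothetical.
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

static void narrowToSingleThread(Instruction *I) {
  if (getAtomicSyncScopeID(I)) // std::nullopt for non-atomic instructions
    setAtomicSyncScopeID(I, SyncScope::SingleThread);
}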
5076
5077//===----------------------------------------------------------------------===//
5078// FreezeInst Class
5079//===----------------------------------------------------------------------===//
5080
5081 /// This class represents a freeze instruction, which returns an arbitrary
5082 /// but fixed concrete value if its operand is either a poison or an undef value.
5083 class FreezeInst : public UnaryInstruction {
5084 protected:
5085 // Note: Instruction needs to be a friend here to call cloneImpl.
5086 friend class Instruction;
5087
5088 /// Clone an identical FreezeInst
5089 FreezeInst *cloneImpl() const;
5090
5091public:
5092 explicit FreezeInst(Value *S, const Twine &NameStr = "",
5093 InsertPosition InsertBefore = nullptr);
5094
5095 // Methods for support type inquiry through isa, cast, and dyn_cast:
5096 static inline bool classof(const Instruction *I) {
5097 return I->getOpcode() == Freeze;
5098 }
5099 static inline bool classof(const Value *V) {
5100 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5101 }
5102};
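// Illustrative sketch (editorial addition, not part of Instructions.h):
// freezing a possibly-poison condition before branching on it, so later
// uses all observe one fixed value. `Cond` and `InsertPt` are assumed to
// exist in the caller.
#include "llvm/IR/Instructions.h"
using namespace llvm;

static Value *freezeCondition(Value *Cond, Instruction *InsertPt) {
  return new FreezeInst(Cond, Cond->getName() + ".fr", InsertPt);
}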
5103
5104} // end namespace llvm
5105
5106#endif // LLVM_IR_INSTRUCTIONS_H
static const LLT S1
static bool isReverseMask(ArrayRef< int > M, EVT VT)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
always inline
Atomic ordering constants.
static const Function * getParent(const Value *V)
This file implements methods to test, set and extract typed bits from packed unsigned integers.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
RelocType Type
Definition: COFFYAML.cpp:410
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Align
std::string Name
uint32_t Index
uint64_t Size
Hexagon Common GEP
hexagon gen pred
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file implements a map that provides insertion order iteration.
uint64_t IntrinsicInst * II
#define DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CLASS, VALUECLASS)
Macro for generating out-of-class operand accessor definitions.
#define P(N)
PowerPC Reduce CR logical Operation
StandardInstrumentations SI(Mod->getContext(), Debug, VerifyEach)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:78
This class represents a conversion between pointers from one address space to another.
const Value * getPointerOperand() const
Gets the pointer operand.
AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
Value * getPointerOperand()
Gets the pointer operand.
static bool classof(const Instruction *I)
static bool classof(const Value *V)
unsigned getSrcAddressSpace() const
Returns the address space of the pointer operand.
unsigned getDestAddressSpace() const
Returns the address space of the result.
static unsigned getPointerOperandIndex()
Gets the operand index of the pointer operand.
an instruction to allocate memory on the stack
Definition: Instructions.h:63
std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
static bool classof(const Value *V)
Definition: Instructions.h:157
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:149
void setSwiftError(bool V)
Specify whether this alloca is used to represent a swifterror.
Definition: Instructions.h:151
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:124
void setAllocatedType(Type *Ty)
for use only in special circumstances that need to generically transform a whole instruction (eg: IR ...
Definition: Instructions.h:120
static bool classof(const Instruction *I)
Definition: Instructions.h:154
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:99
void setUsedWithInAlloca(bool V)
Specify whether this alloca is used to represent the arguments to a call.
Definition: Instructions.h:144
AllocaInst * cloneImpl() const
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:117
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:139
Value * getArraySize()
Definition: Instructions.h:96
unsigned getAddressSpace() const
Return the address space for the allocation.
Definition: Instructions.h:104
std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
Definition: Instructions.h:128
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:95
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:501
BoolBitfieldElementT< 0 > VolatileField
Definition: Instructions.h:529
const Value * getCompareOperand() const
Definition: Instructions.h:634
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:625
AtomicOrdering getMergedOrdering() const
Returns a single ordering which is at least as strong as both the success and failure orderings for t...
Definition: Instructions.h:607
void setWeak(bool IsWeak)
Definition: Instructions.h:564
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
Definition: Instructions.h:555
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:640
BoolBitfieldElementT< VolatileField::NextBit > WeakField
Definition: Instructions.h:530
AtomicOrderingBitfieldElementT< SuccessOrderingField::NextBit > FailureOrderingField
Definition: Instructions.h:534
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:599
static bool isValidFailureOrdering(AtomicOrdering Ordering)
Definition: Instructions.h:574
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:594
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:587
AlignmentBitfieldElementT< FailureOrderingField::NextBit > AlignmentField
Definition: Instructions.h:536
static AtomicOrdering getStrongestFailureOrdering(AtomicOrdering SuccessOrdering)
Returns the strongest permitted ordering on failure, given the desired ordering on success.
Definition: Instructions.h:652
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:544
const Value * getPointerOperand() const
Definition: Instructions.h:630
static bool classof(const Value *V)
Definition: Instructions.h:671
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
Definition: Instructions.h:562
void setAlignment(Align Align)
Definition: Instructions.h:548
void setVolatile(bool V)
Specify whether this is a volatile cmpxchg.
Definition: Instructions.h:559
static bool isValidSuccessOrdering(AtomicOrdering Ordering)
Definition: Instructions.h:569
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:582
AtomicOrderingBitfieldElementT< WeakField::NextBit > SuccessOrderingField
Definition: Instructions.h:532
static unsigned getPointerOperandIndex()
Definition: Instructions.h:631
const Value * getNewValOperand() const
Definition: Instructions.h:637
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:620
static bool classof(const Instruction *I)
Definition: Instructions.h:668
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:704
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:827
static bool isFPOperation(BinOp Op)
Definition: Instructions.h:809
static unsigned getPointerOperandIndex()
Definition: Instructions.h:872
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
Definition: Instructions.h:837
void setVolatile(bool V)
Specify whether this is a volatile RMW or not.
Definition: Instructions.h:841
BinOpBitfieldElement< AtomicOrderingField::NextBit > OperationField
Definition: Instructions.h:799
BinOp
This enumeration lists the possible modifications atomicrmw can make.
Definition: Instructions.h:716
@ Add
*p = old + v
Definition: Instructions.h:720
@ FAdd
*p = old + v
Definition: Instructions.h:741
@ USubCond
Subtract only if no unsigned overflow.
Definition: Instructions.h:764
@ Min
*p = old <signed v ? old : v
Definition: Instructions.h:734
@ Or
*p = old | v
Definition: Instructions.h:728
@ Sub
*p = old - v
Definition: Instructions.h:722
@ And
*p = old & v
Definition: Instructions.h:724
@ Xor
*p = old ^ v
Definition: Instructions.h:730
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
Definition: Instructions.h:768
@ FSub
*p = old - v
Definition: Instructions.h:744
@ UIncWrap
Increment one up to a maximum value.
Definition: Instructions.h:756
@ Max
*p = old >signed v ? old : v
Definition: Instructions.h:732
@ UMin
*p = old <unsigned v ? old : v
Definition: Instructions.h:738
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
Definition: Instructions.h:752
@ UMax
*p = old >unsigned v ? old : v
Definition: Instructions.h:736
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
Definition: Instructions.h:748
@ UDecWrap
Decrement one until a minimum value or zero.
Definition: Instructions.h:760
@ Nand
*p = ~(old & v)
Definition: Instructions.h:726
AtomicOrderingBitfieldElementT< VolatileField::NextBit > AtomicOrderingField
Definition: Instructions.h:798
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:866
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
Value * getPointerOperand()
Definition: Instructions.h:870
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
Definition: Instructions.h:852
bool isFloatingPointOperation() const
Definition: Instructions.h:882
static bool classof(const Instruction *I)
Definition: Instructions.h:887
const Value * getPointerOperand() const
Definition: Instructions.h:871
void setOperation(BinOp Operation)
Definition: Instructions.h:821
static bool classof(const Value *V)
Definition: Instructions.h:890
BinOp getOperation() const
Definition: Instructions.h:805
const Value * getValOperand() const
Definition: Instructions.h:875
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:861
void setAlignment(Align Align)
Definition: Instructions.h:831
Value * getValOperand()
Definition: Instructions.h:874
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:847
AlignmentBitfieldElementT< OperationField::NextBit > AlignmentField
Definition: Instructions.h:800
BoolBitfieldElementT< 0 > VolatileField
Definition: Instructions.h:796
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:878
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
This class represents a no-op cast from one type to another.
static bool classof(const Instruction *I)
static bool classof(const Value *V)
BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
Conditional or Unconditional Branch instruction.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
iterator_range< succ_op_iterator > successors()
static BranchInst * Create(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, InsertPosition InsertBefore=nullptr)
void setCondition(Value *V)
static bool classof(const Instruction *I)
bool isConditional() const
unsigned getNumSuccessors() const
static bool classof(const Value *V)
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
Value * getCondition() const
iterator_range< const_succ_op_iterator > successors() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1120
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1482
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
Definition: InstrTypes.h:1459
FunctionType * FTy
Definition: InstrTypes.h:1135
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
Definition: InstrTypes.h:2280
unsigned arg_size() const
Definition: InstrTypes.h:1292
unsigned getNumTotalBundleOperands() const
Return the total number operands (not operand bundles) used by every operand bundle in this OperandBu...
Definition: InstrTypes.h:2008
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
static bool classof(const Value *V)
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool classof(const Instruction *I)
static CallBrInst * Create(FunctionCallee Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
SmallVector< BasicBlock *, 16 > getIndirectDests() const
static CallBrInst * Create(FunctionCallee Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
void setSuccessor(unsigned i, BasicBlock *NewSucc)
BasicBlock * getSuccessor(unsigned i) const
Value * getIndirectDestLabelUse(unsigned i) const
BasicBlock * getIndirectDest(unsigned i) const
void setDefaultDest(BasicBlock *B)
unsigned getNumSuccessors() const
void setIndirectDest(unsigned i, BasicBlock *B)
Value * getIndirectDestLabel(unsigned i) const
getIndirectDestLabel - Return the i-th indirect dest label.
BasicBlock * getDefaultDest() const
unsigned getNumIndirectDests() const
Return the number of callbr indirect dest labels.
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
bool isNoTailCall() const
void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
static bool classof(const Value *V)
bool isTailCall() const
void setCanReturnTwice()
void setTailCallKind(TailCallKind TCK)
static CallInst * Create(FunctionType *Ty, Value *Func, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CallInst * Create(FunctionType *Ty, Value *Func, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
bool canReturnTwice() const
Return true if the call can return twice.
TailCallKind getTailCallKind() const
CallInst * cloneImpl() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
void setTailCall(bool IsTc=true)
bool isMustTailCall() const
static CallInst * Create(FunctionCallee Func, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
static bool classof(const Instruction *I)
bool isNonContinuableTrap() const
Return true if the call is for a noreturn trap intrinsic.
static CallInst * Create(FunctionCallee Func, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CallInst * Create(FunctionCallee Func, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:444
CatchSwitchInst * getCatchSwitch() const
Convenience accessors.
void setCatchSwitch(Value *CatchSwitch)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static CatchPadInst * Create(Value *CatchSwitch, ArrayRef< Value * > Args, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool classof(const Value *V)
static bool classof(const Instruction *I)
BasicBlock * getSuccessor() const
CatchPadInst * getCatchPad() const
Convenience accessors.
void setSuccessor(BasicBlock *NewSucc)
static bool classof(const Value *V)
static CatchReturnInst * Create(Value *CatchPad, BasicBlock *BB, InsertPosition InsertBefore=nullptr)
unsigned getNumSuccessors() const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
void setCatchPad(CatchPadInst *CatchPad)
CatchReturnInst * cloneImpl() const
Value * getCatchSwitchParentPad() const
Get the parentPad of this catchret's catchpad's catchswitch.
void setUnwindDest(BasicBlock *UnwindDest)
static bool classof(const Instruction *I)
BasicBlock *(*)(Value *) DerefFnTy
const BasicBlock *(*)(const Value *) ConstDerefFnTy
unsigned getNumSuccessors() const
const_handler_iterator handler_begin() const
Returns an iterator that points to the first handler in the CatchSwitchInst.
unsigned getNumHandlers() const
return the number of 'handlers' in this catchswitch instruction, except the default handler
void setSuccessor(unsigned Idx, BasicBlock *NewSucc)
Value * getParentPad() const
void setParentPad(Value *ParentPad)
bool unwindsToCaller() const
static bool classof(const Value *V)
handler_iterator handler_end()
Returns a read-only iterator that points one past the last handler in the CatchSwitchInst.
BasicBlock * getUnwindDest() const
BasicBlock * getSuccessor(unsigned Idx) const
const_handler_iterator handler_end() const
Returns an iterator that points one past the last handler in the CatchSwitchInst.
bool hasUnwindDest() const
handler_iterator handler_begin()
Returns an iterator that points to the first handler in CatchSwitchInst.
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
handler_range handlers()
iteration adapter for range-for loops.
const_handler_range handlers() const
iteration adapter for range-for loops.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
static bool classof(const Value *V)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static CleanupPadInst * Create(Value *ParentPad, ArrayRef< Value * > Args={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool classof(const Instruction *I)
CleanupPadInst * getCleanupPad() const
Convenience accessor.
unsigned getNumSuccessors() const
BasicBlock * getUnwindDest() const
bool unwindsToCaller() const
void setCleanupPad(CleanupPadInst *CleanupPad)
static bool classof(const Value *V)
void setUnwindDest(BasicBlock *NewDest)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
bool hasUnwindDest() const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:661
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition: InstrTypes.h:988
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition: InstrTypes.h:766
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:673
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:676
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:681
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:684
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:682
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:689
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:675
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:683
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:825
static auto FCmpPredicates()
Returns the sequence of all FCmp predicates.
Definition: InstrTypes.h:712
bool isFPPredicate() const
Definition: InstrTypes.h:780
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:763
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
Definition: CmpPredicate.h:22
bool hasSameSign() const
Query samesign information, for optimizations.
Definition: CmpPredicate.h:42
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
This is an important base class in LLVM.
Definition: Constant.h:42
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
This instruction extracts a single (scalar) element from a VectorType value.
const Value * getVectorOperand() const
ExtractElementInst * cloneImpl() const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
static bool classof(const Value *V)
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
const Value * getIndexOperand() const
static bool classof(const Instruction *I)
VectorType * getVectorOperandType() const
static bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
unsigned getNumIndices() const
static bool classof(const Value *V)
static bool classof(const Instruction *I)
iterator_range< idx_iterator > indices() const
idx_iterator idx_end() const
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
const Value * getAggregateOperand() const
static unsigned getAggregateOperandIndex()
idx_iterator idx_begin() const
This instruction compares its operands according to the predicate given to the constructor.
bool isRelational() const
FCmpInst(Predicate Pred, Value *LHS, Value *RHS, const Twine &NameStr="", Instruction *FlagsSource=nullptr)
Constructor with no-insertion semantics.
bool isEquality() const
static bool classof(const Value *V)
bool isCommutative() const
static bool isCommutative(Predicate Pred)
static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isEquality(Predicate Pred)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static auto predicates()
Returns the sequence of all FCmp predicates.
FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
void swapOperands()
Exchange the two operands to this instruction in such a way that it does not modify the semantics of ...
FCmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
This class represents an extension of floating point types.
static bool classof(const Value *V)
FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class represents a cast from floating point to signed integer.
static bool classof(const Value *V)
FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class represents a cast from floating point to unsigned integer.
static bool classof(const Value *V)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
This class represents a truncation of floating point types.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V)
FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
An instruction for ordering other memory operations.
Definition: Instructions.h:424
static bool classof(const Value *V)
Definition: Instructions.h:473
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
Definition: Instructions.h:460
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
Definition: Instructions.h:465
static bool classof(const Instruction *I)
Definition: Instructions.h:470
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
Definition: Instructions.h:455
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Definition: Instructions.h:449
This class represents a freeze function that returns random concrete value if an operand is either a ...
static bool classof(const Value *V)
FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
static bool classof(const Instruction *I)
friend class CatchPadInst
Definition: InstrTypes.h:2338
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
Definition: DerivedTypes.h:170
Class to represent function types.
Definition: DerivedTypes.h:105
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:933
bool isInBounds() const
Determine whether the GEP has the inbounds flag.
bool hasNoUnsignedSignedWrap() const
Determine whether the GEP has the nusw flag.
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
static Type * getGEPReturnType(Value *Ptr, ArrayRef< Value * > IdxList)
Returns the pointer type returned by the GEP instruction, which may be a vector of pointers.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
void setResultElementType(Type *Ty)
Definition: Instructions.h:993
bool hasNoUnsignedWrap() const
Determine whether the GEP has the nuw flag.
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
unsigned getAddressSpace() const
Returns the address space of this instruction's pointer type.
iterator_range< const_op_iterator > indices() const
Type * getResultElementType() const
Definition: Instructions.h:995
static bool classof(const Instruction *I)
static bool classof(const Value *V)
iterator_range< op_iterator > indices()
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Definition: Instructions.h:956
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
void setSourceElementType(Type *Ty)
Definition: Instructions.h:992
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
Type * getSourceElementType() const
Definition: Instructions.h:990
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Definition: Instructions.h:980
Type * getPointerOperandType() const
Method to return the pointer operand as a PointerType.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, GEPNoWrapFlags NW, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Definition: Instructions.h:967
static unsigned getPointerOperandIndex()
bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
const_op_iterator idx_begin() const
GetElementPtrInst * cloneImpl() const
bool collectOffset(const DataLayout &DL, unsigned BitWidth, SmallMapVector< Value *, APInt, 4 > &VariableOffsets, APInt &ConstantOffset) const
void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
unsigned getNumIndices() const
GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
const_op_iterator idx_end() const
const Value * getPointerOperand() const
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
This instruction compares its operands according to the predicate given to the constructor.
bool hasSameSign() const
An icmp instruction, which can be marked as "samesign", indicating that the two operands have the sam...
static bool classof(const Value *V)
void setSameSign(bool B=true)
ICmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
static bool isCommutative(Predicate P)
static CmpPredicate getSwappedCmpPredicate(CmpPredicate Pred)
CmpPredicate getCmpPredicate() const
bool isCommutative() const
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
CmpPredicate getSwappedCmpPredicate() const
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
CmpPredicate getInverseCmpPredicate() const
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
static bool classof(const Instruction *I)
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
bool isEquality() const
Return true if this predicate is either EQ or NE.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
static bool isRelational(Predicate P)
Return true if the predicate is relational (not EQ or NE).
void swapOperands()
Exchange the two operands to this instruction in such a way that it does not modify the semantics of ...
static auto predicates()
Returns the sequence of all ICmp predicates.
ICmpInst(Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with no-insertion semantics.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
Indirect Branch Instruction.
static IndirectBrInst * Create(Value *Address, unsigned NumDests, InsertPosition InsertBefore=nullptr)
BasicBlock * getDestination(unsigned i)
Return the specified destination.
static bool classof(const Value *V)
const Value * getAddress() const
static bool classof(const Instruction *I)
BasicBlock * getSuccessor(unsigned i) const
iterator_range< const_succ_op_iterator > successors() const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
unsigned getNumDestinations() const
return the number of possible destinations in this indirectbr instruction.
const BasicBlock * getDestination(unsigned i) const
void setSuccessor(unsigned i, BasicBlock *NewSucc)
void setAddress(Value *V)
unsigned getNumSuccessors() const
iterator_range< succ_op_iterator > successors()
This instruction inserts a single (scalar) element into a VectorType value.
static bool classof(const Value *V)
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
VectorType * getType() const
Overload to return most specific vector type.
static bool classof(const Instruction *I)
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
This instruction inserts a struct field of array element value into an aggregate value.
Value * getInsertedValueOperand()
static bool classof(const Instruction *I)
static unsigned getAggregateOperandIndex()
Value * getAggregateOperand()
static bool classof(const Value *V)
unsigned getNumIndices() const
ArrayRef< unsigned > getIndices() const
iterator_range< idx_iterator > indices() const
static unsigned getInsertedValueOperandIndex()
InsertValueInst * cloneImpl() const
idx_iterator idx_end() const
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
const Value * getAggregateOperand() const
bool hasIndices() const
const Value * getInsertedValueOperand() const
idx_iterator idx_begin() const
typename Bitfield::Element< AtomicOrdering, Offset, 3, AtomicOrdering::LAST > AtomicOrderingBitfieldElementT
Definition: Instruction.h:153
typename Bitfield::Element< bool, Offset, 1 > BoolBitfieldElementT
Definition: Instruction.h:148
bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
typename Bitfield::Element< unsigned, Offset, 6, Value::MaxAlignmentExponent > AlignmentBitfieldElementT
Definition: Instruction.h:145
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:274
void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
friend class BasicBlock
Various leaf nodes.
Definition: Instruction.h:1006
This class represents a cast from an integer to a pointer.
static bool classof(const Instruction *I)
IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
unsigned getAddressSpace() const
Returns the address space of this instruction's pointer type.
static bool classof(const Value *V)
Invoke instruction.
static bool classof(const Instruction *I)
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
static bool classof(const Value *V)
static InvokeInst * Create(FunctionCallee Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
void setSuccessor(unsigned i, BasicBlock *NewSucc)
static InvokeInst * Create(FunctionCallee Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
BasicBlock * getSuccessor(unsigned i) const
void setUnwindDest(BasicBlock *B)
BasicBlock * getNormalDest() const
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > Bundles={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
unsigned getNumSuccessors() const
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
static bool classof(const Value *V)
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
void reserveClauses(unsigned Size)
Grow the size of the operand list to accommodate the new number of clauses.
static bool classof(const Instruction *I)
An instruction for reading from memory.
Definition: Instructions.h:176
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:261
const Value * getPointerOperand() const
Definition: Instructions.h:256
void setAlignment(Align Align)
Definition: Instructions.h:215
Value * getPointerOperand()
Definition: Instructions.h:255
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:205
static bool classof(const Instruction *I)
Definition: Instructions.h:266
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this load instruction.
Definition: Instructions.h:225
static bool classof(const Value *V)
Definition: Instructions.h:269
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this load instruction.
Definition: Instructions.h:235
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
Definition: Instructions.h:241
LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:220
Type * getPointerOperandType() const
Definition: Instructions.h:258
static unsigned getPointerOperandIndex()
Definition: Instructions.h:257
bool isUnordered() const
Definition: Instructions.h:249
void setVolatile(bool V)
Specify whether this is a volatile load or not.
Definition: Instructions.h:208
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:230
bool isSimple() const
Definition: Instructions.h:247
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:211
Metadata node.
Definition: Metadata.h:1069
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Definition: ArrayRef.h:310
BasicBlock * getIncomingBlock(Value::const_user_iterator I) const
Return incoming basic block corresponding to value use iterator.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
bool isComplete() const
If the PHI node is complete which means all of its parent's predecessors have incoming value in this ...
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
static bool classof(const Value *V)
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
void setIncomingBlock(unsigned i, BasicBlock *BB)
BasicBlock *const * const_block_iterator
void setIncomingValue(unsigned i, Value *V)
static unsigned getOperandNumForIncomingValue(unsigned i)
void copyIncomingBlocks(iterator_range< const_block_iterator > BBRange, uint32_t ToIdx=0)
Copies the basic blocks from BBRange to the incoming basic block list of this PHINode,...
const_block_iterator block_end() const
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
static unsigned getIncomingValueNumForOperand(unsigned i)
const_op_range incoming_values() const
Value * removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true)
void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New)
Replace every incoming basic block Old to basic block New.
BasicBlock * getIncomingBlock(const Use &U) const
Return incoming basic block corresponding to an operand of the PHI.
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Class to represent pointers.
Definition: DerivedTypes.h:670
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:703
This class represents a cast from a pointer to an integer.
Value * getPointerOperand()
Gets the pointer operand.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
static bool classof(const Value *V)
const Value * getPointerOperand() const
Gets the pointer operand.
static unsigned getPointerOperandIndex()
Gets the operand index of the pointer operand.
static bool classof(const Instruction *I)
PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
static ResumeInst * Create(Value *Exn, InsertPosition InsertBefore=nullptr)
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
Value * getValue() const
Convenience accessor.
static bool classof(const Value *V)
unsigned getNumSuccessors() const
ResumeInst * cloneImpl() const
static bool classof(const Instruction *I)
Return a value (possibly void), from a function.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
unsigned getNumSuccessors() const
static bool classof(const Value *V)
static bool classof(const Instruction *I)
static ReturnInst * Create(LLVMContext &C, BasicBlock *InsertAtEnd)
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents a sign extension of integer types.
static bool classof(const Value *V)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
SExtInst * cloneImpl() const
Clone an identical SExtInst.
This class represents a cast from signed integer to floating point.
SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V)
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
void setFalseValue(Value *V)
const Value * getFalseValue() const
void setTrueValue(Value *V)
OtherOps getOpcode() const
Value * getCondition()
Value * getTrueValue()
void swapValues()
Swap the true and false values of the select instruction.
Value * getFalseValue()
const Value * getCondition() const
SelectInst * cloneImpl() const
friend class Instruction
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
static const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
static bool classof(const Value *V)
void setCondition(Value *V)
const Value * getTrueValue() const
static bool classof(const Instruction *I)
This instruction constructs a fixed permutation of two input vectors.
static bool classof(const Value *V)
static bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts)
Constant * getShuffleMaskForBitcode() const
Return the mask for this instruction, for use in bitcode.
bool isSingleSource() const
Return true if this shuffle chooses elements from exactly one source vector without changing the leng...
bool changesLength() const
Return true if this shuffle returns a vector with a different number of elements than its source vect...
bool isExtractSubvectorMask(int &Index) const
Return true if this shuffle mask is an extract subvector mask.
ArrayRef< int > getShuffleMask() const
static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts, int &NumSubElts, int &Index)
static bool isSingleSourceMask(const Constant *Mask, int NumSrcElts)
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
void getShuffleMask(SmallVectorImpl< int > &Result) const
Return the mask for this instruction as a vector of integers.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
static bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor)
VectorType * getType() const
Overload to return most specific vector type.
bool isInsertSubvectorMask(int &NumSubElts, int &Index) const
Return true if this shuffle mask is an insert subvector mask.
bool increasesLength() const
Return true if this shuffle returns a vector with a greater number of elements than its source vector...
bool isZeroEltSplat() const
Return true if all elements of this shuffle are the same value as the first element of exactly one so...
static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts, int &Index)
bool isSelect() const
Return true if this shuffle chooses elements from its source vectors without lane crossings and all o...
static bool isSpliceMask(const Constant *Mask, int NumSrcElts, int &Index)
bool isTranspose() const
Return true if this shuffle transposes the elements of its inputs without changing the length of the ...
static void commuteShuffleMask(MutableArrayRef< int > Mask, unsigned InVecNumElts)
Change values in a shuffle permute mask assuming the two vector operands of length InVecNumElts have ...
bool isSplice(int &Index) const
Return true if this shuffle splices two inputs without changing the length of the vectors.
static bool isReverseMask(const Constant *Mask, int NumSrcElts)
static bool isSelectMask(const Constant *Mask, int NumSrcElts)
static bool classof(const Instruction *I)
static bool isZeroEltSplatMask(const Constant *Mask, int NumSrcElts)
bool isIdentity() const
Return true if this shuffle chooses elements from exactly one source vector without lane crossings an...
static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor, int &VF)
static bool isIdentityMask(const Constant *Mask, int NumSrcElts)
static bool isTransposeMask(const Constant *Mask, int NumSrcElts)
bool isReverse() const
Return true if this shuffle swaps the order of elements from exactly one source vector.
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
An instruction for storing to memory.
Definition: Instructions.h:292
static bool classof(const Instruction *I)
Definition: Instructions.h:392
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Definition: Instructions.h:342
const Value * getPointerOperand() const
Definition: Instructions.h:382
Align getAlign() const
Definition: Instructions.h:333
Type * getPointerOperandType() const
Definition: Instructions.h:384
void setVolatile(bool V)
Specify whether this is a volatile store or not.
Definition: Instructions.h:328
void setAlignment(Align Align)
Definition: Instructions.h:337
bool isSimple() const
Definition: Instructions.h:370
const Value * getValueOperand() const
Definition: Instructions.h:379
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this store instruction.
Definition: Instructions.h:348
Value * getValueOperand()
Definition: Instructions.h:378
static bool classof(const Value *V)
Definition: Instructions.h:395
bool isUnordered() const
Definition: Instructions.h:372
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Transparently provide more efficient getOperand methods.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this store instruction.
Definition: Instructions.h:358
StoreInst * cloneImpl() const
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:387
static unsigned getPointerOperandIndex()
Definition: Instructions.h:383
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
Definition: Instructions.h:353
bool isVolatile() const
Return true if this is a store to a volatile memory location.
Definition: Instructions.h:325
Value * getPointerOperand()
Definition: Instructions.h:381
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
Definition: Instructions.h:364
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
A wrapper class to simplify modification of SwitchInst cases along with their prof branch_weights met...
void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
SwitchInstProfUpdateWrapper(SwitchInst &SI)
CaseWeightOpt getSuccessorWeight(unsigned idx)
std::optional< uint32_t > CaseWeightOpt
SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
A handle to a particular switch case.
unsigned getCaseIndex() const
Returns number of current case.
unsigned getSuccessorIndex() const
Returns successor index for current case successor.
BasicBlockT * getCaseSuccessor() const
Resolves successor for current case.
CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index)
bool operator==(const CaseHandleImpl &RHS) const
ConstantIntT * getCaseValue() const
Resolves case value for current case.
CaseHandle(SwitchInst *SI, ptrdiff_t Index)
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
const CaseHandleT & operator*() const
CaseIteratorImpl()=default
Default constructed iterator is in an invalid state until assigned to a case for a particular switch.
CaseIteratorImpl & operator-=(ptrdiff_t N)
bool operator==(const CaseIteratorImpl &RHS) const
CaseIteratorImpl & operator+=(ptrdiff_t N)
ptrdiff_t operator-(const CaseIteratorImpl &RHS) const
bool operator<(const CaseIteratorImpl &RHS) const
CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum)
Initializes case iterator for given SwitchInst and for given case number.
static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI, unsigned SuccessorIndex)
Initializes case iterator for given SwitchInst and for given successor index.
Multiway switch.
BasicBlock * getDefaultDest() const
CaseIt case_end()
Returns a read/write iterator that points one past the last case in the SwitchInst.
BasicBlock * getSuccessor(unsigned idx) const
ConstCaseIt findCaseValue(const ConstantInt *C) const
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
Provide fast operand accessors.
static SwitchInst * Create(Value *Value, BasicBlock *Default, unsigned NumCases, InsertPosition InsertBefore=nullptr)
void setCondition(Value *V)
ConstCaseIt case_begin() const
Returns a read-only iterator that points to the first case in the SwitchInst.
bool defaultDestUndefined() const
Returns true if the default branch must result in immediate undefined behavior, false otherwise.
iterator_range< ConstCaseIt > cases() const
Constant iteration adapter for range-for loops.
ConstantInt * findCaseDest(BasicBlock *BB)
Finds the unique case value for a given successor.
void setSuccessor(unsigned idx, BasicBlock *NewSucc)
static bool classof(const Value *V)
unsigned getNumSuccessors() const
CaseIt case_default()
Returns an iterator that points to the default case.
void setDefaultDest(BasicBlock *DefaultCase)
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
CaseIt findCaseValue(const ConstantInt *C)
Search all of the case values for the specified constant.
Value * getCondition() const
ConstCaseIt case_default() const
CaseIt case_begin()
Returns a read/write iterator that points to the first case in the SwitchInst.
static bool classof(const Instruction *I)
iterator_range< CaseIt > cases()
Iteration adapter for range-for loops.
ConstCaseIt case_end() const
Returns a read-only iterator that points one past the last case in the SwitchInst.
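The SwitchInst entries above describe its factory, case mutation, and iteration API. A sketch under the assumption that the condition, the default block, and two target blocks already exist (all names illustrative):

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"

// Build a two-case switch at the end of InsertAtEnd, then walk its cases
// with the cases() range adaptor.
static void buildTwoCaseSwitch(llvm::Value *Cond, llvm::BasicBlock *DefaultBB,
                               llvm::BasicBlock *OnZero,
                               llvm::BasicBlock *OnOne,
                               llvm::BasicBlock *InsertAtEnd) {
  auto *Ty = llvm::cast<llvm::IntegerType>(Cond->getType());
  llvm::SwitchInst *SI =
      llvm::SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/2, InsertAtEnd);
  SI->addCase(llvm::ConstantInt::get(Ty, 0), OnZero);
  SI->addCase(llvm::ConstantInt::get(Ty, 1), OnOne);
  for (const auto &Case : SI->cases()) {
    llvm::ConstantInt *V = Case.getCaseValue();       // value matched by this case
    llvm::BasicBlock *Succ = Case.getCaseSuccessor(); // block it branches to
    (void)V;
    (void)Succ;
  }
}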
This class represents a truncation of integer types.
void setHasNoSignedWrap(bool B)
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
TruncInst * cloneImpl() const
Clone an identical TruncInst.
void setHasNoUnsignedWrap(bool B)
unsigned getNoWrapKind() const
Returns the no-wrap kind of the operation.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
static bool classof(const Value *V)
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
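A sketch of creating a truncation and tagging it with the no-wrap flags described above; V and InsertBefore are assumed to exist, and the caller is assumed to guarantee that the value fits:

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"

// Truncate V to i8 and mark the result 'nuw': the caller promises the
// truncated-away bits are zero, so the operation never overflows unsigned.
static llvm::TruncInst *truncToI8NoUnsignedWrap(llvm::Value *V,
                                                llvm::Instruction *InsertBefore) {
  llvm::Type *I8 = llvm::Type::getInt8Ty(V->getContext());
  auto *T = new llvm::TruncInst(V, I8, "trunc", InsertBefore);
  T->setHasNoUnsignedWrap(true);
  return T;
}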
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:270
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:234
This class represents a cast from unsigned integer to floating point.
static bool classof(const Value *V)
UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
This instruction has undefined behavior; its presence indicates that the end of the enclosing block cannot be reached.
unsigned getNumSuccessors() const
static bool classof(const Value *V)
static bool classof(const Instruction *I)
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
void allocHungoffUses(unsigned N, bool IsPhi=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition: User.cpp:50
op_iterator op_begin()
Definition: User.h:280
const Use & getOperandUse(unsigned i) const
Definition: User.h:241
Value * getOperand(unsigned i) const
Definition: User.h:228
unsigned getNumOperands() const
Definition: User.h:250
op_iterator op_end()
Definition: User.h:282
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
static bool classof(const Instruction *I)
Value * getPointerOperand()
VAArgInst(Value *List, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
const Value * getPointerOperand() const
static bool classof(const Value *V)
static unsigned getPointerOperandIndex()
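A sketch of the VAArgInst constructor listed above, reading the next i32 argument from a va_list pointer; names are illustrative:

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"

// Fetch the next variadic argument as an i32 from the given va_list pointer.
static llvm::VAArgInst *readNextVarArgI32(llvm::Value *VAListPtr,
                                          llvm::Instruction *InsertBefore) {
  llvm::Type *I32 = llvm::Type::getInt32Ty(VAListPtr->getContext());
  return new llvm::VAArgInst(VAListPtr, I32, "va", InsertBefore);
}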
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
user_iterator_impl< const User > const_user_iterator
Definition: Value.h:391
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition: Value.h:84
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
Base class of all SIMD vector types.
Definition: DerivedTypes.h:427
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
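A sketch of VectorType::get building both a fixed-width and a scalable vector type; only the LLVMContext is assumed:

#include "llvm/IR/DerivedTypes.h"

// <4 x float> and <vscale x 4 x i32> via the primary factory method.
static void makeVectorTypes(llvm::LLVMContext &Ctx) {
  llvm::VectorType *V4F32 = llvm::VectorType::get(
      llvm::Type::getFloatTy(Ctx), llvm::ElementCount::getFixed(4));
  llvm::VectorType *NxV4I32 = llvm::VectorType::get(
      llvm::Type::getInt32Ty(Ctx), llvm::ElementCount::getScalable(4));
  (void)V4F32;
  (void)NxV4I32;
}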
This class represents zero extension of integer types.
static bool classof(const Instruction *I)
Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V)
ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
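A sketch chaining the ZExtInst and UIToFPInst constructors above: widen an unsigned i8 value and then convert it to double (names illustrative):

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"

// i8 -> i32 zero extension, then unsigned integer -> double conversion.
static llvm::Value *zextThenUIToFP(llvm::Value *V8,
                                   llvm::Instruction *InsertBefore) {
  llvm::LLVMContext &Ctx = V8->getContext();
  auto *Wide =
      new llvm::ZExtInst(V8, llvm::Type::getInt32Ty(Ctx), "wide", InsertBefore);
  return new llvm::UIToFPInst(Wide, llvm::Type::getDoubleTy(Ctx), "fp",
                              InsertBefore);
}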
An efficient, type-erasing, non-owning reference to a callable.
base_list_type::iterator iterator
Definition: ilist.h:121
CRTP base class for adapting an iterator to a different type.
Definition: iterator.h:237
CRTP base class which implements the entire standard iterator facade in terms of a minimal subset of ...
Definition: iterator.h:80
A range adaptor for a pair of iterators.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition: ISDOpcodes.h:71
@ System
Synchronized with respect to all concurrently executing threads.
Definition: LLVMContext.h:57
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
Type * checkGEPType(Type *Ty)
Definition: Instructions.h:925
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1739
unsigned getLoadStoreAddressSpace(const Value *I)
A helper function that returns the address space of the pointer operand of a load or store instruction.
APInt operator*(APInt a, uint64_t RHS)
Definition: APInt.h:2204
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void setAtomicSyncScopeID(Instruction *I, SyncScope::ID SSID)
A helper function that sets an atomic operation's sync scope.
Align getLoadStoreAlignment(const Value *I)
A helper function that returns the alignment of a load or store instruction.
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
std::optional< SyncScope::ID > getAtomicSyncScopeID(const Instruction *I)
A helper function that returns an atomic operation's sync scope; returns std::nullopt if it is not an...
constexpr int PoisonMaskElem
AtomicOrdering
Atomic ordering for LLVM's memory model.
DWARFExpression::Operation Op
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1841
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:217
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1766
auto predecessors(const MachineBasicBlock *BB)
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
void setLoadStoreAlignment(Value *I, Align NewAlign)
A helper function that sets the alignment of a load or store instruction.
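A sketch using the free load/store helpers indexed above (getLoadStorePointerOperand, getLoadStoreType, getLoadStoreAlignment, getLoadStoreAddressSpace) to inspect a memory access without branching on its concrete class; the function name is illustrative:

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Alignment.h"

// Query a load or store uniformly through the helper functions.
// I must be a LoadInst or StoreInst (the helpers assert this).
static void describeMemAccess(const llvm::Instruction *I) {
  const llvm::Value *Ptr = llvm::getLoadStorePointerOperand(I); // address operand
  llvm::Type *AccessTy = llvm::getLoadStoreType(I);             // loaded/stored type
  llvm::Align A = llvm::getLoadStoreAlignment(I);               // current alignment
  unsigned AS = llvm::getLoadStoreAddressSpace(I);              // pointer address space
  (void)Ptr;
  (void)AccessTy;
  (void)A;
  (void)AS;
}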
unsigned Log2(Align A)
Returns the log2 of the alignment.
Definition: Alignment.h:208
@ Default
The result values are uniform if and only if all operands are uniform.
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Summary of memprof metadata on allocations.
Describes an element of a Bitfield.
Definition: Bitfields.h:223
static constexpr bool areContiguous()
Definition: Bitfields.h:280
The const version of succ_op_iterator.
const BasicBlock * operator->() const
const_succ_op_iterator(const_value_op_iterator I)
const BasicBlock * operator*() const
Iterator type that casts an operand to a basic block.
succ_op_iterator(value_op_iterator I)
FixedNumOperandTraits - determine the allocation regime of the Use array when it is a prefix to the U...
Definition: OperandTraits.h:30
HungoffOperandTraits - determine the allocation regime of the Use array when it is not a prefix to th...
Definition: OperandTraits.h:93
The const version of succ_op_iterator.
const_succ_op_iterator(const_value_op_iterator I)
Iterator type that casts an operand to a basic block.
Compile-time customization of User operands.
Definition: User.h:42
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:254
Information about how a User object was allocated, to be passed into the User constructor.
Definition: User.h:79
Indicates this User has operands "hung off" in another allocation.
Definition: User.h:57
Indicates this User has operands co-allocated.
Definition: User.h:60
Iterator for directly iterating over the operand Values.
Definition: User.h:299
VariadicOperandTraits - determine the allocation regime of the Use array when it is a prefix to the U...
Definition: OperandTraits.h:67