// Extracted from LLVM 19.0.0git — lib/IR/Instructions.cpp (doxygen source export).
//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//
15#include "LLVMContextImpl.h"
18#include "llvm/ADT/Twine.h"
19#include "llvm/IR/Attributes.h"
20#include "llvm/IR/BasicBlock.h"
21#include "llvm/IR/Constant.h"
23#include "llvm/IR/Constants.h"
24#include "llvm/IR/DataLayout.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/InstrTypes.h"
28#include "llvm/IR/Instruction.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/MDBuilder.h"
32#include "llvm/IR/Metadata.h"
33#include "llvm/IR/Module.h"
34#include "llvm/IR/Operator.h"
36#include "llvm/IR/Type.h"
37#include "llvm/IR/Value.h"
42#include "llvm/Support/ModRef.h"
44#include <algorithm>
45#include <cassert>
46#include <cstdint>
47#include <optional>
48#include <vector>
49
50using namespace llvm;
51
53 "disable-i2p-p2i-opt", cl::init(false),
54 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
55
56//===----------------------------------------------------------------------===//
57// AllocaInst Class
58//===----------------------------------------------------------------------===//
59
60std::optional<TypeSize>
62 TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
63 if (isArrayAllocation()) {
64 auto *C = dyn_cast<ConstantInt>(getArraySize());
65 if (!C)
66 return std::nullopt;
67 assert(!Size.isScalable() && "Array elements cannot have a scalable size");
68 Size *= C->getZExtValue();
69 }
70 return Size;
71}
72
73std::optional<TypeSize>
75 std::optional<TypeSize> Size = getAllocationSize(DL);
76 if (Size)
77 return *Size * 8;
78 return std::nullopt;
79}
80
81//===----------------------------------------------------------------------===//
82// SelectInst Class
83//===----------------------------------------------------------------------===//
84
85/// areInvalidOperands - Return a string if the specified operands are invalid
86/// for a select operation, otherwise return null.
87const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
88 if (Op1->getType() != Op2->getType())
89 return "both values to select must have same type";
90
91 if (Op1->getType()->isTokenTy())
92 return "select values cannot have token type";
93
94 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
95 // Vector select.
96 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
97 return "vector select condition element type must be i1";
98 VectorType *ET = dyn_cast<VectorType>(Op1->getType());
99 if (!ET)
100 return "selected values for vector select must be vectors";
101 if (ET->getElementCount() != VT->getElementCount())
102 return "vector select requires selected vectors to have "
103 "the same vector length as select condition";
104 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
105 return "select condition must be i1 or <n x i1>";
106 }
107 return nullptr;
108}
109
110//===----------------------------------------------------------------------===//
111// PHINode Class
112//===----------------------------------------------------------------------===//
113
114PHINode::PHINode(const PHINode &PN)
115 : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
116 ReservedSpace(PN.getNumOperands()) {
118 std::copy(PN.op_begin(), PN.op_end(), op_begin());
119 copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
121}
122
123// removeIncomingValue - Remove an incoming value. This is useful if a
124// predecessor basic block is deleted.
125Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
126 Value *Removed = getIncomingValue(Idx);
127
128 // Move everything after this operand down.
129 //
130 // FIXME: we could just swap with the end of the list, then erase. However,
131 // clients might not expect this to happen. The code as it is thrashes the
132 // use/def lists, which is kinda lame.
133 std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
135
136 // Nuke the last value.
137 Op<-1>().set(nullptr);
139
140 // If the PHI node is dead, because it has zero entries, nuke it now.
141 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
142 // If anyone is using this PHI, make them use a dummy value instead...
145 }
146 return Removed;
147}
148
149void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
150 bool DeletePHIIfEmpty) {
151 SmallDenseSet<unsigned> RemoveIndices;
152 for (unsigned Idx = 0; Idx < getNumIncomingValues(); ++Idx)
153 if (Predicate(Idx))
154 RemoveIndices.insert(Idx);
155
156 if (RemoveIndices.empty())
157 return;
158
159 // Remove operands.
160 auto NewOpEnd = remove_if(operands(), [&](Use &U) {
161 return RemoveIndices.contains(U.getOperandNo());
162 });
163 for (Use &U : make_range(NewOpEnd, op_end()))
164 U.set(nullptr);
165
166 // Remove incoming blocks.
167 (void)std::remove_if(const_cast<block_iterator>(block_begin()),
168 const_cast<block_iterator>(block_end()), [&](BasicBlock *&BB) {
169 return RemoveIndices.contains(&BB - block_begin());
170 });
171
172 setNumHungOffUseOperands(getNumOperands() - RemoveIndices.size());
173
174 // If the PHI node is dead, because it has zero entries, nuke it now.
175 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
176 // If anyone is using this PHI, make them use a dummy value instead...
179 }
180}
181
182/// growOperands - grow operands - This grows the operand list in response
183/// to a push_back style of operation. This grows the number of ops by 1.5
184/// times.
185///
186void PHINode::growOperands() {
187 unsigned e = getNumOperands();
188 unsigned NumOps = e + e / 2;
189 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
190
191 ReservedSpace = NumOps;
192 growHungoffUses(ReservedSpace, /* IsPhi */ true);
193}
194
195/// hasConstantValue - If the specified PHI node always merges together the same
196/// value, return the value, otherwise return null.
198 // Exploit the fact that phi nodes always have at least one entry.
199 Value *ConstantValue = getIncomingValue(0);
200 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
201 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
202 if (ConstantValue != this)
203 return nullptr; // Incoming values not all the same.
204 // The case where the first value is this PHI.
205 ConstantValue = getIncomingValue(i);
206 }
207 if (ConstantValue == this)
208 return PoisonValue::get(getType());
209 return ConstantValue;
210}
211
212/// hasConstantOrUndefValue - Whether the specified PHI node always merges
213/// together the same value, assuming that undefs result in the same value as
214/// non-undefs.
215/// Unlike \ref hasConstantValue, this does not return a value because the
216/// unique non-undef incoming value need not dominate the PHI node.
218 Value *ConstantValue = nullptr;
219 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
221 if (Incoming != this && !isa<UndefValue>(Incoming)) {
222 if (ConstantValue && ConstantValue != Incoming)
223 return false;
224 ConstantValue = Incoming;
225 }
226 }
227 return true;
228}
229
230//===----------------------------------------------------------------------===//
231// LandingPadInst Implementation
232//===----------------------------------------------------------------------===//
233
234LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
235 const Twine &NameStr,
236 InsertPosition InsertBefore)
237 : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
238 init(NumReservedValues, NameStr);
239}
240
241LandingPadInst::LandingPadInst(const LandingPadInst &LP)
242 : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
243 LP.getNumOperands()),
244 ReservedSpace(LP.getNumOperands()) {
246 Use *OL = getOperandList();
247 const Use *InOL = LP.getOperandList();
248 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
249 OL[I] = InOL[I];
250
251 setCleanup(LP.isCleanup());
252}
253
254LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
255 const Twine &NameStr,
256 InsertPosition InsertBefore) {
257 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
258}
259
260void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
261 ReservedSpace = NumReservedValues;
263 allocHungoffUses(ReservedSpace);
264 setName(NameStr);
265 setCleanup(false);
266}
267
268/// growOperands - grow operands - This grows the operand list in response to a
269/// push_back style of operation. This grows the number of ops by 2 times.
270void LandingPadInst::growOperands(unsigned Size) {
271 unsigned e = getNumOperands();
272 if (ReservedSpace >= e + Size) return;
273 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
274 growHungoffUses(ReservedSpace);
275}
276
278 unsigned OpNo = getNumOperands();
279 growOperands(1);
280 assert(OpNo < ReservedSpace && "Growing didn't work!");
282 getOperandList()[OpNo] = Val;
283}
284
285//===----------------------------------------------------------------------===//
286// CallBase Implementation
287//===----------------------------------------------------------------------===//
288
290 InsertPosition InsertPt) {
291 switch (CB->getOpcode()) {
292 case Instruction::Call:
293 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
294 case Instruction::Invoke:
295 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
296 case Instruction::CallBr:
297 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
298 default:
299 llvm_unreachable("Unknown CallBase sub-class!");
300 }
301}
302
304 InsertPosition InsertPt) {
306 for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
307 auto ChildOB = CI->getOperandBundleAt(i);
308 if (ChildOB.getTagName() != OpB.getTag())
309 OpDefs.emplace_back(ChildOB);
310 }
311 OpDefs.emplace_back(OpB);
312 return CallBase::Create(CI, OpDefs, InsertPt);
313}
314
315Function *CallBase::getCaller() { return getParent()->getParent(); }
316
318 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
319 return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
320}
321
323 const Value *V = getCalledOperand();
324 if (isa<Function>(V) || isa<Constant>(V))
325 return false;
326 return !isInlineAsm();
327}
328
329/// Tests if this call site must be tail call optimized. Only a CallInst can
330/// be tail call optimized.
332 if (auto *CI = dyn_cast<CallInst>(this))
333 return CI->isMustTailCall();
334 return false;
335}
336
337/// Tests if this call site is marked as a tail call.
339 if (auto *CI = dyn_cast<CallInst>(this))
340 return CI->isTailCall();
341 return false;
342}
343
345 if (auto *F = getCalledFunction())
346 return F->getIntrinsicID();
348}
349
352
353 if (const Function *F = getCalledFunction())
354 Mask |= F->getAttributes().getRetNoFPClass();
355 return Mask;
356}
357
360
361 if (const Function *F = getCalledFunction())
362 Mask |= F->getAttributes().getParamNoFPClass(i);
363 return Mask;
364}
365
366std::optional<ConstantRange> CallBase::getRange() const {
367 const Attribute RangeAttr = getRetAttr(llvm::Attribute::Range);
368 if (RangeAttr.isValid())
369 return RangeAttr.getRange();
370 return std::nullopt;
371}
372
374 if (hasRetAttr(Attribute::NonNull))
375 return true;
376
377 if (getRetDereferenceableBytes() > 0 &&
379 return true;
380
381 return false;
382}
383
385 unsigned Index;
386
387 if (Attrs.hasAttrSomewhere(Kind, &Index))
389 if (const Function *F = getCalledFunction())
390 if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
392
393 return nullptr;
394}
395
396/// Determine whether the argument or parameter has the given attribute.
397bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
398 assert(ArgNo < arg_size() && "Param index out of bounds!");
399
400 if (Attrs.hasParamAttr(ArgNo, Kind))
401 return true;
402
403 const Function *F = getCalledFunction();
404 if (!F)
405 return false;
406
407 if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
408 return false;
409
410 // Take into account mod/ref by operand bundles.
411 switch (Kind) {
412 case Attribute::ReadNone:
414 case Attribute::ReadOnly:
416 case Attribute::WriteOnly:
417 return !hasReadingOperandBundles();
418 default:
419 return true;
420 }
421}
422
423bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
424 if (auto *F = dyn_cast<Function>(getCalledOperand()))
425 return F->getAttributes().hasFnAttr(Kind);
426
427 return false;
428}
429
430bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
431 if (auto *F = dyn_cast<Function>(getCalledOperand()))
432 return F->getAttributes().hasFnAttr(Kind);
433
434 return false;
435}
436
437template <typename AK>
438Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
439 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
440 // getMemoryEffects() correctly combines memory effects from the call-site,
441 // operand bundles and function.
442 assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
443 }
444
445 if (auto *F = dyn_cast<Function>(getCalledOperand()))
446 return F->getAttributes().getFnAttr(Kind);
447
448 return Attribute();
449}
450
451template Attribute
452CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
453template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
454
455template <typename AK>
456Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
457 AK Kind) const {
459
460 if (auto *F = dyn_cast<Function>(V))
461 return F->getAttributes().getParamAttr(ArgNo, Kind);
462
463 return Attribute();
464}
465template Attribute
466CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
467 Attribute::AttrKind Kind) const;
468template Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
469 StringRef Kind) const;
470
473 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
475}
476
479 const unsigned BeginIndex) {
480 auto It = op_begin() + BeginIndex;
481 for (auto &B : Bundles)
482 It = std::copy(B.input_begin(), B.input_end(), It);
483
484 auto *ContextImpl = getContext().pImpl;
485 auto BI = Bundles.begin();
486 unsigned CurrentIndex = BeginIndex;
487
488 for (auto &BOI : bundle_op_infos()) {
489 assert(BI != Bundles.end() && "Incorrect allocation?");
490
491 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
492 BOI.Begin = CurrentIndex;
493 BOI.End = CurrentIndex + BI->input_size();
494 CurrentIndex = BOI.End;
495 BI++;
496 }
497
498 assert(BI == Bundles.end() && "Incorrect allocation?");
499
500 return It;
501}
502
504 /// When there isn't many bundles, we do a simple linear search.
505 /// Else fallback to a binary-search that use the fact that bundles usually
506 /// have similar number of argument to get faster convergence.
508 for (auto &BOI : bundle_op_infos())
509 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
510 return BOI;
511
512 llvm_unreachable("Did not find operand bundle for operand!");
513 }
514
515 assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
517 OpIdx < std::prev(bundle_op_info_end())->End &&
518 "The Idx isn't in the operand bundle");
519
520 /// We need a decimal number below and to prevent using floating point numbers
521 /// we use an intergal value multiplied by this constant.
522 constexpr unsigned NumberScaling = 1024;
523
526 bundle_op_iterator Current = Begin;
527
528 while (Begin != End) {
529 unsigned ScaledOperandPerBundle =
530 NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
531 Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
532 ScaledOperandPerBundle);
533 if (Current >= End)
534 Current = std::prev(End);
535 assert(Current < End && Current >= Begin &&
536 "the operand bundle doesn't cover every value in the range");
537 if (OpIdx >= Current->Begin && OpIdx < Current->End)
538 break;
539 if (OpIdx >= Current->End)
540 Begin = Current + 1;
541 else
542 End = Current;
543 }
544
545 assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
546 "the operand bundle doesn't cover every value in the range");
547 return *Current;
548}
549
552 InsertPosition InsertPt) {
553 if (CB->getOperandBundle(ID))
554 return CB;
555
557 CB->getOperandBundlesAsDefs(Bundles);
558 Bundles.push_back(OB);
559 return Create(CB, Bundles, InsertPt);
560}
561
563 InsertPosition InsertPt) {
565 bool CreateNew = false;
566
567 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
568 auto Bundle = CB->getOperandBundleAt(I);
569 if (Bundle.getTagID() == ID) {
570 CreateNew = true;
571 continue;
572 }
573 Bundles.emplace_back(Bundle);
574 }
575
576 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
577}
578
580 // Implementation note: this is a conservative implementation of operand
581 // bundle semantics, where *any* non-assume operand bundle (other than
582 // ptrauth) forces a callsite to be at least readonly.
585 getIntrinsicID() != Intrinsic::assume;
586}
587
592 getIntrinsicID() != Intrinsic::assume;
593}
594
597 if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
598 MemoryEffects FnME = Fn->getMemoryEffects();
599 if (hasOperandBundles()) {
600 // TODO: Add a method to get memory effects for operand bundles instead.
602 FnME |= MemoryEffects::readOnly();
604 FnME |= MemoryEffects::writeOnly();
605 }
606 ME &= FnME;
607 }
608 return ME;
609}
612}
613
614/// Determine if the function does not access memory.
617}
620}
621
622/// Determine if the function does not access or only reads memory.
625}
628}
629
630/// Determine if the function does not access or only writes memory.
633}
636}
637
638/// Determine if the call can access memmory only using pointers based
639/// on its arguments.
642}
645}
646
647/// Determine if the function may only access memory that is
648/// inaccessible from the IR.
651}
654}
655
656/// Determine if the function may only access memory that is
657/// either inaccessible from the IR or pointed to by its arguments.
660}
664}
665
666//===----------------------------------------------------------------------===//
667// CallInst Implementation
668//===----------------------------------------------------------------------===//
669
670void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
671 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
672 this->FTy = FTy;
673 assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
674 "NumOperands not set up?");
675
676#ifndef NDEBUG
677 assert((Args.size() == FTy->getNumParams() ||
678 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
679 "Calling a function with bad signature!");
680
681 for (unsigned i = 0; i != Args.size(); ++i)
682 assert((i >= FTy->getNumParams() ||
683 FTy->getParamType(i) == Args[i]->getType()) &&
684 "Calling a function with a bad signature!");
685#endif
686
687 // Set operands in order of their index to match use-list-order
688 // prediction.
689 llvm::copy(Args, op_begin());
690 setCalledOperand(Func);
691
692 auto It = populateBundleOperandInfos(Bundles, Args.size());
693 (void)It;
694 assert(It + 1 == op_end() && "Should add up!");
695
696 setName(NameStr);
697}
698
699void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
700 this->FTy = FTy;
701 assert(getNumOperands() == 1 && "NumOperands not set up?");
702 setCalledOperand(Func);
703
704 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
705
706 setName(NameStr);
707}
708
709CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
710 InsertPosition InsertBefore)
711 : CallBase(Ty->getReturnType(), Instruction::Call,
712 OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
713 init(Ty, Func, Name);
714}
715
716CallInst::CallInst(const CallInst &CI)
717 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
718 OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
719 CI.getNumOperands()) {
720 setTailCallKind(CI.getTailCallKind());
722
723 std::copy(CI.op_begin(), CI.op_end(), op_begin());
724 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
727}
728
730 InsertPosition InsertPt) {
731 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
732
733 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
734 Args, OpB, CI->getName(), InsertPt);
735 NewCI->setTailCallKind(CI->getTailCallKind());
736 NewCI->setCallingConv(CI->getCallingConv());
737 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
738 NewCI->setAttributes(CI->getAttributes());
739 NewCI->setDebugLoc(CI->getDebugLoc());
740 return NewCI;
741}
742
743// Update profile weight for call instruction by scaling it using the ratio
744// of S/T. The meaning of "branch_weights" meta data for call instruction is
745// transfered to represent call count.
747 if (T == 0) {
748 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
749 "div by 0. Ignoring. Likely the function "
750 << getParent()->getParent()->getName()
751 << " has 0 entry count, and contains call instructions "
752 "with non-zero prof info.");
753 return;
754 }
755 scaleProfData(*this, S, T);
756}
757
758//===----------------------------------------------------------------------===//
759// InvokeInst Implementation
760//===----------------------------------------------------------------------===//
761
762void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
763 BasicBlock *IfException, ArrayRef<Value *> Args,
765 const Twine &NameStr) {
766 this->FTy = FTy;
767
768 assert((int)getNumOperands() ==
769 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
770 "NumOperands not set up?");
771
772#ifndef NDEBUG
773 assert(((Args.size() == FTy->getNumParams()) ||
774 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
775 "Invoking a function with bad signature");
776
777 for (unsigned i = 0, e = Args.size(); i != e; i++)
778 assert((i >= FTy->getNumParams() ||
779 FTy->getParamType(i) == Args[i]->getType()) &&
780 "Invoking a function with a bad signature!");
781#endif
782
783 // Set operands in order of their index to match use-list-order
784 // prediction.
785 llvm::copy(Args, op_begin());
786 setNormalDest(IfNormal);
787 setUnwindDest(IfException);
789
790 auto It = populateBundleOperandInfos(Bundles, Args.size());
791 (void)It;
792 assert(It + 3 == op_end() && "Should add up!");
793
794 setName(NameStr);
795}
796
797InvokeInst::InvokeInst(const InvokeInst &II)
798 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
799 OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
800 II.getNumOperands()) {
801 setCallingConv(II.getCallingConv());
802 std::copy(II.op_begin(), II.op_end(), op_begin());
803 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
805 SubclassOptionalData = II.SubclassOptionalData;
806}
807
809 InsertPosition InsertPt) {
810 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
811
812 auto *NewII = InvokeInst::Create(
813 II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
814 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
815 NewII->setCallingConv(II->getCallingConv());
816 NewII->SubclassOptionalData = II->SubclassOptionalData;
817 NewII->setAttributes(II->getAttributes());
818 NewII->setDebugLoc(II->getDebugLoc());
819 return NewII;
820}
821
823 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
824}
825
827 if (T == 0) {
828 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
829 "div by 0. Ignoring. Likely the function "
830 << getParent()->getParent()->getName()
831 << " has 0 entry count, and contains call instructions "
832 "with non-zero prof info.");
833 return;
834 }
835 scaleProfData(*this, S, T);
836}
837
838//===----------------------------------------------------------------------===//
839// CallBrInst Implementation
840//===----------------------------------------------------------------------===//
841
842void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
843 ArrayRef<BasicBlock *> IndirectDests,
846 const Twine &NameStr) {
847 this->FTy = FTy;
848
849 assert((int)getNumOperands() ==
850 ComputeNumOperands(Args.size(), IndirectDests.size(),
851 CountBundleInputs(Bundles)) &&
852 "NumOperands not set up?");
853
854#ifndef NDEBUG
855 assert(((Args.size() == FTy->getNumParams()) ||
856 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
857 "Calling a function with bad signature");
858
859 for (unsigned i = 0, e = Args.size(); i != e; i++)
860 assert((i >= FTy->getNumParams() ||
861 FTy->getParamType(i) == Args[i]->getType()) &&
862 "Calling a function with a bad signature!");
863#endif
864
865 // Set operands in order of their index to match use-list-order
866 // prediction.
867 std::copy(Args.begin(), Args.end(), op_begin());
868 NumIndirectDests = IndirectDests.size();
869 setDefaultDest(Fallthrough);
870 for (unsigned i = 0; i != NumIndirectDests; ++i)
871 setIndirectDest(i, IndirectDests[i]);
873
874 auto It = populateBundleOperandInfos(Bundles, Args.size());
875 (void)It;
876 assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
877
878 setName(NameStr);
879}
880
881CallBrInst::CallBrInst(const CallBrInst &CBI)
882 : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
883 OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
884 CBI.getNumOperands()) {
886 std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
887 std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
890 NumIndirectDests = CBI.NumIndirectDests;
891}
892
894 InsertPosition InsertPt) {
895 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
896
897 auto *NewCBI = CallBrInst::Create(
898 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
899 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
900 NewCBI->setCallingConv(CBI->getCallingConv());
901 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
902 NewCBI->setAttributes(CBI->getAttributes());
903 NewCBI->setDebugLoc(CBI->getDebugLoc());
904 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
905 return NewCBI;
906}
907
908//===----------------------------------------------------------------------===//
909// ReturnInst Implementation
910//===----------------------------------------------------------------------===//
911
912ReturnInst::ReturnInst(const ReturnInst &RI)
913 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
914 OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
915 RI.getNumOperands()) {
916 if (RI.getNumOperands())
917 Op<0>() = RI.Op<0>();
919}
920
921ReturnInst::ReturnInst(LLVMContext &C, Value *retVal,
922 InsertPosition InsertBefore)
923 : Instruction(Type::getVoidTy(C), Instruction::Ret,
924 OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
925 InsertBefore) {
926 if (retVal)
927 Op<0>() = retVal;
928}
929
930//===----------------------------------------------------------------------===//
931// ResumeInst Implementation
932//===----------------------------------------------------------------------===//
933
934ResumeInst::ResumeInst(const ResumeInst &RI)
935 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
936 OperandTraits<ResumeInst>::op_begin(this), 1) {
937 Op<0>() = RI.Op<0>();
938}
939
940ResumeInst::ResumeInst(Value *Exn, InsertPosition InsertBefore)
941 : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
942 OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
943 Op<0>() = Exn;
944}
945
946//===----------------------------------------------------------------------===//
947// CleanupReturnInst Implementation
948//===----------------------------------------------------------------------===//
949
950CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
951 : Instruction(CRI.getType(), Instruction::CleanupRet,
952 OperandTraits<CleanupReturnInst>::op_end(this) -
953 CRI.getNumOperands(),
954 CRI.getNumOperands()) {
955 setSubclassData<Instruction::OpaqueField>(
957 Op<0>() = CRI.Op<0>();
958 if (CRI.hasUnwindDest())
959 Op<1>() = CRI.Op<1>();
960}
961
962void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
963 if (UnwindBB)
964 setSubclassData<UnwindDestField>(true);
965
966 Op<0>() = CleanupPad;
967 if (UnwindBB)
968 Op<1>() = UnwindBB;
969}
970
971CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
972 unsigned Values,
973 InsertPosition InsertBefore)
974 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
975 Instruction::CleanupRet,
976 OperandTraits<CleanupReturnInst>::op_end(this) - Values,
977 Values, InsertBefore) {
978 init(CleanupPad, UnwindBB);
979}
980
981//===----------------------------------------------------------------------===//
982// CatchReturnInst Implementation
983//===----------------------------------------------------------------------===//
984void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
985 Op<0>() = CatchPad;
986 Op<1>() = BB;
987}
988
989CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
990 : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
991 OperandTraits<CatchReturnInst>::op_begin(this), 2) {
992 Op<0>() = CRI.Op<0>();
993 Op<1>() = CRI.Op<1>();
994}
995
996CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
997 InsertPosition InsertBefore)
998 : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
999 OperandTraits<CatchReturnInst>::op_begin(this), 2,
1000 InsertBefore) {
1001 init(CatchPad, BB);
1002}
1003
1004//===----------------------------------------------------------------------===//
1005// CatchSwitchInst Implementation
1006//===----------------------------------------------------------------------===//
1007
1008CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1009 unsigned NumReservedValues,
1010 const Twine &NameStr,
1011 InsertPosition InsertBefore)
1012 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
1013 InsertBefore) {
1014 if (UnwindDest)
1015 ++NumReservedValues;
1016 init(ParentPad, UnwindDest, NumReservedValues + 1);
1017 setName(NameStr);
1018}
1019
1020CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
1021 : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
1022 CSI.getNumOperands()) {
1023 init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
1024 setNumHungOffUseOperands(ReservedSpace);
1025 Use *OL = getOperandList();
1026 const Use *InOL = CSI.getOperandList();
1027 for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
1028 OL[I] = InOL[I];
1029}
1030
1031void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
1032 unsigned NumReservedValues) {
1033 assert(ParentPad && NumReservedValues);
1034
1035 ReservedSpace = NumReservedValues;
1036 setNumHungOffUseOperands(UnwindDest ? 2 : 1);
1037 allocHungoffUses(ReservedSpace);
1038
1039 Op<0>() = ParentPad;
1040 if (UnwindDest) {
1041 setSubclassData<UnwindDestField>(true);
1042 setUnwindDest(UnwindDest);
1043 }
1044}
1045
/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  // Nothing to do if the reserved hung-off use list already has room for
  // Size more operands.
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}
1056
1058 unsigned OpNo = getNumOperands();
1059 growOperands(1);
1060 assert(OpNo < ReservedSpace && "Growing didn't work!");
1062 getOperandList()[OpNo] = Handler;
1063}
1064
1066 // Move all subsequent handlers up one.
1067 Use *EndDst = op_end() - 1;
1068 for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
1069 *CurDst = *(CurDst + 1);
1070 // Null out the last handler use.
1071 *EndDst = nullptr;
1072
1074}
1075
1076//===----------------------------------------------------------------------===//
1077// FuncletPadInst Implementation
1078//===----------------------------------------------------------------------===//
1079void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
1080 const Twine &NameStr) {
1081 assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
1082 llvm::copy(Args, op_begin());
1083 setParentPad(ParentPad);
1084 setName(NameStr);
1085}
1086
1087FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
1088 : Instruction(FPI.getType(), FPI.getOpcode(),
1089 OperandTraits<FuncletPadInst>::op_end(this) -
1090 FPI.getNumOperands(),
1091 FPI.getNumOperands()) {
1092 std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
1094}
1095
// Op selects the concrete funclet-pad opcode. The result type is the parent
// pad's type, and Values operand slots (the Args plus the parent pad — see
// init()) are co-allocated immediately before the instruction object.
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}
1105
1106//===----------------------------------------------------------------------===//
1107// UnreachableInst Implementation
1108//===----------------------------------------------------------------------===//
1109
1111 InsertPosition InsertBefore)
1112 : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
1113 0, InsertBefore) {}
1114
1115//===----------------------------------------------------------------------===//
1116// BranchInst Implementation
1117//===----------------------------------------------------------------------===//
1118
1119void BranchInst::AssertOK() {
1120 if (isConditional())
1121 assert(getCondition()->getType()->isIntegerTy(1) &&
1122 "May only branch on boolean predicates!");
1123}
1124
// Unconditional branch: a single operand holding the destination block.
BranchInst::BranchInst(BasicBlock *IfTrue, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  // Operands are addressed from the end of the use list; Op<-1> is the
  // (sole) successor.
  Op<-1>() = IfTrue;
}
1132
// Conditional branch: three operands addressed from the end of the use
// list — Op<-3> is the condition, Op<-2> the false successor, Op<-1> the
// true successor.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}
1146
1147BranchInst::BranchInst(const BranchInst &BI)
1148 : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
1149 OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
1150 BI.getNumOperands()) {
1151 // Assign in order of operand index to make use-list order predictable.
1152 if (BI.getNumOperands() != 1) {
1153 assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
1154 Op<-3>() = BI.Op<-3>();
1155 Op<-2>() = BI.Op<-2>();
1156 }
1157 Op<-1>() = BI.Op<-1>();
1159}
1160
1163 "Cannot swap successors of an unconditional branch");
1164 Op<-1>().swap(Op<-2>());
1165
1166 // Update profile metadata if present and it matches our structural
1167 // expectations.
1169}
1170
1171//===----------------------------------------------------------------------===//
1172// AllocaInst Implementation
1173//===----------------------------------------------------------------------===//
1174
1175static Value *getAISize(LLVMContext &Context, Value *Amt) {
1176 if (!Amt)
1177 Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1178 else {
1179 assert(!isa<BasicBlock>(Amt) &&
1180 "Passed basic block into allocation size parameter! Use other ctor");
1181 assert(Amt->getType()->isIntegerTy() &&
1182 "Allocation array size is not an integer!");
1183 }
1184 return Amt;
1185}
1186
1188 assert(Pos.isValid() &&
1189 "Insertion position cannot be null when alignment not provided!");
1190 BasicBlock *BB = Pos.getBasicBlock();
1191 assert(BB->getParent() &&
1192 "BB must be in a Function when alignment not provided!");
1193 const DataLayout &DL = BB->getDataLayout();
1194 return DL.getPrefTypeAlign(Ty);
1195}
1196
// Alloca of a single element: a null ArraySize is turned into the constant
// i32 1 by getAISize() in the target constructor.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1200
// When no alignment is given, use the preferred alignment for Ty from the
// DataLayout at the insertion point (see computeAllocaDefaultAlign).
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}
1206
1207AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1208 Align Align, const Twine &Name,
1209 InsertPosition InsertBefore)
1210 : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1211 getAISize(Ty->getContext(), ArraySize), InsertBefore),
1212 AllocatedType(Ty) {
1214 assert(!Ty->isVoidTy() && "Cannot allocate void!");
1215 setName(Name);
1216}
1217
1219 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
1220 return !CI->isOne();
1221 return true;
1222}
1223
1224/// isStaticAlloca - Return true if this alloca is in the entry block of the
1225/// function and is a constant size. If so, the code generator will fold it
1226/// into the prolog/epilog code, so it is basically free.
1228 // Must be constant size.
1229 if (!isa<ConstantInt>(getArraySize())) return false;
1230
1231 // Must be in the entry block.
1232 const BasicBlock *Parent = getParent();
1233 return Parent->isEntryBlock() && !isUsedWithInAlloca();
1234}
1235
1236//===----------------------------------------------------------------------===//
1237// LoadInst Implementation
1238//===----------------------------------------------------------------------===//
1239
1240void LoadInst::AssertOK() {
1242 "Ptr must have pointer type.");
1243}
1244
1246 assert(Pos.isValid() &&
1247 "Insertion position cannot be null when alignment not provided!");
1248 BasicBlock *BB = Pos.getBasicBlock();
1249 assert(BB->getParent() &&
1250 "BB must be in a Function when alignment not provided!");
1251 const DataLayout &DL = BB->getDataLayout();
1252 return DL.getABITypeAlign(Ty);
1253}
1254
1256 InsertPosition InsertBef)
1257 : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1258
// When no alignment is given, use the ABI alignment for Ty from the
// DataLayout at the insertion point (see computeLoadStoreDefaultAlign).
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1263
// Non-atomic load: delegates with NotAtomic ordering and the system-wide
// synchronization scope.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, InsertPosition InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}
1268
1269LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1271 InsertPosition InsertBef)
1272 : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1275 setAtomic(Order, SSID);
1276 AssertOK();
1277 setName(Name);
1278}
1279
1280//===----------------------------------------------------------------------===//
1281// StoreInst Implementation
1282//===----------------------------------------------------------------------===//
1283
1284void StoreInst::AssertOK() {
1285 assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1287 "Ptr must have pointer type!");
1288}
1289
1291 : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1292
1293StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1294 InsertPosition InsertBefore)
1295 : StoreInst(val, addr, isVolatile,
1296 computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
1297 InsertBefore) {}
1298
1299StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1300 InsertPosition InsertBefore)
1301 : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1302 SyncScope::System, InsertBefore) {}
1303
1304StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1305 AtomicOrdering Order, SyncScope::ID SSID,
1306 InsertPosition InsertBefore)
1307 : Instruction(Type::getVoidTy(val->getContext()), Store,
1308 OperandTraits<StoreInst>::op_begin(this),
1309 OperandTraits<StoreInst>::operands(this), InsertBefore) {
1310 Op<0>() = val;
1311 Op<1>() = addr;
1314 setAtomic(Order, SSID);
1315 AssertOK();
1316}
1317
1318//===----------------------------------------------------------------------===//
1319// AtomicCmpXchgInst Implementation
1320//===----------------------------------------------------------------------===//
1321
1322void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1323 Align Alignment, AtomicOrdering SuccessOrdering,
1324 AtomicOrdering FailureOrdering,
1325 SyncScope::ID SSID) {
1326 Op<0>() = Ptr;
1327 Op<1>() = Cmp;
1328 Op<2>() = NewVal;
1329 setSuccessOrdering(SuccessOrdering);
1330 setFailureOrdering(FailureOrdering);
1331 setSyncScopeID(SSID);
1332 setAlignment(Alignment);
1333
1334 assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1335 "All operands must be non-null!");
1337 "Ptr must have pointer type!");
1338 assert(getOperand(1)->getType() == getOperand(2)->getType() &&
1339 "Cmp type and NewVal type must be same!");
1340}
1341
1343 Align Alignment,
1344 AtomicOrdering SuccessOrdering,
1345 AtomicOrdering FailureOrdering,
1346 SyncScope::ID SSID,
1347 InsertPosition InsertBefore)
1348 : Instruction(
1349 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1350 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1351 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
1352 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1353}
1354
1355//===----------------------------------------------------------------------===//
1356// AtomicRMWInst Implementation
1357//===----------------------------------------------------------------------===//
1358
1359void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1360 Align Alignment, AtomicOrdering Ordering,
1361 SyncScope::ID SSID) {
1362 assert(Ordering != AtomicOrdering::NotAtomic &&
1363 "atomicrmw instructions can only be atomic.");
1364 assert(Ordering != AtomicOrdering::Unordered &&
1365 "atomicrmw instructions cannot be unordered.");
1366 Op<0>() = Ptr;
1367 Op<1>() = Val;
1369 setOrdering(Ordering);
1370 setSyncScopeID(SSID);
1371 setAlignment(Alignment);
1372
1373 assert(getOperand(0) && getOperand(1) && "All operands must be non-null!");
1375 "Ptr must have pointer type!");
1376 assert(Ordering != AtomicOrdering::NotAtomic &&
1377 "AtomicRMW instructions must be atomic!");
1378}
1379
1381 Align Alignment, AtomicOrdering Ordering,
1382 SyncScope::ID SSID, InsertPosition InsertBefore)
1383 : Instruction(Val->getType(), AtomicRMW,
1384 OperandTraits<AtomicRMWInst>::op_begin(this),
1385 OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
1386 Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1387}
1388
1390 switch (Op) {
1392 return "xchg";
1393 case AtomicRMWInst::Add:
1394 return "add";
1395 case AtomicRMWInst::Sub:
1396 return "sub";
1397 case AtomicRMWInst::And:
1398 return "and";
1400 return "nand";
1401 case AtomicRMWInst::Or:
1402 return "or";
1403 case AtomicRMWInst::Xor:
1404 return "xor";
1405 case AtomicRMWInst::Max:
1406 return "max";
1407 case AtomicRMWInst::Min:
1408 return "min";
1410 return "umax";
1412 return "umin";
1414 return "fadd";
1416 return "fsub";
1418 return "fmax";
1420 return "fmin";
1422 return "uinc_wrap";
1424 return "udec_wrap";
1426 return "<invalid operation>";
1427 }
1428
1429 llvm_unreachable("invalid atomicrmw operation");
1430}
1431
1432//===----------------------------------------------------------------------===//
1433// FenceInst Implementation
1434//===----------------------------------------------------------------------===//
1435
1437 SyncScope::ID SSID, InsertPosition InsertBefore)
1438 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
1439 setOrdering(Ordering);
1440 setSyncScopeID(SSID);
1441}
1442
1443//===----------------------------------------------------------------------===//
1444// GetElementPtrInst Implementation
1445//===----------------------------------------------------------------------===//
1446
1447void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1448 const Twine &Name) {
1449 assert(getNumOperands() == 1 + IdxList.size() &&
1450 "NumOperands not initialized?");
1451 Op<0>() = Ptr;
1452 llvm::copy(IdxList, op_begin() + 1);
1453 setName(Name);
1454}
1455
1456GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
1457 : Instruction(GEPI.getType(), GetElementPtr,
1458 OperandTraits<GetElementPtrInst>::op_end(this) -
1459 GEPI.getNumOperands(),
1460 GEPI.getNumOperands()),
1461 SourceElementType(GEPI.SourceElementType),
1462 ResultElementType(GEPI.ResultElementType) {
1463 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
1465}
1466
1468 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1469 if (!Struct->indexValid(Idx))
1470 return nullptr;
1471 return Struct->getTypeAtIndex(Idx);
1472 }
1473 if (!Idx->getType()->isIntOrIntVectorTy())
1474 return nullptr;
1475 if (auto *Array = dyn_cast<ArrayType>(Ty))
1476 return Array->getElementType();
1477 if (auto *Vector = dyn_cast<VectorType>(Ty))
1478 return Vector->getElementType();
1479 return nullptr;
1480}
1481
1483 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1484 if (Idx >= Struct->getNumElements())
1485 return nullptr;
1486 return Struct->getElementType(Idx);
1487 }
1488 if (auto *Array = dyn_cast<ArrayType>(Ty))
1489 return Array->getElementType();
1490 if (auto *Vector = dyn_cast<VectorType>(Ty))
1491 return Vector->getElementType();
1492 return nullptr;
1493}
1494
1495template <typename IndexTy>
1497 if (IdxList.empty())
1498 return Ty;
1499 for (IndexTy V : IdxList.slice(1)) {
1501 if (!Ty)
1502 return Ty;
1503 }
1504 return Ty;
1505}
1506
1508 return getIndexedTypeInternal(Ty, IdxList);
1509}
1510
1512 ArrayRef<Constant *> IdxList) {
1513 return getIndexedTypeInternal(Ty, IdxList);
1514}
1515
1517 return getIndexedTypeInternal(Ty, IdxList);
1518}
1519
1520/// hasAllZeroIndices - Return true if all of the indices of this GEP are
1521/// zeros. If so, the result pointer and the first operand have the same
1522/// value, just potentially different types.
1524 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1525 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
1526 if (!CI->isZero()) return false;
1527 } else {
1528 return false;
1529 }
1530 }
1531 return true;
1532}
1533
1534/// hasAllConstantIndices - Return true if all of the indices of this GEP are
1535/// constant integers. If so, the result pointer and the first operand have
1536/// a constant offset between them.
1538 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1539 if (!isa<ConstantInt>(getOperand(i)))
1540 return false;
1541 }
1542 return true;
1543}
1544
1547}
1548
1550 GEPNoWrapFlags NW = cast<GEPOperator>(this)->getNoWrapFlags();
1551 if (B)
1553 else
1554 NW = NW.withoutInBounds();
1555 setNoWrapFlags(NW);
1556}
1557
1559 return cast<GEPOperator>(this)->getNoWrapFlags();
1560}
1561
1563 return cast<GEPOperator>(this)->isInBounds();
1564}
1565
1567 return cast<GEPOperator>(this)->hasNoUnsignedSignedWrap();
1568}
1569
1571 return cast<GEPOperator>(this)->hasNoUnsignedWrap();
1572}
1573
1575 APInt &Offset) const {
1576 // Delegate to the generic GEPOperator implementation.
1577 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
1578}
1579
1581 const DataLayout &DL, unsigned BitWidth,
1582 MapVector<Value *, APInt> &VariableOffsets,
1583 APInt &ConstantOffset) const {
1584 // Delegate to the generic GEPOperator implementation.
1585 return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
1586 ConstantOffset);
1587}
1588
1589//===----------------------------------------------------------------------===//
1590// ExtractElementInst Implementation
1591//===----------------------------------------------------------------------===//
1592
// The result type is the element type of the vector being indexed.
// Operand 0 is the vector, operand 1 the index.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       InsertPosition InsertBef)
    : Instruction(
          cast<VectorType>(Val->getType())->getElementType(), ExtractElement,
          OperandTraits<ExtractElementInst>::op_begin(this), 2, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}
1605
1607 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1608 return false;
1609 return true;
1610}
1611
1612//===----------------------------------------------------------------------===//
1613// InsertElementInst Implementation
1614//===----------------------------------------------------------------------===//
1615
// The result type is the same vector type as the first operand.
// Operand 0 is the vector, operand 1 the element, operand 2 the index.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     InsertPosition InsertBef)
    : Instruction(Vec->getType(), InsertElement,
                  OperandTraits<InsertElementInst>::op_begin(this), 3,
                  InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}
1629
1631 const Value *Index) {
1632 if (!Vec->getType()->isVectorTy())
1633 return false; // First operand of insertelement must be vector type.
1634
1635 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
1636 return false;// Second operand of insertelement must be vector element type.
1637
1638 if (!Index->getType()->isIntegerTy())
1639 return false; // Third operand of insertelement must be i32.
1640 return true;
1641}
1642
1643//===----------------------------------------------------------------------===//
1644// ShuffleVectorInst Implementation
1645//===----------------------------------------------------------------------===//
1646
1648 assert(V && "Cannot create placeholder of nullptr V");
1649 return PoisonValue::get(V->getType());
1650}
1651
1653 InsertPosition InsertBefore)
1655 InsertBefore) {}
1656
1658 const Twine &Name,
1659 InsertPosition InsertBefore)
1661 InsertBefore) {}
1662
1664 const Twine &Name,
1665 InsertPosition InsertBefore)
1666 : Instruction(
1667 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1668 cast<VectorType>(Mask->getType())->getElementCount()),
1669 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
1670 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
1671 assert(isValidOperands(V1, V2, Mask) &&
1672 "Invalid shuffle vector instruction operands!");
1673
1674 Op<0>() = V1;
1675 Op<1>() = V2;
1676 SmallVector<int, 16> MaskArr;
1677 getShuffleMask(cast<Constant>(Mask), MaskArr);
1678 setShuffleMask(MaskArr);
1679 setName(Name);
1680}
1681
1683 const Twine &Name,
1684 InsertPosition InsertBefore)
1685 : Instruction(
1686 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1687 Mask.size(), isa<ScalableVectorType>(V1->getType())),
1688 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
1689 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
1690 assert(isValidOperands(V1, V2, Mask) &&
1691 "Invalid shuffle vector instruction operands!");
1692 Op<0>() = V1;
1693 Op<1>() = V2;
1694 setShuffleMask(Mask);
1695 setName(Name);
1696}
1697
1699 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
1700 int NumMaskElts = ShuffleMask.size();
1701 SmallVector<int, 16> NewMask(NumMaskElts);
1702 for (int i = 0; i != NumMaskElts; ++i) {
1703 int MaskElt = getMaskValue(i);
1704 if (MaskElt == PoisonMaskElem) {
1705 NewMask[i] = PoisonMaskElem;
1706 continue;
1707 }
1708 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
1709 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
1710 NewMask[i] = MaskElt;
1711 }
1712 setShuffleMask(NewMask);
1713 Op<0>().swap(Op<1>());
1714}
1715
1717 ArrayRef<int> Mask) {
1718 // V1 and V2 must be vectors of the same type.
1719 if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
1720 return false;
1721
1722 // Make sure the mask elements make sense.
1723 int V1Size =
1724 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
1725 for (int Elem : Mask)
1726 if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
1727 return false;
1728
1729 if (isa<ScalableVectorType>(V1->getType()))
1730 if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
1731 return false;
1732
1733 return true;
1734}
1735
1737 const Value *Mask) {
1738 // V1 and V2 must be vectors of the same type.
1739 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
1740 return false;
1741
1742 // Mask must be vector of i32, and must be the same kind of vector as the
1743 // input vectors
1744 auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
1745 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
1746 isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
1747 return false;
1748
1749 // Check to see if Mask is valid.
1750 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
1751 return true;
1752
1753 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
1754 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
1755 for (Value *Op : MV->operands()) {
1756 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1757 if (CI->uge(V1Size*2))
1758 return false;
1759 } else if (!isa<UndefValue>(Op)) {
1760 return false;
1761 }
1762 }
1763 return true;
1764 }
1765
1766 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1767 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
1768 for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
1769 i != e; ++i)
1770 if (CDS->getElementAsInteger(i) >= V1Size*2)
1771 return false;
1772 return true;
1773 }
1774
1775 return false;
1776}
1777
1779 SmallVectorImpl<int> &Result) {
1780 ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
1781
1782 if (isa<ConstantAggregateZero>(Mask)) {
1783 Result.resize(EC.getKnownMinValue(), 0);
1784 return;
1785 }
1786
1787 Result.reserve(EC.getKnownMinValue());
1788
1789 if (EC.isScalable()) {
1790 assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
1791 "Scalable vector shuffle mask must be undef or zeroinitializer");
1792 int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
1793 for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
1794 Result.emplace_back(MaskVal);
1795 return;
1796 }
1797
1798 unsigned NumElts = EC.getKnownMinValue();
1799
1800 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1801 for (unsigned i = 0; i != NumElts; ++i)
1802 Result.push_back(CDS->getElementAsInteger(i));
1803 return;
1804 }
1805 for (unsigned i = 0; i != NumElts; ++i) {
1806 Constant *C = Mask->getAggregateElement(i);
1807 Result.push_back(isa<UndefValue>(C) ? -1 :
1808 cast<ConstantInt>(C)->getZExtValue());
1809 }
1810}
1811
1813 ShuffleMask.assign(Mask.begin(), Mask.end());
1814 ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
1815}
1816
1818 Type *ResultTy) {
1819 Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
1820 if (isa<ScalableVectorType>(ResultTy)) {
1821 assert(all_equal(Mask) && "Unexpected shuffle");
1822 Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
1823 if (Mask[0] == 0)
1824 return Constant::getNullValue(VecTy);
1825 return PoisonValue::get(VecTy);
1826 }
1828 for (int Elem : Mask) {
1829 if (Elem == PoisonMaskElem)
1830 MaskConst.push_back(PoisonValue::get(Int32Ty));
1831 else
1832 MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
1833 }
1834 return ConstantVector::get(MaskConst);
1835}
1836
1837static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1838 assert(!Mask.empty() && "Shuffle mask must contain elements");
1839 bool UsesLHS = false;
1840 bool UsesRHS = false;
1841 for (int I : Mask) {
1842 if (I == -1)
1843 continue;
1844 assert(I >= 0 && I < (NumOpElts * 2) &&
1845 "Out-of-bounds shuffle mask element");
1846 UsesLHS |= (I < NumOpElts);
1847 UsesRHS |= (I >= NumOpElts);
1848 if (UsesLHS && UsesRHS)
1849 return false;
1850 }
1851 // Allow for degenerate case: completely undef mask means neither source is used.
1852 return UsesLHS || UsesRHS;
1853}
1854
1856 // We don't have vector operand size information, so assume operands are the
1857 // same size as the mask.
1858 return isSingleSourceMaskImpl(Mask, NumSrcElts);
1859}
1860
1861static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1862 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
1863 return false;
1864 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1865 if (Mask[i] == -1)
1866 continue;
1867 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1868 return false;
1869 }
1870 return true;
1871}
1872
1874 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1875 return false;
1876 // We don't have vector operand size information, so assume operands are the
1877 // same size as the mask.
1878 return isIdentityMaskImpl(Mask, NumSrcElts);
1879}
1880
1882 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1883 return false;
1884 if (!isSingleSourceMask(Mask, NumSrcElts))
1885 return false;
1886
1887 // The number of elements in the mask must be at least 2.
1888 if (NumSrcElts < 2)
1889 return false;
1890
1891 for (int I = 0, E = Mask.size(); I < E; ++I) {
1892 if (Mask[I] == -1)
1893 continue;
1894 if (Mask[I] != (NumSrcElts - 1 - I) &&
1895 Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
1896 return false;
1897 }
1898 return true;
1899}
1900
1902 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1903 return false;
1904 if (!isSingleSourceMask(Mask, NumSrcElts))
1905 return false;
1906 for (int I = 0, E = Mask.size(); I < E; ++I) {
1907 if (Mask[I] == -1)
1908 continue;
1909 if (Mask[I] != 0 && Mask[I] != NumSrcElts)
1910 return false;
1911 }
1912 return true;
1913}
1914
1916 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1917 return false;
1918 // Select is differentiated from identity. It requires using both sources.
1919 if (isSingleSourceMask(Mask, NumSrcElts))
1920 return false;
1921 for (int I = 0, E = Mask.size(); I < E; ++I) {
1922 if (Mask[I] == -1)
1923 continue;
1924 if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
1925 return false;
1926 }
1927 return true;
1928}
1929
1931 // Example masks that will return true:
1932 // v1 = <a, b, c, d>
1933 // v2 = <e, f, g, h>
1934 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
1935 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
1936
1937 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1938 return false;
1939 // 1. The number of elements in the mask must be a power-of-2 and at least 2.
1940 int Sz = Mask.size();
1941 if (Sz < 2 || !isPowerOf2_32(Sz))
1942 return false;
1943
1944 // 2. The first element of the mask must be either a 0 or a 1.
1945 if (Mask[0] != 0 && Mask[0] != 1)
1946 return false;
1947
1948 // 3. The difference between the first 2 elements must be equal to the
1949 // number of elements in the mask.
1950 if ((Mask[1] - Mask[0]) != NumSrcElts)
1951 return false;
1952
1953 // 4. The difference between consecutive even-numbered and odd-numbered
1954 // elements must be equal to 2.
1955 for (int I = 2; I < Sz; ++I) {
1956 int MaskEltVal = Mask[I];
1957 if (MaskEltVal == -1)
1958 return false;
1959 int MaskEltPrevVal = Mask[I - 2];
1960 if (MaskEltVal - MaskEltPrevVal != 2)
1961 return false;
1962 }
1963 return true;
1964}
1965
1967 int &Index) {
1968 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1969 return false;
1970 // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
1971 int StartIndex = -1;
1972 for (int I = 0, E = Mask.size(); I != E; ++I) {
1973 int MaskEltVal = Mask[I];
1974 if (MaskEltVal == -1)
1975 continue;
1976
1977 if (StartIndex == -1) {
1978 // Don't support a StartIndex that begins in the second input, or if the
1979 // first non-undef index would access below the StartIndex.
1980 if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
1981 return false;
1982
1983 StartIndex = MaskEltVal - I;
1984 continue;
1985 }
1986
1987 // Splice is sequential starting from StartIndex.
1988 if (MaskEltVal != (StartIndex + I))
1989 return false;
1990 }
1991
1992 if (StartIndex == -1)
1993 return false;
1994
1995 // NOTE: This accepts StartIndex == 0 (COPY).
1996 Index = StartIndex;
1997 return true;
1998}
1999
2001 int NumSrcElts, int &Index) {
2002 // Must extract from a single source.
2003 if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
2004 return false;
2005
2006 // Must be smaller (else this is an Identity shuffle).
2007 if (NumSrcElts <= (int)Mask.size())
2008 return false;
2009
2010 // Find start of extraction, accounting that we may start with an UNDEF.
2011 int SubIndex = -1;
2012 for (int i = 0, e = Mask.size(); i != e; ++i) {
2013 int M = Mask[i];
2014 if (M < 0)
2015 continue;
2016 int Offset = (M % NumSrcElts) - i;
2017 if (0 <= SubIndex && SubIndex != Offset)
2018 return false;
2019 SubIndex = Offset;
2020 }
2021
2022 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2023 Index = SubIndex;
2024 return true;
2025 }
2026 return false;
2027}
2028
2030 int NumSrcElts, int &NumSubElts,
2031 int &Index) {
2032 int NumMaskElts = Mask.size();
2033
2034 // Don't try to match if we're shuffling to a smaller size.
2035 if (NumMaskElts < NumSrcElts)
2036 return false;
2037
2038 // TODO: We don't recognize self-insertion/widening.
2039 if (isSingleSourceMaskImpl(Mask, NumSrcElts))
2040 return false;
2041
2042 // Determine which mask elements are attributed to which source.
2043 APInt UndefElts = APInt::getZero(NumMaskElts);
2044 APInt Src0Elts = APInt::getZero(NumMaskElts);
2045 APInt Src1Elts = APInt::getZero(NumMaskElts);
2046 bool Src0Identity = true;
2047 bool Src1Identity = true;
2048
2049 for (int i = 0; i != NumMaskElts; ++i) {
2050 int M = Mask[i];
2051 if (M < 0) {
2052 UndefElts.setBit(i);
2053 continue;
2054 }
2055 if (M < NumSrcElts) {
2056 Src0Elts.setBit(i);
2057 Src0Identity &= (M == i);
2058 continue;
2059 }
2060 Src1Elts.setBit(i);
2061 Src1Identity &= (M == (i + NumSrcElts));
2062 }
2063 assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2064 "unknown shuffle elements");
2065 assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
2066 "2-source shuffle not found");
2067
2068 // Determine lo/hi span ranges.
2069 // TODO: How should we handle undefs at the start of subvector insertions?
2070 int Src0Lo = Src0Elts.countr_zero();
2071 int Src1Lo = Src1Elts.countr_zero();
2072 int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
2073 int Src1Hi = NumMaskElts - Src1Elts.countl_zero();
2074
2075 // If src0 is in place, see if the src1 elements is inplace within its own
2076 // span.
2077 if (Src0Identity) {
2078 int NumSub1Elts = Src1Hi - Src1Lo;
2079 ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
2080 if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
2081 NumSubElts = NumSub1Elts;
2082 Index = Src1Lo;
2083 return true;
2084 }
2085 }
2086
2087 // If src1 is in place, see if the src0 elements is inplace within its own
2088 // span.
2089 if (Src1Identity) {
2090 int NumSub0Elts = Src0Hi - Src0Lo;
2091 ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
2092 if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
2093 NumSubElts = NumSub0Elts;
2094 Index = Src0Lo;
2095 return true;
2096 }
2097 }
2098
2099 return false;
2100}
2101
2103 // FIXME: Not currently possible to express a shuffle mask for a scalable
2104 // vector for this case.
2105 if (isa<ScalableVectorType>(getType()))
2106 return false;
2107
2108 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2109 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2110 if (NumMaskElts <= NumOpElts)
2111 return false;
2112
2113 // The first part of the mask must choose elements from exactly 1 source op.
2115 if (!isIdentityMaskImpl(Mask, NumOpElts))
2116 return false;
2117
2118 // All extending must be with undef elements.
2119 for (int i = NumOpElts; i < NumMaskElts; ++i)
2120 if (Mask[i] != -1)
2121 return false;
2122
2123 return true;
2124}
2125
2127 // FIXME: Not currently possible to express a shuffle mask for a scalable
2128 // vector for this case.
2129 if (isa<ScalableVectorType>(getType()))
2130 return false;
2131
2132 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2133 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2134 if (NumMaskElts >= NumOpElts)
2135 return false;
2136
2137 return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2138}
2139
2141 // Vector concatenation is differentiated from identity with padding.
2142 if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
2143 return false;
2144
2145 // FIXME: Not currently possible to express a shuffle mask for a scalable
2146 // vector for this case.
2147 if (isa<ScalableVectorType>(getType()))
2148 return false;
2149
2150 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2151 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2152 if (NumMaskElts != NumOpElts * 2)
2153 return false;
2154
2155 // Use the mask length rather than the operands' vector lengths here. We
2156 // already know that the shuffle returns a vector twice as long as the inputs,
2157 // and neither of the inputs are undef vectors. If the mask picks consecutive
2158 // elements from both inputs, then this is a concatenation of the inputs.
2159 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2160}
2161
2163 int ReplicationFactor, int VF) {
2164 assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2165 "Unexpected mask size.");
2166
2167 for (int CurrElt : seq(VF)) {
2168 ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2169 assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2170 "Run out of mask?");
2171 Mask = Mask.drop_front(ReplicationFactor);
2172 if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2173 return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2174 }))
2175 return false;
2176 }
2177 assert(Mask.empty() && "Did not consume the whole mask?");
2178
2179 return true;
2180}
2181
2183 int &ReplicationFactor, int &VF) {
2184 // undef-less case is trivial.
2185 if (!llvm::is_contained(Mask, PoisonMaskElem)) {
2186 ReplicationFactor =
2187 Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2188 if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2189 return false;
2190 VF = Mask.size() / ReplicationFactor;
2191 return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2192 }
2193
2194 // However, if the mask contains undef's, we have to enumerate possible tuples
2195 // and pick one. There are bounds on replication factor: [1, mask size]
2196 // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2197 // Additionally, mask size is a replication factor multiplied by vector size,
2198 // which further significantly reduces the search space.
2199
2200 // Before doing that, let's perform basic correctness checking first.
2201 int Largest = -1;
2202 for (int MaskElt : Mask) {
2203 if (MaskElt == PoisonMaskElem)
2204 continue;
2205 // Elements must be in non-decreasing order.
2206 if (MaskElt < Largest)
2207 return false;
2208 Largest = std::max(Largest, MaskElt);
2209 }
2210
2211 // Prefer larger replication factor if all else equal.
2212 for (int PossibleReplicationFactor :
2213 reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2214 if (Mask.size() % PossibleReplicationFactor != 0)
2215 continue;
2216 int PossibleVF = Mask.size() / PossibleReplicationFactor;
2217 if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2218 PossibleVF))
2219 continue;
2220 ReplicationFactor = PossibleReplicationFactor;
2221 VF = PossibleVF;
2222 return true;
2223 }
2224
2225 return false;
2226}
2227
2228bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2229 int &VF) const {
2230 // Not possible to express a shuffle mask for a scalable vector for this
2231 // case.
2232 if (isa<ScalableVectorType>(getType()))
2233 return false;
2234
2235 VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2236 if (ShuffleMask.size() % VF != 0)
2237 return false;
2238 ReplicationFactor = ShuffleMask.size() / VF;
2239
2240 return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2241}
2242
2244 if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2245 Mask.size() % VF != 0)
2246 return false;
2247 for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2248 ArrayRef<int> SubMask = Mask.slice(K, VF);
2249 if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
2250 continue;
2251 SmallBitVector Used(VF, false);
2252 for (int Idx : SubMask) {
2253 if (Idx != PoisonMaskElem && Idx < VF)
2254 Used.set(Idx);
2255 }
2256 if (!Used.all())
2257 return false;
2258 }
2259 return true;
2260}
2261
2262/// Return true if this shuffle mask is a replication mask.
2264 // Not possible to express a shuffle mask for a scalable vector for this
2265 // case.
2266 if (isa<ScalableVectorType>(getType()))
2267 return false;
2268 if (!isSingleSourceMask(ShuffleMask, VF))
2269 return false;
2270
2271 return isOneUseSingleSourceMask(ShuffleMask, VF);
2272}
2273
2274bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2275 FixedVectorType *OpTy = dyn_cast<FixedVectorType>(getOperand(0)->getType());
2276 // shuffle_vector can only interleave fixed length vectors - for scalable
2277 // vectors, see the @llvm.vector.interleave2 intrinsic
2278 if (!OpTy)
2279 return false;
2280 unsigned OpNumElts = OpTy->getNumElements();
2281
2282 return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2283}
2284
2286 ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
2287 SmallVectorImpl<unsigned> &StartIndexes) {
2288 unsigned NumElts = Mask.size();
2289 if (NumElts % Factor)
2290 return false;
2291
2292 unsigned LaneLen = NumElts / Factor;
2293 if (!isPowerOf2_32(LaneLen))
2294 return false;
2295
2296 StartIndexes.resize(Factor);
2297
2298 // Check whether each element matches the general interleaved rule.
2299 // Ignore undef elements, as long as the defined elements match the rule.
2300 // Outer loop processes all factors (x, y, z in the above example)
2301 unsigned I = 0, J;
2302 for (; I < Factor; I++) {
2303 unsigned SavedLaneValue;
2304 unsigned SavedNoUndefs = 0;
2305
2306 // Inner loop processes consecutive accesses (x, x+1... in the example)
2307 for (J = 0; J < LaneLen - 1; J++) {
2308 // Lane computes x's position in the Mask
2309 unsigned Lane = J * Factor + I;
2310 unsigned NextLane = Lane + Factor;
2311 int LaneValue = Mask[Lane];
2312 int NextLaneValue = Mask[NextLane];
2313
2314 // If both are defined, values must be sequential
2315 if (LaneValue >= 0 && NextLaneValue >= 0 &&
2316 LaneValue + 1 != NextLaneValue)
2317 break;
2318
2319 // If the next value is undef, save the current one as reference
2320 if (LaneValue >= 0 && NextLaneValue < 0) {
2321 SavedLaneValue = LaneValue;
2322 SavedNoUndefs = 1;
2323 }
2324
2325 // Undefs are allowed, but defined elements must still be consecutive:
2326 // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
2327 // Verify this by storing the last non-undef followed by an undef
2328 // Check that following non-undef masks are incremented with the
2329 // corresponding distance.
2330 if (SavedNoUndefs > 0 && LaneValue < 0) {
2331 SavedNoUndefs++;
2332 if (NextLaneValue >= 0 &&
2333 SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
2334 break;
2335 }
2336 }
2337
2338 if (J < LaneLen - 1)
2339 return false;
2340
2341 int StartMask = 0;
2342 if (Mask[I] >= 0) {
2343 // Check that the start of the I range (J=0) is greater than 0
2344 StartMask = Mask[I];
2345 } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
2346 // StartMask defined by the last value in lane
2347 StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
2348 } else if (SavedNoUndefs > 0) {
2349 // StartMask defined by some non-zero value in the j loop
2350 StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
2351 }
2352 // else StartMask remains set to 0, i.e. all elements are undefs
2353
2354 if (StartMask < 0)
2355 return false;
2356 // We must stay within the vectors; This case can happen with undefs.
2357 if (StartMask + LaneLen > NumInputElts)
2358 return false;
2359
2360 StartIndexes[I] = StartMask;
2361 }
2362
2363 return true;
2364}
2365
2366/// Check if the mask is a DE-interleave mask of the given factor
2367/// \p Factor like:
2368/// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
2370 unsigned Factor,
2371 unsigned &Index) {
2372 // Check all potential start indices from 0 to (Factor - 1).
2373 for (unsigned Idx = 0; Idx < Factor; Idx++) {
2374 unsigned I = 0;
2375
2376 // Check that elements are in ascending order by Factor. Ignore undef
2377 // elements.
2378 for (; I < Mask.size(); I++)
2379 if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
2380 break;
2381
2382 if (I == Mask.size()) {
2383 Index = Idx;
2384 return true;
2385 }
2386 }
2387
2388 return false;
2389}
2390
2391/// Try to lower a vector shuffle as a bit rotation.
2392///
2393/// Look for a repeated rotation pattern in each sub group.
2394/// Returns an element-wise left bit rotation amount or -1 if failed.
2395static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
2396 int NumElts = Mask.size();
2397 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
2398
2399 int RotateAmt = -1;
2400 for (int i = 0; i != NumElts; i += NumSubElts) {
2401 for (int j = 0; j != NumSubElts; ++j) {
2402 int M = Mask[i + j];
2403 if (M < 0)
2404 continue;
2405 if (M < i || M >= i + NumSubElts)
2406 return -1;
2407 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
2408 if (0 <= RotateAmt && Offset != RotateAmt)
2409 return -1;
2410 RotateAmt = Offset;
2411 }
2412 }
2413 return RotateAmt;
2414}
2415
2417 ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
2418 unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
2419 for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
2420 int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
2421 if (EltRotateAmt < 0)
2422 continue;
2423 RotateAmt = EltRotateAmt * EltSizeInBits;
2424 return true;
2425 }
2426
2427 return false;
2428}
2429
2430//===----------------------------------------------------------------------===//
2431// InsertValueInst Class
2432//===----------------------------------------------------------------------===//
2433
2434void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2435 const Twine &Name) {
2436 assert(getNumOperands() == 2 && "NumOperands not initialized?");
2437
2438 // There's no fundamental reason why we require at least one index
2439 // (other than weirdness with &*IdxBegin being invalid; see
2440 // getelementptr's init routine for example). But there's no
2441 // present need to support it.
2442 assert(!Idxs.empty() && "InsertValueInst must have at least one index");
2443
2445 Val->getType() && "Inserted value must match indexed type!");
2446 Op<0>() = Agg;
2447 Op<1>() = Val;
2448
2449 Indices.append(Idxs.begin(), Idxs.end());
2450 setName(Name);
2451}
2452
2453InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
2454 : Instruction(IVI.getType(), InsertValue,
2455 OperandTraits<InsertValueInst>::op_begin(this), 2),
2456 Indices(IVI.Indices) {
2457 Op<0>() = IVI.getOperand(0);
2458 Op<1>() = IVI.getOperand(1);
2460}
2461
2462//===----------------------------------------------------------------------===//
2463// ExtractValueInst Class
2464//===----------------------------------------------------------------------===//
2465
2466void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2467 assert(getNumOperands() == 1 && "NumOperands not initialized?");
2468
2469 // There's no fundamental reason why we require at least one index.
2470 // But there's no present need to support it.
2471 assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2472
2473 Indices.append(Idxs.begin(), Idxs.end());
2474 setName(Name);
2475}
2476
2477ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
2478 : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
2479 Indices(EVI.Indices) {
2481}
2482
2483// getIndexedType - Returns the type of the element that would be extracted
2484// with an extractvalue instruction with the specified parameters.
2485//
2486// A null type is returned if the indices are invalid for the specified
2487// pointer type.
2488//
2490 ArrayRef<unsigned> Idxs) {
2491 for (unsigned Index : Idxs) {
2492 // We can't use CompositeType::indexValid(Index) here.
2493 // indexValid() always returns true for arrays because getelementptr allows
2494 // out-of-bounds indices. Since we don't allow those for extractvalue and
2495 // insertvalue we need to check array indexing manually.
2496 // Since the only other types we can index into are struct types it's just
2497 // as easy to check those manually as well.
2498 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2499 if (Index >= AT->getNumElements())
2500 return nullptr;
2501 Agg = AT->getElementType();
2502 } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
2503 if (Index >= ST->getNumElements())
2504 return nullptr;
2505 Agg = ST->getElementType(Index);
2506 } else {
2507 // Not a valid type to index into.
2508 return nullptr;
2509 }
2510 }
2511 return const_cast<Type*>(Agg);
2512}
2513
2514//===----------------------------------------------------------------------===//
2515// UnaryOperator Class
2516//===----------------------------------------------------------------------===//
2517
2519 const Twine &Name, InsertPosition InsertBefore)
2520 : UnaryInstruction(Ty, iType, S, InsertBefore) {
2521 Op<0>() = S;
2522 setName(Name);
2523 AssertOK();
2524}
2525
2527 InsertPosition InsertBefore) {
2528 return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
2529}
2530
2531void UnaryOperator::AssertOK() {
2532 Value *LHS = getOperand(0);
2533 (void)LHS; // Silence warnings.
2534#ifndef NDEBUG
2535 switch (getOpcode()) {
2536 case FNeg:
2537 assert(getType() == LHS->getType() &&
2538 "Unary operation should return same type as operand!");
2539 assert(getType()->isFPOrFPVectorTy() &&
2540 "Tried to create a floating-point operation on a "
2541 "non-floating-point type!");
2542 break;
2543 default: llvm_unreachable("Invalid opcode provided");
2544 }
2545#endif
2546}
2547
2548//===----------------------------------------------------------------------===//
2549// BinaryOperator Class
2550//===----------------------------------------------------------------------===//
2551
2553 const Twine &Name, InsertPosition InsertBefore)
2554 : Instruction(Ty, iType, OperandTraits<BinaryOperator>::op_begin(this),
2555 OperandTraits<BinaryOperator>::operands(this), InsertBefore) {
2556 Op<0>() = S1;
2557 Op<1>() = S2;
2558 setName(Name);
2559 AssertOK();
2560}
2561
2562void BinaryOperator::AssertOK() {
2563 Value *LHS = getOperand(0), *RHS = getOperand(1);
2564 (void)LHS; (void)RHS; // Silence warnings.
2565 assert(LHS->getType() == RHS->getType() &&
2566 "Binary operator operand types must match!");
2567#ifndef NDEBUG
2568 switch (getOpcode()) {
2569 case Add: case Sub:
2570 case Mul:
2571 assert(getType() == LHS->getType() &&
2572 "Arithmetic operation should return same type as operands!");
2573 assert(getType()->isIntOrIntVectorTy() &&
2574 "Tried to create an integer operation on a non-integer type!");
2575 break;
2576 case FAdd: case FSub:
2577 case FMul:
2578 assert(getType() == LHS->getType() &&
2579 "Arithmetic operation should return same type as operands!");
2580 assert(getType()->isFPOrFPVectorTy() &&
2581 "Tried to create a floating-point operation on a "
2582 "non-floating-point type!");
2583 break;
2584 case UDiv:
2585 case SDiv:
2586 assert(getType() == LHS->getType() &&
2587 "Arithmetic operation should return same type as operands!");
2588 assert(getType()->isIntOrIntVectorTy() &&
2589 "Incorrect operand type (not integer) for S/UDIV");
2590 break;
2591 case FDiv:
2592 assert(getType() == LHS->getType() &&
2593 "Arithmetic operation should return same type as operands!");
2594 assert(getType()->isFPOrFPVectorTy() &&
2595 "Incorrect operand type (not floating point) for FDIV");
2596 break;
2597 case URem:
2598 case SRem:
2599 assert(getType() == LHS->getType() &&
2600 "Arithmetic operation should return same type as operands!");
2601 assert(getType()->isIntOrIntVectorTy() &&
2602 "Incorrect operand type (not integer) for S/UREM");
2603 break;
2604 case FRem:
2605 assert(getType() == LHS->getType() &&
2606 "Arithmetic operation should return same type as operands!");
2607 assert(getType()->isFPOrFPVectorTy() &&
2608 "Incorrect operand type (not floating point) for FREM");
2609 break;
2610 case Shl:
2611 case LShr:
2612 case AShr:
2613 assert(getType() == LHS->getType() &&
2614 "Shift operation should return same type as operands!");
2615 assert(getType()->isIntOrIntVectorTy() &&
2616 "Tried to create a shift operation on a non-integral type!");
2617 break;
2618 case And: case Or:
2619 case Xor:
2620 assert(getType() == LHS->getType() &&
2621 "Logical operation should return same type as operands!");
2622 assert(getType()->isIntOrIntVectorTy() &&
2623 "Tried to create a logical operation on a non-integral type!");
2624 break;
2625 default: llvm_unreachable("Invalid opcode provided");
2626 }
2627#endif
2628}
2629
2631 const Twine &Name,
2632 InsertPosition InsertBefore) {
2633 assert(S1->getType() == S2->getType() &&
2634 "Cannot create binary operator with two operands of differing type!");
2635 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
2636}
2637
2639 InsertPosition InsertBefore) {
2640 Value *Zero = ConstantInt::get(Op->getType(), 0);
2641 return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
2642 InsertBefore);
2643}
2644
2646 InsertPosition InsertBefore) {
2647 Value *Zero = ConstantInt::get(Op->getType(), 0);
2648 return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
2649}
2650
2652 InsertPosition InsertBefore) {
2653 Constant *C = Constant::getAllOnesValue(Op->getType());
2654 return new BinaryOperator(Instruction::Xor, Op, C,
2655 Op->getType(), Name, InsertBefore);
2656}
2657
2658// Exchange the two operands to this instruction. This instruction is safe to
2659// use on any binary instruction and does not modify the semantics of the
2660// instruction. If the instruction is order-dependent (SetLT f.e.), the opcode
2661// is changed.
2663 if (!isCommutative())
2664 return true; // Can't commute operands
2665 Op<0>().swap(Op<1>());
2666 return false;
2667}
2668
2669//===----------------------------------------------------------------------===//
2670// FPMathOperator Class
2671//===----------------------------------------------------------------------===//
2672
2674 const MDNode *MD =
2675 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
2676 if (!MD)
2677 return 0.0;
2678 ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
2679 return Accuracy->getValueAPF().convertToFloat();
2680}
2681
2682//===----------------------------------------------------------------------===//
2683// CastInst Class
2684//===----------------------------------------------------------------------===//
2685
2686// Just determine if this cast only deals with integral->integral conversion.
2688 switch (getOpcode()) {
2689 default: return false;
2690 case Instruction::ZExt:
2691 case Instruction::SExt:
2692 case Instruction::Trunc:
2693 return true;
2694 case Instruction::BitCast:
2695 return getOperand(0)->getType()->isIntegerTy() &&
2696 getType()->isIntegerTy();
2697 }
2698}
2699
2700/// This function determines if the CastInst does not require any bits to be
2701/// changed in order to effect the cast. Essentially, it identifies cases where
2702/// no code gen is necessary for the cast, hence the name no-op cast. For
2703/// example, the following are all no-op casts:
2704/// # bitcast i32* %x to i8*
2705/// # bitcast <2 x i32> %x to <4 x i16>
2706/// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only
2707/// Determine if the described cast is a no-op.
2709 Type *SrcTy,
2710 Type *DestTy,
2711 const DataLayout &DL) {
2712 assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
2713 switch (Opcode) {
2714 default: llvm_unreachable("Invalid CastOp");
2715 case Instruction::Trunc:
2716 case Instruction::ZExt:
2717 case Instruction::SExt:
2718 case Instruction::FPTrunc:
2719 case Instruction::FPExt:
2720 case Instruction::UIToFP:
2721 case Instruction::SIToFP:
2722 case Instruction::FPToUI:
2723 case Instruction::FPToSI:
2724 case Instruction::AddrSpaceCast:
2725 // TODO: Target informations may give a more accurate answer here.
2726 return false;
2727 case Instruction::BitCast:
2728 return true; // BitCast never modifies bits.
2729 case Instruction::PtrToInt:
2730 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
2731 DestTy->getScalarSizeInBits();
2732 case Instruction::IntToPtr:
2733 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
2734 SrcTy->getScalarSizeInBits();
2735 }
2736}
2737
2739 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
2740}
2741
2742/// This function determines if a pair of casts can be eliminated and what
2743/// opcode should be used in the elimination. This assumes that there are two
2744/// instructions like this:
2745/// * %F = firstOpcode SrcTy %x to MidTy
2746/// * %S = secondOpcode MidTy %F to DstTy
2747/// The function returns a resultOpcode so these two casts can be replaced with:
2748/// * %Replacement = resultOpcode %SrcTy %x to DstTy
2749/// If no such cast is permitted, the function returns 0.
2752 Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
2753 Type *DstIntPtrTy) {
2754 // Define the 144 possibilities for these two cast instructions. The values
2755 // in this matrix determine what to do in a given situation and select the
2756 // case in the switch below. The rows correspond to firstOp, the columns
2757 // correspond to secondOp. In looking at the table below, keep in mind
2758 // the following cast properties:
2759 //
2760 // Size Compare Source Destination
2761 // Operator Src ? Size Type Sign Type Sign
2762 // -------- ------------ ------------------- ---------------------
2763 // TRUNC > Integer Any Integral Any
2764 // ZEXT < Integral Unsigned Integer Any
2765 // SEXT < Integral Signed Integer Any
2766 // FPTOUI n/a FloatPt n/a Integral Unsigned
2767 // FPTOSI n/a FloatPt n/a Integral Signed
2768 // UITOFP n/a Integral Unsigned FloatPt n/a
2769 // SITOFP n/a Integral Signed FloatPt n/a
2770 // FPTRUNC > FloatPt n/a FloatPt n/a
2771 // FPEXT < FloatPt n/a FloatPt n/a
2772 // PTRTOINT n/a Pointer n/a Integral Unsigned
2773 // INTTOPTR n/a Integral Unsigned Pointer n/a
2774 // BITCAST = FirstClass n/a FirstClass n/a
2775 // ADDRSPCST n/a Pointer n/a Pointer n/a
2776 //
2777 // NOTE: some transforms are safe, but we consider them to be non-profitable.
2778 // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
2779 // into "fptoui double to i64", but this loses information about the range
2780 // of the produced value (we no longer know the top-part is all zeros).
2781 // Further this conversion is often much more expensive for typical hardware,
2782 // and causes issues when building libgcc. We disallow fptosi+sext for the
2783 // same reason.
2784 const unsigned numCastOps =
2785 Instruction::CastOpsEnd - Instruction::CastOpsBegin;
2786 static const uint8_t CastResults[numCastOps][numCastOps] = {
2787 // T F F U S F F P I B A -+
2788 // R Z S P P I I T P 2 N T S |
2789 // U E E 2 2 2 2 R E I T C C +- secondOp
2790 // N X X U S F F N X N 2 V V |
2791 // C T T I I P P C T T P T T -+
2792 { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+
2793 { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt |
2794 { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt |
2795 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI |
2796 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI |
2797 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp
2798 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP |
2799 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc |
2800 { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt |
2801 { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt |
2802 { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr |
2803 { 5, 5, 5, 0, 0, 5, 5, 0, 0,16, 5, 1,14}, // BitCast |
2804 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
2805 };
2806
2807 // TODO: This logic could be encoded into the table above and handled in the
2808 // switch below.
2809 // If either of the casts are a bitcast from scalar to vector, disallow the
2810 // merging. However, any pair of bitcasts are allowed.
2811 bool IsFirstBitcast = (firstOp == Instruction::BitCast);
2812 bool IsSecondBitcast = (secondOp == Instruction::BitCast);
2813 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
2814
2815 // Check if any of the casts convert scalars <-> vectors.
2816 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
2817 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
2818 if (!AreBothBitcasts)
2819 return 0;
2820
2821 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
2822 [secondOp-Instruction::CastOpsBegin];
2823 switch (ElimCase) {
2824 case 0:
2825 // Categorically disallowed.
2826 return 0;
2827 case 1:
2828 // Allowed, use first cast's opcode.
2829 return firstOp;
2830 case 2:
2831 // Allowed, use second cast's opcode.
2832 return secondOp;
2833 case 3:
2834 // No-op cast in second op implies firstOp as long as the DestTy
2835 // is integer and we are not converting between a vector and a
2836 // non-vector type.
2837 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
2838 return firstOp;
2839 return 0;
2840 case 4:
2841 // No-op cast in second op implies firstOp as long as the DestTy
2842 // matches MidTy.
2843 if (DstTy == MidTy)
2844 return firstOp;
2845 return 0;
2846 case 5:
2847 // No-op cast in first op implies secondOp as long as the SrcTy
2848 // is an integer.
2849 if (SrcTy->isIntegerTy())
2850 return secondOp;
2851 return 0;
2852 case 7: {
2853 // Disable inttoptr/ptrtoint optimization if enabled.
2854 if (DisableI2pP2iOpt)
2855 return 0;
2856
2857 // Cannot simplify if address spaces are different!
2858 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2859 return 0;
2860
2861 unsigned MidSize = MidTy->getScalarSizeInBits();
2862 // We can still fold this without knowing the actual sizes as long we
2863 // know that the intermediate pointer is the largest possible
2864 // pointer size.
2865 // FIXME: Is this always true?
2866 if (MidSize == 64)
2867 return Instruction::BitCast;
2868
2869 // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
2870 if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
2871 return 0;
2872 unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
2873 if (MidSize >= PtrSize)
2874 return Instruction::BitCast;
2875 return 0;
2876 }
2877 case 8: {
2878 // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
2879 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
2880 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
2881 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2882 unsigned DstSize = DstTy->getScalarSizeInBits();
2883 if (SrcTy == DstTy)
2884 return Instruction::BitCast;
2885 if (SrcSize < DstSize)
2886 return firstOp;
2887 if (SrcSize > DstSize)
2888 return secondOp;
2889 return 0;
2890 }
2891 case 9:
2892 // zext, sext -> zext, because sext can't sign extend after zext
2893 return Instruction::ZExt;
2894 case 11: {
2895 // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
2896 if (!MidIntPtrTy)
2897 return 0;
2898 unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
2899 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2900 unsigned DstSize = DstTy->getScalarSizeInBits();
2901 if (SrcSize <= PtrSize && SrcSize == DstSize)
2902 return Instruction::BitCast;
2903 return 0;
2904 }
2905 case 12:
2906 // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
2907 // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
2908 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2909 return Instruction::AddrSpaceCast;
2910 return Instruction::BitCast;
2911 case 13:
2912 // FIXME: this state can be merged with (1), but the following assert
2913 // is useful to check the correcteness of the sequence due to semantic
2914 // change of bitcast.
2915 assert(
2916 SrcTy->isPtrOrPtrVectorTy() &&
2917 MidTy->isPtrOrPtrVectorTy() &&
2918 DstTy->isPtrOrPtrVectorTy() &&
2919 SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
2920 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
2921 "Illegal addrspacecast, bitcast sequence!");
2922 // Allowed, use first cast's opcode
2923 return firstOp;
2924 case 14:
2925 // bitcast, addrspacecast -> addrspacecast
2926 return Instruction::AddrSpaceCast;
2927 case 15:
2928 // FIXME: this state can be merged with (1), but the following assert
2929 // is useful to check the correcteness of the sequence due to semantic
2930 // change of bitcast.
2931 assert(
2932 SrcTy->isIntOrIntVectorTy() &&
2933 MidTy->isPtrOrPtrVectorTy() &&
2934 DstTy->isPtrOrPtrVectorTy() &&
2935 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
2936 "Illegal inttoptr, bitcast sequence!");
2937 // Allowed, use first cast's opcode
2938 return firstOp;
2939 case 16:
2940 // FIXME: this state can be merged with (2), but the following assert
2941 // is useful to check the correcteness of the sequence due to semantic
2942 // change of bitcast.
2943 assert(
2944 SrcTy->isPtrOrPtrVectorTy() &&
2945 MidTy->isPtrOrPtrVectorTy() &&
2946 DstTy->isIntOrIntVectorTy() &&
2947 SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
2948 "Illegal bitcast, ptrtoint sequence!");
2949 // Allowed, use second cast's opcode
2950 return secondOp;
2951 case 17:
2952 // (sitofp (zext x)) -> (uitofp x)
2953 return Instruction::UIToFP;
2954 case 99:
2955 // Cast combination can't happen (error in input). This is for all cases
2956 // where the MidTy is not the same for the two cast instructions.
2957 llvm_unreachable("Invalid Cast Combination");
2958 default:
2959 llvm_unreachable("Error in CastResults table!!!");
2960 }
2961}
2962
2964 const Twine &Name, InsertPosition InsertBefore) {
2965 assert(castIsValid(op, S, Ty) && "Invalid cast!");
2966 // Construct and return the appropriate CastInst subclass
2967 switch (op) {
2968 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
2969 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
2970 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
2971 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
2972 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
2973 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
2974 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
2975 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
2976 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
2977 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
2978 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
2979 case BitCast:
2980 return new BitCastInst(S, Ty, Name, InsertBefore);
2981 case AddrSpaceCast:
2982 return new AddrSpaceCastInst(S, Ty, Name, InsertBefore);
2983 default:
2984 llvm_unreachable("Invalid opcode provided");
2985 }
2986}
2987
2989 InsertPosition InsertBefore) {
2990 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2991 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
2992 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
2993}
2994
2996 InsertPosition InsertBefore) {
2997 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2998 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
2999 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3000}
3001
3003 InsertPosition InsertBefore) {
3004 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3005 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3006 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3007}
3008
3009/// Create a BitCast or a PtrToInt cast instruction
3011 InsertPosition InsertBefore) {
3012 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3013 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3014 "Invalid cast");
3015 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3016 assert((!Ty->isVectorTy() ||
3017 cast<VectorType>(Ty)->getElementCount() ==
3018 cast<VectorType>(S->getType())->getElementCount()) &&
3019 "Invalid cast");
3020
3021 if (Ty->isIntOrIntVectorTy())
3022 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3023
3024 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3025}
3026
3028 Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore) {
3029 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3030 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3031
3033 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3034
3035 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3036}
3037
3039 const Twine &Name,
3040 InsertPosition InsertBefore) {
3041 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3042 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3043 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3044 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3045
3046 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3047}
3048
3050 const Twine &Name,
3051 InsertPosition InsertBefore) {
3052 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3053 "Invalid integer cast");
3054 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3055 unsigned DstBits = Ty->getScalarSizeInBits();
3056 Instruction::CastOps opcode =
3057 (SrcBits == DstBits ? Instruction::BitCast :
3058 (SrcBits > DstBits ? Instruction::Trunc :
3059 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3060 return Create(opcode, C, Ty, Name, InsertBefore);
3061}
3062
3064 InsertPosition InsertBefore) {
3065 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3066 "Invalid cast");
3067 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3068 unsigned DstBits = Ty->getScalarSizeInBits();
3069 assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
3070 Instruction::CastOps opcode =
3071 (SrcBits == DstBits ? Instruction::BitCast :
3072 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3073 return Create(opcode, C, Ty, Name, InsertBefore);
3074}
3075
3076bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3077 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3078 return false;
3079
3080 if (SrcTy == DestTy)
3081 return true;
3082
3083 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3084 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3085 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3086 // An element by element cast. Valid if casting the elements is valid.
3087 SrcTy = SrcVecTy->getElementType();
3088 DestTy = DestVecTy->getElementType();
3089 }
3090 }
3091 }
3092
3093 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3094 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3095 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3096 }
3097 }
3098
3099 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3100 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3101
3102 // Could still have vectors of pointers if the number of elements doesn't
3103 // match
3104 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3105 return false;
3106
3107 if (SrcBits != DestBits)
3108 return false;
3109
3110 if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
3111 return false;
3112
3113 return true;
3114}
3115
3117 const DataLayout &DL) {
3118 // ptrtoint and inttoptr are not allowed on non-integral pointers
3119 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3120 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3121 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3122 !DL.isNonIntegralPointerType(PtrTy));
3123 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3124 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3125 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3126 !DL.isNonIntegralPointerType(PtrTy));
3127
3128 return isBitCastable(SrcTy, DestTy);
3129}
3130
3131// Provide a way to get a "cast" where the cast opcode is inferred from the
3132// types and size of the operand. This, basically, is a parallel of the
3133// logic in the castIsValid function below. This axiom should hold:
3134// castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3135// should not assert in castIsValid. In other words, this produces a "correct"
3136// casting opcode for the arguments passed to it.
3139 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3140 Type *SrcTy = Src->getType();
3141
3142 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3143 "Only first class types are castable!");
3144
3145 if (SrcTy == DestTy)
3146 return BitCast;
3147
3148 // FIXME: Check address space sizes here
3149 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3150 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3151 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3152 // An element by element cast. Find the appropriate opcode based on the
3153 // element types.
3154 SrcTy = SrcVecTy->getElementType();
3155 DestTy = DestVecTy->getElementType();
3156 }
3157
3158 // Get the bit sizes, we'll need these
3159 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3160 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3161
3162 // Run through the possibilities ...
3163 if (DestTy->isIntegerTy()) { // Casting to integral
3164 if (SrcTy->isIntegerTy()) { // Casting from integral
3165 if (DestBits < SrcBits)
3166 return Trunc; // int -> smaller int
3167 else if (DestBits > SrcBits) { // its an extension
3168 if (SrcIsSigned)
3169 return SExt; // signed -> SEXT
3170 else
3171 return ZExt; // unsigned -> ZEXT
3172 } else {
3173 return BitCast; // Same size, No-op cast
3174 }
3175 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3176 if (DestIsSigned)
3177 return FPToSI; // FP -> sint
3178 else
3179 return FPToUI; // FP -> uint
3180 } else if (SrcTy->isVectorTy()) {
3181 assert(DestBits == SrcBits &&
3182 "Casting vector to integer of different width");
3183 return BitCast; // Same size, no-op cast
3184 } else {
3185 assert(SrcTy->isPointerTy() &&
3186 "Casting from a value that is not first-class type");
3187 return PtrToInt; // ptr -> int
3188 }
3189 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3190 if (SrcTy->isIntegerTy()) { // Casting from integral
3191 if (SrcIsSigned)
3192 return SIToFP; // sint -> FP
3193 else
3194 return UIToFP; // uint -> FP
3195 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3196 if (DestBits < SrcBits) {
3197 return FPTrunc; // FP -> smaller FP
3198 } else if (DestBits > SrcBits) {
3199 return FPExt; // FP -> larger FP
3200 } else {
3201 return BitCast; // same size, no-op cast
3202 }
3203 } else if (SrcTy->isVectorTy()) {
3204 assert(DestBits == SrcBits &&
3205 "Casting vector to floating point of different width");
3206 return BitCast; // same size, no-op cast
3207 }
3208 llvm_unreachable("Casting pointer or non-first class to float");
3209 } else if (DestTy->isVectorTy()) {
3210 assert(DestBits == SrcBits &&
3211 "Illegal cast to vector (wrong type or size)");
3212 return BitCast;
3213 } else if (DestTy->isPointerTy()) {
3214 if (SrcTy->isPointerTy()) {
3215 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3216 return AddrSpaceCast;
3217 return BitCast; // ptr -> ptr
3218 } else if (SrcTy->isIntegerTy()) {
3219 return IntToPtr; // int -> ptr
3220 }
3221 llvm_unreachable("Casting pointer to other than pointer or int");
3222 } else if (DestTy->isX86_MMXTy()) {
3223 if (SrcTy->isVectorTy()) {
3224 assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
3225 return BitCast; // 64-bit vector to MMX
3226 }
3227 llvm_unreachable("Illegal cast to X86_MMX");
3228 }
3229 llvm_unreachable("Casting to type that is not first-class");
3230}
3231
3232//===----------------------------------------------------------------------===//
3233// CastInst SubClass Constructors
3234//===----------------------------------------------------------------------===//
3235
3236/// Check that the construction parameters for a CastInst are correct. This
3237/// could be broken out into the separate constructors but it is useful to have
3238/// it in one place and to eliminate the redundant code for getting the sizes
3239/// of the types involved.
3240bool
3242 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3243 SrcTy->isAggregateType() || DstTy->isAggregateType())
3244 return false;
3245
3246 // Get the size of the types in bits, and whether we are dealing
3247 // with vector types, we'll need this later.
3248 bool SrcIsVec = isa<VectorType>(SrcTy);
3249 bool DstIsVec = isa<VectorType>(DstTy);
3250 unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3251 unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3252
3253 // If these are vector types, get the lengths of the vectors (using zero for
3254 // scalar types means that checking that vector lengths match also checks that
3255 // scalars are not being converted to vectors or vectors to scalars).
3256 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3258 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3260
3261 // Switch on the opcode provided
3262 switch (op) {
3263 default: return false; // This is an input error
3264 case Instruction::Trunc:
3265 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3266 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3267 case Instruction::ZExt:
3268 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3269 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3270 case Instruction::SExt:
3271 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3272 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3273 case Instruction::FPTrunc:
3274 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3275 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3276 case Instruction::FPExt:
3277 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3278 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3279 case Instruction::UIToFP:
3280 case Instruction::SIToFP:
3281 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3282 SrcEC == DstEC;
3283 case Instruction::FPToUI:
3284 case Instruction::FPToSI:
3285 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3286 SrcEC == DstEC;
3287 case Instruction::PtrToInt:
3288 if (SrcEC != DstEC)
3289 return false;
3290 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3291 case Instruction::IntToPtr:
3292 if (SrcEC != DstEC)
3293 return false;
3294 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3295 case Instruction::BitCast: {
3296 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3297 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3298
3299 // BitCast implies a no-op cast of type only. No bits change.
3300 // However, you can't cast pointers to anything but pointers.
3301 if (!SrcPtrTy != !DstPtrTy)
3302 return false;
3303
3304 // For non-pointer cases, the cast is okay if the source and destination bit
3305 // widths are identical.
3306 if (!SrcPtrTy)
3307 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3308
3309 // If both are pointers then the address spaces must match.
3310 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3311 return false;
3312
3313 // A vector of pointers must have the same number of elements.
3314 if (SrcIsVec && DstIsVec)
3315 return SrcEC == DstEC;
3316 if (SrcIsVec)
3317 return SrcEC == ElementCount::getFixed(1);
3318 if (DstIsVec)
3319 return DstEC == ElementCount::getFixed(1);
3320
3321 return true;
3322 }
3323 case Instruction::AddrSpaceCast: {
3324 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3325 if (!SrcPtrTy)
3326 return false;
3327
3328 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3329 if (!DstPtrTy)
3330 return false;
3331
3332 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3333 return false;
3334
3335 return SrcEC == DstEC;
3336 }
3337 }
3338}
3339
3341 InsertPosition InsertBefore)
3342 : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3343 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3344}
3345
3347 InsertPosition InsertBefore)
3348 : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3349 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3350}
3351
3353 InsertPosition InsertBefore)
3354 : CastInst(Ty, SExt, S, Name, InsertBefore) {
3355 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3356}
3357
3359 InsertPosition InsertBefore)
3360 : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3361 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3362}
3363
3365 InsertPosition InsertBefore)
3366 : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3367 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3368}
3369
3371 InsertPosition InsertBefore)
3372 : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3373 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3374}
3375
3377 InsertPosition InsertBefore)
3378 : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3379 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3380}
3381
3383 InsertPosition InsertBefore)
3384 : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3385 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3386}
3387
3389 InsertPosition InsertBefore)
3390 : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3391 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3392}
3393
3395 InsertPosition InsertBefore)
3396 : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3397 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3398}
3399
3401 InsertPosition InsertBefore)
3402 : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3403 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3404}
3405
3407 InsertPosition InsertBefore)
3408 : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3409 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3410}
3411
3413 InsertPosition InsertBefore)
3414 : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3415 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3416}
3417
3418//===----------------------------------------------------------------------===//
3419// CmpInst Classes
3420//===----------------------------------------------------------------------===//
3421
3423 Value *RHS, const Twine &Name, InsertPosition InsertBefore,
3424 Instruction *FlagsSource)
3425 : Instruction(ty, op, OperandTraits<CmpInst>::op_begin(this),
3426 OperandTraits<CmpInst>::operands(this), InsertBefore) {
3427 Op<0>() = LHS;
3428 Op<1>() = RHS;
3429 setPredicate((Predicate)predicate);
3430 setName(Name);
3431 if (FlagsSource)
3432 copyIRFlags(FlagsSource);
3433}
3434
3436 const Twine &Name, InsertPosition InsertBefore) {
3437 if (Op == Instruction::ICmp) {
3438 if (InsertBefore.isValid())
3439 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3440 S1, S2, Name);
3441 else
3442 return new ICmpInst(CmpInst::Predicate(predicate),
3443 S1, S2, Name);
3444 }
3445
3446 if (InsertBefore.isValid())
3447 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3448 S1, S2, Name);
3449 else
3450 return new FCmpInst(CmpInst::Predicate(predicate),
3451 S1, S2, Name);
3452}
3453
3455 Value *S2,
3456 const Instruction *FlagsSource,
3457 const Twine &Name,
3458 InsertPosition InsertBefore) {
3459 CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);
3460 Inst->copyIRFlags(FlagsSource);
3461 return Inst;
3462}
3463
3465 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3466 IC->swapOperands();
3467 else
3468 cast<FCmpInst>(this)->swapOperands();
3469}
3470
3472 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3473 return IC->isCommutative();
3474 return cast<FCmpInst>(this)->isCommutative();
3475}
3476
3479 return ICmpInst::isEquality(P);
3481 return FCmpInst::isEquality(P);
3482 llvm_unreachable("Unsupported predicate kind");
3483}
3484
3486 switch (pred) {
3487 default: llvm_unreachable("Unknown cmp predicate!");
3488 case ICMP_EQ: return ICMP_NE;
3489 case ICMP_NE: return ICMP_EQ;
3490 case ICMP_UGT: return ICMP_ULE;
3491 case ICMP_ULT: return ICMP_UGE;
3492 case ICMP_UGE: return ICMP_ULT;
3493 case ICMP_ULE: return ICMP_UGT;
3494 case ICMP_SGT: return ICMP_SLE;
3495 case ICMP_SLT: return ICMP_SGE;
3496 case ICMP_SGE: return ICMP_SLT;
3497 case ICMP_SLE: return ICMP_SGT;
3498
3499 case FCMP_OEQ: return FCMP_UNE;
3500 case FCMP_ONE: return FCMP_UEQ;
3501 case FCMP_OGT: return FCMP_ULE;
3502 case FCMP_OLT: return FCMP_UGE;
3503 case FCMP_OGE: return FCMP_ULT;
3504 case FCMP_OLE: return FCMP_UGT;
3505 case FCMP_UEQ: return FCMP_ONE;
3506 case FCMP_UNE: return FCMP_OEQ;
3507 case FCMP_UGT: return FCMP_OLE;
3508 case FCMP_ULT: return FCMP_OGE;
3509 case FCMP_UGE: return FCMP_OLT;
3510 case FCMP_ULE: return FCMP_OGT;
3511 case FCMP_ORD: return FCMP_UNO;
3512 case FCMP_UNO: return FCMP_ORD;
3513 case FCMP_TRUE: return FCMP_FALSE;
3514 case FCMP_FALSE: return FCMP_TRUE;
3515 }
3516}
3517
3519 switch (Pred) {
3520 default: return "unknown";
3521 case FCmpInst::FCMP_FALSE: return "false";
3522 case FCmpInst::FCMP_OEQ: return "oeq";
3523 case FCmpInst::FCMP_OGT: return "ogt";
3524 case FCmpInst::FCMP_OGE: return "oge";
3525 case FCmpInst::FCMP_OLT: return "olt";
3526 case FCmpInst::FCMP_OLE: return "ole";
3527 case FCmpInst::FCMP_ONE: return "one";
3528 case FCmpInst::FCMP_ORD: return "ord";
3529 case FCmpInst::FCMP_UNO: return "uno";
3530 case FCmpInst::FCMP_UEQ: return "ueq";
3531 case FCmpInst::FCMP_UGT: return "ugt";
3532 case FCmpInst::FCMP_UGE: return "uge";
3533 case FCmpInst::FCMP_ULT: return "ult";
3534 case FCmpInst::FCMP_ULE: return "ule";
3535 case FCmpInst::FCMP_UNE: return "une";
3536 case FCmpInst::FCMP_TRUE: return "true";
3537 case ICmpInst::ICMP_EQ: return "eq";
3538 case ICmpInst::ICMP_NE: return "ne";
3539 case ICmpInst::ICMP_SGT: return "sgt";
3540 case ICmpInst::ICMP_SGE: return "sge";
3541 case ICmpInst::ICMP_SLT: return "slt";
3542 case ICmpInst::ICMP_SLE: return "sle";
3543 case ICmpInst::ICMP_UGT: return "ugt";
3544 case ICmpInst::ICMP_UGE: return "uge";
3545 case ICmpInst::ICMP_ULT: return "ult";
3546 case ICmpInst::ICMP_ULE: return "ule";
3547 }
3548}
3549
3552 return OS;
3553}
3554
3556 switch (pred) {
3557 default: llvm_unreachable("Unknown icmp predicate!");
3558 case ICMP_EQ: case ICMP_NE:
3559 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3560 return pred;
3561 case ICMP_UGT: return ICMP_SGT;
3562 case ICMP_ULT: return ICMP_SLT;
3563 case ICMP_UGE: return ICMP_SGE;
3564 case ICMP_ULE: return ICMP_SLE;
3565 }
3566}
3567
3569 switch (pred) {
3570 default: llvm_unreachable("Unknown icmp predicate!");
3571 case ICMP_EQ: case ICMP_NE:
3572 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3573 return pred;
3574 case ICMP_SGT: return ICMP_UGT;
3575 case ICMP_SLT: return ICMP_ULT;
3576 case ICMP_SGE: return ICMP_UGE;
3577 case ICMP_SLE: return ICMP_ULE;
3578 }
3579}
3580
3582 switch (pred) {
3583 default: llvm_unreachable("Unknown cmp predicate!");
3584 case ICMP_EQ: case ICMP_NE:
3585 return pred;
3586 case ICMP_SGT: return ICMP_SLT;
3587 case ICMP_SLT: return ICMP_SGT;
3588 case ICMP_SGE: return ICMP_SLE;
3589 case ICMP_SLE: return ICMP_SGE;
3590 case ICMP_UGT: return ICMP_ULT;
3591 case ICMP_ULT: return ICMP_UGT;
3592 case ICMP_UGE: return ICMP_ULE;
3593 case ICMP_ULE: return ICMP_UGE;
3594
3595 case FCMP_FALSE: case FCMP_TRUE:
3596 case FCMP_OEQ: case FCMP_ONE:
3597 case FCMP_UEQ: case FCMP_UNE:
3598 case FCMP_ORD: case FCMP_UNO:
3599 return pred;
3600 case FCMP_OGT: return FCMP_OLT;
3601 case FCMP_OLT: return FCMP_OGT;
3602 case FCMP_OGE: return FCMP_OLE;
3603 case FCMP_OLE: return FCMP_OGE;
3604 case FCMP_UGT: return FCMP_ULT;
3605 case FCMP_ULT: return FCMP_UGT;
3606 case FCMP_UGE: return FCMP_ULE;
3607 case FCMP_ULE: return FCMP_UGE;
3608 }
3609}
3610
3612 switch (pred) {
3613 case ICMP_SGE:
3614 case ICMP_SLE:
3615 case ICMP_UGE:
3616 case ICMP_ULE:
3617 case FCMP_OGE:
3618 case FCMP_OLE:
3619 case FCMP_UGE:
3620 case FCMP_ULE:
3621 return true;
3622 default:
3623 return false;
3624 }
3625}
3626
3628 switch (pred) {
3629 case ICMP_SGT:
3630 case ICMP_SLT:
3631 case ICMP_UGT:
3632 case ICMP_ULT:
3633 case FCMP_OGT:
3634 case FCMP_OLT:
3635 case FCMP_UGT:
3636 case FCMP_ULT:
3637 return true;
3638 default:
3639 return false;
3640 }
3641}
3642
3644 switch (pred) {
3645 case ICMP_SGE:
3646 return ICMP_SGT;
3647 case ICMP_SLE:
3648 return ICMP_SLT;
3649 case ICMP_UGE:
3650 return ICMP_UGT;
3651 case ICMP_ULE:
3652 return ICMP_ULT;
3653 case FCMP_OGE:
3654 return FCMP_OGT;
3655 case FCMP_OLE:
3656 return FCMP_OLT;
3657 case FCMP_UGE:
3658 return FCMP_UGT;
3659 case FCMP_ULE:
3660 return FCMP_ULT;
3661 default:
3662 return pred;
3663 }
3664}
3665
3667 switch (pred) {
3668 case ICMP_SGT:
3669 return ICMP_SGE;
3670 case ICMP_SLT:
3671 return ICMP_SLE;
3672 case ICMP_UGT:
3673 return ICMP_UGE;
3674 case ICMP_ULT:
3675 return ICMP_ULE;
3676 case FCMP_OGT:
3677 return FCMP_OGE;
3678 case FCMP_OLT:
3679 return FCMP_OLE;
3680 case FCMP_UGT:
3681 return FCMP_UGE;
3682 case FCMP_ULT:
3683 return FCMP_ULE;
3684 default:
3685 return pred;
3686 }
3687}
3688
3690 assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
3691
3695 return getStrictPredicate(pred);
3696
3697 llvm_unreachable("Unknown predicate!");
3698}
3699
3701 assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");
3702
3703 switch (pred) {
3704 default:
3705 llvm_unreachable("Unknown predicate!");
3706 case CmpInst::ICMP_ULT:
3707 return CmpInst::ICMP_SLT;
3708 case CmpInst::ICMP_ULE:
3709 return CmpInst::ICMP_SLE;
3710 case CmpInst::ICMP_UGT:
3711 return CmpInst::ICMP_SGT;
3712 case CmpInst::ICMP_UGE:
3713 return CmpInst::ICMP_SGE;
3714 }
3715}
3716
3718 assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");
3719
3720 switch (pred) {
3721 default:
3722 llvm_unreachable("Unknown predicate!");
3723 case CmpInst::ICMP_SLT:
3724 return CmpInst::ICMP_ULT;
3725 case CmpInst::ICMP_SLE:
3726 return CmpInst::ICMP_ULE;
3727 case CmpInst::ICMP_SGT:
3728 return CmpInst::ICMP_UGT;
3729 case CmpInst::ICMP_SGE:
3730 return CmpInst::ICMP_UGE;
3731 }
3732}
3733
3735 switch (predicate) {
3736 default: return false;
3738 case ICmpInst::ICMP_UGE: return true;
3739 }
3740}
3741
3743 switch (predicate) {
3744 default: return false;
3746 case ICmpInst::ICMP_SGE: return true;
3747 }
3748}
3749
3750bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
3751 ICmpInst::Predicate Pred) {
3752 assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
3753 switch (Pred) {
3755 return LHS.eq(RHS);
3757 return LHS.ne(RHS);
3759 return LHS.ugt(RHS);
3761 return LHS.uge(RHS);
3763 return LHS.ult(RHS);
3765 return LHS.ule(RHS);
3767 return LHS.sgt(RHS);
3769 return LHS.sge(RHS);
3771 return LHS.slt(RHS);
3773 return LHS.sle(RHS);
3774 default:
3775 llvm_unreachable("Unexpected non-integer predicate.");
3776 };
3777}
3778
3779bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
3780 FCmpInst::Predicate Pred) {
3781 APFloat::cmpResult R = LHS.compare(RHS);
3782 switch (Pred) {
3783 default:
3784 llvm_unreachable("Invalid FCmp Predicate");
3786 return false;
3788 return true;
3789 case FCmpInst::FCMP_UNO:
3790 return R == APFloat::cmpUnordered;
3791 case FCmpInst::FCMP_ORD:
3792 return R != APFloat::cmpUnordered;
3793 case FCmpInst::FCMP_UEQ:
3794 return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
3795 case FCmpInst::FCMP_OEQ:
3796 return R == APFloat::cmpEqual;
3797 case FCmpInst::FCMP_UNE:
3798 return R != APFloat::cmpEqual;
3799 case FCmpInst::FCMP_ONE:
3801 case FCmpInst::FCMP_ULT:
3802 return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
3803 case FCmpInst::FCMP_OLT:
3804 return R == APFloat::cmpLessThan;
3805 case FCmpInst::FCMP_UGT:
3807 case FCmpInst::FCMP_OGT:
3808 return R == APFloat::cmpGreaterThan;
3809 case FCmpInst::FCMP_ULE:
3810 return R != APFloat::cmpGreaterThan;
3811 case FCmpInst::FCMP_OLE:
3812 return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
3813 case FCmpInst::FCMP_UGE:
3814 return R != APFloat::cmpLessThan;
3815 case FCmpInst::FCMP_OGE:
3816 return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
3817 }
3818}
3819
3822 "Call only with non-equality predicates!");
3823
3824 if (isSigned(pred))
3825 return getUnsignedPredicate(pred);
3826 if (isUnsigned(pred))
3827 return getSignedPredicate(pred);
3828
3829 llvm_unreachable("Unknown predicate!");
3830}
3831
3833 switch (predicate) {
3834 default: return false;
3837 case FCmpInst::FCMP_ORD: return true;
3838 }
3839}
3840
3842 switch (predicate) {
3843 default: return false;
3846 case FCmpInst::FCMP_UNO: return true;
3847 }
3848}
3849
3851 switch(predicate) {
3852 default: return false;
3853 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3854 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3855 }
3856}
3857
3859 switch(predicate) {
3860 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3861 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3862 default: return false;
3863 }
3864}
3865
3867 // If the predicates match, then we know the first condition implies the
3868 // second is true.
3869 if (Pred1 == Pred2)
3870 return true;
3871
3872 switch (Pred1) {
3873 default:
3874 break;
3875 case ICMP_EQ:
3876 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
3877 return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
3878 Pred2 == ICMP_SLE;
3879 case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
3880 return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
3881 case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
3882 return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
3883 case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
3884 return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
3885 case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
3886 return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
3887 }
3888 return false;
3889}
3890
3892 return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
3893}
3894
3895//===----------------------------------------------------------------------===//
3896// SwitchInst Implementation
3897//===----------------------------------------------------------------------===//
3898
3899void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
3900 assert(Value && Default && NumReserved);
3901 ReservedSpace = NumReserved;
3903 allocHungoffUses(ReservedSpace);
3904
3905 Op<0>() = Value;
3906 Op<1>() = Default;
3907}
3908
3909/// SwitchInst ctor - Create a new switch instruction, specifying a value to
3910/// switch on and a default destination. The number of additional cases can
3911/// be specified here to make memory allocation more efficient. This
3912/// constructor can also autoinsert before another instruction.
3913SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3914 InsertPosition InsertBefore)
3915 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
3916 nullptr, 0, InsertBefore) {
3917 init(Value, Default, 2+NumCases*2);
3918}
3919
3920SwitchInst::SwitchInst(const SwitchInst &SI)
3921 : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
3922 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
3923 setNumHungOffUseOperands(SI.getNumOperands());
3924 Use *OL = getOperandList();
3925 const Use *InOL = SI.getOperandList();
3926 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
3927 OL[i] = InOL[i];
3928 OL[i+1] = InOL[i+1];
3929 }
3930 SubclassOptionalData = SI.SubclassOptionalData;
3931}
3932
3933/// addCase - Add an entry to the switch instruction...
3934///
3936 unsigned NewCaseIdx = getNumCases();
3937 unsigned OpNo = getNumOperands();
3938 if (OpNo+2 > ReservedSpace)
3939 growOperands(); // Get more space!
3940 // Initialize some new operands.
3941 assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
3943 CaseHandle Case(this, NewCaseIdx);
3944 Case.setValue(OnVal);
3945 Case.setSuccessor(Dest);
3946}
3947
3948/// removeCase - This method removes the specified case and its successor
3949/// from the switch instruction.
3951 unsigned idx = I->getCaseIndex();
3952
3953 assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
3954
3955 unsigned NumOps = getNumOperands();
3956 Use *OL = getOperandList();
3957
3958 // Overwrite this case with the end of the list.
3959 if (2 + (idx + 1) * 2 != NumOps) {
3960 OL[2 + idx * 2] = OL[NumOps - 2];
3961 OL[2 + idx * 2 + 1] = OL[NumOps - 1];
3962 }
3963
3964 // Nuke the last value.
3965 OL[NumOps-2].set(nullptr);
3966 OL[NumOps-2+1].set(nullptr);
3967 setNumHungOffUseOperands(NumOps-2);
3968
3969 return CaseIt(this, idx);
3970}
3971
3972/// growOperands - grow operands - This grows the operand list in response
3973/// to a push_back style of operation. This grows the number of ops by 3 times.
3974///
3975void SwitchInst::growOperands() {
3976 unsigned e = getNumOperands();
3977 unsigned NumOps = e*3;
3978
3979 ReservedSpace = NumOps;
3980 growHungoffUses(ReservedSpace);
3981}
3982
// NOTE(review): the signature line (original 3983) is missing from this view;
// from the body, this builds a branch-weights metadata node from the cached
// Weights vector, or returns nullptr when there is nothing useful to emit.
3984 assert(Changed && "called only if metadata has changed");
3985
3986 if (!Weights)
3987 return nullptr;
3988
3989 assert(SI.getNumSuccessors() == Weights->size() &&
3990 "num of prof branch_weights must accord with num of successors");
3991
3992 bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });
3993
// An all-zero vector, or fewer than two weights, carries no profile
// information worth encoding.
3994 if (AllZeroes || Weights->size() < 2)
3995 return nullptr;
3996
3997 return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
3998}
3999
// NOTE(review): the signature line (original 4000) is missing from this view;
// from the body, this populates the wrapper's Weights cache from the switch's
// branch-weights metadata, if any.
4001 MDNode *ProfileData = getBranchWeightMDNode(SI);
4002 if (!ProfileData)
4003 return;
4004
4005 if (getNumBranchWeights(*ProfileData) != SI.getNumSuccessors()) {
// FIXME(review): "succesors" below is misspelled; it is runtime-visible text,
// so it is left untouched in this documentation-only pass.
4006 llvm_unreachable("number of prof branch_weights metadata operands does "
4007 "not correspond to number of succesors");
4008 }
4009
// NOTE(review): original line 4010 is missing from this view (likely the
// declaration of the local `Weights` vector extracted into below) — confirm
// against upstream.
4011 if (!extractBranchWeights(ProfileData, Weights))
4012 return;
4013 this->Weights = std::move(Weights);
4014}
4015
// NOTE(review): the signature lines (original 4016-4017) are missing from this
// view; from the body, this keeps the cached weight vector in sync while
// delegating the actual removal to SwitchInst::removeCase(I).
4018 if (Weights) {
4019 assert(SI.getNumSuccessors() == Weights->size() &&
4020 "num of prof branch_weights must accord with num of successors");
4021 Changed = true;
4022 // Copy the last case to the place of the removed one and shrink.
4023 // This is tightly coupled with the way SwitchInst::removeCase() removes
4024 // the cases in SwitchInst::removeCase(CaseIt).
// The +1 skips weight slot 0 — apparently the default successor's weight.
4025 (*Weights)[I->getCaseIndex() + 1] = Weights->back();
4026 Weights->pop_back();
4027 }
4028 return SI.removeCase(I);
4029}
4030
// NOTE(review): the first signature line (original 4031) and the trailing
// parameter line (original 4033, the optional weight `W`) are missing from
// this view; the fragment below is the tail of the parameter list.
4032 ConstantInt *OnVal, BasicBlock *Dest,
4034 SI.addCase(OnVal, Dest);
4035
// Lazily materialize the weight vector the first time a nonzero weight is
// supplied; all pre-existing successors get weight 0 and the new last
// successor gets *W.
4036 if (!Weights && W && *W) {
4037 Changed = true;
4038 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4039 (*Weights)[SI.getNumSuccessors() - 1] = *W;
4040 } else if (Weights) {
4041 Changed = true;
4042 Weights->push_back(W.value_or(0));
4043 }
4044 if (Weights)
4045 assert(SI.getNumSuccessors() == Weights->size() &&
4046 "num of prof branch_weights must accord with num of successors");
4047}
4048
// NOTE(review): the signature lines (original 4049-4050) are missing from this
// view; from the body, this erases the wrapped SwitchInst and disarms the
// wrapper so its destructor does not touch the dead instruction.
4051 // Instruction is erased. Mark as unchanged to not touch it in the destructor.
4052 Changed = false;
4053 if (Weights)
4054 Weights->resize(0);
4055 return SI.eraseFromParent();
4056}
4057
// NOTE(review): the signature lines (original 4058-4059) are missing from this
// view; from the body, this returns the cached weight for successor `idx`, or
// std::nullopt when no weights are tracked.
4060 if (!Weights)
4061 return std::nullopt;
4062 return (*Weights)[idx];
4063}
4064
// NOTE(review): the signature lines (original 4065-4066) are missing from this
// view; from the body, this records optional weight `W` for successor `idx`.
4067 if (!W)
4068 return;
4069
// Materialize the vector (all zeros) only when a nonzero weight first arrives.
4070 if (!Weights && *W)
4071 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4072
// Flag Changed only on an actual modification, so unchanged metadata is not
// rewritten later.
4073 if (Weights) {
4074 auto &OldW = (*Weights)[idx];
4075 if (*W != OldW) {
4076 Changed = true;
4077 OldW = *W;
4078 }
4079 }
4080}
4081
// NOTE(review): the leading signature lines (original 4082-4083) are missing
// from this view; only the final parameter `idx` survives. From the body,
// this static helper reads successor idx's weight directly from the
// instruction's branch-weights metadata (the +1 skips metadata operand 0 —
// presumably the weights' name string — given NumOperands == successors + 1).
4084 unsigned idx) {
4085 if (MDNode *ProfileData = getBranchWeightMDNode(SI))
4086 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4087 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
4088 ->getValue()
4089 .getZExtValue();
4090
4091 return std::nullopt;
4092}
4093
4094//===----------------------------------------------------------------------===//
4095// IndirectBrInst Implementation
4096//===----------------------------------------------------------------------===//
4097
// Set up the hung-off operand list for an indirectbr: operand 0 is the branch
// address; room is reserved for NumDests destinations after it.
4098void IndirectBrInst::init(Value *Address, unsigned NumDests) {
4099 assert(Address && Address->getType()->isPointerTy() &&
4100 "Address of indirectbr must be a pointer");
4101 ReservedSpace = 1+NumDests;
// NOTE(review): original line 4102 is missing from this view (likely the call
// that sets the initial hung-off operand count) — confirm against upstream.
4103 allocHungoffUses(ReservedSpace);
4104
4105 Op<0>() = Address;
4106}
4107
4108
4109/// growOperands - grow operands - This grows the operand list in response
4110/// to a push_back style of operation. This grows the number of ops by 2 times.
4111///
4112void IndirectBrInst::growOperands() {
4113 unsigned e = getNumOperands();
4114 unsigned NumOps = e*2;
4115
4116 ReservedSpace = NumOps;
4117 growHungoffUses(ReservedSpace);
4118}
4119
// Construct an indirectbr on `Address` with space reserved for NumCases
// destinations; the instruction itself produces no value (void type).
4120IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
4121 InsertPosition InsertBefore)
4122 : Instruction(Type::getVoidTy(Address->getContext()),
4123 Instruction::IndirectBr, nullptr, 0, InsertBefore) {
4124 init(Address, NumCases);
4125}
4126
4127IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
4128 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
4129 nullptr, IBI.getNumOperands()) {
4130 allocHungoffUses(IBI.getNumOperands());
4131 Use *OL = getOperandList();
4132 const Use *InOL = IBI.getOperandList();
4133 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
4134 OL[i] = InOL[i];
4135 SubclassOptionalData = IBI.SubclassOptionalData;
4136}
4137
4138/// addDestination - Add a destination.
4139///
// NOTE(review): the addDestination signature line (original 4140) is missing
// from this view; from the body, it appends DestBB to the hung-off operand
// list, growing it when full.
4141 unsigned OpNo = getNumOperands();
4142 if (OpNo+1 > ReservedSpace)
4143 growOperands(); // Get more space!
4144 // Initialize some new operands.
4145 assert(OpNo < ReservedSpace && "Growing didn't work!");
// NOTE(review): original line 4146 is missing from this view (likely the call
// that bumps the hung-off operand count) — confirm against upstream.
4147 getOperandList()[OpNo] = DestBB;
4148}
4149
4150/// removeDestination - This method removes the specified successor from the
4151/// indirectbr instruction.
// NOTE(review): the signature line (original 4152) is missing from this view;
// from the body, `idx` is the zero-based destination index (stored at operand
// idx+1, since operand 0 is the branch address).
4153 assert(idx < getNumOperands()-1 && "Successor index out of range!");
4154
4155 unsigned NumOps = getNumOperands();
4156 Use *OL = getOperandList();
4157
4158 // Replace this value with the last one.
4159 OL[idx+1] = OL[NumOps-1];
4160
4161 // Nuke the last value.
4162 OL[NumOps-1].set(nullptr);
4163 setNumHungOffUseOperands(NumOps-1);
4164}
4165
4166//===----------------------------------------------------------------------===//
4167// FreezeInst Implementation
4168//===----------------------------------------------------------------------===//
4169
// NOTE(review): the FreezeInst constructor signature line (original 4170) is
// missing from this view; the visible initializer shows freeze yields a value
// of the same type as its single operand S.
4171 : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
4172 setName(Name);
4173}
4174
4175//===----------------------------------------------------------------------===//
4176// cloneImpl() implementations
4177//===----------------------------------------------------------------------===//
4178
4179// Define these methods here so vtables don't get emitted into every translation
4180// unit that uses these classes.
4181
// NOTE(review): the cloneImpl() signature lines in this region were dropped
// by the extraction; each fragment below is the body of a <Class>::cloneImpl()
// returning a fresh heap-allocated copy of *this (placement new sized by the
// operand count where the class requires it).
4183 return new (getNumOperands()) GetElementPtrInst(*this);
4184}
4185
4187 return Create(getOpcode(), Op<0>());
4188}
4189
4191 return Create(getOpcode(), Op<0>(), Op<1>());
4192}
4193
4195 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
4196}
4197
4199 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
4200}
4201
4203 return new ExtractValueInst(*this);
4204}
4205
4207 return new InsertValueInst(*this);
4208}
4209
// NOTE(review): the first lines of AllocaInst::cloneImpl (original 4210-4211)
// are missing from this view; the visible tail copies the inalloca and
// swifterror flags onto the new alloca before returning it.
4212 getOperand(0), getAlign());
4213 Result->setUsedWithInAlloca(isUsedWithInAlloca());
4214 Result->setSwiftError(isSwiftError());
4215 return Result;
4216}
4217
// NOTE(review): signature lines and several trailing-argument lines of these
// memory/atomic cloneImpl() bodies (original 4218, 4220, 4223, 4225,
// 4228-4231, 4237, 4239-4240, 4245) are missing from this view; the visible
// code rebuilds each instruction and re-reads its flags from *this.
4219 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
4221}
4222
4224 return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
4226}
4227
4232 Result->setVolatile(isVolatile());
4233 Result->setWeak(isWeak());
4234 return Result;
4235}
4236
4238 AtomicRMWInst *Result =
4241 Result->setVolatile(isVolatile());
4242 return Result;
4243}
4244
4246 return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
4247}
4248
// Each fragment below is the body of a cast-instruction cloneImpl() (the
// signature lines were dropped by the extraction). All follow one pattern:
// rebuild the cast from its single operand and its destination type.
4250 return new TruncInst(getOperand(0), getType());
4251}
4252
4254 return new ZExtInst(getOperand(0), getType());
4255}
4256
4258 return new SExtInst(getOperand(0), getType());
4259}
4260
4262 return new FPTruncInst(getOperand(0), getType());
4263}
4264
4266 return new FPExtInst(getOperand(0), getType());
4267}
4268
4270 return new UIToFPInst(getOperand(0), getType());
4271}
4272
4274 return new SIToFPInst(getOperand(0), getType());
4275}
4276
4278 return new FPToUIInst(getOperand(0), getType());
4279}
4280
4282 return new FPToSIInst(getOperand(0), getType());
4283}
4284
4286 return new PtrToIntInst(getOperand(0), getType());
4287}
4288
4290 return new IntToPtrInst(getOperand(0), getType());
4291}
4292
4294 return new BitCastInst(getOperand(0), getType());
4295}
4296
4298 return new AddrSpaceCastInst(getOperand(0), getType());
4299}
4300
// CallInst::cloneImpl body (its signature line, original 4301, is missing from
// this view): operand-bundle users carry a trailing descriptor blob, so the
// placement new must reserve DescriptorBytes of extra storage.
4302 if (hasOperandBundles()) {
4303 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
4304 return new(getNumOperands(), DescriptorBytes) CallInst(*this);
4305 }
4306 return new(getNumOperands()) CallInst(*this);
4307}
4308
// NOTE(review): only the closing brace of SelectInst::cloneImpl survives here
// (original 4309-4310 missing); the same holds for the ExtractElement /
// InsertElement / ShuffleVector bodies further below.
4311}
4312
4314 return new VAArgInst(getOperand(0), getType());
4315}
4316
4319}
4320
4323}
4324
4327}
4328
4329PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }
4330
4332 return new LandingPadInst(*this);
4333}
4334
// Terminator cloneImpl() bodies (most signature lines were dropped by the
// extraction). Fixed-operand instructions use placement new sized by their
// operand count; Invoke and CallBr additionally reserve space for their
// operand-bundle descriptors, mirroring CallInst above.
4336 return new(getNumOperands()) ReturnInst(*this);
4337}
4338
4340 return new(getNumOperands()) BranchInst(*this);
4341}
4342
4343SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
4344
4346 return new IndirectBrInst(*this);
4347}
4348
4350 if (hasOperandBundles()) {
4351 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
4352 return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
4353 }
4354 return new(getNumOperands()) InvokeInst(*this);
4355}
4356
4358 if (hasOperandBundles()) {
4359 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
4360 return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);
4361 }
4362 return new (getNumOperands()) CallBrInst(*this);
4363}
4364
4365ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }
4366
4368 return new (getNumOperands()) CleanupReturnInst(*this);
4369}
4370
4372 return new (getNumOperands()) CatchReturnInst(*this);
4373}
4374
4376 return new CatchSwitchInst(*this);
4377}
4378
4380 return new (getNumOperands()) FuncletPadInst(*this);
4381}
4382
4384 LLVMContext &Context = getContext();
4385 return new UnreachableInst(Context);
4386}
4387
4389 return new FreezeInst(getOperand(0));
4390}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const LLT S1
Rewrite undef for PHI
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
static bool isSigned(unsigned int Opcode)
#define op(i)
hexagon gen pred
static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos)
static Value * createPlaceholderForShuffleVector(Value *V)
static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos)
static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))
static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)
Try to lower a vector shuffle as a bit rotation.
static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)
static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)
static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static Value * getAISize(LLVMContext &Context, Value *Amt)
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file contains the declarations for metadata subclasses.
Module.h This file contains the declarations for the Module class.
uint64_t IntrinsicInst * II
#define P(N)
PowerPC Reduce CR logical Operation
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
This file implements the SmallBitVector class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
@ Struct
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:191
Value * RHS
Value * LHS
float convertToFloat() const
Converts this APFloat to host float value.
Definition: APFloat.cpp:5369
Class for arbitrary precision integers.
Definition: APInt.h:77
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition: APInt.h:1309
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:359
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition: APInt.h:1597
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition: APInt.h:1556
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition: APInt.h:179
This class represents a conversion between pointers from one address space to another.
AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
an instruction to allocate memory on the stack
Definition: Instructions.h:60
std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:146
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:121
AllocaInst * cloneImpl() const
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:114
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:136
unsigned getAddressSpace() const
Return the address space for the allocation.
Definition: Instructions.h:101
std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
Definition: Instructions.h:125
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:92
AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, InsertPosition InsertBefore)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:154
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
iterator begin() const
Definition: ArrayRef.h:153
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition: ArrayRef.h:195
Class to represent array types.
Definition: DerivedTypes.h:371
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:494
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:616
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
Definition: Instructions.h:546
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:590
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:585
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:578
AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:535
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
Definition: Instructions.h:553
void setAlignment(Align Align)
Definition: Instructions.h:539
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:573
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:611
AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:695
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:808
AtomicRMWInst * cloneImpl() const
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
Definition: Instructions.h:818
BinOp
This enumeration lists the possible modifications atomicrmw can make.
Definition: Instructions.h:707
@ Add
*p = old + v
Definition: Instructions.h:711
@ FAdd
*p = old + v
Definition: Instructions.h:732
@ Min
*p = old <signed v ? old : v
Definition: Instructions.h:725
@ Or
*p = old | v
Definition: Instructions.h:719
@ Sub
*p = old - v
Definition: Instructions.h:713
@ And
*p = old & v
Definition: Instructions.h:715
@ Xor
*p = old ^ v
Definition: Instructions.h:721
@ FSub
*p = old - v
Definition: Instructions.h:735
@ UIncWrap
Increment one up to a maximum value.
Definition: Instructions.h:747
@ Max
*p = old >signed v ? old : v
Definition: Instructions.h:723
@ UMin
*p = old <unsigned v ? old : v
Definition: Instructions.h:729
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
Definition: Instructions.h:743
@ UMax
*p = old >unsigned v ? old : v
Definition: Instructions.h:727
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
Definition: Instructions.h:739
@ UDecWrap
Decrement one until a minimum value or zero.
Definition: Instructions.h:751
@ Nand
*p = ~(old & v)
Definition: Instructions.h:717
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:847
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
Definition: Instructions.h:833
void setOperation(BinOp Operation)
Definition: Instructions.h:802
BinOp getOperation() const
Definition: Instructions.h:786
AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:842
void setAlignment(Align Align)
Definition: Instructions.h:812
static StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:828
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
FPClassTest getRetNoFPClass() const
Get the disallowed floating-point classes of the return value.
bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Return true if the attribute exists for the given argument.
Definition: Attributes.h:805
FPClassTest getParamNoFPClass(unsigned ArgNo) const
Get the disallowed floating-point classes of the argument value.
MemoryEffects getMemoryEffects() const
Returns memory effects of the function.
const ConstantRange & getRange() const
Returns the value of the range attribute.
Definition: Attributes.cpp:495
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:86
static Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
Definition: Attributes.cpp:279
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:203
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:569
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:209
const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
Definition: BasicBlock.cpp:294
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition: InstrTypes.h:442
bool swapOperands()
Exchange the two operands to this instruction.
static BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
static BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
BinaryOperator * cloneImpl() const
This class represents a no-op cast from one type to another.
BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
BitCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Conditional or Unconditional Branch instruction.
void swapSuccessors()
Swap the successors of this branch instruction.
BranchInst * cloneImpl() const
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1236
FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1532
BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)
Return the BundleOpInfo for the operand at index OpIdx.
Attribute getRetAttr(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind for the return value.
Definition: InstrTypes.h:1680
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1527
FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
bundle_op_iterator bundle_op_info_begin()
Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.
Definition: InstrTypes.h:2304
MemoryEffects getMemoryEffects() const
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1574
bool doesNotAccessMemory() const
Determine if the call does not access memory.
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
void setOnlyAccessesArgMemory()
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Definition: InstrTypes.h:2112
void setOnlyAccessesInaccessibleMemOrArgMem()
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Definition: InstrTypes.h:2143
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1465
void setDoesNotAccessMemory()
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
Definition: InstrTypes.h:1673
bool onlyAccessesInaccessibleMemory() const
Determine if the function may only access memory that is inaccessible from the IR.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
Definition: InstrTypes.h:2056
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1523
bundle_op_iterator bundle_op_info_end()
Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.
Definition: InstrTypes.h:2321
unsigned getNumSubclassExtraOperandsDynamic() const
Get the number of extra operands for instructions that don't have a fixed number of extra operands.
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1385
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
Definition: InstrTypes.h:2337
void setOnlyReadsMemory()
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
Value * getCalledOperand() const
Definition: InstrTypes.h:1458
void setOnlyWritesMemory()
op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)
Populate the BundleOpInfo instances and the Use& vector from Bundles.
AttributeList Attrs
parameter attributes for callable
Definition: InstrTypes.h:1250
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
Definition: InstrTypes.h:2217
std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
bool isReturnNonNull() const
Return true if the return value is known to be not null.
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1410
FunctionType * FTy
Definition: InstrTypes.h:1251
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
Definition: InstrTypes.h:1900
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1391
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1323
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
Definition: InstrTypes.h:2372
Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
void setOnlyAccessesInaccessibleMemory()
static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
bool onlyWritesMemory() const
Determine if the call does not access or only writes memory.
bool hasClobberingOperandBundles() const
Return true if this operand bundle user has operand bundles that may write to the heap.
void setCalledOperand(Value *V)
Definition: InstrTypes.h:1501
static CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
bool hasReadingOperandBundles() const
Return true if this operand bundle user has operand bundles that may read from the heap.
bool onlyAccessesArgMemory() const
Determine if the call can access memory only using pointers based on its arguments.
unsigned arg_size() const
Definition: InstrTypes.h:1408
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1542
void setMemoryEffects(MemoryEffects ME)
bool hasOperandBundles() const
Return true if this User has any operand bundles.
Definition: InstrTypes.h:2061
bool isTailCall() const
Tests if this call site is marked as a tail call.
Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
SmallVector< BasicBlock *, 16 > getIndirectDests() const
void setDefaultDest(BasicBlock *B)
void setIndirectDest(unsigned i, BasicBlock *B)
BasicBlock * getDefaultDest() const
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
TailCallKind getTailCallKind() const
CallInst * cloneImpl() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:530
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast or an AddrSpaceCast cast instruction.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition: InstrTypes.h:694
static CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
static unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)
Determine how a pair of casts can be eliminated, if they can be at all.
static bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static bool isBitCastable(Type *SrcTy, Type *DestTy)
Check whether a bitcast between these types is valid.
static CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
static CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
static CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
static bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
static CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source a...
static CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a SExt or BitCast cast instruction.
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
CatchReturnInst * cloneImpl() const
void setUnwindDest(BasicBlock *UnwindDest)
void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction... Note: This action invalidates handler_end().
CatchSwitchInst * cloneImpl() const
Value * getParentPad() const
void setParentPad(Value *ParentPad)
BasicBlock * getUnwindDest() const
void removeHandler(handler_iterator HI)
bool hasUnwindDest() const
CleanupReturnInst * cloneImpl() const
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:747
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition: InstrTypes.h:940
bool isEquality() const
Determine if this is an equals/not equals predicate.
Definition: InstrTypes.h:997
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition: InstrTypes.h:850
bool isFalseWhenEqual() const
This is just a convenience.
Definition: InstrTypes.h:1062
Predicate getSignedPredicate()
For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert.
Definition: InstrTypes.h:1026
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:757
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:760
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:774
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:786
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:787
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:763
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:772
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:761
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:762
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:781
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:780
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:784
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:771
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:765
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:768
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:782
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:769
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:764
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:766
@ ICMP_EQ
equal
Definition: InstrTypes.h:778
@ ICMP_NE
not equal
Definition: InstrTypes.h:779
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:785
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:773
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:783
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:770
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:759
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:767
bool isSigned() const
Definition: InstrTypes.h:1007
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:909
bool isTrueWhenEqual() const
This is just a convenience.
Definition: InstrTypes.h:1056
static CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getUnsignedPredicate()
For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert.
Definition: InstrTypes.h:1038
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition: InstrTypes.h:953
static CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate, the two operands and the instructio...
bool isNonStrictPredicate() const
Definition: InstrTypes.h:934
bool isFPPredicate() const
Definition: InstrTypes.h:864
void swapOperands()
This is just a convenience that dispatches to the subclasses.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:871
static StringRef getPredicateName(Predicate P)
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:847
bool isStrictPredicate() const
Definition: InstrTypes.h:925
static bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
Definition: InstrTypes.h:975
static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2)
Determine if Pred1 implies Pred2 is true when two compares have matching operands.
Predicate getFlippedSignednessPredicate()
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert.
Definition: InstrTypes.h:1050
bool isIntPredicate() const
Definition: InstrTypes.h:865
static bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name="", InsertPosition InsertBefore=nullptr, Instruction *FlagsSource=nullptr)
static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2)
Determine if Pred1 implies Pred2 is false when two compares have matching operands.
bool isUnsigned() const
Definition: InstrTypes.h:1013
bool isCommutative() const
This is just a convenience that dispatches to the subclasses.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Definition: InstrTypes.h:1003
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:269
const APFloat & getValueAPF() const
Definition: Constants.h:312
This is the shared class of boolean and integer constants.
Definition: Constants.h:81
static Constant * get(ArrayRef< Constant * > V)
Definition: Constants.cpp:1399
This is an important base class in LLVM.
Definition: Constant.h:41
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:417
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition: TypeSize.h:308
This instruction extracts a single (scalar) element from a VectorType value.
ExtractElementInst * cloneImpl() const
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
static Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
ExtractValueInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
bool isEquality() const
static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
This class represents an extension of floating point types.
FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
FPExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
This class represents a cast from floating point to signed integer.
FPToSIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
This class represents a cast from floating point to unsigned integer.
FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
FPToUIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a truncation of floating point types.
FPTruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
An instruction for ordering other memory operations.
Definition: Instructions.h:419
FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
Definition: Instructions.h:453
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
Definition: Instructions.h:458
FenceInst * cloneImpl() const
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
Definition: Instructions.h:448
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Definition: Instructions.h:442
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:539
unsigned getNumElements() const
Definition: DerivedTypes.h:582
This class represents a freeze function that returns random concrete value if an operand is either a ...
FreezeInst(Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
void setParentPad(Value *ParentPad)
Definition: InstrTypes.h:2450
Value * getParentPad() const
Convenience accessors.
Definition: InstrTypes.h:2449
FuncletPadInst * cloneImpl() const
Class to represent function types.
Definition: DerivedTypes.h:103
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:142
Type * getParamType(unsigned i) const
Parameter type accessors.
Definition: DerivedTypes.h:135
bool isVarArg() const
Definition: DerivedTypes.h:123
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutInBounds() const
unsigned getRaw() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:914
bool isInBounds() const
Determine whether the GEP has the inbounds flag.
bool hasNoUnsignedSignedWrap() const
Determine whether the GEP has the nusw flag.
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
bool hasNoUnsignedWrap() const
Determine whether the GEP has the nuw flag.
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
bool collectOffset(const DataLayout &DL, unsigned BitWidth, MapVector< Value *, APInt > &VariableOffsets, APInt &ConstantOffset) const
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
GetElementPtrInst * cloneImpl() const
void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
This instruction compares its operands according to the predicate given to the constructor.
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
ICmpInst * cloneImpl() const
Clone an identical ICmpInst.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Indirect Branch Instruction.
void addDestination(BasicBlock *Dest)
Add a destination.
void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
IndirectBrInst * cloneImpl() const
This instruction inserts a single (scalar) element into a VectorType value.
InsertElementInst * cloneImpl() const
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
bool isValid() const
Definition: Instruction.h:61
BasicBlock * getBasicBlock()
Definition: Instruction.h:62
This instruction inserts a struct field or array element value into an aggregate value.
InsertValueInst * cloneImpl() const
BitfieldElement::Type getSubclassData() const
Definition: Instruction.h:1024
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:476
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
void swapProfMetadata()
If the instruction has "branch_weights" MD_prof metadata and the MDNode has three operands (including...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:274
This class represents a cast from an integer to a pointer.
IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
Invoke instruction.
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
InvokeInst * cloneImpl() const
LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
void setUnwindDest(BasicBlock *B)
void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
LLVMContextImpl *const pImpl
Definition: LLVMContext.h:69
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
LandingPadInst * cloneImpl() const
static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
An instruction for reading from memory.
Definition: Instructions.h:173
void setAlignment(Align Align)
Definition: Instructions.h:212
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:202
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
Definition: Instructions.h:238
LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:217
void setVolatile(bool V)
Specify whether this is a volatile load or not.
Definition: Instructions.h:205
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:227
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, InsertPosition InsertBefore)
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:208
MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight, bool IsExpected=false)
Return metadata containing two branch weights.
Definition: MDBuilder.cpp:37
Metadata node.
Definition: Metadata.h:1067
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1428
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
static MemoryEffectsBase readOnly()
Create MemoryEffectsBase that can read any memory.
Definition: ModRef.h:122
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition: ModRef.h:198
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition: ModRef.h:192
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access argument memory.
Definition: ModRef.h:132
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible memory.
Definition: ModRef.h:138
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition: ModRef.h:211
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition: ModRef.h:201
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition: ModRef.h:195
static MemoryEffectsBase writeOnly()
Create MemoryEffectsBase that can write any memory.
Definition: ModRef.h:127
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible or argument memory.
Definition: ModRef.h:145
static MemoryEffectsBase none()
Create MemoryEffectsBase that cannot read or write any memory.
Definition: ModRef.h:117
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
Definition: ModRef.h:217
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Definition: InstrTypes.h:1189
StringRef getTag() const
Definition: InstrTypes.h:1212
iterator_range< const_block_iterator > blocks() const
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)
Remove all incoming values for which the predicate returns true.
Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
bool hasConstantOrUndefValue() const
Whether the specified PHI node always merges together the same value, assuming undefs are equal to a ...
void copyIncomingBlocks(iterator_range< const_block_iterator > BBRange, uint32_t ToIdx=0)
Copies the basic blocks from BBRange to the incoming basic block list of this PHINode,...
const_block_iterator block_end() const
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
Value * hasConstantValue() const
If the specified PHI node always merges together the same value, return the value,...
PHINode * cloneImpl() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Class to represent pointers.
Definition: DerivedTypes.h:646
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:679
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1814
This class represents a cast from a pointer to an integer.
PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
ResumeInst * cloneImpl() const
Return a value (possibly void), from a function.
ReturnInst * cloneImpl() const
This class represents a sign extension of integer types.
SExtInst * cloneImpl() const
Clone an identical SExtInst.
SExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a cast from signed integer to floating point.
SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
SIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Class to represent scalable SIMD vectors.
Definition: DerivedTypes.h:586
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
SelectInst * cloneImpl() const
static const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
ArrayRef< int > getShuffleMask() const
static bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
static bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)
Return true if this shuffle mask represents "clustered" mask of size VF, i.e.
bool isIdentityWithPadding() const
Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.
static bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector.
bool isConcat() const
Return true if this shuffle concatenates its 2 source vectors.
static bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor, unsigned &Index)
Check if the mask is a DE-interleave mask of the given factor Factor like: <Index,...
ShuffleVectorInst * cloneImpl() const
static bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
static bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
void setShuffleMask(ArrayRef< int > Mask)
bool isInterleave(unsigned Factor)
Return if this shuffle interleaves its two input vectors together.
static bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
static bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)
static bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)
Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor tim...
static bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
Implements a dense probed hash-table based set with some number of buckets stored inline.
Definition: DenseSet.h:290
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void assign(size_type NumElts, ValueParamT Elt)
Definition: SmallVector.h:717
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:696
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:289
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Definition: Instructions.h:337
Align getAlign() const
Definition: Instructions.h:328
void setVolatile(bool V)
Specify whether this is a volatile store or not.
Definition: Instructions.h:323
void setAlignment(Align Align)
Definition: Instructions.h:332
StoreInst * cloneImpl() const
StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
Definition: Instructions.h:348
bool isVolatile() const
Return true if this is a store to a volatile memory location.
Definition: Instructions.h:320
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
Definition: Instructions.h:359
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
Class to represent struct types.
Definition: DerivedTypes.h:216
void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
CaseWeightOpt getSuccessorWeight(unsigned idx)
std::optional< uint32_t > CaseWeightOpt
SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
Multiway switch.
SwitchInst * cloneImpl() const
void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
CaseIt removeCase(CaseIt I)
This method removes the specified case and its successor from the switch instruction.
This class represents a truncation of integer types.
TruncInst * cloneImpl() const
Clone an identical TruncInst.
TruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:234
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isX86_MMXTy() const
Return true if this is X86 MMX.
Definition: Type.h:201
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFirstClassType() const
Return true if the type is "first class", meaning it is a valid type for a Value.
Definition: Type.h:281
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition: Type.h:295
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:129
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:185
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:262
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:225
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:216
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:348
This class represents a cast from unsigned integer to floating point.
UIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
static UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a unary instruction, given the opcode and an operand.
UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
UnaryOperator * cloneImpl() const
UnaryOps getOpcode() const
Definition: InstrTypes.h:171
This function has undefined behavior.
UnreachableInst(LLVMContext &C, InsertPosition InsertBefore=nullptr)
UnreachableInst * cloneImpl() const
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
void set(Value *Val)
Definition: Value.h:882
const Use * getOperandList() const
Definition: User.h:162
op_range operands()
Definition: User.h:242
void allocHungoffUses(unsigned N, bool IsPhi=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition: User.cpp:50
op_iterator op_begin()
Definition: User.h:234
void setNumHungOffUseOperands(unsigned NumOps)
Subclasses with hung off uses need to manage the operand count themselves.
Definition: User.h:215
Use & Op()
Definition: User.h:133
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
op_iterator op_end()
Definition: User.h:236
void growHungoffUses(unsigned N, bool IsPhi=false)
Grow the number of hung off uses.
Definition: User.cpp:67
This class represents the va_arg llvm instruction, which returns an argument of the specified type given a va_list and increments it.
VAArgInst * cloneImpl() const
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition: Value.h:84
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1074
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
Base class of all SIMD vector types.
Definition: DerivedTypes.h:403
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
Definition: DerivedTypes.h:641
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
Definition: Type.cpp:676
This class represents zero extension of integer types.
ZExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
size_type size() const
Definition: DenseSet.h:81
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition: DenseSet.h:185
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
base_list_type::iterator iterator
Definition: ilist.h:121
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1680
unsigned getPointerAddressSpace(const Type *T)
Definition: SPIRVUtils.h:126
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:419
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:275
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:2073
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool isPointerTy(const Type *T)
Definition: SPIRVUtils.h:120
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition: Casting.h:548
constexpr int PoisonMaskElem
unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
auto remove_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1761
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMul
Product of floats.
@ And
Bitwise or logical AND of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Definition: APFixedPoint.h:293
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1824
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition: Casting.h:565
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1879
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition: STLExtras.h:2039
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition: Sequence.h:305
@ Default
The result values are uniform if and only if all operands are uniform.
void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
Definition: APFloat.h:236
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Describes an element of a Bitfield.
Definition: Bitfields.h:223
Used to keep track of an operand bundle.
Definition: InstrTypes.h:2228
uint32_t End
The index in the Use& vector where operands for this operand bundle ends.
Definition: InstrTypes.h:2239
uint32_t Begin
The index in the Use& vector where operands for this operand bundle starts.
Definition: InstrTypes.h:2235
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
Compile-time customization of User operands.
Definition: User.h:42