LLVM 23.0.0git
Instructions.cpp
Go to the documentation of this file.
1//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements all of the non-inline methods for the LLVM instruction
10// classes.
11//
12//===----------------------------------------------------------------------===//
13
15#include "LLVMContextImpl.h"
18#include "llvm/ADT/Twine.h"
19#include "llvm/IR/Attributes.h"
20#include "llvm/IR/BasicBlock.h"
21#include "llvm/IR/Constant.h"
23#include "llvm/IR/Constants.h"
24#include "llvm/IR/DataLayout.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/InstrTypes.h"
28#include "llvm/IR/Instruction.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/MDBuilder.h"
32#include "llvm/IR/Metadata.h"
33#include "llvm/IR/Module.h"
34#include "llvm/IR/Operator.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Value.h"
46#include "llvm/Support/ModRef.h"
48#include <algorithm>
49#include <cassert>
50#include <cstdint>
51#include <optional>
52#include <vector>
53
54using namespace llvm;
55
57 "disable-i2p-p2i-opt", cl::init(false),
58 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
59
60//===----------------------------------------------------------------------===//
61// AllocaInst Class
62//===----------------------------------------------------------------------===//
63
64std::optional<TypeSize>
66 TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
67 // Zero-sized types can return early since 0 * N = 0 for any array size N.
68 if (Size.isZero())
69 return Size;
70 if (isArrayAllocation()) {
72 if (!C)
73 return std::nullopt;
74 assert(!Size.isScalable() && "Array elements cannot have a scalable size");
75 auto CheckedProd =
76 checkedMulUnsigned(Size.getKnownMinValue(), C->getZExtValue());
77 if (!CheckedProd)
78 return std::nullopt;
79 return TypeSize::getFixed(*CheckedProd);
80 }
81 return Size;
82}
83
84std::optional<TypeSize>
86 std::optional<TypeSize> Size = getAllocationSize(DL);
87 if (!Size)
88 return std::nullopt;
89 auto CheckedProd = checkedMulUnsigned(Size->getKnownMinValue(),
90 static_cast<TypeSize::ScalarTy>(8));
91 if (!CheckedProd)
92 return std::nullopt;
93 return TypeSize::get(*CheckedProd, Size->isScalable());
94}
95
96//===----------------------------------------------------------------------===//
97// SelectInst Class
98//===----------------------------------------------------------------------===//
99
100/// areInvalidOperands - Return a string if the specified operands are invalid
101/// for a select operation, otherwise return null.
102const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
103 if (Op1->getType() != Op2->getType())
104 return "both values to select must have same type";
105
106 if (Op1->getType()->isTokenTy())
107 return "select values cannot have token type";
108
109 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
110 // Vector select.
111 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
112 return "vector select condition element type must be i1";
114 if (!ET)
115 return "selected values for vector select must be vectors";
116 if (ET->getElementCount() != VT->getElementCount())
117 return "vector select requires selected vectors to have "
118 "the same vector length as select condition";
119 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
120 return "select condition must be i1 or <n x i1>";
121 }
122 return nullptr;
123}
124
125//===----------------------------------------------------------------------===//
126// PHINode Class
127//===----------------------------------------------------------------------===//
128
129PHINode::PHINode(const PHINode &PN)
130 : Instruction(PN.getType(), Instruction::PHI, AllocMarker),
131 ReservedSpace(PN.getNumOperands()) {
134 std::copy(PN.op_begin(), PN.op_end(), op_begin());
135 copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
137}
138
139// removeIncomingValue - Remove an incoming value. This is useful if a
140// predecessor basic block is deleted.
141Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
142 Value *Removed = getIncomingValue(Idx);
143 // Swap with the end of the list.
144 unsigned Last = getNumOperands() - 1;
145 if (Idx != Last) {
148 }
149
150 // Nuke the last value.
151 Op<-1>().set(nullptr);
153
154 // If the PHI node is dead, because it has zero entries, nuke it now.
155 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
156 // If anyone is using this PHI, make them use a dummy value instead...
159 }
160 return Removed;
161}
162
163void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
164 bool DeletePHIIfEmpty) {
165 unsigned NumOps = getNumIncomingValues();
166
167 // Loop backwards in case the predicate is purely index based.
168 for (unsigned Idx = NumOps; Idx-- > 0;) {
169 if (Predicate(Idx)) {
170 unsigned LastIdx = NumOps - 1;
171 if (Idx != LastIdx) {
172 setIncomingValue(Idx, getIncomingValue(LastIdx));
173 setIncomingBlock(Idx, getIncomingBlock(LastIdx));
174 }
175 getOperandUse(LastIdx).set(nullptr);
176 NumOps--;
177 }
178 }
179
181
182 // If the PHI node is dead, because it has zero entries, nuke it now.
183 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
184 // If anyone is using this PHI, make them use a dummy value instead...
187 }
188}
189
190/// growOperands - grow operands - This grows the operand list in response
191/// to a push_back style of operation. This grows the number of ops by 1.5
192/// times.
193///
194void PHINode::growOperands() {
195 unsigned e = getNumOperands();
196 unsigned NumOps = e + e / 2;
197 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
198
199 ReservedSpace = NumOps;
200 growHungoffUses(ReservedSpace, /*WithExtraValues=*/true);
201}
202
203/// hasConstantValue - If the specified PHI node always merges together the same
204/// value, return the value, otherwise return null.
206 // Exploit the fact that phi nodes always have at least one entry.
207 Value *ConstantValue = getIncomingValue(0);
208 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
209 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
210 if (ConstantValue != this)
211 return nullptr; // Incoming values not all the same.
212 // The case where the first value is this PHI.
213 ConstantValue = getIncomingValue(i);
214 }
215 if (ConstantValue == this)
216 return PoisonValue::get(getType());
217 return ConstantValue;
218}
219
220/// hasConstantOrUndefValue - Whether the specified PHI node always merges
221/// together the same value, assuming that undefs result in the same value as
222/// non-undefs.
223/// Unlike \ref hasConstantValue, this does not return a value because the
224/// unique non-undef incoming value need not dominate the PHI node.
226 Value *ConstantValue = nullptr;
227 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
228 Value *Incoming = getIncomingValue(i);
229 if (Incoming != this && !isa<UndefValue>(Incoming)) {
230 if (ConstantValue && ConstantValue != Incoming)
231 return false;
232 ConstantValue = Incoming;
233 }
234 }
235 return true;
236}
237
238//===----------------------------------------------------------------------===//
239// LandingPadInst Implementation
240//===----------------------------------------------------------------------===//
241
// Construct a landing pad with space reserved for NumReservedValues clause
// operands; init() allocates the hung-off use list and marks the pad as
// non-cleanup by default.
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, AllocMarker, InsertBefore) {
  init(NumReservedValues, NameStr);
}
248
249LandingPadInst::LandingPadInst(const LandingPadInst &LP)
250 : Instruction(LP.getType(), Instruction::LandingPad, AllocMarker),
251 ReservedSpace(LP.getNumOperands()) {
254 Use *OL = getOperandList();
255 const Use *InOL = LP.getOperandList();
256 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
257 OL[I] = InOL[I];
258
259 setCleanup(LP.isCleanup());
260}
261
/// Factory wrapper around the constructor: create a landing pad of type
/// \p RetTy with room reserved for \p NumReservedClauses clauses.
LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       InsertPosition InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}
267
268void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
269 ReservedSpace = NumReservedValues;
271 allocHungoffUses(ReservedSpace);
272 setName(NameStr);
273 setCleanup(false);
274}
275
276/// growOperands - grow operands - This grows the operand list in response to a
277/// push_back style of operation. This grows the number of ops by 2 times.
278void LandingPadInst::growOperands(unsigned Size) {
279 unsigned e = getNumOperands();
280 if (ReservedSpace >= e + Size) return;
281 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
282 growHungoffUses(ReservedSpace);
283}
284
286 unsigned OpNo = getNumOperands();
287 growOperands(1);
288 assert(OpNo < ReservedSpace && "Growing didn't work!");
290 getOperandList()[OpNo] = Val;
291}
292
293//===----------------------------------------------------------------------===//
294// CallBase Implementation
295//===----------------------------------------------------------------------===//
296
298 InsertPosition InsertPt) {
299 switch (CB->getOpcode()) {
300 case Instruction::Call:
301 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
302 case Instruction::Invoke:
303 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
304 case Instruction::CallBr:
305 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
306 default:
307 llvm_unreachable("Unknown CallBase sub-class!");
308 }
309}
310
312 InsertPosition InsertPt) {
314 for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
315 auto ChildOB = CI->getOperandBundleAt(i);
316 if (ChildOB.getTagName() != OpB.getTag())
317 OpDefs.emplace_back(ChildOB);
318 }
319 OpDefs.emplace_back(OpB);
320 return CallBase::Create(CI, OpDefs, InsertPt);
321}
322
324
326 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
327 return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
328}
329
331 const Value *V = getCalledOperand();
332 if (isa<Function>(V) || isa<Constant>(V))
333 return false;
334 return !isInlineAsm();
335}
336
337/// Tests if this call site must be tail call optimized. Only a CallInst can
338/// be tail call optimized.
340 if (auto *CI = dyn_cast<CallInst>(this))
341 return CI->isMustTailCall();
342 return false;
343}
344
345/// Tests if this call site is marked as a tail call.
347 if (auto *CI = dyn_cast<CallInst>(this))
348 return CI->isTailCall();
349 return false;
350}
351
354 return F->getIntrinsicID();
356}
357
359 FPClassTest Mask = Attrs.getRetNoFPClass();
360
361 if (const Function *F = getCalledFunction())
362 Mask |= F->getAttributes().getRetNoFPClass();
363 return Mask;
364}
365
367 FPClassTest Mask = Attrs.getParamNoFPClass(i);
368
369 if (const Function *F = getCalledFunction())
370 Mask |= F->getAttributes().getParamNoFPClass(i);
371 return Mask;
372}
373
374std::optional<ConstantRange> CallBase::getRange() const {
375 Attribute CallAttr = Attrs.getRetAttr(Attribute::Range);
377 if (const Function *F = getCalledFunction())
378 FnAttr = F->getRetAttribute(Attribute::Range);
379
380 if (CallAttr.isValid() && FnAttr.isValid())
381 return CallAttr.getRange().intersectWith(FnAttr.getRange());
382 if (CallAttr.isValid())
383 return CallAttr.getRange();
384 if (FnAttr.isValid())
385 return FnAttr.getRange();
386 return std::nullopt;
387}
388
390 if (hasRetAttr(Attribute::NonNull))
391 return true;
392
393 if (getRetDereferenceableBytes() > 0 &&
395 return true;
396
397 return false;
398}
399
401 unsigned Index;
402
403 if (Attrs.hasAttrSomewhere(Kind, &Index))
404 return getArgOperand(Index - AttributeList::FirstArgIndex);
405 if (const Function *F = getCalledFunction())
406 if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
407 return getArgOperand(Index - AttributeList::FirstArgIndex);
408
409 return nullptr;
410}
411
412/// Determine whether the argument or parameter has the given attribute.
413bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
414 assert(ArgNo < arg_size() && "Param index out of bounds!");
415
416 if (Attrs.hasParamAttr(ArgNo, Kind))
417 return true;
418
419 const Function *F = getCalledFunction();
420 if (!F)
421 return false;
422
423 if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
424 return false;
425
426 // Take into account mod/ref by operand bundles.
427 switch (Kind) {
428 case Attribute::ReadNone:
430 case Attribute::ReadOnly:
432 case Attribute::WriteOnly:
433 return !hasReadingOperandBundles();
434 default:
435 return true;
436 }
437}
438
440 bool AllowUndefOrPoison) const {
442 "Argument must be a pointer");
443 if (paramHasAttr(ArgNo, Attribute::NonNull) &&
444 (AllowUndefOrPoison || paramHasAttr(ArgNo, Attribute::NoUndef)))
445 return true;
446
447 if (paramHasAttr(ArgNo, Attribute::Dereferenceable) &&
449 getCaller(),
451 return true;
452
453 return false;
454}
455
456bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
458 return F->getAttributes().hasFnAttr(Kind);
459
460 return false;
461}
462
463bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
465 return F->getAttributes().hasFnAttr(Kind);
466
467 return false;
468}
469
470template <typename AK>
471Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
472 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
473 // getMemoryEffects() correctly combines memory effects from the call-site,
474 // operand bundles and function.
475 assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
476 }
477
479 return F->getAttributes().getFnAttr(Kind);
480
481 return Attribute();
482}
483
484template LLVM_ABI Attribute
485CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
486template LLVM_ABI Attribute
487CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
488
489template <typename AK>
490Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
491 AK Kind) const {
493
494 if (auto *F = dyn_cast<Function>(V))
495 return F->getAttributes().getParamAttr(ArgNo, Kind);
496
497 return Attribute();
498}
499template LLVM_ABI Attribute CallBase::getParamAttrOnCalledFunction(
500 unsigned ArgNo, Attribute::AttrKind Kind) const;
501template LLVM_ABI Attribute
502CallBase::getParamAttrOnCalledFunction(unsigned ArgNo, StringRef Kind) const;
503
506 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
508}
509
512 const unsigned BeginIndex) {
513 auto It = op_begin() + BeginIndex;
514 for (auto &B : Bundles)
515 It = std::copy(B.input_begin(), B.input_end(), It);
516
517 auto *ContextImpl = getContext().pImpl;
518 auto BI = Bundles.begin();
519 unsigned CurrentIndex = BeginIndex;
520
521 for (auto &BOI : bundle_op_infos()) {
522 assert(BI != Bundles.end() && "Incorrect allocation?");
523
524 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
525 BOI.Begin = CurrentIndex;
526 BOI.End = CurrentIndex + BI->input_size();
527 CurrentIndex = BOI.End;
528 BI++;
529 }
530
531 assert(BI == Bundles.end() && "Incorrect allocation?");
532
533 return It;
534}
535
537  /// When there aren't many bundles, we do a simple linear search.
538  /// Else fall back to a binary search that uses the fact that bundles
539  /// usually have a similar number of arguments to get faster convergence.
541 for (auto &BOI : bundle_op_infos())
542 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
543 return BOI;
544
545 llvm_unreachable("Did not find operand bundle for operand!");
546 }
547
548 assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
550 OpIdx < std::prev(bundle_op_info_end())->End &&
551 "The Idx isn't in the operand bundle");
552
553  /// We need a fractional value below, and to avoid floating point numbers
554  /// we use an integral value multiplied by this constant.
555 constexpr unsigned NumberScaling = 1024;
556
559 bundle_op_iterator Current = Begin;
560
561 while (Begin != End) {
562 unsigned ScaledOperandPerBundle =
563 NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
564 Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
565 ScaledOperandPerBundle);
566 if (Current >= End)
567 Current = std::prev(End);
568 assert(Current < End && Current >= Begin &&
569 "the operand bundle doesn't cover every value in the range");
570 if (OpIdx >= Current->Begin && OpIdx < Current->End)
571 break;
572 if (OpIdx >= Current->End)
573 Begin = Current + 1;
574 else
575 End = Current;
576 }
577
578 assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
579 "the operand bundle doesn't cover every value in the range");
580 return *Current;
581}
582
585 InsertPosition InsertPt) {
586 if (CB->getOperandBundle(ID))
587 return CB;
588
590 CB->getOperandBundlesAsDefs(Bundles);
591 Bundles.push_back(OB);
592 return Create(CB, Bundles, InsertPt);
593}
594
596 InsertPosition InsertPt) {
598 bool CreateNew = false;
599
600 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
601 auto Bundle = CB->getOperandBundleAt(I);
602 if (Bundle.getTagID() == ID) {
603 CreateNew = true;
604 continue;
605 }
606 Bundles.emplace_back(Bundle);
607 }
608
609 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
610}
611
613 // Implementation note: this is a conservative implementation of operand
614 // bundle semantics, where *any* non-assume operand bundle (other than
615 // ptrauth) forces a callsite to be at least readonly.
620 getIntrinsicID() != Intrinsic::assume;
621}
622
631
633 MemoryEffects ME = getAttributes().getMemoryEffects();
634 if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
635 MemoryEffects FnME = Fn->getMemoryEffects();
636 if (hasOperandBundles()) {
637 // TODO: Add a method to get memory effects for operand bundles instead.
639 FnME |= MemoryEffects::readOnly();
641 FnME |= MemoryEffects::writeOnly();
642 }
643 if (isVolatile()) {
644 // Volatile operations also access inaccessible memory.
646 }
647 ME &= FnME;
648 }
649 return ME;
650}
654
655/// Determine if the function does not access memory.
662
663/// Determine if the function does not access or only reads memory.
670
671/// Determine if the function does not access or only writes memory.
678
679/// Determine if the call can access memmory only using pointers based
680/// on its arguments.
687
688/// Determine if the function may only access memory that is
689/// inaccessible from the IR.
696
697/// Determine if the function may only access memory that is
698/// either inaccessible from the IR or pointed to by its arguments.
706
708 if (OpNo < arg_size()) {
709 // If the argument is passed byval, the callee does not have access to the
710 // original pointer and thus cannot capture it.
711 if (isByValArgument(OpNo))
712 return CaptureInfo::none();
713
715 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
716 CI &= Fn->getAttributes().getParamAttrs(OpNo).getCaptureInfo();
717 return CI;
718 }
719
720 // Bundles on assumes are captures(none).
721 if (getIntrinsicID() == Intrinsic::assume)
722 return CaptureInfo::none();
723
724 // deopt operand bundles are captures(none)
725 auto &BOI = getBundleOpInfoForOperand(OpNo);
726 auto OBU = operandBundleFromBundleOpInfo(BOI);
727 return OBU.isDeoptOperandBundle() ? CaptureInfo::none() : CaptureInfo::all();
728}
729
731 for (unsigned I = 0, E = arg_size(); I < E; ++I) {
733 continue;
734
736 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
737 CI &= Fn->getAttributes().getParamAttrs(I).getCaptureInfo();
739 return true;
740 }
741 return false;
742}
743
744//===----------------------------------------------------------------------===//
745// CallInst Implementation
746//===----------------------------------------------------------------------===//
747
// Initialize a call: store the callee, all arguments, and any operand-bundle
// inputs into an operand list that was pre-sized by the caller.
void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  // The allocation must already account for every argument, every bundle
  // input, and the trailing callee operand.
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  // Each fixed argument must match the corresponding parameter type;
  // varargs (indices past getNumParams()) are unchecked.
  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setCalledOperand(Func);

  // Bundle inputs occupy the slots between the arguments and the callee.
  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}
776
// Initialize a zero-argument, bundle-free call: the callee is the only
// operand.
void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  // A no-argument init is only valid for a nullary function type.
  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}
786
// Construct a zero-argument call to Func; operand storage was sized via
// AllocInfo, and init() fills in the callee operand and name.
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   AllocInfo AllocInfo, InsertPosition InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call, AllocInfo,
               InsertBefore) {
  init(Ty, Func, Name);
}
793
794CallInst::CallInst(const CallInst &CI, AllocInfo AllocInfo)
795 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call, AllocInfo) {
797 "Wrong number of operands allocated");
798 setTailCallKind(CI.getTailCallKind());
800
801 std::copy(CI.op_begin(), CI.op_end(), op_begin());
802 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
805}
806
808 InsertPosition InsertPt) {
809 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
810
811 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
812 Args, OpB, CI->getName(), InsertPt);
813 NewCI->setTailCallKind(CI->getTailCallKind());
814 NewCI->setCallingConv(CI->getCallingConv());
815 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
816 NewCI->setAttributes(CI->getAttributes());
817 NewCI->setDebugLoc(CI->getDebugLoc());
818 return NewCI;
819}
820
821// Update the profile weight of this call instruction by scaling it with the
822// ratio S/T. The meaning of "branch_weights" metadata for a call instruction
823// is transferred to represent the call count.
825 if (T == 0) {
826 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
827 "div by 0. Ignoring. Likely the function "
828 << getParent()->getParent()->getName()
829 << " has 0 entry count, and contains call instructions "
830 "with non-zero prof info.");
831 return;
832 }
833 scaleProfData(*this, S, T);
834}
835
836//===----------------------------------------------------------------------===//
837// InvokeInst Implementation
838//===----------------------------------------------------------------------===//
839
840void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
841 BasicBlock *IfException, ArrayRef<Value *> Args,
843 const Twine &NameStr) {
844 this->FTy = FTy;
845
847 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
848 "NumOperands not set up?");
849
850#ifndef NDEBUG
851 assert(((Args.size() == FTy->getNumParams()) ||
852 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
853 "Invoking a function with bad signature");
854
855 for (unsigned i = 0, e = Args.size(); i != e; i++)
856 assert((i >= FTy->getNumParams() ||
857 FTy->getParamType(i) == Args[i]->getType()) &&
858 "Invoking a function with a bad signature!");
859#endif
860
861 // Set operands in order of their index to match use-list-order
862 // prediction.
863 llvm::copy(Args, op_begin());
864 setNormalDest(IfNormal);
865 setUnwindDest(IfException);
867
868 auto It = populateBundleOperandInfos(Bundles, Args.size());
869 (void)It;
870 assert(It + 3 == op_end() && "Should add up!");
871
872 setName(NameStr);
873}
874
875InvokeInst::InvokeInst(const InvokeInst &II, AllocInfo AllocInfo)
876 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke, AllocInfo) {
877 assert(getNumOperands() == II.getNumOperands() &&
878 "Wrong number of operands allocated");
879 setCallingConv(II.getCallingConv());
880 std::copy(II.op_begin(), II.op_end(), op_begin());
881 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
883 SubclassOptionalData = II.SubclassOptionalData;
884}
885
887 InsertPosition InsertPt) {
888 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
889
890 auto *NewII = InvokeInst::Create(
891 II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
892 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
893 NewII->setCallingConv(II->getCallingConv());
894 NewII->SubclassOptionalData = II->SubclassOptionalData;
895 NewII->setAttributes(II->getAttributes());
896 NewII->setDebugLoc(II->getDebugLoc());
897 return NewII;
898}
899
901 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHIIt());
902}
903
905 if (T == 0) {
906 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
907 "div by 0. Ignoring. Likely the function "
908 << getParent()->getParent()->getName()
909 << " has 0 entry count, and contains call instructions "
910 "with non-zero prof info.");
911 return;
912 }
913 scaleProfData(*this, S, T);
914}
915
916//===----------------------------------------------------------------------===//
917// CallBrInst Implementation
918//===----------------------------------------------------------------------===//
919
920void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
921 ArrayRef<BasicBlock *> IndirectDests,
924 const Twine &NameStr) {
925 this->FTy = FTy;
926
927 assert(getNumOperands() == ComputeNumOperands(Args.size(),
928 IndirectDests.size(),
929 CountBundleInputs(Bundles)) &&
930 "NumOperands not set up?");
931
932#ifndef NDEBUG
933 assert(((Args.size() == FTy->getNumParams()) ||
934 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
935 "Calling a function with bad signature");
936
937 for (unsigned i = 0, e = Args.size(); i != e; i++)
938 assert((i >= FTy->getNumParams() ||
939 FTy->getParamType(i) == Args[i]->getType()) &&
940 "Calling a function with a bad signature!");
941#endif
942
943 // Set operands in order of their index to match use-list-order
944 // prediction.
945 llvm::copy(Args, op_begin());
946 NumIndirectDests = IndirectDests.size();
947 setDefaultDest(Fallthrough);
948 for (unsigned i = 0; i != NumIndirectDests; ++i)
949 setIndirectDest(i, IndirectDests[i]);
951
952 auto It = populateBundleOperandInfos(Bundles, Args.size());
953 (void)It;
954 assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
955
956 setName(NameStr);
957}
958
959CallBrInst::CallBrInst(const CallBrInst &CBI, AllocInfo AllocInfo)
960 : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
961 AllocInfo) {
963 "Wrong number of operands allocated");
965 std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
966 std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
969 NumIndirectDests = CBI.NumIndirectDests;
970}
971
972CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
973 InsertPosition InsertPt) {
974 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
975
976 auto *NewCBI = CallBrInst::Create(
977 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
978 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
979 NewCBI->setCallingConv(CBI->getCallingConv());
980 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
981 NewCBI->setAttributes(CBI->getAttributes());
982 NewCBI->setDebugLoc(CBI->getDebugLoc());
983 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
984 return NewCBI;
985}
986
987//===----------------------------------------------------------------------===//
988// ReturnInst Implementation
989//===----------------------------------------------------------------------===//
990
991ReturnInst::ReturnInst(const ReturnInst &RI, AllocInfo AllocInfo)
992 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
993 AllocInfo) {
995 "Wrong number of operands allocated");
996 if (RI.getNumOperands())
997 Op<0>() = RI.Op<0>();
999}
1000
// Construct a 'ret' instruction. retVal may be null for a void return, in
// which case no operand is stored.
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, AllocInfo AllocInfo,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret, AllocInfo,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}
1008
1009//===----------------------------------------------------------------------===//
1010// ResumeInst Implementation
1011//===----------------------------------------------------------------------===//
1012
// Copy constructor: duplicate RI's single operand (the exception value
// being resumed).
ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  AllocMarker) {
  Op<0>() = RI.Op<0>();
}
1018
// Construct a 'resume' instruction whose single operand is the exception
// value Exn to propagate.
ResumeInst::ResumeInst(Value *Exn, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  AllocMarker, InsertBefore) {
  Op<0>() = Exn;
}
1024
1025//===----------------------------------------------------------------------===//
1026// CleanupReturnInst Implementation
1027//===----------------------------------------------------------------------===//
1028
1029CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI,
1031 : Instruction(CRI.getType(), Instruction::CleanupRet, AllocInfo) {
1033 "Wrong number of operands allocated");
1034 setSubclassData<Instruction::OpaqueField>(
1036 Op<0>() = CRI.Op<0>();
1037 if (CRI.hasUnwindDest())
1038 Op<1>() = CRI.Op<1>();
1039}
1040
1041void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
1042 if (UnwindBB)
1043 setSubclassData<UnwindDestField>(true);
1044
1045 Op<0>() = CleanupPad;
1046 if (UnwindBB)
1047 Op<1>() = UnwindBB;
1048}
1049
1050CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1052 InsertPosition InsertBefore)
1053 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1054 Instruction::CleanupRet, AllocInfo, InsertBefore) {
1055 init(CleanupPad, UnwindBB);
1056}
1057
1058//===----------------------------------------------------------------------===//
1059// CatchReturnInst Implementation
1060//===----------------------------------------------------------------------===//
// Store the two operands of a catchret: the catchpad token and the
// successor basic block.
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}
1065
// Copy constructor: duplicate both operands (catchpad token and successor
// block) from CRI.
CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  AllocMarker) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}
1072
// Construct a catchret from CatchPad that transfers control to BB.
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  AllocMarker, InsertBefore) {
  init(CatchPad, BB);
}
1079
1080//===----------------------------------------------------------------------===//
1081// CatchSwitchInst Implementation
1082//===----------------------------------------------------------------------===//
1083
1084CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1085 unsigned NumReservedValues,
1086 const Twine &NameStr,
1087 InsertPosition InsertBefore)
1088 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, AllocMarker,
1089 InsertBefore) {
1090 if (UnwindDest)
1091 ++NumReservedValues;
1092 init(ParentPad, UnwindDest, NumReservedValues + 1);
1093 setName(NameStr);
1094}
1095
1096CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
1097 : Instruction(CSI.getType(), Instruction::CatchSwitch, AllocMarker) {
1099 init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
1100 setNumHungOffUseOperands(ReservedSpace);
1101 Use *OL = getOperandList();
1102 const Use *InOL = CSI.getOperandList();
1103 for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
1104 OL[I] = InOL[I];
1105}
1106
1107void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
1108 unsigned NumReservedValues) {
1109 assert(ParentPad && NumReservedValues);
1110
1111 ReservedSpace = NumReservedValues;
1112 setNumHungOffUseOperands(UnwindDest ? 2 : 1);
1113 allocHungoffUses(ReservedSpace);
1114
1115 Op<0>() = ParentPad;
1116 if (UnwindDest) {
1118 setUnwindDest(UnwindDest);
1119 }
1120}
1121
1122/// growOperands - grow operands - This grows the operand list in response to a
1123/// push_back style of operation. This grows the number of ops by 2 times.
1124void CatchSwitchInst::growOperands(unsigned Size) {
1125 unsigned NumOperands = getNumOperands();
1126 assert(NumOperands >= 1);
1127 if (ReservedSpace >= NumOperands + Size)
1128 return;
1129 ReservedSpace = (NumOperands + Size / 2) * 2;
1130 growHungoffUses(ReservedSpace);
1131}
1132
1134 unsigned OpNo = getNumOperands();
1135 growOperands(1);
1136 assert(OpNo < ReservedSpace && "Growing didn't work!");
1138 getOperandList()[OpNo] = Handler;
1139}
1140
1142 // Move all subsequent handlers up one.
1143 Use *EndDst = op_end() - 1;
1144 for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
1145 *CurDst = *(CurDst + 1);
1146 // Null out the last handler use.
1147 *EndDst = nullptr;
1148
1150}
1151
1152//===----------------------------------------------------------------------===//
1153// FuncletPadInst Implementation
1154//===----------------------------------------------------------------------===//
1155void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
1156 const Twine &NameStr) {
1157 assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
1158 llvm::copy(Args, op_begin());
1159 setParentPad(ParentPad);
1160 setName(NameStr);
1161}
1162
1163FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI, AllocInfo AllocInfo)
1164 : Instruction(FPI.getType(), FPI.getOpcode(), AllocInfo) {
1166 "Wrong number of operands allocated");
1167 std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
1169}
1170
1171FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1173 const Twine &NameStr,
1174 InsertPosition InsertBefore)
1175 : Instruction(ParentPad->getType(), Op, AllocInfo, InsertBefore) {
1176 init(ParentPad, Args, NameStr);
1177}
1178
1179//===----------------------------------------------------------------------===//
1180// UnreachableInst Implementation
1181//===----------------------------------------------------------------------===//
1182
1184 InsertPosition InsertBefore)
1185 : Instruction(Type::getVoidTy(Context), Instruction::Unreachable,
1186 AllocMarker, InsertBefore) {}
1187
1188//===----------------------------------------------------------------------===//
1189// UncondBrInst Implementation
1190//===----------------------------------------------------------------------===//
1191
1192// Suppress deprecation warnings from BranchInst.
1194
1195UncondBrInst::UncondBrInst(BasicBlock *Target, InsertPosition InsertBefore)
1196 : BranchInst(Type::getVoidTy(Target->getContext()), Instruction::UncondBr,
1197 AllocMarker, InsertBefore) {
1198 Op<-1>() = Target;
1199}
1200
1201UncondBrInst::UncondBrInst(const UncondBrInst &BI)
1202 : BranchInst(Type::getVoidTy(BI.getContext()), Instruction::UncondBr,
1203 AllocMarker) {
1204 Op<-1>() = BI.Op<-1>();
1205 SubclassOptionalData = BI.SubclassOptionalData;
1206}
1207
1208//===----------------------------------------------------------------------===//
1209// CondBrInst Implementation
1210//===----------------------------------------------------------------------===//
1211
1212void CondBrInst::AssertOK() {
1213 assert(getCondition()->getType()->isIntegerTy(1) &&
1214 "May only branch on boolean predicates!");
1215}
1216
1217CondBrInst::CondBrInst(Value *Cond, BasicBlock *IfTrue, BasicBlock *IfFalse,
1218 InsertPosition InsertBefore)
1219 : BranchInst(Type::getVoidTy(IfTrue->getContext()), Instruction::CondBr,
1220 AllocMarker, InsertBefore) {
1221 // Assign in order of operand index to make use-list order predictable.
1222 Op<-3>() = Cond;
1223 Op<-2>() = IfTrue;
1224 Op<-1>() = IfFalse;
1225#ifndef NDEBUG
1226 AssertOK();
1227#endif
1228}
1229
1230CondBrInst::CondBrInst(const CondBrInst &BI)
1231 : BranchInst(Type::getVoidTy(BI.getContext()), Instruction::CondBr,
1232 AllocMarker) {
1233 // Assign in order of operand index to make use-list order predictable.
1234 Op<-3>() = BI.Op<-3>();
1235 Op<-2>() = BI.Op<-2>();
1236 Op<-1>() = BI.Op<-1>();
1237 SubclassOptionalData = BI.SubclassOptionalData;
1238}
1239
1241 Op<-1>().swap(Op<-2>());
1242
1243 // Update profile metadata if present and it matches our structural
1244 // expectations.
1245 swapProfMetadata();
1246}
1247
1248// Suppress deprecation warnings from BranchInst.
1250
1251//===----------------------------------------------------------------------===//
1252// AllocaInst Implementation
1253//===----------------------------------------------------------------------===//
1254
1255static Value *getAISize(LLVMContext &Context, Value *Amt) {
1256 if (!Amt)
1257 Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1258 else {
1259 assert(!isa<BasicBlock>(Amt) &&
1260 "Passed basic block into allocation size parameter! Use other ctor");
1261 assert(Amt->getType()->isIntegerTy() &&
1262 "Allocation array size is not an integer!");
1263 }
1264 return Amt;
1265}
1266
1268 assert(Pos.isValid() &&
1269 "Insertion position cannot be null when alignment not provided!");
1270 BasicBlock *BB = Pos.getBasicBlock();
1271 assert(BB->getParent() &&
1272 "BB must be in a Function when alignment not provided!");
1273 const DataLayout &DL = BB->getDataLayout();
1274 return DL.getPrefTypeAlign(Ty);
1275}
1276
1277AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
1278 InsertPosition InsertBefore)
1279 : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1280
1281AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1282 const Twine &Name, InsertPosition InsertBefore)
1283 : AllocaInst(Ty, AddrSpace, ArraySize,
1284 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
1285 InsertBefore) {}
1286
1287AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1288 Align Align, const Twine &Name,
1289 InsertPosition InsertBefore)
1290 : UnaryInstruction(PointerType::get(Ty->getContext(), AddrSpace), Alloca,
1291 getAISize(Ty->getContext(), ArraySize), InsertBefore),
1292 AllocatedType(Ty) {
1294 assert(!Ty->isVoidTy() && "Cannot allocate void!");
1295 setName(Name);
1296}
1297
1300 return !CI->isOne();
1301 return true;
1302}
1303
1304/// isStaticAlloca - Return true if this alloca is in the entry block of the
1305/// function and is a constant size. If so, the code generator will fold it
1306/// into the prolog/epilog code, so it is basically free.
1308 // Must be constant size.
1309 if (!isa<ConstantInt>(getArraySize())) return false;
1310
1311 // Must be in the entry block.
1312 const BasicBlock *Parent = getParent();
1313 return Parent->isEntryBlock() && !isUsedWithInAlloca();
1314}
1315
1316//===----------------------------------------------------------------------===//
1317// LoadInst Implementation
1318//===----------------------------------------------------------------------===//
1319
1320void LoadInst::AssertOK() {
1322 "Ptr must have pointer type.");
1323}
1324
1326 assert(Pos.isValid() &&
1327 "Insertion position cannot be null when alignment not provided!");
1328 BasicBlock *BB = Pos.getBasicBlock();
1329 assert(BB->getParent() &&
1330 "BB must be in a Function when alignment not provided!");
1331 const DataLayout &DL = BB->getDataLayout();
1332 return DL.getABITypeAlign(Ty);
1333}
1334
1335LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
1336 InsertPosition InsertBef)
1337 : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1338
1339LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1340 InsertPosition InsertBef)
1341 : LoadInst(Ty, Ptr, Name, isVolatile,
1342 computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1343
1344LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1345 Align Align, InsertPosition InsertBef)
1346 : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1347 SyncScope::System, InsertBef) {}
1348
1349LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1351 InsertPosition InsertBef)
1352 : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1355 setAtomic(Order, SSID);
1356 AssertOK();
1357 setName(Name);
1358}
1359
1360//===----------------------------------------------------------------------===//
1361// StoreInst Implementation
1362//===----------------------------------------------------------------------===//
1363
1364void StoreInst::AssertOK() {
1365 assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1367 "Ptr must have pointer type!");
1368}
1369
1371 : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1372
1374 InsertPosition InsertBefore)
1375 : StoreInst(val, addr, isVolatile,
1376 computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
1377 InsertBefore) {}
1378
1380 InsertPosition InsertBefore)
1382 SyncScope::System, InsertBefore) {}
1383
1385 AtomicOrdering Order, SyncScope::ID SSID,
1386 InsertPosition InsertBefore)
1387 : Instruction(Type::getVoidTy(val->getContext()), Store, AllocMarker,
1388 InsertBefore) {
1389 Op<0>() = val;
1390 Op<1>() = addr;
1393 setAtomic(Order, SSID);
1394 AssertOK();
1395}
1396
1397//===----------------------------------------------------------------------===//
1398// AtomicCmpXchgInst Implementation
1399//===----------------------------------------------------------------------===//
1400
1401void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1402 Align Alignment, AtomicOrdering SuccessOrdering,
1403 AtomicOrdering FailureOrdering,
1404 SyncScope::ID SSID) {
1405 Op<0>() = Ptr;
1406 Op<1>() = Cmp;
1407 Op<2>() = NewVal;
1408 setSuccessOrdering(SuccessOrdering);
1409 setFailureOrdering(FailureOrdering);
1410 setSyncScopeID(SSID);
1411 setAlignment(Alignment);
1412
1413 assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1414 "All operands must be non-null!");
1416 "Ptr must have pointer type!");
1417 assert(getOperand(1)->getType() == getOperand(2)->getType() &&
1418 "Cmp type and NewVal type must be same!");
1419}
1420
1422 Align Alignment,
1423 AtomicOrdering SuccessOrdering,
1424 AtomicOrdering FailureOrdering,
1425 SyncScope::ID SSID,
1426 InsertPosition InsertBefore)
1427 : Instruction(
1428 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1429 AtomicCmpXchg, AllocMarker, InsertBefore) {
1430 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1431}
1432
1433//===----------------------------------------------------------------------===//
1434// AtomicRMWInst Implementation
1435//===----------------------------------------------------------------------===//
1436
1437void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1438 Align Alignment, AtomicOrdering Ordering,
1439 SyncScope::ID SSID, bool Elementwise) {
1440 assert(Ordering != AtomicOrdering::NotAtomic &&
1441 "atomicrmw instructions can only be atomic.");
1442 assert(Ordering != AtomicOrdering::Unordered &&
1443 "atomicrmw instructions cannot be unordered.");
1444 Op<0>() = Ptr;
1445 Op<1>() = Val;
1447 setOrdering(Ordering);
1448 setSyncScopeID(SSID);
1449 setElementwise(Elementwise);
1450 setAlignment(Alignment);
1451
1452 assert(getOperand(0) && getOperand(1) && "All operands must be non-null!");
1454 "Ptr must have pointer type!");
1455 assert(Ordering != AtomicOrdering::NotAtomic &&
1456 "AtomicRMW instructions must be atomic!");
1457}
1458
1460 Align Alignment, AtomicOrdering Ordering,
1461 SyncScope::ID SSID, bool Elementwise,
1462 InsertPosition InsertBefore)
1463 : Instruction(Val->getType(), AtomicRMW, AllocMarker, InsertBefore) {
1464 Init(Operation, Ptr, Val, Alignment, Ordering, SSID, Elementwise);
1465}
1466
1468 switch (Op) {
1470 return "xchg";
1471 case AtomicRMWInst::Add:
1472 return "add";
1473 case AtomicRMWInst::Sub:
1474 return "sub";
1475 case AtomicRMWInst::And:
1476 return "and";
1478 return "nand";
1479 case AtomicRMWInst::Or:
1480 return "or";
1481 case AtomicRMWInst::Xor:
1482 return "xor";
1483 case AtomicRMWInst::Max:
1484 return "max";
1485 case AtomicRMWInst::Min:
1486 return "min";
1488 return "umax";
1490 return "umin";
1492 return "fadd";
1494 return "fsub";
1496 return "fmax";
1498 return "fmin";
1500 return "fmaximum";
1502 return "fminimum";
1504 return "fmaximumnum";
1506 return "fminimumnum";
1508 return "uinc_wrap";
1510 return "udec_wrap";
1512 return "usub_cond";
1514 return "usub_sat";
1516 return "<invalid operation>";
1517 }
1518
1519 llvm_unreachable("invalid atomicrmw operation");
1520}
1521
1522//===----------------------------------------------------------------------===//
1523// FenceInst Implementation
1524//===----------------------------------------------------------------------===//
1525
1527 SyncScope::ID SSID, InsertPosition InsertBefore)
1528 : Instruction(Type::getVoidTy(C), Fence, AllocMarker, InsertBefore) {
1529 setOrdering(Ordering);
1530 setSyncScopeID(SSID);
1531}
1532
1533//===----------------------------------------------------------------------===//
1534// GetElementPtrInst Implementation
1535//===----------------------------------------------------------------------===//
1536
1537void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1538 const Twine &Name) {
1539 assert(getNumOperands() == 1 + IdxList.size() &&
1540 "NumOperands not initialized?");
1541 Op<0>() = Ptr;
1542 llvm::copy(IdxList, op_begin() + 1);
1543 setName(Name);
1544}
1545
1546GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI,
1548 : Instruction(GEPI.getType(), GetElementPtr, AllocInfo),
1549 SourceElementType(GEPI.SourceElementType),
1550 ResultElementType(GEPI.ResultElementType) {
1551 assert(getNumOperands() == GEPI.getNumOperands() &&
1552 "Wrong number of operands allocated");
1553 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
1555}
1556
1558 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1559 if (!Struct->indexValid(Idx))
1560 return nullptr;
1561 return Struct->getTypeAtIndex(Idx);
1562 }
1563 if (!Idx->getType()->isIntOrIntVectorTy())
1564 return nullptr;
1565 if (auto *Array = dyn_cast<ArrayType>(Ty))
1566 return Array->getElementType();
1567 if (auto *Vector = dyn_cast<VectorType>(Ty))
1568 return Vector->getElementType();
1569 return nullptr;
1570}
1571
1573 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1574 if (Idx >= Struct->getNumElements())
1575 return nullptr;
1576 return Struct->getElementType(Idx);
1577 }
1578 if (auto *Array = dyn_cast<ArrayType>(Ty))
1579 return Array->getElementType();
1580 if (auto *Vector = dyn_cast<VectorType>(Ty))
1581 return Vector->getElementType();
1582 return nullptr;
1583}
1584
1585template <typename IndexTy>
1587 if (IdxList.empty())
1588 return Ty;
1589 for (IndexTy V : IdxList.slice(1)) {
1591 if (!Ty)
1592 return Ty;
1593 }
1594 return Ty;
1595}
1596
1600
1602 ArrayRef<Constant *> IdxList) {
1603 return getIndexedTypeInternal(Ty, IdxList);
1604}
1605
1609
1610/// hasAllZeroIndices - Return true if all of the indices of this GEP are
1611/// zeros. If so, the result pointer and the first operand have the same
1612/// value, just potentially different types.
1614 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1616 if (!CI->isZero()) return false;
1617 } else {
1618 return false;
1619 }
1620 }
1621 return true;
1622}
1623
1624/// hasAllConstantIndices - Return true if all of the indices of this GEP are
1625/// constant integers. If so, the result pointer and the first operand have
1626/// a constant offset between them.
1628 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1630 return false;
1631 }
1632 return true;
1633}
1634
1638
1640 GEPNoWrapFlags NW = cast<GEPOperator>(this)->getNoWrapFlags();
1641 if (B)
1643 else
1644 NW = NW.withoutInBounds();
1645 setNoWrapFlags(NW);
1646}
1647
1649 return cast<GEPOperator>(this)->getNoWrapFlags();
1650}
1651
1653 return cast<GEPOperator>(this)->isInBounds();
1654}
1655
1657 return cast<GEPOperator>(this)->hasNoUnsignedSignedWrap();
1658}
1659
1661 return cast<GEPOperator>(this)->hasNoUnsignedWrap();
1662}
1663
1665 APInt &Offset) const {
1666 // Delegate to the generic GEPOperator implementation.
1667 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
1668}
1669
1671 const DataLayout &DL, unsigned BitWidth,
1672 SmallMapVector<Value *, APInt, 4> &VariableOffsets,
1673 APInt &ConstantOffset) const {
1674 // Delegate to the generic GEPOperator implementation.
1675 return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
1676 ConstantOffset);
1677}
1678
1679//===----------------------------------------------------------------------===//
1680// ExtractElementInst Implementation
1681//===----------------------------------------------------------------------===//
1682
1683ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1684 const Twine &Name,
1685 InsertPosition InsertBef)
1686 : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1687 ExtractElement, AllocMarker, InsertBef) {
1688 assert(isValidOperands(Val, Index) &&
1689 "Invalid extractelement instruction operands!");
1690 Op<0>() = Val;
1691 Op<1>() = Index;
1692 setName(Name);
1693}
1694
1695bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1696 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1697 return false;
1698 return true;
1699}
1700
1701//===----------------------------------------------------------------------===//
1702// InsertElementInst Implementation
1703//===----------------------------------------------------------------------===//
1704
1705InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1706 const Twine &Name,
1707 InsertPosition InsertBef)
1708 : Instruction(Vec->getType(), InsertElement, AllocMarker, InsertBef) {
1709 assert(isValidOperands(Vec, Elt, Index) &&
1710 "Invalid insertelement instruction operands!");
1711 Op<0>() = Vec;
1712 Op<1>() = Elt;
1713 Op<2>() = Index;
1714 setName(Name);
1715}
1716
1718 const Value *Index) {
1719 if (!Vec->getType()->isVectorTy())
1720 return false; // First operand of insertelement must be vector type.
1721
1722 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
1723 return false;// Second operand of insertelement must be vector element type.
1724
1725 if (!Index->getType()->isIntegerTy())
1726 return false; // Third operand of insertelement must be i32.
1727 return true;
1728}
1729
1730//===----------------------------------------------------------------------===//
1731// ShuffleVectorInst Implementation
1732//===----------------------------------------------------------------------===//
1733
1735 assert(V && "Cannot create placeholder of nullptr V");
1736 return PoisonValue::get(V->getType());
1737}
1738
1740 InsertPosition InsertBefore)
1742 InsertBefore) {}
1743
1745 const Twine &Name,
1746 InsertPosition InsertBefore)
1748 InsertBefore) {}
1749
1751 const Twine &Name,
1752 InsertPosition InsertBefore)
1753 : Instruction(
1754 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1755 cast<VectorType>(Mask->getType())->getElementCount()),
1756 ShuffleVector, AllocMarker, InsertBefore) {
1757 assert(isValidOperands(V1, V2, Mask) &&
1758 "Invalid shuffle vector instruction operands!");
1759
1760 Op<0>() = V1;
1761 Op<1>() = V2;
1762 SmallVector<int, 16> MaskArr;
1763 getShuffleMask(cast<Constant>(Mask), MaskArr);
1764 setShuffleMask(MaskArr);
1765 setName(Name);
1766}
1767
1769 const Twine &Name,
1770 InsertPosition InsertBefore)
1771 : Instruction(
1772 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1773 Mask.size(), isa<ScalableVectorType>(V1->getType())),
1774 ShuffleVector, AllocMarker, InsertBefore) {
1775 assert(isValidOperands(V1, V2, Mask) &&
1776 "Invalid shuffle vector instruction operands!");
1777 Op<0>() = V1;
1778 Op<1>() = V2;
1779 setShuffleMask(Mask);
1780 setName(Name);
1781}
1782
1784 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
1785 int NumMaskElts = ShuffleMask.size();
1786 SmallVector<int, 16> NewMask(NumMaskElts);
1787 for (int i = 0; i != NumMaskElts; ++i) {
1788 int MaskElt = getMaskValue(i);
1789 if (MaskElt == PoisonMaskElem) {
1790 NewMask[i] = PoisonMaskElem;
1791 continue;
1792 }
1793 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
1794 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
1795 NewMask[i] = MaskElt;
1796 }
1797 setShuffleMask(NewMask);
1798 Op<0>().swap(Op<1>());
1799}
1800
1802 ArrayRef<int> Mask) {
1803 // V1 and V2 must be vectors of the same type.
1804 if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
1805 return false;
1806
1807 // Make sure the mask elements make sense.
1808 int V1Size =
1809 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
1810 for (int Elem : Mask)
1811 if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
1812 return false;
1813
1815 if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
1816 return false;
1817
1818 return true;
1819}
1820
1822 const Value *Mask) {
1823 // V1 and V2 must be vectors of the same type.
1824 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
1825 return false;
1826
1827 // Mask must be vector of i32, and must be the same kind of vector as the
1828 // input vectors
1829 auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
1830 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
1832 return false;
1833
1834 // Check to see if Mask is valid.
1836 return true;
1837
1838 // NOTE: Through vector ConstantInt we have the potential to support more
1839 // than just zero splat masks but that requires a LangRef change.
1840 if (isa<ScalableVectorType>(MaskTy))
1841 return false;
1842
1843 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
1844
1845 if (const auto *CI = dyn_cast<ConstantInt>(Mask))
1846 return !CI->uge(V1Size * 2);
1847
1848 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
1849 for (Value *Op : MV->operands()) {
1850 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1851 if (CI->uge(V1Size*2))
1852 return false;
1853 } else if (!isa<UndefValue>(Op)) {
1854 return false;
1855 }
1856 }
1857 return true;
1858 }
1859
1860 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1861 for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
1862 i != e; ++i)
1863 if (CDS->getElementAsInteger(i) >= V1Size*2)
1864 return false;
1865 return true;
1866 }
1867
1868 return false;
1869}
1870
1872 SmallVectorImpl<int> &Result) {
1873 ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
1874
1875 if (isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) {
1876 int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
1877 Result.append(EC.getKnownMinValue(), MaskVal);
1878 return;
1879 }
1880
1881 assert(!EC.isScalable() &&
1882 "Scalable vector shuffle mask must be undef or zeroinitializer");
1883
1884 unsigned NumElts = EC.getFixedValue();
1885
1886 Result.reserve(NumElts);
1887
1888 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1889 for (unsigned i = 0; i != NumElts; ++i)
1890 Result.push_back(CDS->getElementAsInteger(i));
1891 return;
1892 }
1893 for (unsigned i = 0; i != NumElts; ++i) {
1894 Constant *C = Mask->getAggregateElement(i);
1895 Result.push_back(isa<UndefValue>(C) ? -1 :
1896 cast<ConstantInt>(C)->getZExtValue());
1897 }
1898}
1899
1901 ShuffleMask.assign(Mask.begin(), Mask.end());
1902 ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
1903}
1904
1906 Type *ResultTy) {
1907 Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
1908 if (isa<ScalableVectorType>(ResultTy)) {
1909 assert(all_equal(Mask) && "Unexpected shuffle");
1910 Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
1911 if (Mask[0] == 0)
1912 return Constant::getNullValue(VecTy);
1913 return PoisonValue::get(VecTy);
1914 }
1916 for (int Elem : Mask) {
1917 if (Elem == PoisonMaskElem)
1919 else
1920 MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
1921 }
1922 return ConstantVector::get(MaskConst);
1923}
1924
1925static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1926 assert(!Mask.empty() && "Shuffle mask must contain elements");
1927 bool UsesLHS = false;
1928 bool UsesRHS = false;
1929 for (int I : Mask) {
1930 if (I == -1)
1931 continue;
1932 assert(I >= 0 && I < (NumOpElts * 2) &&
1933 "Out-of-bounds shuffle mask element");
1934 UsesLHS |= (I < NumOpElts);
1935 UsesRHS |= (I >= NumOpElts);
1936 if (UsesLHS && UsesRHS)
1937 return false;
1938 }
1939 // Allow for degenerate case: completely undef mask means neither source is used.
1940 return UsesLHS || UsesRHS;
1941}
1942
1944 // We don't have vector operand size information, so assume operands are the
1945 // same size as the mask.
1946 return isSingleSourceMaskImpl(Mask, NumSrcElts);
1947}
1948
1949static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1950 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
1951 return false;
1952 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1953 if (Mask[i] == -1)
1954 continue;
1955 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1956 return false;
1957 }
1958 return true;
1959}
1960
1962 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1963 return false;
1964 // We don't have vector operand size information, so assume operands are the
1965 // same size as the mask.
1966 return isIdentityMaskImpl(Mask, NumSrcElts);
1967}
1968
1970 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1971 return false;
1972 if (!isSingleSourceMask(Mask, NumSrcElts))
1973 return false;
1974
1975 // The number of elements in the mask must be at least 2.
1976 if (NumSrcElts < 2)
1977 return false;
1978
1979 for (int I = 0, E = Mask.size(); I < E; ++I) {
1980 if (Mask[I] == -1)
1981 continue;
1982 if (Mask[I] != (NumSrcElts - 1 - I) &&
1983 Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
1984 return false;
1985 }
1986 return true;
1987}
1988
1990 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1991 return false;
1992 if (!isSingleSourceMask(Mask, NumSrcElts))
1993 return false;
1994 for (int I = 0, E = Mask.size(); I < E; ++I) {
1995 if (Mask[I] == -1)
1996 continue;
1997 if (Mask[I] != 0 && Mask[I] != NumSrcElts)
1998 return false;
1999 }
2000 return true;
2001}
2002
2004 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2005 return false;
2006 // Select is differentiated from identity. It requires using both sources.
2007 if (isSingleSourceMask(Mask, NumSrcElts))
2008 return false;
2009 for (int I = 0, E = Mask.size(); I < E; ++I) {
2010 if (Mask[I] == -1)
2011 continue;
2012 if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
2013 return false;
2014 }
2015 return true;
2016}
2017
2019 // Example masks that will return true:
2020 // v1 = <a, b, c, d>
2021 // v2 = <e, f, g, h>
2022 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
2023 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
2024
2025 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2026 return false;
2027 // 1. The number of elements in the mask must be a power-of-2 and at least 2.
2028 int Sz = Mask.size();
2029 if (Sz < 2 || !isPowerOf2_32(Sz))
2030 return false;
2031
2032 // 2. The first element of the mask must be either a 0 or a 1.
2033 if (Mask[0] != 0 && Mask[0] != 1)
2034 return false;
2035
2036 // 3. The difference between the first 2 elements must be equal to the
2037 // number of elements in the mask.
2038 if ((Mask[1] - Mask[0]) != NumSrcElts)
2039 return false;
2040
2041 // 4. The difference between consecutive even-numbered and odd-numbered
2042 // elements must be equal to 2.
2043 for (int I = 2; I < Sz; ++I) {
2044 int MaskEltVal = Mask[I];
2045 if (MaskEltVal == -1)
2046 return false;
2047 int MaskEltPrevVal = Mask[I - 2];
2048 if (MaskEltVal - MaskEltPrevVal != 2)
2049 return false;
2050 }
2051 return true;
2052}
2053
2055 int &Index) {
2056 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2057 return false;
2058 // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2059 int StartIndex = -1;
2060 for (int I = 0, E = Mask.size(); I != E; ++I) {
2061 int MaskEltVal = Mask[I];
2062 if (MaskEltVal == -1)
2063 continue;
2064
2065 if (StartIndex == -1) {
2066 // Don't support a StartIndex that begins in the second input, or if the
2067 // first non-undef index would access below the StartIndex.
2068 if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
2069 return false;
2070
2071 StartIndex = MaskEltVal - I;
2072 continue;
2073 }
2074
2075 // Splice is sequential starting from StartIndex.
2076 if (MaskEltVal != (StartIndex + I))
2077 return false;
2078 }
2079
2080 if (StartIndex == -1)
2081 return false;
2082
2083 // NOTE: This accepts StartIndex == 0 (COPY).
2084 Index = StartIndex;
2085 return true;
2086}
2087
2089 int NumSrcElts, int &Index) {
2090 // Must extract from a single source.
2091 if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
2092 return false;
2093
2094 // Must be smaller (else this is an Identity shuffle).
2095 if (NumSrcElts <= (int)Mask.size())
2096 return false;
2097
2098 // Find start of extraction, accounting that we may start with an UNDEF.
2099 int SubIndex = -1;
2100 for (int i = 0, e = Mask.size(); i != e; ++i) {
2101 int M = Mask[i];
2102 if (M < 0)
2103 continue;
2104 int Offset = (M % NumSrcElts) - i;
2105 if (0 <= SubIndex && SubIndex != Offset)
2106 return false;
2107 SubIndex = Offset;
2108 }
2109
2110 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2111 Index = SubIndex;
2112 return true;
2113 }
2114 return false;
2115}
2116
// Match a mask that inserts one source's contiguous elements into an identity
// copy of the other source. On success, NumSubElts/Index describe the inserted
// subvector's length and starting lane.
bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,
                                              int NumSrcElts, int &NumSubElts,
                                              int &Index) {
  int NumMaskElts = Mask.size();

  // Don't try to match if we're shuffling to a smaller size.
  if (NumMaskElts < NumSrcElts)
    return false;

  // TODO: We don't recognize self-insertion/widening.
  if (isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Determine which mask elements are attributed to which source.
  APInt UndefElts = APInt::getZero(NumMaskElts);
  APInt Src0Elts = APInt::getZero(NumMaskElts);
  APInt Src1Elts = APInt::getZero(NumMaskElts);
  bool Src0Identity = true;
  bool Src1Identity = true;

  for (int i = 0; i != NumMaskElts; ++i) {
    int M = Mask[i];
    if (M < 0) {
      UndefElts.setBit(i);
      continue;
    }
    if (M < NumSrcElts) {
      Src0Elts.setBit(i);
      Src0Identity &= (M == i);
      continue;
    }
    Src1Elts.setBit(i);
    Src1Identity &= (M == (i + NumSrcElts));
  }
  assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
         "unknown shuffle elements");
  assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
         "2-source shuffle not found");

  // Determine lo/hi span ranges.
  // TODO: How should we handle undefs at the start of subvector insertions?
  int Src0Lo = Src0Elts.countr_zero();
  int Src1Lo = Src1Elts.countr_zero();
  int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
  int Src1Hi = NumMaskElts - Src1Elts.countl_zero();

  // If src0 is in place, see if the src1 elements is inplace within its own
  // span.
  if (Src0Identity) {
    int NumSub1Elts = Src1Hi - Src1Lo;
    ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
    if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
      NumSubElts = NumSub1Elts;
      Index = Src1Lo;
      return true;
    }
  }

  // If src1 is in place, see if the src0 elements is inplace within its own
  // span.
  if (Src1Identity) {
    int NumSub0Elts = Src0Hi - Src0Lo;
    ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
    if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
      NumSubElts = NumSub0Elts;
      Index = Src0Lo;
      return true;
    }
  }

  return false;
}
2189
// Returns true if this shuffle copies its first operand's elements in order
// and pads the wider result exclusively with undef (-1) lanes.
bool ShuffleVectorInst::isIdentityWithPadding() const {
  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts <= NumOpElts)
    return false;

  // The first part of the mask must choose elements from exactly 1 source op.
  ArrayRef<int> Mask = getShuffleMask();
  if (!isIdentityMaskImpl(Mask, NumOpElts))
    return false;

  // All extending must be with undef elements.
  for (int i = NumOpElts; i < NumMaskElts; ++i)
    if (Mask[i] != -1)
      return false;

  return true;
}
2213
// Returns true if this shuffle extracts a leading, in-order run of elements
// from one source into a narrower result (identity mask on a smaller type).
bool ShuffleVectorInst::isIdentityWithExtract() const {
  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts >= NumOpElts)
    return false;

  return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
}
2227
// Returns true if this shuffle concatenates its two source operands into a
// double-width result (mask is the identity over both inputs back-to-back).
bool ShuffleVectorInst::isConcat() const {
  // Vector concatenation is differentiated from identity with padding.
  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
    return false;

  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts != NumOpElts * 2)
    return false;

  // Use the mask length rather than the operands' vector lengths here. We
  // already know that the shuffle returns a vector twice as long as the inputs,
  // and neither of the inputs are undef vectors. If the mask picks consecutive
  // elements from both inputs, then this is a concatenation of the inputs.
  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
}
2249
2251 int ReplicationFactor, int VF) {
2252 assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2253 "Unexpected mask size.");
2254
2255 for (int CurrElt : seq(VF)) {
2256 ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2257 assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2258 "Run out of mask?");
2259 Mask = Mask.drop_front(ReplicationFactor);
2260 if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2261 return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2262 }))
2263 return false;
2264 }
2265 assert(Mask.empty() && "Did not consume the whole mask?");
2266
2267 return true;
2268}
2269
// Determine whether Mask is a replication mask for *some* (factor, VF) pair,
// and if so report the chosen parameters through the out-arguments.
bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,
                                          int &ReplicationFactor, int &VF) {
  // undef-less case is trivial.
  if (!llvm::is_contained(Mask, PoisonMaskElem)) {
    // Without poison, the factor is forced by the length of the leading run
    // of zeros.
    ReplicationFactor =
        Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
    if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
      return false;
    VF = Mask.size() / ReplicationFactor;
    return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
  }

  // However, if the mask contains undef's, we have to enumerate possible tuples
  // and pick one. There are bounds on replication factor: [1, mask size]
  // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
  // Additionally, mask size is a replication factor multiplied by vector size,
  // which further significantly reduces the search space.

  // Before doing that, let's perform basic correctness checking first.
  int Largest = -1;
  for (int MaskElt : Mask) {
    if (MaskElt == PoisonMaskElem)
      continue;
    // Elements must be in non-decreasing order.
    if (MaskElt < Largest)
      return false;
    Largest = std::max(Largest, MaskElt);
  }

  // Prefer larger replication factor if all else equal.
  for (int PossibleReplicationFactor :
       reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
    if (Mask.size() % PossibleReplicationFactor != 0)
      continue;
    int PossibleVF = Mask.size() / PossibleReplicationFactor;
    if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
                                     PossibleVF))
      continue;
    ReplicationFactor = PossibleReplicationFactor;
    VF = PossibleVF;
    return true;
  }

  return false;
}
2315
// Member form: derives VF from the first operand's element count, then defers
// to the parameterized check above.
bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
                                          int &VF) const {
  // Not possible to express a shuffle mask for a scalable vector for this
  // case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  if (ShuffleMask.size() % VF != 0)
    return false;
  ReplicationFactor = ShuffleMask.size() / VF;

  return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
}
2330
// Check that each VF-sized chunk of Mask is either entirely poison or covers
// every source element index in [0, VF) at least once.
bool ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF) {
  if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
      Mask.size() % VF != 0)
    return false;
  for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
    ArrayRef<int> SubMask = Mask.slice(K, VF);
    // An all-poison chunk constrains nothing.
    if (all_of(SubMask, equal_to(PoisonMaskElem)))
      continue;
    SmallBitVector Used(VF, false);
    for (int Idx : SubMask) {
      if (Idx != PoisonMaskElem && Idx < VF)
        Used.set(Idx);
    }
    // Every index of the source must be referenced within this chunk.
    if (!Used.all())
      return false;
  }
  return true;
}
2349
/// Return true if this shuffle's mask selects from a single source and each
/// VF-sized chunk of the mask covers all source elements (see the static
/// overload for the chunk condition).
bool ShuffleVectorInst::isOneUseSingleSourceMask(int VF) const {
  // Not possible to express a shuffle mask for a scalable vector for this
  // case.
  if (isa<ScalableVectorType>(getType()))
    return false;
  if (!isSingleSourceMask(ShuffleMask, VF))
    return false;

  return isOneUseSingleSourceMask(ShuffleMask, VF);
}
2361
// Returns true if this shuffle interleaves its two operands with the given
// Factor (see isInterleaveMask for the exact mask shape).
bool ShuffleVectorInst::isInterleave(unsigned Factor) {
  FixedVectorType *OpTy = dyn_cast<FixedVectorType>(getOperand(0)->getType());
  // shuffle_vector can only interleave fixed length vectors - for scalable
  // vectors, see the @llvm.vector.interleave2 intrinsic
  if (!OpTy)
    return false;
  unsigned OpNumElts = OpTy->getNumElements();

  // Both operands together provide OpNumElts * 2 input elements.
  return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
}
2372
// Match an interleaving of Factor lanes: mask element (J * Factor + I) should
// equal StartIndexes[I] + J. Undef (negative) mask elements are tolerated as
// long as all defined elements of a lane remain consecutive.
bool ShuffleVectorInst::isInterleaveMask(
    ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
    SmallVectorImpl<unsigned> &StartIndexes) {
  unsigned NumElts = Mask.size();
  if (NumElts % Factor)
    return false;

  unsigned LaneLen = NumElts / Factor;
  if (!isPowerOf2_32(LaneLen))
    return false;

  StartIndexes.resize(Factor);

  // Check whether each element matches the general interleaved rule.
  // Ignore undef elements, as long as the defined elements match the rule.
  // Outer loop processes all factors (x, y, z in the above example)
  unsigned I = 0, J;
  for (; I < Factor; I++) {
    unsigned SavedLaneValue;
    unsigned SavedNoUndefs = 0;

    // Inner loop processes consecutive accesses (x, x+1... in the example)
    for (J = 0; J < LaneLen - 1; J++) {
      // Lane computes x's position in the Mask
      unsigned Lane = J * Factor + I;
      unsigned NextLane = Lane + Factor;
      int LaneValue = Mask[Lane];
      int NextLaneValue = Mask[NextLane];

      // If both are defined, values must be sequential
      if (LaneValue >= 0 && NextLaneValue >= 0 &&
          LaneValue + 1 != NextLaneValue)
        break;

      // If the next value is undef, save the current one as reference
      if (LaneValue >= 0 && NextLaneValue < 0) {
        SavedLaneValue = LaneValue;
        SavedNoUndefs = 1;
      }

      // Undefs are allowed, but defined elements must still be consecutive:
      // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
      // Verify this by storing the last non-undef followed by an undef
      // Check that following non-undef masks are incremented with the
      // corresponding distance.
      if (SavedNoUndefs > 0 && LaneValue < 0) {
        SavedNoUndefs++;
        if (NextLaneValue >= 0 &&
            SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
          break;
      }
    }

    // An early break above means some pair in this lane violated the rule.
    if (J < LaneLen - 1)
      return false;

    int StartMask = 0;
    if (Mask[I] >= 0) {
      // Check that the start of the I range (J=0) is greater than 0
      StartMask = Mask[I];
    } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
      // StartMask defined by the last value in lane
      StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
    } else if (SavedNoUndefs > 0) {
      // StartMask defined by some non-zero value in the j loop
      StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
    }
    // else StartMask remains set to 0, i.e. all elements are undefs

    if (StartMask < 0)
      return false;
    // We must stay within the vectors; This case can happen with undefs.
    if (StartMask + LaneLen > NumInputElts)
      return false;

    StartIndexes[I] = StartMask;
  }

  return true;
}
2453
/// Check if the mask is a DE-interleave mask of the given factor
/// \p Factor like:
///     <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
bool ShuffleVectorInst::isDeInterleaveMaskOfFactor(ArrayRef<int> Mask,
                                                   unsigned Factor,
                                                   unsigned &Index) {
  // Check all potential start indices from 0 to (Factor - 1).
  for (unsigned Idx = 0; Idx < Factor; Idx++) {
    unsigned I = 0;

    // Check that elements are in ascending order by Factor. Ignore undef
    // elements.
    for (; I < Mask.size(); I++)
      if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
        break;

    // Reached the end without a mismatch: Idx is a valid start index.
    if (I == Mask.size()) {
      Index = Idx;
      return true;
    }
  }

  return false;
}
2478
2479/// Try to lower a vector shuffle as a bit rotation.
2480///
2481/// Look for a repeated rotation pattern in each sub group.
2482/// Returns an element-wise left bit rotation amount or -1 if failed.
2483static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
2484 int NumElts = Mask.size();
2485 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
2486
2487 int RotateAmt = -1;
2488 for (int i = 0; i != NumElts; i += NumSubElts) {
2489 for (int j = 0; j != NumSubElts; ++j) {
2490 int M = Mask[i + j];
2491 if (M < 0)
2492 continue;
2493 if (M < i || M >= i + NumSubElts)
2494 return -1;
2495 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
2496 if (0 <= RotateAmt && Offset != RotateAmt)
2497 return -1;
2498 RotateAmt = Offset;
2499 }
2500 }
2501 return RotateAmt;
2502}
2503
// Search power-of-two subgroup sizes in [MinSubElts, MaxSubElts] for one whose
// element rotation matches Mask; report the bit-level rotation amount.
bool ShuffleVectorInst::isBitRotateMask(
    ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
    unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
  for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
    int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
    if (EltRotateAmt < 0)
      continue;
    // Convert the element rotation into a bit rotation.
    RotateAmt = EltRotateAmt * EltSizeInBits;
    return true;
  }

  return false;
}
2517
2518//===----------------------------------------------------------------------===//
2519// InsertValueInst Class
2520//===----------------------------------------------------------------------===//
2521
// Shared constructor body: validates the aggregate/value/index combination,
// wires up the two operands, and records the index list.
void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}
2540
// Copy constructor: duplicates operands, indices, and optional subclass flags.
InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
    : Instruction(IVI.getType(), InsertValue, AllocMarker),
      Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
  SubclassOptionalData = IVI.SubclassOptionalData;
}
2548
2549//===----------------------------------------------------------------------===//
2550// ExtractValueInst Class
2551//===----------------------------------------------------------------------===//
2552
// Shared constructor body: records the index list and names the instruction.
// The single aggregate operand is set by the UnaryInstruction base.
void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index.
  // But there's no present need to support it.
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}
2563
// Copy constructor: duplicates the operand, indices, and optional flags.
ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
    : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0),
                       (BasicBlock *)nullptr),
      Indices(EVI.Indices) {
  SubclassOptionalData = EVI.SubclassOptionalData;
}
2570
// getIndexedType - Returns the type of the element that would be extracted
// with an extractvalue instruction with the specified parameters.
//
// A null type is returned if the indices are invalid for the specified
// pointer type.
//
Type *ExtractValueInst::getIndexedType(Type *Agg,
                                       ArrayRef<unsigned> Idxs) {
  for (unsigned Index : Idxs) {
    // We can't use CompositeType::indexValid(Index) here.
    // indexValid() always returns true for arrays because getelementptr allows
    // out-of-bounds indices. Since we don't allow those for extractvalue and
    // insertvalue we need to check array indexing manually.
    // Since the only other types we can index into are struct types it's just
    // as easy to check those manually as well.
    if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
      if (Index >= AT->getNumElements())
        return nullptr;
      Agg = AT->getElementType();
    } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
      if (Index >= ST->getNumElements())
        return nullptr;
      Agg = ST->getElementType(Index);
    } else {
      // Not a valid type to index into.
      return nullptr;
    }
  }
  return Agg;
}
2601
2602//===----------------------------------------------------------------------===//
2603// UnaryOperator Class
2604//===----------------------------------------------------------------------===//
2605
// Construct a unary operator of the given opcode, operand, and result type.
UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
                             const Twine &Name, InsertPosition InsertBefore)
    : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}
2613
// Factory: the result type of a unary operator matches its operand's type.
UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, const Twine &Name,
                                     InsertPosition InsertBefore) {
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}
2618
// Debug-build sanity checks for a freshly constructed unary operator.
void UnaryOperator::AssertOK() {
  Value *LHS = getOperand(0);
  (void)LHS; // Silence warnings.
#ifndef NDEBUG
  switch (getOpcode()) {
  case FNeg:
    assert(getType() == LHS->getType() &&
           "Unary operation should return same type as operand!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
2635
2636//===----------------------------------------------------------------------===//
2637// BinaryOperator Class
2638//===----------------------------------------------------------------------===//
2639
// Construct a binary operator of the given opcode, operands, and result type.
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
                               const Twine &Name, InsertPosition InsertBefore)
    : Instruction(Ty, iType, AllocMarker, InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}
2648
// Debug-build sanity checks: operand/result types must agree and be of the
// kind (integer vs. floating point) that the opcode requires.
void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  switch (getOpcode()) {
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
2716
// Factory: result type matches the (identical) operand types.
BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       InsertPosition InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}
2724
// Integer negation is materialized as (0 - Op).
BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          InsertPosition InsertBefore) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
                            InsertBefore);
}
2731
// Negation carrying the no-signed-wrap flag: (0 -nsw Op).
BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             InsertPosition InsertBefore) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
}
2737
// Bitwise NOT is materialized as (Op ^ all-ones).
BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
                                          InsertPosition InsertBefore) {
  Constant *C = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, C,
                            Op->getType(), Name, InsertBefore);
}
2744
// Exchange the two operands to this instruction. This instruction is safe to
// use on any binary instruction and does not modify the semantics of the
// instruction.
// Returns true on FAILURE (non-commutative opcode), false on success.
bool BinaryOperator::swapOperands() {
  if (!isCommutative())
    return true; // Can't commute operands
  Op<0>().swap(Op<1>());
  return false;
}
2754
2755//===----------------------------------------------------------------------===//
2756// FPMathOperator Class
2757//===----------------------------------------------------------------------===//
2758
// Return the requested accuracy from !fpmath metadata, or 0.0 when the
// instruction carries no such metadata (meaning maximal precision).
float FPMathOperator::getFPAccuracy() const {
  const MDNode *MD =
      cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
  if (!MD)
    return 0.0;
  ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
  return Accuracy->getValueAPF().convertToFloat();
}
2767
2768//===----------------------------------------------------------------------===//
2769// CastInst Class
2770//===----------------------------------------------------------------------===//
2771
// Just determine if this cast only deals with integral->integral conversion.
bool CastInst::isIntegerCast() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::Trunc:
    return true;
  case Instruction::BitCast:
    // A bitcast is an integer cast only when both ends are integers.
    return getOperand(0)->getType()->isIntegerTy() &&
           getType()->isIntegerTy();
  }
}
2785
/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast. For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32     ; on 32-bit plaforms only
/// Determine if the described cast is a no-op.
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
                          Type *SrcTy,
                          Type *DestTy,
                          const DataLayout &DL) {
  assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
  switch (Opcode) {
  default: llvm_unreachable("Invalid CastOp");
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    // TODO: Target informations may give a more accurate answer here.
    return false;
  case Instruction::BitCast:
    return true;  // BitCast never modifies bits.
  case Instruction::PtrToAddr:
  case Instruction::PtrToInt:
    // No-op only when the integer width equals the pointer width.
    return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
           DestTy->getScalarSizeInBits();
  case Instruction::IntToPtr:
    return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
           SrcTy->getScalarSizeInBits();
  }
}
2824
// Member form: forwards this cast's opcode and types to the static overload.
bool CastInst::isNoopCast(const DataLayout &DL) const {
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}
2828
/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// *  %F = firstOpcode SrcTy %x to MidTy
/// *  %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// *  %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
unsigned CastInst::isEliminableCastPair(Instruction::CastOps firstOp,
                                        Instruction::CastOps secondOp,
                                        Type *SrcTy, Type *MidTy, Type *DstTy,
                                        const DataLayout *DL) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below.  The rows correspond to firstOp, the columns
  // correspond to secondOp.  In looking at the table below, keep in mind
  // the following cast properties:
  //
  //          Size Compare       Source               Destination
  // Operator  Src ? Size   Type       Sign         Type       Sign
  // -------- ------------ -------------------   ---------------------
  // TRUNC         >       Integer      Any        Integral     Any
  // ZEXT          <       Integral   Unsigned     Integer      Any
  // SEXT          <       Integral    Signed      Integer      Any
  // FPTOUI       n/a      FloatPt      n/a        Integral   Unsigned
  // FPTOSI       n/a      FloatPt      n/a        Integral    Signed
  // UITOFP       n/a      Integral   Unsigned     FloatPt      n/a
  // SITOFP       n/a      Integral    Signed      FloatPt      n/a
  // FPTRUNC       >       FloatPt      n/a        FloatPt      n/a
  // FPEXT         <       FloatPt      n/a        FloatPt      n/a
  // PTRTOINT     n/a      Pointer      n/a        Integral   Unsigned
  // PTRTOADDR    n/a      Pointer      n/a        Integral   Unsigned
  // INTTOPTR     n/a      Integral   Unsigned     Pointer      n/a
  // BITCAST       =       FirstClass   n/a        FirstClass   n/a
  // ADDRSPCST    n/a      Pointer      n/a        Pointer      n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc.  We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  // clang-format off
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    // T        F  F  U  S  F  F  P  P  I  B  A  -+
    // R  Z  S  P  P  I  I  T  P  2  2  N  T  S   |
    // U  E  E  2  2  2  2  R  E  I  A  T  C  C   +- secondOp
    // N  X  X  U  S  F  F  N  X  N  D  2  V  V   |
    // C  T  T  I  I  P  P  C  T  T  R  P  T  T  -+
    {  1, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // Trunc         -+
    {  8, 1, 9,99,99, 2,17,99,99,99,99, 2, 3, 0}, // ZExt           |
    {  8, 0, 1,99,99, 0, 2,99,99,99,99, 0, 3, 0}, // SExt           |
    {  0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToUI         |
    {  0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToSI         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // UIToFP         +- firstOp
    { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // SIToFP         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // FPTrunc        |
    { 99,99,99, 2, 2,99,99, 8, 2,99,99,99, 4, 0}, // FPExt          |
    {  1, 0, 0,99,99, 0, 0,99,99,99,99, 7, 3, 0}, // PtrToInt       |
    {  0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // PtrToAddr      |
    { 99,99,99,99,99,99,99,99,99,11,11,99,15, 0}, // IntToPtr       |
    {  5, 5, 5, 0, 0, 5, 5, 0, 0,16,16, 5, 1,14}, // BitCast        |
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
  };
  // clang-format on

  // TODO: This logic could be encoded into the table above and handled in the
  // switch below.
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)
      return 0;

  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
  switch (ElimCase) {
    case 0:
      // Categorically disallowed.
      return 0;
    case 1:
      // Allowed, use first cast's opcode.
      return firstOp;
    case 2:
      // Allowed, use second cast's opcode.
      return secondOp;
    case 3:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // non-vector type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
        return firstOp;
      return 0;
    case 4:
      // No-op cast in second op implies firstOp as long as the DestTy
      // matches MidTy.
      if (DstTy == MidTy)
        return firstOp;
      return 0;
    case 5:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
        return secondOp;
      return 0;
    case 7: {
      // Disable inttoptr/ptrtoint optimization if enabled.
      if (DisableI2pP2iOpt)
        return 0;

      // Cannot simplify if address spaces are different!
      if (SrcTy != DstTy)
        return 0;

      // Cannot simplify if the intermediate integer size is smaller than the
      // pointer size.
      unsigned MidSize = MidTy->getScalarSizeInBits();
      if (!DL || MidSize < DL->getPointerTypeSizeInBits(SrcTy))
        return 0;

      return Instruction::BitCast;
    }
    case 8: {
      // ext, trunc -> bitcast,    if the SrcTy and DstTy are the same
      // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcTy == DstTy)
        return Instruction::BitCast;
      if (SrcSize < DstSize)
        return firstOp;
      if (SrcSize > DstSize)
        return secondOp;
      return 0;
    }
    case 9:
      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;
    case 11: {
      // inttoptr, ptrtoint/ptrtoaddr -> integer cast
      if (!DL)
        return 0;
      unsigned MidSize = secondOp == Instruction::PtrToAddr
                             ? DL->getAddressSizeInBits(MidTy)
                             : DL->getPointerTypeSizeInBits(MidTy);
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      // If the middle size is smaller than both source and destination,
      // an additional masking operation would be required.
      if (MidSize < SrcSize && MidSize < DstSize)
        return 0;
      if (DstSize < SrcSize)
        return Instruction::Trunc;
      if (DstSize > SrcSize)
        return Instruction::ZExt;
      return Instruction::BitCast;
    }
    case 12:
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correcteness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 14:
      // bitcast, addrspacecast -> addrspacecast
      return Instruction::AddrSpaceCast;
    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correcteness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correcteness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}
3053
// Dispatch on the cast opcode and construct the matching CastInst subclass.
CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
                           const Twine &Name, InsertPosition InsertBefore) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertBefore);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertBefore);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertBefore);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertBefore);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertBefore);
  case PtrToAddr:     return new PtrToAddrInst     (S, Ty, Name, InsertBefore);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertBefore);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertBefore);
  case BitCast:
    return new BitCastInst(S, Ty, Name, InsertBefore);
  case AddrSpaceCast:
    return new AddrSpaceCastInst(S, Ty, Name, InsertBefore);
  default:
    llvm_unreachable("Invalid opcode provided");
  }
}
3079
// CreateZExtOrBitCast: equal scalar widths -> no-op bitcast, otherwise zext.
3081 InsertPosition InsertBefore) {
3082 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3083 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3084 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3085}
3086
// CreateSExtOrBitCast: equal scalar widths -> no-op bitcast, otherwise sext.
3088 InsertPosition InsertBefore) {
3089 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3090 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3091 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3092}
3093
// CreateTruncOrBitCast: equal scalar widths -> no-op bitcast, otherwise trunc.
3095 InsertPosition InsertBefore) {
3096 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3097 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3098 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3099}
3100
3101/// Create a BitCast or a PtrToInt cast instruction
// CreatePointerCast: source must be pointer(-vector); integral destination
// gets ptrtoint, pointer destination defers to the addrspace-aware helper.
3103 InsertPosition InsertBefore) {
3104 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3105 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3106 "Invalid cast");
3107 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3108 assert((!Ty->isVectorTy() ||
3109 cast<VectorType>(Ty)->getElementCount() ==
3110 cast<VectorType>(S->getType())->getElementCount()) &&
3111 "Invalid cast");
3112
3113 if (Ty->isIntOrIntVectorTy())
3114 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3115
3116 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3117}
3118
// CreatePointerBitCastOrAddrSpaceCast: ptr->ptr cast; differing address
// spaces require addrspacecast, otherwise a plain bitcast suffices.
3120 Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore) {
3121 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3122 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3123
3124 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3125 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3126
3127 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3128}
3129
// CreateBitOrPtrCast: picks ptrtoint / inttoptr for scalar ptr<->int pairs,
// falling back to bitcast for everything else.
3131 const Twine &Name,
3132 InsertPosition InsertBefore) {
3133 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3134 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3135 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3136 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3137
3138 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3139}
3140
// CreateIntegerCast: chooses bitcast (same width), trunc (narrowing), or
// sext/zext (widening, by the isSigned flag) between int(-vector) types.
3142 const Twine &Name,
3143 InsertPosition InsertBefore) {
3144 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3145 "Invalid integer cast");
3146 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3147 unsigned DstBits = Ty->getScalarSizeInBits();
3148 Instruction::CastOps opcode =
3149 (SrcBits == DstBits ? Instruction::BitCast :
3150 (SrcBits > DstBits ? Instruction::Trunc :
3151 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3152 return Create(opcode, C, Ty, Name, InsertBefore);
3153}
3154
// CreateFPCast: chooses bitcast (same width), fptrunc (narrowing), or fpext
// (widening) between FP(-vector) types.
3156 InsertPosition InsertBefore) {
3157 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3158 "Invalid cast");
3159 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3160 unsigned DstBits = Ty->getScalarSizeInBits();
// Equal widths are only legal when the types are literally identical.
3161 assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
3162 Instruction::CastOps opcode =
3163 (SrcBits == DstBits ? Instruction::BitCast :
3164 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3165 return Create(opcode, C, Ty, Name, InsertBefore);
3166}
3167
3168bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3169 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3170 return false;
3171
3172 if (SrcTy == DestTy)
3173 return true;
3174
3175 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3176 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3177 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3178 // An element by element cast. Valid if casting the elements is valid.
3179 SrcTy = SrcVecTy->getElementType();
3180 DestTy = DestVecTy->getElementType();
3181 }
3182 }
3183 }
3184
3185 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3186 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3187 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3188 }
3189 }
3190
3191 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3192 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3193
3194 // Could still have vectors of pointers if the number of elements doesn't
3195 // match
3196 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3197 return false;
3198
3199 if (SrcBits != DestBits)
3200 return false;
3201
3202 return true;
3203}
3204
// DataLayout-aware castIsValid overload: ptrtoint/inttoptr are accepted only
// for integral pointers whose width matches the integer exactly; everything
// else defers to isBitCastable.
3206 const DataLayout &DL) {
3207 // ptrtoint and inttoptr are not allowed on non-integral pointers
3208 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3209 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3210 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3211 !DL.isNonIntegralPointerType(PtrTy));
3212 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3213 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3214 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3215 !DL.isNonIntegralPointerType(PtrTy));
3216
3217 return isBitCastable(SrcTy, DestTy);
3218}
3219
3220// Provide a way to get a "cast" where the cast opcode is inferred from the
3221// types and size of the operand. This, basically, is a parallel of the
3222// logic in the castIsValid function below. This axiom should hold:
3223// castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3224// should not assert in castIsValid. In other words, this produces a "correct"
3225// casting opcode for the arguments passed to it.
// NOTE(review): the signature line is absent from this listing. The decision
// tree below branches on the destination type first, then the source type.
3228 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3229 Type *SrcTy = Src->getType();
3230
3231 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3232 "Only first class types are castable!");
3233
3234 if (SrcTy == DestTy)
3235 return BitCast;
3236
3237 // FIXME: Check address space sizes here
3238 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3239 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3240 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3241 // An element by element cast. Find the appropriate opcode based on the
3242 // element types.
3243 SrcTy = SrcVecTy->getElementType();
3244 DestTy = DestVecTy->getElementType();
3245 }
3246
3247 // Get the bit sizes, we'll need these
3248 // FIXME: This doesn't work for scalable vector types with different element
3249 // counts that don't call getElementType above.
3250 unsigned SrcBits =
3251 SrcTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3252 unsigned DestBits =
3253 DestTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3254
3255 // Run through the possibilities ...
// "Byte" types appear to be a downstream extension; only same-width casts
// from integers or pointers are accepted here.
3256 if (DestTy->isByteTy()) { // Casting to byte
3257 if (SrcTy->isIntegerTy()) { // Casting from integral
3258 assert(DestBits == SrcBits && "Illegal cast from integer to byte type");
3259 return BitCast;
3260 } else if (SrcTy->isPointerTy()) { // Casting from pointer
3261 assert(DestBits == SrcBits && "Illegal cast from pointer to byte type");
3262 return BitCast;
3263 }
3264 llvm_unreachable("Illegal cast to byte type");
3265 } else if (DestTy->isIntegerTy()) { // Casting to integral
3266 if (SrcTy->isIntegerTy()) { // Casting from integral
3267 if (DestBits < SrcBits)
3268 return Trunc; // int -> smaller int
3269 else if (DestBits > SrcBits) { // its an extension
3270 if (SrcIsSigned)
3271 return SExt; // signed -> SEXT
3272 else
3273 return ZExt; // unsigned -> ZEXT
3274 } else {
3275 return BitCast; // Same size, No-op cast
3276 }
3277 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3278 if (DestIsSigned)
3279 return FPToSI; // FP -> sint
3280 else
3281 return FPToUI; // FP -> uint
3282 } else if (SrcTy->isVectorTy()) {
3283 assert(DestBits == SrcBits &&
3284 "Casting vector to integer of different width");
3285 return BitCast; // Same size, no-op cast
3286 } else {
3287 assert(SrcTy->isPointerTy() &&
3288 "Casting from a value that is not first-class type");
3289 return PtrToInt; // ptr -> int
3290 }
3291 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3292 if (SrcTy->isIntegerTy()) { // Casting from integral
3293 if (SrcIsSigned)
3294 return SIToFP; // sint -> FP
3295 else
3296 return UIToFP; // uint -> FP
3297 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3298 if (DestBits < SrcBits) {
3299 return FPTrunc; // FP -> smaller FP
3300 } else if (DestBits > SrcBits) {
3301 return FPExt; // FP -> larger FP
3302 } else {
3303 return BitCast; // same size, no-op cast
3304 }
3305 } else if (SrcTy->isVectorTy()) {
3306 assert(DestBits == SrcBits &&
3307 "Casting vector to floating point of different width");
3308 return BitCast; // same size, no-op cast
3309 }
3310 llvm_unreachable("Casting pointer or non-first class to float");
3311 } else if (DestTy->isVectorTy()) {
3312 assert(DestBits == SrcBits &&
3313 "Illegal cast to vector (wrong type or size)");
3314 return BitCast;
3315 } else if (DestTy->isPointerTy()) {
3316 if (SrcTy->isPointerTy()) {
3317 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3318 return AddrSpaceCast;
3319 return BitCast; // ptr -> ptr
3320 } else if (SrcTy->isIntegerTy()) {
3321 return IntToPtr; // int -> ptr
3322 }
3323 llvm_unreachable("Casting pointer to other than pointer or int");
3324 }
3325 llvm_unreachable("Casting to type that is not first-class");
3326}
3327
3328//===----------------------------------------------------------------------===//
3329// CastInst SubClass Constructors
3330//===----------------------------------------------------------------------===//
3331
3332/// Check that the construction parameters for a CastInst are correct. This
3333/// could be broken out into the separate constructors but it is useful to have
3334/// it in one place and to eliminate the redundant code for getting the sizes
3335/// of the types involved.
// NOTE(review): this listing drops the signature line and the else-arms of
// the two ElementCount initializers below (presumably ElementCount::getFixed(0)
// for scalars — TODO confirm against the real source).
3336 bool
3338 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3339 SrcTy->isAggregateType() || DstTy->isAggregateType())
3340 return false;
3341
3342 // Get the size of the types in bits, and whether we are dealing
3343 // with vector types, we'll need this later.
3344 bool SrcIsVec = isa<VectorType>(SrcTy);
3345 bool DstIsVec = isa<VectorType>(DstTy);
3346 unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3347 unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3348
3349 // If these are vector types, get the lengths of the vectors (using zero for
3350 // scalar types means that checking that vector lengths match also checks that
3351 // scalars are not being converted to vectors or vectors to scalars).
3352 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3354 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3356
3357 // Switch on the opcode provided
3358 switch (op) {
3359 default: return false; // This is an input error
3360 case Instruction::Trunc:
3361 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3362 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3363 case Instruction::ZExt:
3364 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3365 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3366 case Instruction::SExt:
3367 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3368 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3369 case Instruction::FPTrunc:
3370 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3371 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3372 case Instruction::FPExt:
3373 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3374 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3375 case Instruction::UIToFP:
3376 case Instruction::SIToFP:
3377 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3378 SrcEC == DstEC;
3379 case Instruction::FPToUI:
3380 case Instruction::FPToSI:
3381 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3382 SrcEC == DstEC;
3383 case Instruction::PtrToAddr:
3384 case Instruction::PtrToInt:
3385 if (SrcEC != DstEC)
3386 return false;
3387 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3388 case Instruction::IntToPtr:
3389 if (SrcEC != DstEC)
3390 return false;
3391 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3392 case Instruction::BitCast: {
3393 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3394 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3395
3396 // BitCast implies a no-op cast of type only. No bits change.
3397 // However, you can't cast pointers to anything but pointers/bytes.
3398 if ((SrcPtrTy && DstTy->isByteOrByteVectorTy()) ||
3399 (SrcTy->isByteOrByteVectorTy() && DstPtrTy))
3400 return true;
// Exactly one side being a pointer is otherwise illegal.
3401 if (!SrcPtrTy != !DstPtrTy)
3402 return false;
3403
3404 // For non-pointer cases, the cast is okay if the source and destination bit
3405 // widths are identical.
3406 if (!SrcPtrTy)
3407 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3408
3409 // If both are pointers then the address spaces must match.
3410 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3411 return false;
3412
3413 // A vector of pointers must have the same number of elements.
3414 if (SrcIsVec && DstIsVec)
3415 return SrcEC == DstEC;
3416 if (SrcIsVec)
3417 return SrcEC == ElementCount::getFixed(1);
3418 if (DstIsVec)
3419 return DstEC == ElementCount::getFixed(1);
3420
3421 return true;
3422 }
3423 case Instruction::AddrSpaceCast: {
3424 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3425 if (!SrcPtrTy)
3426 return false;
3427
3428 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3429 if (!DstPtrTy)
3430 return false;
3431
// An addrspacecast within the same address space would be a no-op bitcast.
3432 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3433 return false;
3434
3435 return SrcEC == DstEC;
3436 }
3437 }
3438}
3439
// Constructors for the CastInst subclasses. Each one simply forwards to the
// CastInst base constructor with its fixed opcode and asserts the cast is
// valid. NOTE(review): most of the signature lines are absent from this
// listing.
3441 InsertPosition InsertBefore)
3442 : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3443 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3444}
3445
3446ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name,
3447 InsertPosition InsertBefore)
3448 : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3449 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3450}
3451
3452SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name,
3453 InsertPosition InsertBefore)
3454 : CastInst(Ty, SExt, S, Name, InsertBefore) {
3455 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3456}
3457
3459 InsertPosition InsertBefore)
3460 : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3461 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3462}
3463
3465 InsertPosition InsertBefore)
3466 : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3467 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3468}
3469
3471 InsertPosition InsertBefore)
3472 : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3473 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3474}
3475
3477 InsertPosition InsertBefore)
3478 : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3479 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3480}
3481
3483 InsertPosition InsertBefore)
3484 : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3485 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3486}
3487
3489 InsertPosition InsertBefore)
3490 : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3491 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3492}
3493
3495 InsertPosition InsertBefore)
3496 : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3497 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3498}
3499
3501 InsertPosition InsertBefore)
3502 : CastInst(Ty, PtrToAddr, S, Name, InsertBefore) {
3503 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToAddr");
3504}
3505
3507 InsertPosition InsertBefore)
3508 : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3509 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3510}
3511
3513 InsertPosition InsertBefore)
3514 : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3515 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3516}
3517
3519 InsertPosition InsertBefore)
3520 : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3521 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3522}
3523
3524//===----------------------------------------------------------------------===//
3525// CmpInst Classes
3526//===----------------------------------------------------------------------===//
3527
// CmpInst constructor: wires up the two operands, predicate and name, and
// optionally copies IR flags (e.g. fast-math flags) from another instruction.
3529 Value *RHS, const Twine &Name, InsertPosition InsertBefore,
3530 Instruction *FlagsSource)
3531 : Instruction(ty, op, AllocMarker, InsertBefore) {
3532 Op<0>() = LHS;
3533 Op<1>() = RHS;
3534 setPredicate(predicate);
3535 setName(Name);
3536 if (FlagsSource)
3537 copyIRFlags(FlagsSource);
3538}
3539
// CmpInst::Create: dispatches to ICmpInst or FCmpInst based on the opcode,
// using the insert-before constructor only when the position is valid.
3541 const Twine &Name, InsertPosition InsertBefore) {
3542 if (Op == Instruction::ICmp) {
3543 if (InsertBefore.isValid())
3544 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3545 S1, S2, Name);
3546 else
3547 return new ICmpInst(CmpInst::Predicate(predicate),
3548 S1, S2, Name);
3549 }
3550
3551 if (InsertBefore.isValid())
3552 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3553 S1, S2, Name);
3554 else
3555 return new FCmpInst(CmpInst::Predicate(predicate),
3556 S1, S2, Name);
3557}
3558
// CreateWithCopiedFlags: like Create, but additionally copies IR flags from
// the given instruction onto the new compare.
3560 Value *S2,
3561 const Instruction *FlagsSource,
3562 const Twine &Name,
3563 InsertPosition InsertBefore) {
3564 CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);
3565 Inst->copyIRFlags(FlagsSource);
3566 return Inst;
3567}
3568
// swapOperands: forwards to the subclass so the predicate is swapped too.
3570 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3571 IC->swapOperands();
3572 else
3573 cast<FCmpInst>(this)->swapOperands();
3574}
3575
// isCommutative: forwards to the concrete subclass.
3577 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3578 return IC->isCommutative();
3579 return cast<FCmpInst>(this)->isCommutative();
3580}
3581
// isEquality(P): defers to ICmpInst/FCmpInst. NOTE(review): the guard lines
// selecting between the two calls are missing from this listing.
3584 return ICmpInst::isEquality(P);
3586 return FCmpInst::isEquality(P);
3587 llvm_unreachable("Unsupported predicate kind");
3588}
3589
3590// Returns true if either operand of CmpInst is a provably non-zero
3591// floating-point constant.
3592static bool hasNonZeroFPOperands(const CmpInst *Cmp) {
3593 auto *LHS = dyn_cast<Constant>(Cmp->getOperand(0));
3594 auto *RHS = dyn_cast<Constant>(Cmp->getOperand(1));
3595 if (auto *Const = LHS ? LHS : RHS) {
3596 using namespace llvm::PatternMatch;
3597 return match(Const, m_NonZeroNotDenormalFP());
3598 }
3599 return false;
3600}
3601
3602// Floating-point equality is not an equivalence when comparing +0.0 with
3603// -0.0, when comparing NaN with another value, or when flushing
3604// denormals-to-zero.
// NOTE(review): the case labels of this switch are missing from this listing
// (presumably the integer-equality and FP-equality predicates — TODO confirm).
3605bool CmpInst::isEquivalence(bool Invert) const {
3606 switch (Invert ? getInversePredicate() : getPredicate()) {
3608 return true;
3610 if (!hasNoNaNs())
3611 return false;
3612 [[fallthrough]];
3614 return hasNonZeroFPOperands(this);
3615 default:
3616 return false;
3617 }
3618}
3619
// getInversePredicate: maps each predicate to its logical negation
// (e.g. EQ <-> NE, OGT <-> ULE). NOTE(review): signature line absent from
// this listing.
3621 switch (pred) {
3622 default: llvm_unreachable("Unknown cmp predicate!");
3623 case ICMP_EQ: return ICMP_NE;
3624 case ICMP_NE: return ICMP_EQ;
3625 case ICMP_UGT: return ICMP_ULE;
3626 case ICMP_ULT: return ICMP_UGE;
3627 case ICMP_UGE: return ICMP_ULT;
3628 case ICMP_ULE: return ICMP_UGT;
3629 case ICMP_SGT: return ICMP_SLE;
3630 case ICMP_SLT: return ICMP_SGE;
3631 case ICMP_SGE: return ICMP_SLT;
3632 case ICMP_SLE: return ICMP_SGT;
3633
3634 case FCMP_OEQ: return FCMP_UNE;
3635 case FCMP_ONE: return FCMP_UEQ;
3636 case FCMP_OGT: return FCMP_ULE;
3637 case FCMP_OLT: return FCMP_UGE;
3638 case FCMP_OGE: return FCMP_ULT;
3639 case FCMP_OLE: return FCMP_UGT;
3640 case FCMP_UEQ: return FCMP_ONE;
3641 case FCMP_UNE: return FCMP_OEQ;
3642 case FCMP_UGT: return FCMP_OLE;
3643 case FCMP_ULT: return FCMP_OGE;
3644 case FCMP_UGE: return FCMP_OLT;
3645 case FCMP_ULE: return FCMP_OGT;
3646 case FCMP_ORD: return FCMP_UNO;
3647 case FCMP_UNO: return FCMP_ORD;
3648 case FCMP_TRUE: return FCMP_FALSE;
3649 case FCMP_FALSE: return FCMP_TRUE;
3650 }
3651}
3652
// getPredicateName: textual mnemonic used by the IR printer for each
// predicate ("eq", "oge", ...).
3654 switch (Pred) {
3655 default: return "unknown";
3656 case FCmpInst::FCMP_FALSE: return "false";
3657 case FCmpInst::FCMP_OEQ: return "oeq";
3658 case FCmpInst::FCMP_OGT: return "ogt";
3659 case FCmpInst::FCMP_OGE: return "oge";
3660 case FCmpInst::FCMP_OLT: return "olt";
3661 case FCmpInst::FCMP_OLE: return "ole";
3662 case FCmpInst::FCMP_ONE: return "one";
3663 case FCmpInst::FCMP_ORD: return "ord";
3664 case FCmpInst::FCMP_UNO: return "uno";
3665 case FCmpInst::FCMP_UEQ: return "ueq";
3666 case FCmpInst::FCMP_UGT: return "ugt";
3667 case FCmpInst::FCMP_UGE: return "uge";
3668 case FCmpInst::FCMP_ULT: return "ult";
3669 case FCmpInst::FCMP_ULE: return "ule";
3670 case FCmpInst::FCMP_UNE: return "une";
3671 case FCmpInst::FCMP_TRUE: return "true";
3672 case ICmpInst::ICMP_EQ: return "eq";
3673 case ICmpInst::ICMP_NE: return "ne";
3674 case ICmpInst::ICMP_SGT: return "sgt";
3675 case ICmpInst::ICMP_SGE: return "sge";
3676 case ICmpInst::ICMP_SLT: return "slt";
3677 case ICmpInst::ICMP_SLE: return "sle";
3678 case ICmpInst::ICMP_UGT: return "ugt";
3679 case ICmpInst::ICMP_UGE: return "uge";
3680 case ICmpInst::ICMP_ULT: return "ult";
3681 case ICmpInst::ICMP_ULE: return "ule";
3682 }
3683}
3684
// Stream-insertion helper: prints the predicate mnemonic.
3686 OS << CmpInst::getPredicateName(Pred);
3687 return OS;
3688}
3689
// Signed-predicate converter: unsigned relational predicates become their
// signed counterparts; equality and already-signed predicates pass through.
3691 switch (pred) {
3692 default: llvm_unreachable("Unknown icmp predicate!");
3693 case ICMP_EQ: case ICMP_NE:
3694 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3695 return pred;
3696 case ICMP_UGT: return ICMP_SGT;
3697 case ICMP_ULT: return ICMP_SLT;
3698 case ICMP_UGE: return ICMP_SGE;
3699 case ICMP_ULE: return ICMP_SLE;
3700 }
3701}
3702
// Unsigned-predicate converter: the mirror image of the function above.
3704 switch (pred) {
3705 default: llvm_unreachable("Unknown icmp predicate!");
3706 case ICMP_EQ: case ICMP_NE:
3707 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3708 return pred;
3709 case ICMP_SGT: return ICMP_UGT;
3710 case ICMP_SLT: return ICMP_ULT;
3711 case ICMP_SGE: return ICMP_UGE;
3712 case ICMP_SLE: return ICMP_ULE;
3713 }
3714}
3715
// getSwappedPredicate: predicate to use if the two operands are exchanged
// (e.g. SGT -> SLT); symmetric predicates map to themselves.
3717 switch (pred) {
3718 default: llvm_unreachable("Unknown cmp predicate!");
3719 case ICMP_EQ: case ICMP_NE:
3720 return pred;
3721 case ICMP_SGT: return ICMP_SLT;
3722 case ICMP_SLT: return ICMP_SGT;
3723 case ICMP_SGE: return ICMP_SLE;
3724 case ICMP_SLE: return ICMP_SGE;
3725 case ICMP_UGT: return ICMP_ULT;
3726 case ICMP_ULT: return ICMP_UGT;
3727 case ICMP_UGE: return ICMP_ULE;
3728 case ICMP_ULE: return ICMP_UGE;
3729
3730 case FCMP_FALSE: case FCMP_TRUE:
3731 case FCMP_OEQ: case FCMP_ONE:
3732 case FCMP_UEQ: case FCMP_UNE:
3733 case FCMP_ORD: case FCMP_UNO:
3734 return pred;
3735 case FCMP_OGT: return FCMP_OLT;
3736 case FCMP_OLT: return FCMP_OGT;
3737 case FCMP_OGE: return FCMP_OLE;
3738 case FCMP_OLE: return FCMP_OGE;
3739 case FCMP_UGT: return FCMP_ULT;
3740 case FCMP_ULT: return FCMP_UGT;
3741 case FCMP_UGE: return FCMP_ULE;
3742 case FCMP_ULE: return FCMP_UGE;
3743 }
3744}
3745
// Returns true for the non-strict (>=, <=) relational predicates.
// NOTE(review): signature lines for this group are absent from the listing.
3747 switch (pred) {
3748 case ICMP_SGE:
3749 case ICMP_SLE:
3750 case ICMP_UGE:
3751 case ICMP_ULE:
3752 case FCMP_OGE:
3753 case FCMP_OLE:
3754 case FCMP_UGE:
3755 case FCMP_ULE:
3756 return true;
3757 default:
3758 return false;
3759 }
3760}
3761
// Returns true for the strict (>, <) relational predicates.
3763 switch (pred) {
3764 case ICMP_SGT:
3765 case ICMP_SLT:
3766 case ICMP_UGT:
3767 case ICMP_ULT:
3768 case FCMP_OGT:
3769 case FCMP_OLT:
3770 case FCMP_UGT:
3771 case FCMP_ULT:
3772 return true;
3773 default:
3774 return false;
3775 }
3776}
3777
// Maps a non-strict predicate to its strict form (>= -> >); anything else
// passes through unchanged.
3779 switch (pred) {
3780 case ICMP_SGE:
3781 return ICMP_SGT;
3782 case ICMP_SLE:
3783 return ICMP_SLT;
3784 case ICMP_UGE:
3785 return ICMP_UGT;
3786 case ICMP_ULE:
3787 return ICMP_ULT;
3788 case FCMP_OGE:
3789 return FCMP_OGT;
3790 case FCMP_OLE:
3791 return FCMP_OLT;
3792 case FCMP_UGE:
3793 return FCMP_UGT;
3794 case FCMP_ULE:
3795 return FCMP_ULT;
3796 default:
3797 return pred;
3798 }
3799}
3800
// Maps a strict predicate to its non-strict form (> -> >=); anything else
// passes through unchanged.
3802 switch (pred) {
3803 case ICMP_SGT:
3804 return ICMP_SGE;
3805 case ICMP_SLT:
3806 return ICMP_SLE;
3807 case ICMP_UGT:
3808 return ICMP_UGE;
3809 case ICMP_ULT:
3810 return ICMP_ULE;
3811 case FCMP_OGT:
3812 return FCMP_OGE;
3813 case FCMP_OLT:
3814 return FCMP_OLE;
3815 case FCMP_UGT:
3816 return FCMP_UGE;
3817 case FCMP_ULT:
3818 return FCMP_ULE;
3819 default:
3820 return pred;
3821 }
3822}
3823
// Toggles strictness of a relational predicate using the two helpers above;
// equality predicates are rejected by the assert.
3825 assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
3826
3827 if (isStrictPredicate(pred))
3828 return getNonStrictPredicate(pred);
3829 if (isNonStrictPredicate(pred))
3830 return getStrictPredicate(pred);
3831
3832 llvm_unreachable("Unknown predicate!");
3833}
3834
// Evaluates an integer predicate on two constant APInt values.
// NOTE(review): the case labels of this switch are missing from the listing;
// the returns follow the canonical eq/ne/ugt/uge/ult/ule/sgt/sge/slt/sle order.
3835bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
3836 ICmpInst::Predicate Pred) {
3837 assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
3838 switch (Pred) {
3840 return LHS.eq(RHS);
3842 return LHS.ne(RHS);
3844 return LHS.ugt(RHS);
3846 return LHS.uge(RHS);
3848 return LHS.ult(RHS);
3850 return LHS.ule(RHS);
3852 return LHS.sgt(RHS);
3854 return LHS.sge(RHS);
3856 return LHS.slt(RHS);
3858 return LHS.sle(RHS);
3859 default:
3860 llvm_unreachable("Unexpected non-integer predicate.");
3861 };
3862}
3863
// Evaluates an FP predicate on two constant APFloat values by classifying
// the single APFloat::compare result. Unordered ("u*") predicates accept
// cmpUnordered; ordered ("o*") predicates exclude it. NOTE(review): a few
// case/return lines are missing from this listing.
3864bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
3865 FCmpInst::Predicate Pred) {
3866 APFloat::cmpResult R = LHS.compare(RHS);
3867 switch (Pred) {
3868 default:
3869 llvm_unreachable("Invalid FCmp Predicate");
3871 return false;
3873 return true;
3874 case FCmpInst::FCMP_UNO:
3875 return R == APFloat::cmpUnordered;
3876 case FCmpInst::FCMP_ORD:
3877 return R != APFloat::cmpUnordered;
3878 case FCmpInst::FCMP_UEQ:
3879 return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
3880 case FCmpInst::FCMP_OEQ:
3881 return R == APFloat::cmpEqual;
3882 case FCmpInst::FCMP_UNE:
3883 return R != APFloat::cmpEqual;
3884 case FCmpInst::FCMP_ONE:
3886 case FCmpInst::FCMP_ULT:
3887 return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
3888 case FCmpInst::FCMP_OLT:
3889 return R == APFloat::cmpLessThan;
3890 case FCmpInst::FCMP_UGT:
3892 case FCmpInst::FCMP_OGT:
3893 return R == APFloat::cmpGreaterThan;
3894 case FCmpInst::FCMP_ULE:
3895 return R != APFloat::cmpGreaterThan;
3896 case FCmpInst::FCMP_OLE:
3897 return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
3898 case FCmpInst::FCMP_UGE:
3899 return R != APFloat::cmpLessThan;
3900 case FCmpInst::FCMP_OGE:
3901 return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
3902 }
3903}
3904
3905std::optional<bool> ICmpInst::compare(const KnownBits &LHS,
3906 const KnownBits &RHS,
3907 ICmpInst::Predicate Pred) {
3908 switch (Pred) {
3909 case ICmpInst::ICMP_EQ:
3910 return KnownBits::eq(LHS, RHS);
3911 case ICmpInst::ICMP_NE:
3912 return KnownBits::ne(LHS, RHS);
3913 case ICmpInst::ICMP_UGE:
3914 return KnownBits::uge(LHS, RHS);
3915 case ICmpInst::ICMP_UGT:
3916 return KnownBits::ugt(LHS, RHS);
3917 case ICmpInst::ICMP_ULE:
3918 return KnownBits::ule(LHS, RHS);
3919 case ICmpInst::ICMP_ULT:
3920 return KnownBits::ult(LHS, RHS);
3921 case ICmpInst::ICMP_SGE:
3922 return KnownBits::sge(LHS, RHS);
3923 case ICmpInst::ICMP_SGT:
3924 return KnownBits::sgt(LHS, RHS);
3925 case ICmpInst::ICMP_SLE:
3926 return KnownBits::sle(LHS, RHS);
3927 case ICmpInst::ICMP_SLT:
3928 return KnownBits::slt(LHS, RHS);
3929 default:
3930 llvm_unreachable("Unexpected non-integer predicate.");
3931 }
3932}
3933
// Flips the signedness of a relational predicate (signed <-> unsigned);
// equality predicates are returned unchanged. NOTE(review): signature lines
// in this group are absent from the listing.
3935 if (CmpInst::isEquality(pred))
3936 return pred;
3937 if (isSigned(pred))
3938 return getUnsignedPredicate(pred);
3939 if (isUnsigned(pred))
3940 return getSignedPredicate(pred);
3941
3942 llvm_unreachable("Unknown predicate!");
3943}
3944
// True for ordered FP predicates; the other ordered case labels are missing
// from this listing.
3946 switch (predicate) {
3947 default: return false;
3950 case FCmpInst::FCMP_ORD: return true;
3951 }
3952}
3953
// True for unordered FP predicates; the other unordered case labels are
// missing from this listing.
3955 switch (predicate) {
3956 default: return false;
3959 case FCmpInst::FCMP_UNO: return true;
3960 }
3961}
3962
// True when the predicate evaluates to true on equal operands.
3964 switch(predicate) {
3965 default: return false;
3966 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3967 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3968 }
3969}
3970
// True when the predicate evaluates to false on equal operands.
3972 switch(predicate) {
3973 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3974 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3975 default: return false;
3976 }
3977}
3978
// isImpliedTrueByMatchingCmp: given identical operands, does "A Pred1 B"
// imply "A Pred2 B"? NOTE(review): the signature line and the two
// assignment lines under the hasSameSign checks are absent from this listing
// (presumably they canonicalize Pred1/Pred2 signedness — TODO confirm).
3980 // If the predicates match, then we know the first condition implies the
3981 // second is true.
3982 if (CmpPredicate::getMatching(Pred1, Pred2))
3983 return true;
3984
3985 if (Pred1.hasSameSign() && CmpInst::isSigned(Pred2))
3987 else if (Pred2.hasSameSign() && CmpInst::isSigned(Pred1))
3989
3990 switch (Pred1) {
3991 default:
3992 break;
3993 case CmpInst::ICMP_EQ:
3994 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
3995 return Pred2 == CmpInst::ICMP_UGE || Pred2 == CmpInst::ICMP_ULE ||
3996 Pred2 == CmpInst::ICMP_SGE || Pred2 == CmpInst::ICMP_SLE;
3997 case CmpInst::ICMP_UGT: // A >u B implies A != B and A >=u B are true.
3998 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_UGE;
3999 case CmpInst::ICMP_ULT: // A <u B implies A != B and A <=u B are true.
4000 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_ULE;
4001 case CmpInst::ICMP_SGT: // A >s B implies A != B and A >=s B are true.
4002 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SGE;
4003 case CmpInst::ICMP_SLT: // A <s B implies A != B and A <=s B are true.
4004 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SLE;
4005 }
4006 return false;
4007}
4008
// isImpliedFalseByMatchingCmp: Pred1 implies Pred2 is false iff Pred1
// implies the inverse of Pred2 is true. NOTE(review): the argument line
// of the forwarded call is missing from this listing.
4010 CmpPredicate Pred2) {
4011 return isImpliedTrueByMatchingCmp(Pred1,
4013}
4014
// isImpliedByMatchingCmp: three-valued result — true/false when implied,
// std::nullopt when nothing can be concluded.
4016 CmpPredicate Pred2) {
4017 if (isImpliedTrueByMatchingCmp(Pred1, Pred2))
4018 return true;
4019 if (isImpliedFalseByMatchingCmp(Pred1, Pred2))
4020 return false;
4021 return std::nullopt;
4022}
4023
4024//===----------------------------------------------------------------------===//
4025// CmpPredicate Implementation
4026//===----------------------------------------------------------------------===//
4027
// getMatching: combines two CmpPredicates, reconciling the samesign flag.
// NOTE(review): the guard-condition lines between several returns are
// missing from this listing (e.g. the FP-predicate bailout and the
// flipped-signedness comparisons — TODO confirm against the real source).
4028std::optional<CmpPredicate> CmpPredicate::getMatching(CmpPredicate A,
4029 CmpPredicate B) {
4030 if (A.Pred == B.Pred)
4031 return A.HasSameSign == B.HasSameSign ? A : CmpPredicate(A.Pred);
4033 return {};
4034 if (A.HasSameSign &&
4036 return B.Pred;
4037 if (B.HasSameSign &&
4039 return A.Pred;
4040 return {};
4041}
4042
4046
// CmpPredicate::get: recovers the (samesign-aware) predicate from an icmp,
// falling back to the plain predicate for fcmp.
4048 if (auto *ICI = dyn_cast<ICmpInst>(Cmp))
4049 return ICI->getCmpPredicate();
4050 return Cmp->getPredicate();
4051}
4052
4056
// getSwapped(Cmp): swapped form of the instruction's CmpPredicate.
4058 return getSwapped(get(Cmp));
4059}
4060
4061//===----------------------------------------------------------------------===//
4062// SwitchInst Implementation
4063//===----------------------------------------------------------------------===//
4064
// init: reserves hung-off operand space and installs the condition (Op<0>)
// and default destination (Op<1>); cases occupy subsequent operand slots.
// NOTE(review): one line between ReservedSpace and allocHungoffUses is
// missing from this listing (presumably setting the operand count — TODO
// confirm).
4065void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
4066 assert(Value && Default && NumReserved);
4067 ReservedSpace = NumReserved;
4069 allocHungoffUses(ReservedSpace);
4070
4071 Op<0>() = Value;
4072 Op<1>() = Default;
4073}
4074
4075/// SwitchInst ctor - Create a new switch instruction, specifying a value to
4076/// switch on and a default destination. The number of additional cases can
4077/// be specified here to make memory allocation more efficient. This
4078/// constructor can also autoinsert before another instruction.
4079SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
4080 InsertPosition InsertBefore)
4081 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
4082 AllocMarker, InsertBefore) {
// +2 accounts for the condition and default-destination operands.
4083 init(Value, Default, 2 + NumCases);
4084}
4085
// Copy constructor: duplicates operands and the parallel case-value array
// (operand i corresponds to case value i - 2).
4086SwitchInst::SwitchInst(const SwitchInst &SI)
4087 : Instruction(SI.getType(), Instruction::Switch, AllocMarker) {
4088 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
4089 setNumHungOffUseOperands(SI.getNumOperands());
4090 Use *OL = getOperandList();
4091 ConstantInt **VL = case_values();
4092 const Use *InOL = SI.getOperandList();
4093 ConstantInt *const *InVL = SI.case_values();
4094 for (unsigned i = 2, E = SI.getNumOperands(); i != E; ++i) {
4095 OL[i] = InOL[i];
4096 VL[i - 2] = InVL[i - 2];
4097 }
4098 SubclassOptionalData = SI.SubclassOptionalData;
4099}
4100
4101/// addCase - Add an entry to the switch instruction...
4102///
4104 unsigned NewCaseIdx = getNumCases();
4105 unsigned OpNo = getNumOperands();
4106 if (OpNo + 1 > ReservedSpace)
4107 growOperands(); // Get more space!
4108 // Initialize some new operands.
4109 assert(OpNo < ReservedSpace && "Growing didn't work!");
4110 setNumHungOffUseOperands(OpNo + 1);
4111 CaseHandle Case(this, NewCaseIdx);
4112 Case.setValue(OnVal);
4113 Case.setSuccessor(Dest);
4114}
4115
4116/// removeCase - This method removes the specified case and its successor
4117/// from the switch instruction.
4119 unsigned idx = I->getCaseIndex();
4120
4121 assert(2 + idx < getNumOperands() && "Case index out of range!!!");
4122
4123 unsigned NumOps = getNumOperands();
4124 Use *OL = getOperandList();
4125 ConstantInt **VL = case_values();
4126
4127 // Overwrite this case with the end of the list.
4128 if (2 + idx + 1 != NumOps) {
4129 OL[2 + idx] = OL[NumOps - 1];
4130 VL[idx] = VL[NumOps - 2 - 1];
4131 }
4132
4133 // Nuke the last value.
4134 OL[NumOps - 1].set(nullptr);
4135 VL[NumOps - 2 - 1] = nullptr;
4137
4138 return CaseIt(this, idx);
4139}
4140
4141/// growOperands - grow operands - This grows the operand list in response
4142/// to a push_back style of operation. This grows the number of ops by 3 times.
4143///
4144void SwitchInst::growOperands() {
4145 unsigned e = getNumOperands();
4146 unsigned NumOps = e*3;
4147
4148 ReservedSpace = NumOps;
4149 growHungoffUses(ReservedSpace, /*WithExtraValues=*/true);
4150}
4151
4153 MDNode *ProfileData = getBranchWeightMDNode(SI);
4154 if (!ProfileData)
4155 return;
4156
4157 if (getNumBranchWeights(*ProfileData) != SI.getNumSuccessors()) {
4158 llvm_unreachable("number of prof branch_weights metadata operands does "
4159 "not correspond to number of succesors");
4160 }
4161
4163 if (!extractBranchWeights(ProfileData, Weights))
4164 return;
4165 this->Weights = std::move(Weights);
4166}
4167
4170 if (Weights) {
4171 assert(SI.getNumSuccessors() == Weights->size() &&
4172 "num of prof branch_weights must accord with num of successors");
4173 Changed = true;
4174 // Copy the last case to the place of the removed one and shrink.
4175 // This is tightly coupled with the way SwitchInst::removeCase() removes
4176 // the cases in SwitchInst::removeCase(CaseIt).
4177 (*Weights)[I->getCaseIndex() + 1] = Weights->back();
4178 Weights->pop_back();
4179 }
4180 return SI.removeCase(I);
4181}
4182
4184 auto *DestBlock = I->getCaseSuccessor();
4185 if (Weights) {
4186 auto Weight = getSuccessorWeight(I->getCaseIndex() + 1);
4187 (*Weights)[0] = Weight.value();
4188 }
4189
4190 SI.setDefaultDest(DestBlock);
4191}
4192
4194 ConstantInt *OnVal, BasicBlock *Dest,
4196 SI.addCase(OnVal, Dest);
4197
4198 if (!Weights && W && *W) {
4199 Changed = true;
4200 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4201 (*Weights)[SI.getNumSuccessors() - 1] = *W;
4202 } else if (Weights) {
4203 Changed = true;
4204 Weights->push_back(W.value_or(0));
4205 }
4206 if (Weights)
4207 assert(SI.getNumSuccessors() == Weights->size() &&
4208 "num of prof branch_weights must accord with num of successors");
4209}
4210
4213 // Instruction is erased. Mark as unchanged to not touch it in the destructor.
4214 Changed = false;
4215 if (Weights)
4216 Weights->resize(0);
4217 return SI.eraseFromParent();
4218}
4219
4222 if (!Weights)
4223 return std::nullopt;
4224 return (*Weights)[idx];
4225}
4226
4229 if (!W)
4230 return;
4231
4232 if (!Weights && *W)
4233 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4234
4235 if (Weights) {
4236 auto &OldW = (*Weights)[idx];
4237 if (*W != OldW) {
4238 Changed = true;
4239 OldW = *W;
4240 }
4241 }
4242}
4243
4246 unsigned idx) {
4247 if (MDNode *ProfileData = getBranchWeightMDNode(SI))
4248 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4249 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
4250 ->getValue()
4251 .getZExtValue();
4252
4253 return std::nullopt;
4254}
4255
4256//===----------------------------------------------------------------------===//
4257// IndirectBrInst Implementation
4258//===----------------------------------------------------------------------===//
4259
/// Set up the indirectbr's hung-off operand storage.
///
/// \p Address (operand 0) must have pointer type; \p NumDests is the number
/// of destination slots to reserve beyond the address operand.
/// NOTE(review): one statement between the assignment and the allocation
/// appears to be missing from this copy of the file — confirm upstream.
void IndirectBrInst::init(Value *Address, unsigned NumDests) {
  assert(Address && Address->getType()->isPointerTy() &&
         "Address of indirectbr must be a pointer");
  // Slot 0 holds the address; destinations follow it.
  ReservedSpace = 1+NumDests;
  allocHungoffUses(ReservedSpace);

  Op<0>() = Address;
}
4269
4270
4271/// growOperands - grow operands - This grows the operand list in response
4272/// to a push_back style of operation. This grows the number of ops by 2 times.
4273///
4274void IndirectBrInst::growOperands() {
4275 unsigned e = getNumOperands();
4276 unsigned NumOps = e*2;
4277
4278 ReservedSpace = NumOps;
4279 growHungoffUses(ReservedSpace);
4280}
4281
4282IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
4283 InsertPosition InsertBefore)
4284 : Instruction(Type::getVoidTy(Address->getContext()),
4285 Instruction::IndirectBr, AllocMarker, InsertBefore) {
4286 init(Address, NumCases);
4287}
4288
4289IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
4290 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
4291 AllocMarker) {
4292 NumUserOperands = IBI.NumUserOperands;
4293 allocHungoffUses(IBI.getNumOperands());
4294 Use *OL = getOperandList();
4295 const Use *InOL = IBI.getOperandList();
4296 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
4297 OL[i] = InOL[i];
4298 SubclassOptionalData = IBI.SubclassOptionalData;
4299}
4300
4301/// addDestination - Add a destination.
4302///
4304 unsigned OpNo = getNumOperands();
4305 if (OpNo+1 > ReservedSpace)
4306 growOperands(); // Get more space!
4307 // Initialize some new operands.
4308 assert(OpNo < ReservedSpace && "Growing didn't work!");
4310 getOperandList()[OpNo] = DestBB;
4311}
4312
4313/// removeDestination - This method removes the specified successor from the
4314/// indirectbr instruction.
4316 assert(idx < getNumOperands()-1 && "Successor index out of range!");
4317
4318 unsigned NumOps = getNumOperands();
4319 Use *OL = getOperandList();
4320
4321 // Replace this value with the last one.
4322 OL[idx+1] = OL[NumOps-1];
4323
4324 // Nuke the last value.
4325 OL[NumOps-1].set(nullptr);
4327}
4328
4329//===----------------------------------------------------------------------===//
4330// FreezeInst Implementation
4331//===----------------------------------------------------------------------===//
4332
4333FreezeInst::FreezeInst(Value *S, const Twine &Name, InsertPosition InsertBefore)
4334 : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
4335 setName(Name);
4336}
4337
4338//===----------------------------------------------------------------------===//
4339// cloneImpl() implementations
4340//===----------------------------------------------------------------------===//
4341
4342// Define these methods here so vtables don't get emitted into every translation
4343// unit that uses these classes.
4344
// Clone this GEP.
// NOTE(review): the local 'AllocMarker' declaration (sizing the operand
// allocation) is missing from this copy of the file — confirm upstream
// before editing.
GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
  return new (AllocMarker) GetElementPtrInst(*this, AllocMarker);
}
4349
4353
4357
4359 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
4360}
4361
4363 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
4364}
4365
4366ExtractValueInst *ExtractValueInst::cloneImpl() const {
4367 return new ExtractValueInst(*this);
4368}
4369
4370InsertValueInst *InsertValueInst::cloneImpl() const {
4371 return new InsertValueInst(*this);
4372}
4373
4376 getOperand(0), getAlign());
4377 Result->setUsedWithInAlloca(isUsedWithInAlloca());
4378 Result->setSwiftError(isSwiftError());
4379 return Result;
4380}
4381
4383 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
4385}
4386
4391
4396 Result->setVolatile(isVolatile());
4397 Result->setWeak(isWeak());
4398 return Result;
4399}
4400
4402 AtomicRMWInst *Result = new AtomicRMWInst(
4405 Result->setVolatile(isVolatile());
4406 return Result;
4407}
4408
4412
4414 return new TruncInst(getOperand(0), getType());
4415}
4416
4418 return new ZExtInst(getOperand(0), getType());
4419}
4420
4422 return new SExtInst(getOperand(0), getType());
4423}
4424
4426 return new FPTruncInst(getOperand(0), getType());
4427}
4428
4430 return new FPExtInst(getOperand(0), getType());
4431}
4432
4434 return new UIToFPInst(getOperand(0), getType());
4435}
4436
4438 return new SIToFPInst(getOperand(0), getType());
4439}
4440
4442 return new FPToUIInst(getOperand(0), getType());
4443}
4444
4446 return new FPToSIInst(getOperand(0), getType());
4447}
4448
4450 return new PtrToIntInst(getOperand(0), getType());
4451}
4452
4456
4458 return new IntToPtrInst(getOperand(0), getType());
4459}
4460
4462 return new BitCastInst(getOperand(0), getType());
4463}
4464
4468
// Clone this call, preserving any attached operand bundles.
// NOTE(review): the local 'AllocMarker' declarations for both paths are
// missing from this copy of the file; presumably the bundle path sizes the
// allocation to also hold the bundle operands — confirm upstream before
// editing.
CallInst *CallInst::cloneImpl() const {
  if (hasOperandBundles()) {
    return new (AllocMarker) CallInst(*this, AllocMarker);
  }
  return new (AllocMarker) CallInst(*this, AllocMarker);
}
4479
4480SelectInst *SelectInst::cloneImpl() const {
4482}
4483
4485 return new VAArgInst(getOperand(0), getType());
4486}
4487
4488ExtractElementInst *ExtractElementInst::cloneImpl() const {
4490}
4491
4492InsertElementInst *InsertElementInst::cloneImpl() const {
4494}
4495
4499
4500PHINode *PHINode::cloneImpl() const { return new (AllocMarker) PHINode(*this); }
4501
4502LandingPadInst *LandingPadInst::cloneImpl() const {
4503 return new LandingPadInst(*this);
4504}
4505
// Clone this return instruction.
// NOTE(review): the local 'AllocMarker' declaration is missing from this
// copy of the file — confirm upstream before editing.
ReturnInst *ReturnInst::cloneImpl() const {
  return new (AllocMarker) ReturnInst(*this, AllocMarker);
}
4510
4511UncondBrInst *UncondBrInst::cloneImpl() const {
4512 return new (AllocMarker) UncondBrInst(*this);
4513}
4514
4515CondBrInst *CondBrInst::cloneImpl() const {
4516 return new (AllocMarker) CondBrInst(*this);
4517}
4518
4519SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
4520
4521IndirectBrInst *IndirectBrInst::cloneImpl() const {
4522 return new IndirectBrInst(*this);
4523}
4524
// Clone this invoke, preserving any attached operand bundles.
// NOTE(review): the local 'AllocMarker' declarations for both paths are
// missing from this copy of the file; presumably the bundle path sizes the
// allocation to also hold the bundle operands — confirm upstream before
// editing.
InvokeInst *InvokeInst::cloneImpl() const {
  if (hasOperandBundles()) {
    return new (AllocMarker) InvokeInst(*this, AllocMarker);
  }
  return new (AllocMarker) InvokeInst(*this, AllocMarker);
}
4535
// Clone this callbr, preserving any attached operand bundles.
// NOTE(review): the local 'AllocMarker' declarations for both paths are
// missing from this copy of the file; presumably the bundle path sizes the
// allocation to also hold the bundle operands — confirm upstream before
// editing.
CallBrInst *CallBrInst::cloneImpl() const {
  if (hasOperandBundles()) {
    return new (AllocMarker) CallBrInst(*this, AllocMarker);
  }
  return new (AllocMarker) CallBrInst(*this, AllocMarker);
}
4546
4547ResumeInst *ResumeInst::cloneImpl() const {
4548 return new (AllocMarker) ResumeInst(*this);
4549}
4550
// Clone this cleanupret instruction.
// NOTE(review): the local 'AllocMarker' declaration is missing from this
// copy of the file — confirm upstream before editing.
CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
  return new (AllocMarker) CleanupReturnInst(*this, AllocMarker);
}
4555
4556CatchReturnInst *CatchReturnInst::cloneImpl() const {
4557 return new (AllocMarker) CatchReturnInst(*this);
4558}
4559
4560CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
4561 return new CatchSwitchInst(*this);
4562}
4563
// Clone this funclet pad.
// NOTE(review): the local 'AllocMarker' declaration is missing from this
// copy of the file — confirm upstream before editing.
FuncletPadInst *FuncletPadInst::cloneImpl() const {
  return new (AllocMarker) FuncletPadInst(*this, AllocMarker);
}
4568
4570 LLVMContext &Context = getContext();
4571 return new UnreachableInst(Context);
4572}
4573
/// Decide whether this 'unreachable' should be lowered to a trap.
///
/// \param TrapUnreachable     target/option requests traps for unreachable;
///                            when false the answer is always false.
/// \param NoTrapAfterNoreturn when true, suppress the trap if the
///                            unreachable sits behind a noreturn call.
/// \returns true if a trap should be emitted.
bool UnreachableInst::shouldLowerToTrap(bool TrapUnreachable,
                                        bool NoTrapAfterNoreturn) const {
  if (!TrapUnreachable)
    return false;

  // We may be able to ignore unreachable behind a noreturn call.
  // NOTE(review): the 'if' header that binds 'Call' (presumably a dyn_cast
  // of the preceding instruction) is missing from this copy of the file —
  // confirm upstream before editing.
      Call && Call->doesNotReturn()) {
    if (NoTrapAfterNoreturn)
      return false;
    // Do not emit an additional trap instruction.
    if (Call->isNonContinuableTrap())
      return false;
  }

  // Do not insert a trap into naked functions.
  if (getFunction()->hasFnAttribute(Attribute::Naked))
    return false;

  return true;
}
4594
4596 return new FreezeInst(getOperand(0));
4597}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT S1
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_SUPPRESS_DEPRECATED_DECLARATIONS_PUSH
Definition Compiler.h:269
#define LLVM_SUPPRESS_DEPRECATED_DECLARATIONS_POP
Definition Compiler.h:270
#define LLVM_ABI
Definition Compiler.h:213
This file contains the declarations for the subclasses of Constant, which represent the different fla...
@ Default
static bool isSigned(unsigned Opcode)
#define op(i)
Module.h This file contains the declarations for the Module class.
static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos)
static bool isImpliedFalseByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
static Value * createPlaceholderForShuffleVector(Value *V)
static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos)
static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))
static bool hasNonZeroFPOperands(const CmpInst *Cmp)
static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)
Try to lower a vector shuffle as a bit rotation.
static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)
static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)
static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isImpliedTrueByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
static LLVM_SUPPRESS_DEPRECATED_DECLARATIONS_POP Value * getAISize(LLVMContext &Context, Value *Amt)
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file contains the declarations for metadata subclasses.
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
PowerPC Reduce CR logical Operation
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
This file implements the SmallBitVector class.
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
Value * RHS
Value * LHS
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
Definition APFloat.h:334
LLVM_ABI float convertToFloat() const
Converts this APFloat to host float value.
Definition APFloat.cpp:5986
Class for arbitrary precision integers.
Definition APInt.h:78
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1353
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1662
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition APInt.h:1621
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
This class represents a conversion between pointers from one address space to another.
LLVM_ABI AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
LLVM_ABI AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI AllocaInst * cloneImpl() const
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
LLVM_ABI AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, InsertPosition InsertBefore)
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:39
iterator end() const
Definition ArrayRef.h:129
size_t size() const
Get the array size.
Definition ArrayRef.h:140
iterator begin() const
Definition ArrayRef.h:128
bool empty() const
Check if the array is empty.
Definition ArrayRef.h:135
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:184
Class to represent array types.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
LLVM_ABI AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
void setAlignment(Align Align)
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
LLVM_ABI AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
bool isElementwise() const
Return true if this RMW has elementwise vector semantics.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI AtomicRMWInst * cloneImpl() const
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
LLVM_ABI AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, bool Elementwise=false, InsertPosition InsertBefore=nullptr)
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ Add
*p = old + v
@ FAdd
*p = old + v
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ Sub
*p = old - v
@ And
*p = old & v
@ Xor
*p = old ^ v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ FSub
*p = old - v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMaximumNum
*p = maximumnum(old, v) maximumnum matches the behavior of llvm.maximumnum.
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
@ FMinimumNum
*p = minimumnum(old, v) minimumnum matches the behavior of llvm.minimumnum.
@ Nand
*p = ~(old & v)
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
void setOperation(BinOp Operation)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
BinOp getOperation() const
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
void setAlignment(Align Align)
void setElementwise(bool V)
Specify whether this RMW has elementwise vector semantics.
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
LLVM_ABI CaptureInfo getCaptureInfo() const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI const ConstantRange & getRange() const
Returns the value of the range attribute.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:124
static LLVM_ABI Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
LLVM Basic Block Representation.
Definition BasicBlock.h:62
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
LLVM_ABI bool swapOperands()
Exchange the two operands to this instruction.
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition InstrTypes.h:181
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
LLVM_ABI BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
static LLVM_ABI BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
LLVM_ABI BinaryOperator * cloneImpl() const
This class represents a no-op cast from one type to another.
LLVM_ABI BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
LLVM_ABI BitCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
bool isInlineAsm() const
Check if this call is an inline asm statement.
LLVM_ABI BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)
Return the BundleOpInfo for the operand at index OpIdx.
void setCallingConv(CallingConv::ID CC)
LLVM_ABI FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
bundle_op_iterator bundle_op_info_begin()
Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.
LLVM_ABI bool paramHasNonNullAttr(unsigned ArgNo, bool AllowUndefOrPoison) const
Return true if this argument has the nonnull attribute on either the CallBase instruction or the call...
LLVM_ABI MemoryEffects getMemoryEffects() const
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
LLVM_ABI bool doesNotAccessMemory() const
Determine if the call does not access memory.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
LLVM_ABI void setOnlyAccessesArgMemory()
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
OperandBundleUse operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const
Simple helper function to map a BundleOpInfo to an OperandBundleUse.
LLVM_ABI void setOnlyAccessesInaccessibleMemOrArgMem()
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI void setDoesNotAccessMemory()
AttributeSet getParamAttributes(unsigned ArgNo) const
Return the param attributes for this call.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
LLVM_ABI bool onlyAccessesInaccessibleMemory() const
Determine if the function may only access memory that is inaccessible from the IR.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
bundle_op_iterator bundle_op_info_end()
Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.
LLVM_ABI unsigned getNumSubclassExtraOperandsDynamic() const
Get the number of extra operands for instructions that don't have a fixed number of extra operands.
BundleOpInfo * bundle_op_iterator
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
LLVM_ABI bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
LLVM_ABI void setOnlyReadsMemory()
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
LLVM_ABI bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
LLVM_ABI CaptureInfo getCaptureInfo(unsigned OpNo) const
Return which pointer components this operand may capture.
LLVM_ABI bool hasArgumentWithAdditionalReturnCaptureComponents() const
Returns whether the call has an argument that has an attribute like captures(ret: address,...
CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
Value * getCalledOperand() const
LLVM_ABI void setOnlyWritesMemory()
LLVM_ABI op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)
Populate the BundleOpInfo instances and the Use& vector from Bundles.
AttributeList Attrs
parameter attributes for callable
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
LLVM_ABI std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
LLVM_ABI bool isReturnNonNull() const
Return true if the return value is known to be not null.
Value * getArgOperand(unsigned i) const
FunctionType * FTy
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
LLVM_ABI Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
LLVM_ABI void setOnlyAccessesInaccessibleMemory()
static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
LLVM_ABI bool onlyWritesMemory() const
Determine if the call does not access or only writes memory.
LLVM_ABI bool hasClobberingOperandBundles() const
Return true if this operand bundle user has operand bundles that may write to the heap.
void setCalledOperand(Value *V)
static LLVM_ABI CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
LLVM_ABI bool hasReadingOperandBundles() const
Return true if this operand bundle user has operand bundles that may read from the heap.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI void setMemoryEffects(MemoryEffects ME)
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
SmallVector< BasicBlock *, 16 > getIndirectDests() const
void setDefaultDest(BasicBlock *B)
void setIndirectDest(unsigned i, BasicBlock *B)
BasicBlock * getDefaultDest() const
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
LLVM_ABI CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
TailCallKind getTailCallKind() const
LLVM_ABI CallInst * cloneImpl() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Represents which components of the pointer may be captured in which location.
Definition ModRef.h:414
CaptureComponents getOtherComponents() const
Get components potentially captured through locations other than the return value.
Definition ModRef.h:446
static CaptureInfo none()
Create CaptureInfo that does not capture any components of the pointer.
Definition ModRef.h:427
static CaptureInfo all()
Create CaptureInfo that may capture all components of the pointer.
Definition ModRef.h:430
CaptureComponents getRetComponents() const
Get components potentially captured by the return value.
Definition ModRef.h:442
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static LLVM_ABI CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast or an AddrSpaceCast cast instruction.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition InstrTypes.h:610
static LLVM_ABI unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, const DataLayout *DL)
Determine how a pair of casts can be eliminated, if they can be at all.
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static LLVM_ABI CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
CastInst(Type *Ty, unsigned iType, Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics for subclasses.
Definition InstrTypes.h:451
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static LLVM_ABI bool isBitCastable(Type *SrcTy, Type *DestTy)
Check whether a bitcast between these types is valid.
static LLVM_ABI CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
static LLVM_ABI CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
static LLVM_ABI bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
static LLVM_ABI CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
LLVM_ABI bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source a...
static LLVM_ABI CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a SExt or BitCast cast instruction.
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
LLVM_ABI CatchReturnInst * cloneImpl() const
void setUnwindDest(BasicBlock *UnwindDest)
LLVM_ABI void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction... Note: This action invalidates handler_end().
LLVM_ABI CatchSwitchInst * cloneImpl() const
mapped_iterator< op_iterator, DerefFnTy > handler_iterator
Value * getParentPad() const
void setParentPad(Value *ParentPad)
BasicBlock * getUnwindDest() const
LLVM_ABI void removeHandler(handler_iterator HI)
LLVM_ABI CleanupReturnInst * cloneImpl() const
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition InstrTypes.h:858
bool isEquality() const
Determine if this is an equals/not equals predicate.
Definition InstrTypes.h:915
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition InstrTypes.h:768
bool isFalseWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:948
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition InstrTypes.h:693
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition InstrTypes.h:691
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition InstrTypes.h:688
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition InstrTypes.h:689
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition InstrTypes.h:678
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
LLVM_ABI bool isEquivalence(bool Invert=false) const
Determine if one operand of this compare can always be replaced by the other operand,...
bool isSigned() const
Definition InstrTypes.h:930
static LLVM_ABI bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
bool isTrueWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:942
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
static bool isFPPredicate(Predicate P)
Definition InstrTypes.h:770
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition InstrTypes.h:871
static LLVM_ABI CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate, the two operands and the instructio...
bool isNonStrictPredicate() const
Definition InstrTypes.h:852
LLVM_ABI void swapOperands()
This is just a convenience that dispatches to the subclasses.
static bool isRelational(Predicate P)
Return true if the predicate is relational (not EQ or NE).
Definition InstrTypes.h:923
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
static LLVM_ABI StringRef getPredicateName(Predicate P)
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
bool isStrictPredicate() const
Definition InstrTypes.h:843
static LLVM_ABI bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
Definition InstrTypes.h:893
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
static LLVM_ABI bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
LLVM_ABI CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name="", InsertPosition InsertBefore=nullptr, Instruction *FlagsSource=nullptr)
bool isUnsigned() const
Definition InstrTypes.h:936
LLVM_ABI bool isCommutative() const
This is just a convenience that dispatches to the subclasses.
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
CmpPredicate()
Default constructor.
static LLVM_ABI CmpPredicate get(const CmpInst *Cmp)
Do a ICmpInst::getCmpPredicate() or CmpInst::getPredicate(), as appropriate.
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
bool hasSameSign() const
Query samesign information, for optimizations.
static LLVM_ABI CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
Conditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
LLVM_ABI CondBrInst * cloneImpl() const
Value * getCondition() const
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:420
const APFloat & getValueAPF() const
Definition Constants.h:463
This is the shared class of boolean and integer constants.
Definition Constants.h:87
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string and methods for querying it.
Definition DataLayout.h:64
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
LLVM_ABI ExtractElementInst * cloneImpl() const
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
LLVM_ABI ExtractValueInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
bool isEquality() const
static LLVM_ABI bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
FCmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
This class represents an extension of floating point types.
LLVM_ABI FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
LLVM_ABI FPExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
This class represents a cast from floating point to signed integer.
LLVM_ABI FPToSIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
This class represents a cast from floating point to unsigned integer.
LLVM_ABI FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
LLVM_ABI FPToUIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a truncation of floating point types.
LLVM_ABI FPTruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
LLVM_ABI FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
LLVM_ABI FenceInst * cloneImpl() const
friend class Instruction
Iterator for Instructions in a BasicBlock.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
LLVM_ABI FreezeInst(Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
void setParentPad(Value *ParentPad)
Value * getParentPad() const
Convenience accessors.
LLVM_ABI FuncletPadInst * cloneImpl() const
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
bool isVarArg() const
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutInBounds() const
unsigned getRaw() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
LLVM_ABI bool isInBounds() const
Determine whether the GEP has the inbounds flag.
LLVM_ABI bool hasNoUnsignedSignedWrap() const
Determine whether the GEP has the nusw flag.
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
LLVM_ABI bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
LLVM_ABI bool hasNoUnsignedWrap() const
Determine whether the GEP has the nuw flag.
LLVM_ABI bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
LLVM_ABI void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
LLVM_ABI bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
LLVM_ABI GetElementPtrInst * cloneImpl() const
LLVM_ABI bool collectOffset(const DataLayout &DL, unsigned BitWidth, SmallMapVector< Value *, APInt, 4 > &VariableOffsets, APInt &ConstantOffset) const
LLVM_ABI void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
LLVM_ABI GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
ICmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI ICmpInst * cloneImpl() const
Clone an identical ICmpInst.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
bool isEquality() const
Return true if this predicate is either EQ or NE.
static LLVM_ABI Predicate getFlippedSignednessPredicate(Predicate Pred)
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static LLVM_ABI std::optional< bool > isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
Determine if Pred1 implies Pred2 is true, false, or if nothing can be inferred about the implication,...
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Indirect Branch Instruction.
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
LLVM_ABI void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
LLVM_ABI IndirectBrInst * cloneImpl() const
LLVM_ABI InsertElementInst * cloneImpl() const
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
bool isValid() const
Definition Instruction.h:62
BasicBlock * getBasicBlock()
Definition Instruction.h:63
This instruction inserts a struct field or array element value into an aggregate value.
LLVM_ABI InsertValueInst * cloneImpl() const
BitfieldElement::Type getSubclassData() const
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI bool isVolatile() const LLVM_READONLY
Return true if this instruction has a volatile memory access.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Bitfield::Element< uint16_t, 0, 16 > OpaqueField
Instruction(const Instruction &)=delete
friend class Value
friend class BasicBlock
Various leaf nodes.
void setSubclassData(typename BitfieldElement::Type Value)
This class represents a cast from an integer to a pointer.
LLVM_ABI IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
Invoke instruction.
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
LLVM_ABI InvokeInst * cloneImpl() const
LLVM_ABI LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
void setUnwindDest(BasicBlock *B)
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVMContextImpl *const pImpl
Definition LLVMContext.h:70
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
LLVM_ABI LandingPadInst * cloneImpl() const
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
void setAlignment(Align Align)
bool isVolatile() const
Return true if this is a load from a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
LLVM_ABI LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
void setVolatile(bool V)
Specify whether this is a volatile load or not.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
LLVM_ABI LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, InsertPosition InsertBefore)
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
static MemoryEffectsBase readOnly()
Definition ModRef.h:133
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition ModRef.h:252
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition ModRef.h:246
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:143
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:149
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition ModRef.h:265
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition ModRef.h:255
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition ModRef.h:249
static MemoryEffectsBase writeOnly()
Definition ModRef.h:138
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:166
static MemoryEffectsBase none()
Definition ModRef.h:128
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
Definition ModRef.h:305
StringRef getTag() const
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
LLVM_ABI void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)
Remove all incoming values for which the predicate returns true.
void setIncomingBlock(unsigned i, BasicBlock *BB)
LLVM_ABI Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
LLVM_ABI bool hasConstantOrUndefValue() const
Whether the specified PHI node always merges together the same value, assuming undefs are equal to a ...
void setIncomingValue(unsigned i, Value *V)
const_block_iterator block_end() const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
LLVM_ABI Value * hasConstantValue() const
If the specified PHI node always merges together the same value, return the value,...
LLVM_ABI PHINode * cloneImpl() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Class to represent pointers.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
PtrToAddrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
PtrToAddrInst * cloneImpl() const
Clone an identical PtrToAddrInst.
This class represents a cast from a pointer to an integer.
LLVM_ABI PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
LLVM_ABI ResumeInst * cloneImpl() const
Return a value (possibly void), from a function.
LLVM_ABI ReturnInst * cloneImpl() const
This class represents a sign extension of integer types.
LLVM_ABI SExtInst * cloneImpl() const
Clone an identical SExtInst.
LLVM_ABI SExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a cast from signed integer to floating point.
LLVM_ABI SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
LLVM_ABI SIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Class to represent scalable SIMD vectors.
LLVM_ABI SelectInst * cloneImpl() const
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
static LLVM_ABI bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
ArrayRef< int > getShuffleMask() const
static LLVM_ABI bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
LLVM_ABI ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static LLVM_ABI bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
LLVM_ABI bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
static LLVM_ABI bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)
Return true if this shuffle mask represents "clustered" mask of size VF, i.e.
LLVM_ABI bool isIdentityWithPadding() const
Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.
static LLVM_ABI bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector.
LLVM_ABI bool isConcat() const
Return true if this shuffle concatenates its 2 source vectors.
static LLVM_ABI bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor, unsigned &Index)
Check if the mask is a DE-interleave mask of the given factor Factor like: <Index,...
LLVM_ABI ShuffleVectorInst * cloneImpl() const
static LLVM_ABI bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
LLVM_ABI void setShuffleMask(ArrayRef< int > Mask)
friend class Instruction
Iterator for Instructions in a BasicBlock.
LLVM_ABI bool isInterleave(unsigned Factor)
Return if this shuffle interleaves its two input vectors together.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static LLVM_ABI bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
LLVM_ABI void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
static LLVM_ABI bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static LLVM_ABI Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)
static LLVM_ABI bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)
Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor tim...
static LLVM_ABI bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Align getAlign() const
void setVolatile(bool V)
Specify whether this is a volatile store or not.
void setAlignment(Align Align)
friend class Instruction
Iterator for Instructions in a BasicBlock.
LLVM_ABI StoreInst * cloneImpl() const
LLVM_ABI StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
bool isVolatile() const
Return true if this is a store to a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Class to represent struct types.
LLVM_ABI void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
LLVM_ABI Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
LLVM_ABI CaseWeightOpt getSuccessorWeight(unsigned idx)
LLVM_ABI void replaceDefaultDest(SwitchInst::CaseIt I)
Replace the default destination by given case.
std::optional< uint32_t > CaseWeightOpt
LLVM_ABI SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
Multiway switch.
void allocHungoffUses(unsigned N)
LLVM_ABI SwitchInst * cloneImpl() const
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
ConstantInt *const * case_values() const
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
LLVM_ABI CaseIt removeCase(CaseIt I)
This method removes the specified case and its successor from the switch instruction.
Target - Wrapper for Target specific information.
This class represents a truncation of integer types.
LLVM_ABI TruncInst * cloneImpl() const
Clone an identical TruncInst.
LLVM_ABI TruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
static constexpr TypeSize get(ScalarTy Quantity, bool Scalable)
Definition TypeSize.h:340
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isByteTy() const
True if this is an instance of ByteType.
Definition Type.h:242
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:313
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:263
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM_ABI bool isFirstClassType() const
Return true if the type is "first class", meaning it is a valid type for a Value.
Definition Type.cpp:255
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:201
bool isByteOrByteVectorTy() const
Return true if this is a byte type or a vector of byte types.
Definition Type.h:248
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:321
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:130
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:310
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:186
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:287
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
bool isTokenTy() const
Return true if this is 'token'.
Definition Type.h:236
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:227
This class represents a cast unsigned integer to floating point.
LLVM_ABI UIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
UnaryInstruction(Type *Ty, unsigned iType, Value *V, InsertPosition InsertBefore=nullptr)
Definition InstrTypes.h:62
static LLVM_ABI UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a unary instruction, given the opcode and an operand.
LLVM_ABI UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
LLVM_ABI UnaryOperator * cloneImpl() const
UnaryOps getOpcode() const
Definition InstrTypes.h:154
Unconditional Branch instruction.
LLVM_ABI UncondBrInst * cloneImpl() const
LLVM_ABI UnreachableInst(LLVMContext &C, InsertPosition InsertBefore=nullptr)
LLVM_ABI bool shouldLowerToTrap(bool TrapUnreachable, bool NoTrapAfterNoreturn) const
friend class Instruction
Iterator for Instructions in a `BasicBlock.
LLVM_ABI UnreachableInst * cloneImpl() const
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM_ABI void set(Value *Val)
Definition Value.h:883
Use * op_iterator
Definition User.h:254
const Use * getOperandList() const
Definition User.h:200
op_iterator op_begin()
Definition User.h:259
LLVM_ABI void allocHungoffUses(unsigned N, bool WithExtraValues=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition User.cpp:54
const Use & getOperandUse(unsigned i) const
Definition User.h:220
void setNumHungOffUseOperands(unsigned NumOps)
Subclasses with hung off uses need to manage the operand count themselves.
Definition User.h:240
Use & Op()
Definition User.h:171
LLVM_ABI void growHungoffUses(unsigned N, bool WithExtraValues=false)
Grow the number of hung off uses.
Definition User.cpp:71
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
op_iterator op_end()
Definition User.h:261
VAArgInst(Value *List, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI VAArgInst * cloneImpl() const
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition Value.h:85
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:393
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:549
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
unsigned NumUserOperands
Definition Value.h:109
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
LLVM_ABI ZExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
typename base_list_type::iterator iterator
Definition ilist.h:121
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
bool match(Val *V, const Pattern &P)
cstfp_pred_ty< is_non_zero_not_denormal_fp > m_NonZeroNotDenormalFP()
Match a floating-point non-zero that is not a denormal.
initializer< Ty > init(const Ty &Val)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
Definition CoroShape.h:31
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
Context & getContext() const
Definition BasicBlock.h:99
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:557
auto seq_inclusive(T Begin, T End)
Iterate over an integral type from Begin to End inclusive.
Definition Sequence.h:325
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1668
unsigned getPointerAddressSpace(const Type *T)
Definition SPIRVUtils.h:376
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:328
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
Definition ModRef.h:356
constexpr auto equal_to(T &&Arg)
Functor variant of std::equal_to that can be used as a UnaryPredicate in functional algorithms like a...
Definition STLExtras.h:2172
std::enable_if_t< std::is_unsigned_v< T >, std::optional< T > > checkedMulUnsigned(T LHS, T RHS)
Multiply two unsigned integers LHS and RHS.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
auto reverse(ContainerTy &&C)
Definition STLExtras.h:407
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:370
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
constexpr int PoisonMaskElem
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
OperandBundleDefT< Value * > OperandBundleDef
Definition AutoUpgrade.h:34
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMul
Product of floats.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1884
constexpr unsigned BitWidth
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
bool capturesAnything(CaptureComponents CC)
Definition ModRef.h:379
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list.
Definition STLExtras.h:2165
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
@ Default
The result value is uniform if and only if all operands are uniform.
Definition Uniformity.h:20
LLVM_ABI void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Summary of memprof metadata on allocations.
Used to keep track of an operand bundle.
uint32_t End
The index in the Use& vector where operands for this operand bundle ends.
uint32_t Begin
The index in the Use& vector where operands for this operand bundle starts.
static LLVM_ABI std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
static LLVM_ABI std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
static LLVM_ABI std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
static LLVM_ABI std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static LLVM_ABI std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static LLVM_ABI std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static LLVM_ABI std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
static LLVM_ABI std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static LLVM_ABI std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
static LLVM_ABI std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
Matching combinators.
A MapVector that performs no allocations if smaller than a certain size.
Definition MapVector.h:276
Indicates this User has operands co-allocated.
Definition User.h:60
Indicates this User has operands and a descriptor co-allocated .
Definition User.h:66