LLVM 23.0.0git
Instructions.cpp
Go to the documentation of this file.
1//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements all of the non-inline methods for the LLVM instruction
10// classes.
11//
12//===----------------------------------------------------------------------===//
13
15#include "LLVMContextImpl.h"
18#include "llvm/ADT/Twine.h"
19#include "llvm/IR/Attributes.h"
20#include "llvm/IR/BasicBlock.h"
21#include "llvm/IR/Constant.h"
23#include "llvm/IR/Constants.h"
24#include "llvm/IR/DataLayout.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/InstrTypes.h"
28#include "llvm/IR/Instruction.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/MDBuilder.h"
32#include "llvm/IR/Metadata.h"
33#include "llvm/IR/Module.h"
34#include "llvm/IR/Operator.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Value.h"
46#include "llvm/Support/ModRef.h"
48#include <algorithm>
49#include <cassert>
50#include <cstdint>
51#include <optional>
52#include <vector>
53
54using namespace llvm;
55
57 "disable-i2p-p2i-opt", cl::init(false),
58 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
59
60//===----------------------------------------------------------------------===//
61// AllocaInst Class
62//===----------------------------------------------------------------------===//
63
64std::optional<TypeSize>
66 TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
67 // Zero-sized types can return early since 0 * N = 0 for any array size N.
68 if (Size.isZero())
69 return Size;
70 if (isArrayAllocation()) {
72 if (!C)
73 return std::nullopt;
74 assert(!Size.isScalable() && "Array elements cannot have a scalable size");
75 auto CheckedProd =
76 checkedMulUnsigned(Size.getKnownMinValue(), C->getZExtValue());
77 if (!CheckedProd)
78 return std::nullopt;
79 return TypeSize::getFixed(*CheckedProd);
80 }
81 return Size;
82}
83
84std::optional<TypeSize>
86 std::optional<TypeSize> Size = getAllocationSize(DL);
87 if (!Size)
88 return std::nullopt;
89 auto CheckedProd = checkedMulUnsigned(Size->getKnownMinValue(),
90 static_cast<TypeSize::ScalarTy>(8));
91 if (!CheckedProd)
92 return std::nullopt;
93 return TypeSize::get(*CheckedProd, Size->isScalable());
94}
95
96//===----------------------------------------------------------------------===//
97// SelectInst Class
98//===----------------------------------------------------------------------===//
99
100/// areInvalidOperands - Return a string if the specified operands are invalid
101/// for a select operation, otherwise return null.
102const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
103 if (Op1->getType() != Op2->getType())
104 return "both values to select must have same type";
105
106 if (Op1->getType()->isTokenTy())
107 return "select values cannot have token type";
108
109 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
110 // Vector select.
111 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
112 return "vector select condition element type must be i1";
114 if (!ET)
115 return "selected values for vector select must be vectors";
116 if (ET->getElementCount() != VT->getElementCount())
117 return "vector select requires selected vectors to have "
118 "the same vector length as select condition";
119 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
120 return "select condition must be i1 or <n x i1>";
121 }
122 return nullptr;
123}
124
125//===----------------------------------------------------------------------===//
126// PHINode Class
127//===----------------------------------------------------------------------===//
128
129PHINode::PHINode(const PHINode &PN)
130 : Instruction(PN.getType(), Instruction::PHI, AllocMarker),
131 ReservedSpace(PN.getNumOperands()) {
134 std::copy(PN.op_begin(), PN.op_end(), op_begin());
135 copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
137}
138
139// removeIncomingValue - Remove an incoming value. This is useful if a
140// predecessor basic block is deleted.
141Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
142 Value *Removed = getIncomingValue(Idx);
143 // Swap with the end of the list.
144 unsigned Last = getNumOperands() - 1;
145 if (Idx != Last) {
148 }
149
150 // Nuke the last value.
151 Op<-1>().set(nullptr);
153
154 // If the PHI node is dead, because it has zero entries, nuke it now.
155 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
156 // If anyone is using this PHI, make them use a dummy value instead...
159 }
160 return Removed;
161}
162
163void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
164 bool DeletePHIIfEmpty) {
165 unsigned NumOps = getNumIncomingValues();
166
167 // Loop backwards in case the predicate is purely index based.
168 for (unsigned Idx = NumOps; Idx-- > 0;) {
169 if (Predicate(Idx)) {
170 unsigned LastIdx = NumOps - 1;
171 if (Idx != LastIdx) {
172 setIncomingValue(Idx, getIncomingValue(LastIdx));
173 setIncomingBlock(Idx, getIncomingBlock(LastIdx));
174 }
175 getOperandUse(LastIdx).set(nullptr);
176 NumOps--;
177 }
178 }
179
181
182 // If the PHI node is dead, because it has zero entries, nuke it now.
183 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
184 // If anyone is using this PHI, make them use a dummy value instead...
187 }
188}
189
190/// growOperands - grow operands - This grows the operand list in response
191/// to a push_back style of operation. This grows the number of ops by 1.5
192/// times.
193///
194void PHINode::growOperands() {
195 unsigned e = getNumOperands();
196 unsigned NumOps = e + e / 2;
197 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
198
199 ReservedSpace = NumOps;
200 growHungoffUses(ReservedSpace, /*WithExtraValues=*/true);
201}
202
203/// hasConstantValue - If the specified PHI node always merges together the same
204/// value, return the value, otherwise return null.
206 // Exploit the fact that phi nodes always have at least one entry.
207 Value *ConstantValue = getIncomingValue(0);
208 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
209 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
210 if (ConstantValue != this)
211 return nullptr; // Incoming values not all the same.
212 // The case where the first value is this PHI.
213 ConstantValue = getIncomingValue(i);
214 }
215 if (ConstantValue == this)
216 return PoisonValue::get(getType());
217 return ConstantValue;
218}
219
220/// hasConstantOrUndefValue - Whether the specified PHI node always merges
221/// together the same value, assuming that undefs result in the same value as
222/// non-undefs.
223/// Unlike \ref hasConstantValue, this does not return a value because the
224/// unique non-undef incoming value need not dominate the PHI node.
226 Value *ConstantValue = nullptr;
227 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
229 if (Incoming != this && !isa<UndefValue>(Incoming)) {
230 if (ConstantValue && ConstantValue != Incoming)
231 return false;
232 ConstantValue = Incoming;
233 }
234 }
235 return true;
236}
237
238//===----------------------------------------------------------------------===//
239// LandingPadInst Implementation
240//===----------------------------------------------------------------------===//
241
242LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
243 const Twine &NameStr,
244 InsertPosition InsertBefore)
245 : Instruction(RetTy, Instruction::LandingPad, AllocMarker, InsertBefore) {
246 init(NumReservedValues, NameStr);
247}
248
249LandingPadInst::LandingPadInst(const LandingPadInst &LP)
250 : Instruction(LP.getType(), Instruction::LandingPad, AllocMarker),
251 ReservedSpace(LP.getNumOperands()) {
254 Use *OL = getOperandList();
255 const Use *InOL = LP.getOperandList();
256 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
257 OL[I] = InOL[I];
258
259 setCleanup(LP.isCleanup());
260}
261
262LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
263 const Twine &NameStr,
264 InsertPosition InsertBefore) {
265 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
266}
267
268void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
269 ReservedSpace = NumReservedValues;
271 allocHungoffUses(ReservedSpace);
272 setName(NameStr);
273 setCleanup(false);
274}
275
276/// growOperands - grow operands - This grows the operand list in response to a
277/// push_back style of operation. This grows the number of ops by 2 times.
278void LandingPadInst::growOperands(unsigned Size) {
279 unsigned e = getNumOperands();
280 if (ReservedSpace >= e + Size) return;
281 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
282 growHungoffUses(ReservedSpace);
283}
284
286 unsigned OpNo = getNumOperands();
287 growOperands(1);
288 assert(OpNo < ReservedSpace && "Growing didn't work!");
290 getOperandList()[OpNo] = Val;
291}
292
293//===----------------------------------------------------------------------===//
294// CallBase Implementation
295//===----------------------------------------------------------------------===//
296
298 InsertPosition InsertPt) {
299 switch (CB->getOpcode()) {
300 case Instruction::Call:
301 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
302 case Instruction::Invoke:
303 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
304 case Instruction::CallBr:
305 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
306 default:
307 llvm_unreachable("Unknown CallBase sub-class!");
308 }
309}
310
312 InsertPosition InsertPt) {
314 for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
315 auto ChildOB = CI->getOperandBundleAt(i);
316 if (ChildOB.getTagName() != OpB.getTag())
317 OpDefs.emplace_back(ChildOB);
318 }
319 OpDefs.emplace_back(OpB);
320 return CallBase::Create(CI, OpDefs, InsertPt);
321}
322
324
326 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
327 return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
328}
329
331 const Value *V = getCalledOperand();
332 if (isa<Function>(V) || isa<Constant>(V))
333 return false;
334 return !isInlineAsm();
335}
336
337/// Tests if this call site must be tail call optimized. Only a CallInst can
338/// be tail call optimized.
340 if (auto *CI = dyn_cast<CallInst>(this))
341 return CI->isMustTailCall();
342 return false;
343}
344
345/// Tests if this call site is marked as a tail call.
347 if (auto *CI = dyn_cast<CallInst>(this))
348 return CI->isTailCall();
349 return false;
350}
351
354 return F->getIntrinsicID();
356}
357
359 FPClassTest Mask = Attrs.getRetNoFPClass();
360
361 if (const Function *F = getCalledFunction())
362 Mask |= F->getAttributes().getRetNoFPClass();
363 return Mask;
364}
365
367 FPClassTest Mask = Attrs.getParamNoFPClass(i);
368
369 if (const Function *F = getCalledFunction())
370 Mask |= F->getAttributes().getParamNoFPClass(i);
371 return Mask;
372}
373
374std::optional<ConstantRange> CallBase::getRange() const {
375 Attribute CallAttr = Attrs.getRetAttr(Attribute::Range);
377 if (const Function *F = getCalledFunction())
378 FnAttr = F->getRetAttribute(Attribute::Range);
379
380 if (CallAttr.isValid() && FnAttr.isValid())
381 return CallAttr.getRange().intersectWith(FnAttr.getRange());
382 if (CallAttr.isValid())
383 return CallAttr.getRange();
384 if (FnAttr.isValid())
385 return FnAttr.getRange();
386 return std::nullopt;
387}
388
390 if (hasRetAttr(Attribute::NonNull))
391 return true;
392
393 if (getRetDereferenceableBytes() > 0 &&
395 return true;
396
397 return false;
398}
399
401 unsigned Index;
402
403 if (Attrs.hasAttrSomewhere(Kind, &Index))
404 return getArgOperand(Index - AttributeList::FirstArgIndex);
405 if (const Function *F = getCalledFunction())
406 if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
407 return getArgOperand(Index - AttributeList::FirstArgIndex);
408
409 return nullptr;
410}
411
412/// Determine whether the argument or parameter has the given attribute.
413bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
414 assert(ArgNo < arg_size() && "Param index out of bounds!");
415
416 if (Attrs.hasParamAttr(ArgNo, Kind))
417 return true;
418
419 const Function *F = getCalledFunction();
420 if (!F)
421 return false;
422
423 if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
424 return false;
425
426 // Take into account mod/ref by operand bundles.
427 switch (Kind) {
428 case Attribute::ReadNone:
430 case Attribute::ReadOnly:
432 case Attribute::WriteOnly:
433 return !hasReadingOperandBundles();
434 default:
435 return true;
436 }
437}
438
440 bool AllowUndefOrPoison) const {
442 "Argument must be a pointer");
443 if (paramHasAttr(ArgNo, Attribute::NonNull) &&
444 (AllowUndefOrPoison || paramHasAttr(ArgNo, Attribute::NoUndef)))
445 return true;
446
447 if (paramHasAttr(ArgNo, Attribute::Dereferenceable) &&
449 getCaller(),
451 return true;
452
453 return false;
454}
455
456bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
458 return F->getAttributes().hasFnAttr(Kind);
459
460 return false;
461}
462
463bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
465 return F->getAttributes().hasFnAttr(Kind);
466
467 return false;
468}
469
470template <typename AK>
471Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
472 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
473 // getMemoryEffects() correctly combines memory effects from the call-site,
474 // operand bundles and function.
475 assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
476 }
477
479 return F->getAttributes().getFnAttr(Kind);
480
481 return Attribute();
482}
483
484template LLVM_ABI Attribute
485CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
486template LLVM_ABI Attribute
487CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
488
489template <typename AK>
490Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
491 AK Kind) const {
493
494 if (auto *F = dyn_cast<Function>(V))
495 return F->getAttributes().getParamAttr(ArgNo, Kind);
496
497 return Attribute();
498}
499template LLVM_ABI Attribute CallBase::getParamAttrOnCalledFunction(
500 unsigned ArgNo, Attribute::AttrKind Kind) const;
501template LLVM_ABI Attribute
502CallBase::getParamAttrOnCalledFunction(unsigned ArgNo, StringRef Kind) const;
503
506 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
508}
509
512 const unsigned BeginIndex) {
513 auto It = op_begin() + BeginIndex;
514 for (auto &B : Bundles)
515 It = std::copy(B.input_begin(), B.input_end(), It);
516
517 auto *ContextImpl = getContext().pImpl;
518 auto BI = Bundles.begin();
519 unsigned CurrentIndex = BeginIndex;
520
521 for (auto &BOI : bundle_op_infos()) {
522 assert(BI != Bundles.end() && "Incorrect allocation?");
523
524 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
525 BOI.Begin = CurrentIndex;
526 BOI.End = CurrentIndex + BI->input_size();
527 CurrentIndex = BOI.End;
528 BI++;
529 }
530
531 assert(BI == Bundles.end() && "Incorrect allocation?");
532
533 return It;
534}
535
537 /// When there aren't many bundles, we do a simple linear search.
538 /// Else fall back to a binary search that uses the fact that bundles usually
539 /// have a similar number of arguments to get faster convergence.
541 for (auto &BOI : bundle_op_infos())
542 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
543 return BOI;
544
545 llvm_unreachable("Did not find operand bundle for operand!");
546 }
547
548 assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
550 OpIdx < std::prev(bundle_op_info_end())->End &&
551 "The Idx isn't in the operand bundle");
552
553 /// We need a decimal number below and to prevent using floating point numbers
554 /// we use an integral value multiplied by this constant.
555 constexpr unsigned NumberScaling = 1024;
556
559 bundle_op_iterator Current = Begin;
560
561 while (Begin != End) {
562 unsigned ScaledOperandPerBundle =
563 NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
564 Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
565 ScaledOperandPerBundle);
566 if (Current >= End)
567 Current = std::prev(End);
568 assert(Current < End && Current >= Begin &&
569 "the operand bundle doesn't cover every value in the range");
570 if (OpIdx >= Current->Begin && OpIdx < Current->End)
571 break;
572 if (OpIdx >= Current->End)
573 Begin = Current + 1;
574 else
575 End = Current;
576 }
577
578 assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
579 "the operand bundle doesn't cover every value in the range");
580 return *Current;
581}
582
585 InsertPosition InsertPt) {
586 if (CB->getOperandBundle(ID))
587 return CB;
588
590 CB->getOperandBundlesAsDefs(Bundles);
591 Bundles.push_back(OB);
592 return Create(CB, Bundles, InsertPt);
593}
594
596 InsertPosition InsertPt) {
598 bool CreateNew = false;
599
600 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
601 auto Bundle = CB->getOperandBundleAt(I);
602 if (Bundle.getTagID() == ID) {
603 CreateNew = true;
604 continue;
605 }
606 Bundles.emplace_back(Bundle);
607 }
608
609 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
610}
611
613 // Implementation note: this is a conservative implementation of operand
614 // bundle semantics, where *any* non-assume operand bundle (other than
615 // ptrauth) forces a callsite to be at least readonly.
620 getIntrinsicID() != Intrinsic::assume;
621}
622
631
633 MemoryEffects ME = getAttributes().getMemoryEffects();
634 if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
635 MemoryEffects FnME = Fn->getMemoryEffects();
636 if (hasOperandBundles()) {
637 // TODO: Add a method to get memory effects for operand bundles instead.
639 FnME |= MemoryEffects::readOnly();
641 FnME |= MemoryEffects::writeOnly();
642 }
643 if (isVolatile()) {
644 // Volatile operations also access inaccessible memory.
646 }
647 ME &= FnME;
648 }
649 return ME;
650}
654
655/// Determine if the function does not access memory.
662
663/// Determine if the function does not access or only reads memory.
670
671/// Determine if the function does not access or only writes memory.
678
679/// Determine if the call can access memmory only using pointers based
680/// on its arguments.
687
688/// Determine if the function may only access memory that is
689/// inaccessible from the IR.
696
697/// Determine if the function may only access memory that is
698/// either inaccessible from the IR or pointed to by its arguments.
706
708 if (OpNo < arg_size()) {
709 // If the argument is passed byval, the callee does not have access to the
710 // original pointer and thus cannot capture it.
711 if (isByValArgument(OpNo))
712 return CaptureInfo::none();
713
715 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
716 CI &= Fn->getAttributes().getParamAttrs(OpNo).getCaptureInfo();
717 return CI;
718 }
719
720 // Bundles on assumes are captures(none).
721 if (getIntrinsicID() == Intrinsic::assume)
722 return CaptureInfo::none();
723
724 // deopt operand bundles are captures(none)
725 auto &BOI = getBundleOpInfoForOperand(OpNo);
726 auto OBU = operandBundleFromBundleOpInfo(BOI);
727 return OBU.isDeoptOperandBundle() ? CaptureInfo::none() : CaptureInfo::all();
728}
729
731 for (unsigned I = 0, E = arg_size(); I < E; ++I) {
733 continue;
734
736 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
737 CI &= Fn->getAttributes().getParamAttrs(I).getCaptureInfo();
739 return true;
740 }
741 return false;
742}
743
744//===----------------------------------------------------------------------===//
745// CallInst Implementation
746//===----------------------------------------------------------------------===//
747
748void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
749 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
750 this->FTy = FTy;
751 assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
752 "NumOperands not set up?");
753
754#ifndef NDEBUG
755 assert((Args.size() == FTy->getNumParams() ||
756 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
757 "Calling a function with bad signature!");
758
759 for (unsigned i = 0; i != Args.size(); ++i)
760 assert((i >= FTy->getNumParams() ||
761 FTy->getParamType(i) == Args[i]->getType()) &&
762 "Calling a function with a bad signature!");
763#endif
764
765 // Set operands in order of their index to match use-list-order
766 // prediction.
767 llvm::copy(Args, op_begin());
768 setCalledOperand(Func);
769
770 auto It = populateBundleOperandInfos(Bundles, Args.size());
771 (void)It;
772 assert(It + 1 == op_end() && "Should add up!");
773
774 setName(NameStr);
775}
776
777void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
778 this->FTy = FTy;
779 assert(getNumOperands() == 1 && "NumOperands not set up?");
780 setCalledOperand(Func);
781
782 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
783
784 setName(NameStr);
785}
786
787CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
788 AllocInfo AllocInfo, InsertPosition InsertBefore)
789 : CallBase(Ty->getReturnType(), Instruction::Call, AllocInfo,
790 InsertBefore) {
791 init(Ty, Func, Name);
792}
793
794CallInst::CallInst(const CallInst &CI, AllocInfo AllocInfo)
795 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call, AllocInfo) {
797 "Wrong number of operands allocated");
798 setTailCallKind(CI.getTailCallKind());
800
801 std::copy(CI.op_begin(), CI.op_end(), op_begin());
802 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
805}
806
808 InsertPosition InsertPt) {
809 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
810
811 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
812 Args, OpB, CI->getName(), InsertPt);
813 NewCI->setTailCallKind(CI->getTailCallKind());
814 NewCI->setCallingConv(CI->getCallingConv());
815 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
816 NewCI->setAttributes(CI->getAttributes());
817 NewCI->setDebugLoc(CI->getDebugLoc());
818 return NewCI;
819}
820
821// Update profile weight for call instruction by scaling it using the ratio
822 // of S/T. The meaning of "branch_weights" metadata for call instructions is
823 // transferred to represent call count.
825 if (T == 0) {
826 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
827 "div by 0. Ignoring. Likely the function "
828 << getParent()->getParent()->getName()
829 << " has 0 entry count, and contains call instructions "
830 "with non-zero prof info.");
831 return;
832 }
833 scaleProfData(*this, S, T);
834}
835
836//===----------------------------------------------------------------------===//
837// InvokeInst Implementation
838//===----------------------------------------------------------------------===//
839
840void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
841 BasicBlock *IfException, ArrayRef<Value *> Args,
843 const Twine &NameStr) {
844 this->FTy = FTy;
845
847 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
848 "NumOperands not set up?");
849
850#ifndef NDEBUG
851 assert(((Args.size() == FTy->getNumParams()) ||
852 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
853 "Invoking a function with bad signature");
854
855 for (unsigned i = 0, e = Args.size(); i != e; i++)
856 assert((i >= FTy->getNumParams() ||
857 FTy->getParamType(i) == Args[i]->getType()) &&
858 "Invoking a function with a bad signature!");
859#endif
860
861 // Set operands in order of their index to match use-list-order
862 // prediction.
863 llvm::copy(Args, op_begin());
864 setNormalDest(IfNormal);
865 setUnwindDest(IfException);
867
868 auto It = populateBundleOperandInfos(Bundles, Args.size());
869 (void)It;
870 assert(It + 3 == op_end() && "Should add up!");
871
872 setName(NameStr);
873}
874
875InvokeInst::InvokeInst(const InvokeInst &II, AllocInfo AllocInfo)
876 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke, AllocInfo) {
877 assert(getNumOperands() == II.getNumOperands() &&
878 "Wrong number of operands allocated");
879 setCallingConv(II.getCallingConv());
880 std::copy(II.op_begin(), II.op_end(), op_begin());
881 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
883 SubclassOptionalData = II.SubclassOptionalData;
884}
885
887 InsertPosition InsertPt) {
888 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
889
890 auto *NewII = InvokeInst::Create(
891 II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
892 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
893 NewII->setCallingConv(II->getCallingConv());
894 NewII->SubclassOptionalData = II->SubclassOptionalData;
895 NewII->setAttributes(II->getAttributes());
896 NewII->setDebugLoc(II->getDebugLoc());
897 return NewII;
898}
899
901 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHIIt());
902}
903
905 if (T == 0) {
906 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
907 "div by 0. Ignoring. Likely the function "
908 << getParent()->getParent()->getName()
909 << " has 0 entry count, and contains call instructions "
910 "with non-zero prof info.");
911 return;
912 }
913 scaleProfData(*this, S, T);
914}
915
916//===----------------------------------------------------------------------===//
917// CallBrInst Implementation
918//===----------------------------------------------------------------------===//
919
920void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
921 ArrayRef<BasicBlock *> IndirectDests,
924 const Twine &NameStr) {
925 this->FTy = FTy;
926
927 assert(getNumOperands() == ComputeNumOperands(Args.size(),
928 IndirectDests.size(),
929 CountBundleInputs(Bundles)) &&
930 "NumOperands not set up?");
931
932#ifndef NDEBUG
933 assert(((Args.size() == FTy->getNumParams()) ||
934 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
935 "Calling a function with bad signature");
936
937 for (unsigned i = 0, e = Args.size(); i != e; i++)
938 assert((i >= FTy->getNumParams() ||
939 FTy->getParamType(i) == Args[i]->getType()) &&
940 "Calling a function with a bad signature!");
941#endif
942
943 // Set operands in order of their index to match use-list-order
944 // prediction.
945 llvm::copy(Args, op_begin());
946 NumIndirectDests = IndirectDests.size();
947 setDefaultDest(Fallthrough);
948 for (unsigned i = 0; i != NumIndirectDests; ++i)
949 setIndirectDest(i, IndirectDests[i]);
951
952 auto It = populateBundleOperandInfos(Bundles, Args.size());
953 (void)It;
954 assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
955
956 setName(NameStr);
957}
958
959CallBrInst::CallBrInst(const CallBrInst &CBI, AllocInfo AllocInfo)
960 : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
961 AllocInfo) {
963 "Wrong number of operands allocated");
965 std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
966 std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
969 NumIndirectDests = CBI.NumIndirectDests;
970}
971
972CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
973 InsertPosition InsertPt) {
974 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
975
976 auto *NewCBI = CallBrInst::Create(
977 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
978 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
979 NewCBI->setCallingConv(CBI->getCallingConv());
980 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
981 NewCBI->setAttributes(CBI->getAttributes());
982 NewCBI->setDebugLoc(CBI->getDebugLoc());
983 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
984 return NewCBI;
985}
986
987//===----------------------------------------------------------------------===//
988// ReturnInst Implementation
989//===----------------------------------------------------------------------===//
990
991ReturnInst::ReturnInst(const ReturnInst &RI, AllocInfo AllocInfo)
992 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
993 AllocInfo) {
995 "Wrong number of operands allocated");
996 if (RI.getNumOperands())
997 Op<0>() = RI.Op<0>();
999}
1000
1001ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, AllocInfo AllocInfo,
1002 InsertPosition InsertBefore)
1003 : Instruction(Type::getVoidTy(C), Instruction::Ret, AllocInfo,
1004 InsertBefore) {
1005 if (retVal)
1006 Op<0>() = retVal;
1007}
1008
1009//===----------------------------------------------------------------------===//
1010// ResumeInst Implementation
1011//===----------------------------------------------------------------------===//
1012
1013ResumeInst::ResumeInst(const ResumeInst &RI)
1014 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
1015 AllocMarker) {
1016 Op<0>() = RI.Op<0>();
1017}
1018
1019ResumeInst::ResumeInst(Value *Exn, InsertPosition InsertBefore)
1020 : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
1021 AllocMarker, InsertBefore) {
1022 Op<0>() = Exn;
1023}
1024
1025//===----------------------------------------------------------------------===//
1026// CleanupReturnInst Implementation
1027//===----------------------------------------------------------------------===//
1028
1029CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI,
1031 : Instruction(CRI.getType(), Instruction::CleanupRet, AllocInfo) {
1033 "Wrong number of operands allocated");
1034 setSubclassData<Instruction::OpaqueField>(
1036 Op<0>() = CRI.Op<0>();
1037 if (CRI.hasUnwindDest())
1038 Op<1>() = CRI.Op<1>();
1039}
1040
1041void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
1042 if (UnwindBB)
1043 setSubclassData<UnwindDestField>(true);
1044
1045 Op<0>() = CleanupPad;
1046 if (UnwindBB)
1047 Op<1>() = UnwindBB;
1048}
1049
1050CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1052 InsertPosition InsertBefore)
1053 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1054 Instruction::CleanupRet, AllocInfo, InsertBefore) {
1055 init(CleanupPad, UnwindBB);
1056}
1057
1058//===----------------------------------------------------------------------===//
1059// CatchReturnInst Implementation
1060//===----------------------------------------------------------------------===//
1061void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
1062 Op<0>() = CatchPad;
1063 Op<1>() = BB;
1064}
1065
1066CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
1067 : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
1068 AllocMarker) {
1069 Op<0>() = CRI.Op<0>();
1070 Op<1>() = CRI.Op<1>();
1071}
1072
1073CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
1074 InsertPosition InsertBefore)
1075 : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
1076 AllocMarker, InsertBefore) {
1077 init(CatchPad, BB);
1078}
1079
1080//===----------------------------------------------------------------------===//
1081// CatchSwitchInst Implementation
1082//===----------------------------------------------------------------------===//
1083
1084CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1085 unsigned NumReservedValues,
1086 const Twine &NameStr,
1087 InsertPosition InsertBefore)
1088 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, AllocMarker,
1089 InsertBefore) {
1090 if (UnwindDest)
1091 ++NumReservedValues;
1092 init(ParentPad, UnwindDest, NumReservedValues + 1);
1093 setName(NameStr);
1094}
1095
1096CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
1097 : Instruction(CSI.getType(), Instruction::CatchSwitch, AllocMarker) {
1099 init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
1100 setNumHungOffUseOperands(ReservedSpace);
1101 Use *OL = getOperandList();
1102 const Use *InOL = CSI.getOperandList();
1103 for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
1104 OL[I] = InOL[I];
1105}
1106
1107void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
1108 unsigned NumReservedValues) {
1109 assert(ParentPad && NumReservedValues);
1110
1111 ReservedSpace = NumReservedValues;
1112 setNumHungOffUseOperands(UnwindDest ? 2 : 1);
1113 allocHungoffUses(ReservedSpace);
1114
1115 Op<0>() = ParentPad;
1116 if (UnwindDest) {
1118 setUnwindDest(UnwindDest);
1119 }
1120}
1121
1122/// growOperands - grow operands - This grows the operand list in response to a
1123/// push_back style of operation. This grows the number of ops by 2 times.
1124void CatchSwitchInst::growOperands(unsigned Size) {
1125 unsigned NumOperands = getNumOperands();
1126 assert(NumOperands >= 1);
1127 if (ReservedSpace >= NumOperands + Size)
1128 return;
1129 ReservedSpace = (NumOperands + Size / 2) * 2;
1130 growHungoffUses(ReservedSpace);
1131}
1132
1134 unsigned OpNo = getNumOperands();
1135 growOperands(1);
1136 assert(OpNo < ReservedSpace && "Growing didn't work!");
1138 getOperandList()[OpNo] = Handler;
1139}
1140
1142 // Move all subsequent handlers up one.
1143 Use *EndDst = op_end() - 1;
1144 for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
1145 *CurDst = *(CurDst + 1);
1146 // Null out the last handler use.
1147 *EndDst = nullptr;
1148
1150}
1151
1152//===----------------------------------------------------------------------===//
1153// FuncletPadInst Implementation
1154//===----------------------------------------------------------------------===//
1155void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
1156 const Twine &NameStr) {
1157 assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
1158 llvm::copy(Args, op_begin());
1159 setParentPad(ParentPad);
1160 setName(NameStr);
1161}
1162
1163FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI, AllocInfo AllocInfo)
1164 : Instruction(FPI.getType(), FPI.getOpcode(), AllocInfo) {
1166 "Wrong number of operands allocated");
1167 std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
1169}
1170
1171FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1173 const Twine &NameStr,
1174 InsertPosition InsertBefore)
1175 : Instruction(ParentPad->getType(), Op, AllocInfo, InsertBefore) {
1176 init(ParentPad, Args, NameStr);
1177}
1178
1179//===----------------------------------------------------------------------===//
1180// UnreachableInst Implementation
1181//===----------------------------------------------------------------------===//
1182
1184 InsertPosition InsertBefore)
1185 : Instruction(Type::getVoidTy(Context), Instruction::Unreachable,
1186 AllocMarker, InsertBefore) {}
1187
1188//===----------------------------------------------------------------------===//
1189// UncondBrInst Implementation
1190//===----------------------------------------------------------------------===//
1191
1192// Suppress deprecation warnings from BranchInst.
1194
1195UncondBrInst::UncondBrInst(BasicBlock *IfTrue, InsertPosition InsertBefore)
1196 : BranchInst(Type::getVoidTy(IfTrue->getContext()), Instruction::UncondBr,
1197 AllocMarker, InsertBefore) {
1198 assert(IfTrue && "Branch destination may not be null!");
1199 Op<-1>() = IfTrue;
1200}
1201
1202UncondBrInst::UncondBrInst(const UncondBrInst &BI)
1203 : BranchInst(Type::getVoidTy(BI.getContext()), Instruction::UncondBr,
1204 AllocMarker) {
1205 Op<-1>() = BI.Op<-1>();
1206 SubclassOptionalData = BI.SubclassOptionalData;
1207}
1208
1209//===----------------------------------------------------------------------===//
1210// CondBrInst Implementation
1211//===----------------------------------------------------------------------===//
1212
1213void CondBrInst::AssertOK() {
1214 assert(getCondition()->getType()->isIntegerTy(1) &&
1215 "May only branch on boolean predicates!");
1216}
1217
1218CondBrInst::CondBrInst(Value *Cond, BasicBlock *IfTrue, BasicBlock *IfFalse,
1219 InsertPosition InsertBefore)
1220 : BranchInst(Type::getVoidTy(IfTrue->getContext()), Instruction::CondBr,
1221 AllocMarker, InsertBefore) {
1222 // Assign in order of operand index to make use-list order predictable.
1223 Op<-3>() = Cond;
1224 Op<-2>() = IfTrue;
1225 Op<-1>() = IfFalse;
1226#ifndef NDEBUG
1227 AssertOK();
1228#endif
1229}
1230
1231CondBrInst::CondBrInst(const CondBrInst &BI)
1232 : BranchInst(Type::getVoidTy(BI.getContext()), Instruction::CondBr,
1233 AllocMarker) {
1234 // Assign in order of operand index to make use-list order predictable.
1235 Op<-3>() = BI.Op<-3>();
1236 Op<-2>() = BI.Op<-2>();
1237 Op<-1>() = BI.Op<-1>();
1238 SubclassOptionalData = BI.SubclassOptionalData;
1239}
1240
1242 Op<-1>().swap(Op<-2>());
1243
1244 // Update profile metadata if present and it matches our structural
1245 // expectations.
1246 swapProfMetadata();
1247}
1248
1249// Suppress deprecation warnings from BranchInst.
1251
1252//===----------------------------------------------------------------------===//
1253// AllocaInst Implementation
1254//===----------------------------------------------------------------------===//
1255
1256static Value *getAISize(LLVMContext &Context, Value *Amt) {
1257 if (!Amt)
1258 Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1259 else {
1260 assert(!isa<BasicBlock>(Amt) &&
1261 "Passed basic block into allocation size parameter! Use other ctor");
1262 assert(Amt->getType()->isIntegerTy() &&
1263 "Allocation array size is not an integer!");
1264 }
1265 return Amt;
1266}
1267
1269 assert(Pos.isValid() &&
1270 "Insertion position cannot be null when alignment not provided!");
1271 BasicBlock *BB = Pos.getBasicBlock();
1272 assert(BB->getParent() &&
1273 "BB must be in a Function when alignment not provided!");
1274 const DataLayout &DL = BB->getDataLayout();
1275 return DL.getPrefTypeAlign(Ty);
1276}
1277
1278AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
1279 InsertPosition InsertBefore)
1280 : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1281
1282AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1283 const Twine &Name, InsertPosition InsertBefore)
1284 : AllocaInst(Ty, AddrSpace, ArraySize,
1285 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
1286 InsertBefore) {}
1287
1288AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1289 Align Align, const Twine &Name,
1290 InsertPosition InsertBefore)
1291 : UnaryInstruction(PointerType::get(Ty->getContext(), AddrSpace), Alloca,
1292 getAISize(Ty->getContext(), ArraySize), InsertBefore),
1293 AllocatedType(Ty) {
1295 assert(!Ty->isVoidTy() && "Cannot allocate void!");
1296 setName(Name);
1297}
1298
1301 return !CI->isOne();
1302 return true;
1303}
1304
1305/// isStaticAlloca - Return true if this alloca is in the entry block of the
1306/// function and is a constant size. If so, the code generator will fold it
1307/// into the prolog/epilog code, so it is basically free.
1309 // Must be constant size.
1310 if (!isa<ConstantInt>(getArraySize())) return false;
1311
1312 // Must be in the entry block.
1313 const BasicBlock *Parent = getParent();
1314 return Parent->isEntryBlock() && !isUsedWithInAlloca();
1315}
1316
1317//===----------------------------------------------------------------------===//
1318// LoadInst Implementation
1319//===----------------------------------------------------------------------===//
1320
1321void LoadInst::AssertOK() {
1323 "Ptr must have pointer type.");
1324}
1325
1327 assert(Pos.isValid() &&
1328 "Insertion position cannot be null when alignment not provided!");
1329 BasicBlock *BB = Pos.getBasicBlock();
1330 assert(BB->getParent() &&
1331 "BB must be in a Function when alignment not provided!");
1332 const DataLayout &DL = BB->getDataLayout();
1333 return DL.getABITypeAlign(Ty);
1334}
1335
1336LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
1337 InsertPosition InsertBef)
1338 : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1339
1340LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1341 InsertPosition InsertBef)
1342 : LoadInst(Ty, Ptr, Name, isVolatile,
1343 computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1344
1345LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1346 Align Align, InsertPosition InsertBef)
1347 : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1348 SyncScope::System, InsertBef) {}
1349
1350LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1352 InsertPosition InsertBef)
1353 : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1356 setAtomic(Order, SSID);
1357 AssertOK();
1358 setName(Name);
1359}
1360
1361//===----------------------------------------------------------------------===//
1362// StoreInst Implementation
1363//===----------------------------------------------------------------------===//
1364
1365void StoreInst::AssertOK() {
1366 assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1368 "Ptr must have pointer type!");
1369}
1370
1372 : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1373
1375 InsertPosition InsertBefore)
1376 : StoreInst(val, addr, isVolatile,
1377 computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
1378 InsertBefore) {}
1379
1381 InsertPosition InsertBefore)
1383 SyncScope::System, InsertBefore) {}
1384
1386 AtomicOrdering Order, SyncScope::ID SSID,
1387 InsertPosition InsertBefore)
1388 : Instruction(Type::getVoidTy(val->getContext()), Store, AllocMarker,
1389 InsertBefore) {
1390 Op<0>() = val;
1391 Op<1>() = addr;
1394 setAtomic(Order, SSID);
1395 AssertOK();
1396}
1397
1398//===----------------------------------------------------------------------===//
1399// AtomicCmpXchgInst Implementation
1400//===----------------------------------------------------------------------===//
1401
1402void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1403 Align Alignment, AtomicOrdering SuccessOrdering,
1404 AtomicOrdering FailureOrdering,
1405 SyncScope::ID SSID) {
1406 Op<0>() = Ptr;
1407 Op<1>() = Cmp;
1408 Op<2>() = NewVal;
1409 setSuccessOrdering(SuccessOrdering);
1410 setFailureOrdering(FailureOrdering);
1411 setSyncScopeID(SSID);
1412 setAlignment(Alignment);
1413
1414 assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1415 "All operands must be non-null!");
1417 "Ptr must have pointer type!");
1418 assert(getOperand(1)->getType() == getOperand(2)->getType() &&
1419 "Cmp type and NewVal type must be same!");
1420}
1421
1423 Align Alignment,
1424 AtomicOrdering SuccessOrdering,
1425 AtomicOrdering FailureOrdering,
1426 SyncScope::ID SSID,
1427 InsertPosition InsertBefore)
1428 : Instruction(
1429 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1430 AtomicCmpXchg, AllocMarker, InsertBefore) {
1431 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1432}
1433
1434//===----------------------------------------------------------------------===//
1435// AtomicRMWInst Implementation
1436//===----------------------------------------------------------------------===//
1437
1438void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1439 Align Alignment, AtomicOrdering Ordering,
1440 SyncScope::ID SSID) {
1441 assert(Ordering != AtomicOrdering::NotAtomic &&
1442 "atomicrmw instructions can only be atomic.");
1443 assert(Ordering != AtomicOrdering::Unordered &&
1444 "atomicrmw instructions cannot be unordered.");
1445 Op<0>() = Ptr;
1446 Op<1>() = Val;
1448 setOrdering(Ordering);
1449 setSyncScopeID(SSID);
1450 setAlignment(Alignment);
1451
1452 assert(getOperand(0) && getOperand(1) && "All operands must be non-null!");
1454 "Ptr must have pointer type!");
1455 assert(Ordering != AtomicOrdering::NotAtomic &&
1456 "AtomicRMW instructions must be atomic!");
1457}
1458
1460 Align Alignment, AtomicOrdering Ordering,
1461 SyncScope::ID SSID, InsertPosition InsertBefore)
1462 : Instruction(Val->getType(), AtomicRMW, AllocMarker, InsertBefore) {
1463 Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1464}
1465
1467 switch (Op) {
1469 return "xchg";
1470 case AtomicRMWInst::Add:
1471 return "add";
1472 case AtomicRMWInst::Sub:
1473 return "sub";
1474 case AtomicRMWInst::And:
1475 return "and";
1477 return "nand";
1478 case AtomicRMWInst::Or:
1479 return "or";
1480 case AtomicRMWInst::Xor:
1481 return "xor";
1482 case AtomicRMWInst::Max:
1483 return "max";
1484 case AtomicRMWInst::Min:
1485 return "min";
1487 return "umax";
1489 return "umin";
1491 return "fadd";
1493 return "fsub";
1495 return "fmax";
1497 return "fmin";
1499 return "fmaximum";
1501 return "fminimum";
1503 return "fmaximumnum";
1505 return "fminimumnum";
1507 return "uinc_wrap";
1509 return "udec_wrap";
1511 return "usub_cond";
1513 return "usub_sat";
1515 return "<invalid operation>";
1516 }
1517
1518 llvm_unreachable("invalid atomicrmw operation");
1519}
1520
1521//===----------------------------------------------------------------------===//
1522// FenceInst Implementation
1523//===----------------------------------------------------------------------===//
1524
1526 SyncScope::ID SSID, InsertPosition InsertBefore)
1527 : Instruction(Type::getVoidTy(C), Fence, AllocMarker, InsertBefore) {
1528 setOrdering(Ordering);
1529 setSyncScopeID(SSID);
1530}
1531
1532//===----------------------------------------------------------------------===//
1533// GetElementPtrInst Implementation
1534//===----------------------------------------------------------------------===//
1535
1536void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1537 const Twine &Name) {
1538 assert(getNumOperands() == 1 + IdxList.size() &&
1539 "NumOperands not initialized?");
1540 Op<0>() = Ptr;
1541 llvm::copy(IdxList, op_begin() + 1);
1542 setName(Name);
1543}
1544
1545GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI,
1547 : Instruction(GEPI.getType(), GetElementPtr, AllocInfo),
1548 SourceElementType(GEPI.SourceElementType),
1549 ResultElementType(GEPI.ResultElementType) {
1550 assert(getNumOperands() == GEPI.getNumOperands() &&
1551 "Wrong number of operands allocated");
1552 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
1554}
1555
1557 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1558 if (!Struct->indexValid(Idx))
1559 return nullptr;
1560 return Struct->getTypeAtIndex(Idx);
1561 }
1562 if (!Idx->getType()->isIntOrIntVectorTy())
1563 return nullptr;
1564 if (auto *Array = dyn_cast<ArrayType>(Ty))
1565 return Array->getElementType();
1566 if (auto *Vector = dyn_cast<VectorType>(Ty))
1567 return Vector->getElementType();
1568 return nullptr;
1569}
1570
1572 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1573 if (Idx >= Struct->getNumElements())
1574 return nullptr;
1575 return Struct->getElementType(Idx);
1576 }
1577 if (auto *Array = dyn_cast<ArrayType>(Ty))
1578 return Array->getElementType();
1579 if (auto *Vector = dyn_cast<VectorType>(Ty))
1580 return Vector->getElementType();
1581 return nullptr;
1582}
1583
1584template <typename IndexTy>
1586 if (IdxList.empty())
1587 return Ty;
1588 for (IndexTy V : IdxList.slice(1)) {
1590 if (!Ty)
1591 return Ty;
1592 }
1593 return Ty;
1594}
1595
1599
1601 ArrayRef<Constant *> IdxList) {
1602 return getIndexedTypeInternal(Ty, IdxList);
1603}
1604
1608
1609/// hasAllZeroIndices - Return true if all of the indices of this GEP are
1610/// zeros. If so, the result pointer and the first operand have the same
1611/// value, just potentially different types.
1613 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1615 if (!CI->isZero()) return false;
1616 } else {
1617 return false;
1618 }
1619 }
1620 return true;
1621}
1622
1623/// hasAllConstantIndices - Return true if all of the indices of this GEP are
1624/// constant integers. If so, the result pointer and the first operand have
1625/// a constant offset between them.
1627 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1629 return false;
1630 }
1631 return true;
1632}
1633
1637
1639 GEPNoWrapFlags NW = cast<GEPOperator>(this)->getNoWrapFlags();
1640 if (B)
1642 else
1643 NW = NW.withoutInBounds();
1644 setNoWrapFlags(NW);
1645}
1646
1648 return cast<GEPOperator>(this)->getNoWrapFlags();
1649}
1650
1652 return cast<GEPOperator>(this)->isInBounds();
1653}
1654
1656 return cast<GEPOperator>(this)->hasNoUnsignedSignedWrap();
1657}
1658
1660 return cast<GEPOperator>(this)->hasNoUnsignedWrap();
1661}
1662
1664 APInt &Offset) const {
1665 // Delegate to the generic GEPOperator implementation.
1666 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
1667}
1668
1670 const DataLayout &DL, unsigned BitWidth,
1671 SmallMapVector<Value *, APInt, 4> &VariableOffsets,
1672 APInt &ConstantOffset) const {
1673 // Delegate to the generic GEPOperator implementation.
1674 return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
1675 ConstantOffset);
1676}
1677
1678//===----------------------------------------------------------------------===//
1679// ExtractElementInst Implementation
1680//===----------------------------------------------------------------------===//
1681
1682ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1683 const Twine &Name,
1684 InsertPosition InsertBef)
1685 : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1686 ExtractElement, AllocMarker, InsertBef) {
1687 assert(isValidOperands(Val, Index) &&
1688 "Invalid extractelement instruction operands!");
1689 Op<0>() = Val;
1690 Op<1>() = Index;
1691 setName(Name);
1692}
1693
1694bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1695 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1696 return false;
1697 return true;
1698}
1699
1700//===----------------------------------------------------------------------===//
1701// InsertElementInst Implementation
1702//===----------------------------------------------------------------------===//
1703
1704InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1705 const Twine &Name,
1706 InsertPosition InsertBef)
1707 : Instruction(Vec->getType(), InsertElement, AllocMarker, InsertBef) {
1708 assert(isValidOperands(Vec, Elt, Index) &&
1709 "Invalid insertelement instruction operands!");
1710 Op<0>() = Vec;
1711 Op<1>() = Elt;
1712 Op<2>() = Index;
1713 setName(Name);
1714}
1715
1717 const Value *Index) {
1718 if (!Vec->getType()->isVectorTy())
1719 return false; // First operand of insertelement must be vector type.
1720
1721 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
1722 return false;// Second operand of insertelement must be vector element type.
1723
1724 if (!Index->getType()->isIntegerTy())
1725 return false; // Third operand of insertelement must be i32.
1726 return true;
1727}
1728
1729//===----------------------------------------------------------------------===//
1730// ShuffleVectorInst Implementation
1731//===----------------------------------------------------------------------===//
1732
1734 assert(V && "Cannot create placeholder of nullptr V");
1735 return PoisonValue::get(V->getType());
1736}
1737
1739 InsertPosition InsertBefore)
1741 InsertBefore) {}
1742
1744 const Twine &Name,
1745 InsertPosition InsertBefore)
1747 InsertBefore) {}
1748
1750 const Twine &Name,
1751 InsertPosition InsertBefore)
1752 : Instruction(
1753 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1754 cast<VectorType>(Mask->getType())->getElementCount()),
1755 ShuffleVector, AllocMarker, InsertBefore) {
1756 assert(isValidOperands(V1, V2, Mask) &&
1757 "Invalid shuffle vector instruction operands!");
1758
1759 Op<0>() = V1;
1760 Op<1>() = V2;
1761 SmallVector<int, 16> MaskArr;
1762 getShuffleMask(cast<Constant>(Mask), MaskArr);
1763 setShuffleMask(MaskArr);
1764 setName(Name);
1765}
1766
1768 const Twine &Name,
1769 InsertPosition InsertBefore)
1770 : Instruction(
1771 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1772 Mask.size(), isa<ScalableVectorType>(V1->getType())),
1773 ShuffleVector, AllocMarker, InsertBefore) {
1774 assert(isValidOperands(V1, V2, Mask) &&
1775 "Invalid shuffle vector instruction operands!");
1776 Op<0>() = V1;
1777 Op<1>() = V2;
1778 setShuffleMask(Mask);
1779 setName(Name);
1780}
1781
1783 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
1784 int NumMaskElts = ShuffleMask.size();
1785 SmallVector<int, 16> NewMask(NumMaskElts);
1786 for (int i = 0; i != NumMaskElts; ++i) {
1787 int MaskElt = getMaskValue(i);
1788 if (MaskElt == PoisonMaskElem) {
1789 NewMask[i] = PoisonMaskElem;
1790 continue;
1791 }
1792 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
1793 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
1794 NewMask[i] = MaskElt;
1795 }
1796 setShuffleMask(NewMask);
1797 Op<0>().swap(Op<1>());
1798}
1799
1801 ArrayRef<int> Mask) {
1802 // V1 and V2 must be vectors of the same type.
1803 if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
1804 return false;
1805
1806 // Make sure the mask elements make sense.
1807 int V1Size =
1808 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
1809 for (int Elem : Mask)
1810 if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
1811 return false;
1812
1814 if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
1815 return false;
1816
1817 return true;
1818}
1819
1821 const Value *Mask) {
1822 // V1 and V2 must be vectors of the same type.
1823 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
1824 return false;
1825
1826 // Mask must be vector of i32, and must be the same kind of vector as the
1827 // input vectors
1828 auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
1829 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
1831 return false;
1832
1833 // Check to see if Mask is valid.
1835 return true;
1836
1837 // NOTE: Through vector ConstantInt we have the potential to support more
1838 // than just zero splat masks but that requires a LangRef change.
1839 if (isa<ScalableVectorType>(MaskTy))
1840 return false;
1841
1842 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
1843
1844 if (const auto *CI = dyn_cast<ConstantInt>(Mask))
1845 return !CI->uge(V1Size * 2);
1846
1847 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
1848 for (Value *Op : MV->operands()) {
1849 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1850 if (CI->uge(V1Size*2))
1851 return false;
1852 } else if (!isa<UndefValue>(Op)) {
1853 return false;
1854 }
1855 }
1856 return true;
1857 }
1858
1859 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1860 for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
1861 i != e; ++i)
1862 if (CDS->getElementAsInteger(i) >= V1Size*2)
1863 return false;
1864 return true;
1865 }
1866
1867 return false;
1868}
1869
1871 SmallVectorImpl<int> &Result) {
1872 ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
1873
1874 if (isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) {
1875 int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
1876 Result.append(EC.getKnownMinValue(), MaskVal);
1877 return;
1878 }
1879
1880 assert(!EC.isScalable() &&
1881 "Scalable vector shuffle mask must be undef or zeroinitializer");
1882
1883 unsigned NumElts = EC.getFixedValue();
1884
1885 Result.reserve(NumElts);
1886
1887 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1888 for (unsigned i = 0; i != NumElts; ++i)
1889 Result.push_back(CDS->getElementAsInteger(i));
1890 return;
1891 }
1892 for (unsigned i = 0; i != NumElts; ++i) {
1893 Constant *C = Mask->getAggregateElement(i);
1894 Result.push_back(isa<UndefValue>(C) ? -1 :
1895 cast<ConstantInt>(C)->getZExtValue());
1896 }
1897}
1898
1900 ShuffleMask.assign(Mask.begin(), Mask.end());
1901 ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
1902}
1903
1905 Type *ResultTy) {
1906 Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
1907 if (isa<ScalableVectorType>(ResultTy)) {
1908 assert(all_equal(Mask) && "Unexpected shuffle");
1909 Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
1910 if (Mask[0] == 0)
1911 return Constant::getNullValue(VecTy);
1912 return PoisonValue::get(VecTy);
1913 }
1915 for (int Elem : Mask) {
1916 if (Elem == PoisonMaskElem)
1918 else
1919 MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
1920 }
1921 return ConstantVector::get(MaskConst);
1922}
1923
1924static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1925 assert(!Mask.empty() && "Shuffle mask must contain elements");
1926 bool UsesLHS = false;
1927 bool UsesRHS = false;
1928 for (int I : Mask) {
1929 if (I == -1)
1930 continue;
1931 assert(I >= 0 && I < (NumOpElts * 2) &&
1932 "Out-of-bounds shuffle mask element");
1933 UsesLHS |= (I < NumOpElts);
1934 UsesRHS |= (I >= NumOpElts);
1935 if (UsesLHS && UsesRHS)
1936 return false;
1937 }
1938 // Allow for degenerate case: completely undef mask means neither source is used.
1939 return UsesLHS || UsesRHS;
1940}
1941
1943 // We don't have vector operand size information, so assume operands are the
1944 // same size as the mask.
1945 return isSingleSourceMaskImpl(Mask, NumSrcElts);
1946}
1947
1948static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1949 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
1950 return false;
1951 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1952 if (Mask[i] == -1)
1953 continue;
1954 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1955 return false;
1956 }
1957 return true;
1958}
1959
1961 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1962 return false;
1963 // We don't have vector operand size information, so assume operands are the
1964 // same size as the mask.
1965 return isIdentityMaskImpl(Mask, NumSrcElts);
1966}
1967
1969 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1970 return false;
1971 if (!isSingleSourceMask(Mask, NumSrcElts))
1972 return false;
1973
1974 // The number of elements in the mask must be at least 2.
1975 if (NumSrcElts < 2)
1976 return false;
1977
1978 for (int I = 0, E = Mask.size(); I < E; ++I) {
1979 if (Mask[I] == -1)
1980 continue;
1981 if (Mask[I] != (NumSrcElts - 1 - I) &&
1982 Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
1983 return false;
1984 }
1985 return true;
1986}
1987
1989 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1990 return false;
1991 if (!isSingleSourceMask(Mask, NumSrcElts))
1992 return false;
1993 for (int I = 0, E = Mask.size(); I < E; ++I) {
1994 if (Mask[I] == -1)
1995 continue;
1996 if (Mask[I] != 0 && Mask[I] != NumSrcElts)
1997 return false;
1998 }
1999 return true;
2000}
2001
2003 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2004 return false;
2005 // Select is differentiated from identity. It requires using both sources.
2006 if (isSingleSourceMask(Mask, NumSrcElts))
2007 return false;
2008 for (int I = 0, E = Mask.size(); I < E; ++I) {
2009 if (Mask[I] == -1)
2010 continue;
2011 if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
2012 return false;
2013 }
2014 return true;
2015}
2016
2018 // Example masks that will return true:
2019 // v1 = <a, b, c, d>
2020 // v2 = <e, f, g, h>
2021 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
2022 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
2023
2024 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2025 return false;
2026 // 1. The number of elements in the mask must be a power-of-2 and at least 2.
2027 int Sz = Mask.size();
2028 if (Sz < 2 || !isPowerOf2_32(Sz))
2029 return false;
2030
2031 // 2. The first element of the mask must be either a 0 or a 1.
2032 if (Mask[0] != 0 && Mask[0] != 1)
2033 return false;
2034
2035 // 3. The difference between the first 2 elements must be equal to the
2036 // number of elements in the mask.
2037 if ((Mask[1] - Mask[0]) != NumSrcElts)
2038 return false;
2039
2040 // 4. The difference between consecutive even-numbered and odd-numbered
2041 // elements must be equal to 2.
2042 for (int I = 2; I < Sz; ++I) {
2043 int MaskEltVal = Mask[I];
2044 if (MaskEltVal == -1)
2045 return false;
2046 int MaskEltPrevVal = Mask[I - 2];
2047 if (MaskEltVal - MaskEltPrevVal != 2)
2048 return false;
2049 }
2050 return true;
2051}
2052
2054 int &Index) {
2055 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2056 return false;
2057 // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2058 int StartIndex = -1;
2059 for (int I = 0, E = Mask.size(); I != E; ++I) {
2060 int MaskEltVal = Mask[I];
2061 if (MaskEltVal == -1)
2062 continue;
2063
2064 if (StartIndex == -1) {
2065 // Don't support a StartIndex that begins in the second input, or if the
2066 // first non-undef index would access below the StartIndex.
2067 if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
2068 return false;
2069
2070 StartIndex = MaskEltVal - I;
2071 continue;
2072 }
2073
2074 // Splice is sequential starting from StartIndex.
2075 if (MaskEltVal != (StartIndex + I))
2076 return false;
2077 }
2078
2079 if (StartIndex == -1)
2080 return false;
2081
2082 // NOTE: This accepts StartIndex == 0 (COPY).
2083 Index = StartIndex;
2084 return true;
2085}
2086
2088 int NumSrcElts, int &Index) {
2089 // Must extract from a single source.
2090 if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
2091 return false;
2092
2093 // Must be smaller (else this is an Identity shuffle).
2094 if (NumSrcElts <= (int)Mask.size())
2095 return false;
2096
2097 // Find start of extraction, accounting that we may start with an UNDEF.
2098 int SubIndex = -1;
2099 for (int i = 0, e = Mask.size(); i != e; ++i) {
2100 int M = Mask[i];
2101 if (M < 0)
2102 continue;
2103 int Offset = (M % NumSrcElts) - i;
2104 if (0 <= SubIndex && SubIndex != Offset)
2105 return false;
2106 SubIndex = Offset;
2107 }
2108
2109 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2110 Index = SubIndex;
2111 return true;
2112 }
2113 return false;
2114}
2115
// NOTE(review): the generated listing dropped the signature line here
// (listing line 2116). From the parameter tail and the out-parameters this
// appears to be ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,
// int NumSrcElts, int &NumSubElts, int &Index) — confirm against the header.
// Matches a 2-source mask in which one source is kept in place (identity)
// and a contiguous run of elements from the other source is inserted over
// it; on success reports the run length (NumSubElts) and start (Index).
2117 int NumSrcElts, int &NumSubElts,
2118 int &Index) {
2119 int NumMaskElts = Mask.size();
2120
2121 // Don't try to match if we're shuffling to a smaller size.
2122 if (NumMaskElts < NumSrcElts)
2123 return false;
2124
2125 // TODO: We don't recognize self-insertion/widening.
2126 if (isSingleSourceMaskImpl(Mask, NumSrcElts))
2127 return false;
2128
2129 // Determine which mask elements are attributed to which source.
2130 APInt UndefElts = APInt::getZero(NumMaskElts);
2131 APInt Src0Elts = APInt::getZero(NumMaskElts);
2132 APInt Src1Elts = APInt::getZero(NumMaskElts);
2133 bool Src0Identity = true;
2134 bool Src1Identity = true;
2135
// Classify every mask element as undef, src0 (index < NumSrcElts) or src1,
// tracking whether each source's elements all sit at their identity position.
2136 for (int i = 0; i != NumMaskElts; ++i) {
2137 int M = Mask[i];
2138 if (M < 0) {
2139 UndefElts.setBit(i);
2140 continue;
2141 }
2142 if (M < NumSrcElts) {
2143 Src0Elts.setBit(i);
2144 Src0Identity &= (M == i);
2145 continue;
2146 }
2147 Src1Elts.setBit(i);
2148 Src1Identity &= (M == (i + NumSrcElts));
2149 }
2150 assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2151 "unknown shuffle elements");
2152 assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
2153 "2-source shuffle not found");
2154
2155 // Determine lo/hi span ranges.
2156 // TODO: How should we handle undefs at the start of subvector insertions?
2157 int Src0Lo = Src0Elts.countr_zero();
2158 int Src1Lo = Src1Elts.countr_zero();
2159 int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
2160 int Src1Hi = NumMaskElts - Src1Elts.countl_zero();
2161
2162 // If src0 is in place, see if the src1 elements is inplace within its own
2163 // span.
2164 if (Src0Identity) {
2165 int NumSub1Elts = Src1Hi - Src1Lo;
2166 ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
2167 if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
2168 NumSubElts = NumSub1Elts;
2169 Index = Src1Lo;
2170 return true;
2171 }
2172 }
2173
2174 // If src1 is in place, see if the src0 elements is inplace within its own
2175 // span.
2176 if (Src1Identity) {
2177 int NumSub0Elts = Src0Hi - Src0Lo;
2178 ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
2179 if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
2180 NumSubElts = NumSub0Elts;
2181 Index = Src0Lo;
2182 return true;
2183 }
2184 }
2185
2186 return false;
2187}
2188
// NOTE(review): listing dropped the signature line (2189); presumably
// bool ShuffleVectorInst::isIdentityWithPadding() const — confirm.
// Returns true when the mask is an identity of the (narrower) operand
// widened with trailing undef elements.
2190 // FIXME: Not currently possible to express a shuffle mask for a scalable
2191 // vector for this case.
// NOTE(review): the guard expression on listing line 2192 was dropped by the
// renderer; it presumably tests for a scalable result type — confirm.
2193 return false;
2194
2195 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2196 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
// Must actually be widening, otherwise this is plain identity/extract.
2197 if (NumMaskElts <= NumOpElts)
2198 return false;
2199
2200 // The first part of the mask must choose elements from exactly 1 source op.
// NOTE(review): listing line 2201 (the local Mask declaration, presumably
// taken from getShuffleMask()) was dropped by the renderer — confirm.
2202 if (!isIdentityMaskImpl(Mask, NumOpElts))
2203 return false;
2204
2205 // All extending must be with undef elements.
2206 for (int i = NumOpElts; i < NumMaskElts; ++i)
2207 if (Mask[i] != -1)
2208 return false;
2209
2210 return true;
2211}
2212
// NOTE(review): listing dropped the signature line (2213); presumably
// bool ShuffleVectorInst::isIdentityWithExtract() const — confirm.
// Returns true when the mask extracts a leading identity subvector
// (result narrower than the operand, elements in place).
2214 // FIXME: Not currently possible to express a shuffle mask for a scalable
2215 // vector for this case.
// NOTE(review): guard expression on listing line 2216 dropped by the
// renderer; presumably a scalable-vector early-out — confirm.
2217 return false;
2218
2219 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2220 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
// Must be narrowing for an "extract".
2221 if (NumMaskElts >= NumOpElts)
2222 return false;
2223
2224 return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2225}
2226
// NOTE(review): listing dropped the signature line (2227); presumably
// bool ShuffleVectorInst::isConcat() const — confirm.
// Returns true when the shuffle concatenates its two operands: the result
// is twice as wide and the mask is <0, 1, ..., 2*NumOpElts-1>.
2228 // Vector concatenation is differentiated from identity with padding.
// NOTE(review): the condition on listing line 2229 was dropped by the
// renderer (presumably a check distinguishing this from padding, e.g. an
// undef-operand test) — confirm against the real source.
2230 return false;
2231
2232 // FIXME: Not currently possible to express a shuffle mask for a scalable
2233 // vector for this case.
// NOTE(review): guard expression on listing line 2234 dropped — presumably
// the scalable-vector early-out.
2235 return false;
2236
2237 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2238 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2239 if (NumMaskElts != NumOpElts * 2)
2240 return false;
2241
2242 // Use the mask length rather than the operands' vector lengths here. We
2243 // already know that the shuffle returns a vector twice as long as the inputs,
2244 // and neither of the inputs are undef vectors. If the mask picks consecutive
2245 // elements from both inputs, then this is a concatenation of the inputs.
2246 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2247}
2248
// NOTE(review): listing dropped the signature start (2249); presumably
// static bool isReplicationMaskWithParams(ArrayRef<int> Mask, ...) — confirm.
// Verifies that Mask replicates each of VF source elements exactly
// ReplicationFactor times in order: <0,...,0, 1,...,1, ..., VF-1,...,VF-1>,
// allowing poison elements anywhere.
2250 int ReplicationFactor, int VF) {
2251 assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2252 "Unexpected mask size.");
2253
// Consume the mask one ReplicationFactor-sized chunk per source element.
2254 for (int CurrElt : seq(VF)) {
2255 ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2256 assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2257 "Run out of mask?");
2258 Mask = Mask.drop_front(ReplicationFactor);
// Every element of the chunk must be poison or the expected source index.
2259 if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2260 return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2261 }))
2262 return false;
2263 }
2264 assert(Mask.empty() && "Did not consume the whole mask?");
2265
2266 return true;
2267}
2268
// NOTE(review): listing dropped the signature start (2269); presumably
// bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,
// int &ReplicationFactor, int &VF) — confirm.
// Detects a replication mask and reports the (ReplicationFactor, VF) pair;
// with poison elements present, several pairs may fit and the largest
// replication factor is preferred.
2270 int &ReplicationFactor, int &VF) {
2271 // undef-less case is trivial.
2272 if (!llvm::is_contained(Mask, PoisonMaskElem)) {
// Without poison, the factor is simply the length of the leading run of 0s.
2273 ReplicationFactor =
2274 Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2275 if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2276 return false;
2277 VF = Mask.size() / ReplicationFactor;
2278 return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2279 }
2280
2281 // However, if the mask contains undef's, we have to enumerate possible tuples
2282 // and pick one. There are bounds on replication factor: [1, mask size]
2283 // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2284 // Additionally, mask size is a replication factor multiplied by vector size,
2285 // which further significantly reduces the search space.
2286
2287 // Before doing that, let's perform basic correctness checking first.
2288 int Largest = -1;
2289 for (int MaskElt : Mask) {
2290 if (MaskElt == PoisonMaskElem)
2291 continue;
2292 // Elements must be in non-decreasing order.
2293 if (MaskElt < Largest)
2294 return false;
2295 Largest = std::max(Largest, MaskElt);
2296 }
2297
2298 // Prefer larger replication factor if all else equal.
2299 for (int PossibleReplicationFactor :
2300 reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2301 if (Mask.size() % PossibleReplicationFactor != 0)
2302 continue;
2303 int PossibleVF = Mask.size() / PossibleReplicationFactor;
2304 if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2305 PossibleVF))
2306 continue;
2307 ReplicationFactor = PossibleReplicationFactor;
2308 VF = PossibleVF;
2309 return true;
2310 }
2311
2312 return false;
2313}
2314
// Member form: tests this instruction's own mask for replication, deriving
// VF from the operand's element count so the factor is fully determined.
2315bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2316 int &VF) const {
2317 // Not possible to express a shuffle mask for a scalable vector for this
2318 // case.
// NOTE(review): guard expression on listing line 2319 dropped by the
// renderer; presumably the scalable-vector test — confirm.
2320 return false;
2321
2322 VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2323 if (ShuffleMask.size() % VF != 0)
2324 return false;
2325 ReplicationFactor = ShuffleMask.size() / VF;
2326
2327 return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2328}
2329
// NOTE(review): listing dropped the signature line (2330); presumably
// bool ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask,
// int VF) — confirm.
// Checks that in every VF-wide chunk of the mask, each in-range source lane
// [0, VF) is used exactly once (fully-poison chunks are allowed).
2331 if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2332 Mask.size() % VF != 0)
2333 return false;
2334 for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2335 ArrayRef<int> SubMask = Mask.slice(K, VF);
// A chunk that is entirely poison places no constraint.
2336 if (all_of(SubMask, equal_to(PoisonMaskElem)))
2337 continue;
// Otherwise every lane index below VF must be covered by the chunk.
2338 SmallBitVector Used(VF, false);
2339 for (int Idx : SubMask) {
2340 if (Idx != PoisonMaskElem && Idx < VF)
2341 Used.set(Idx);
2342 }
2343 if (!Used.all())
2344 return false;
2345 }
2346 return true;
2347}
2348
2349/// Return true if this shuffle mask is a replication mask.
// NOTE(review): listing dropped the signature line (2350); presumably
// bool ShuffleVectorInst::isOneUseSingleSourceMask(unsigned VF) const —
// confirm (the doxygen brief above looks copy-pasted from the replication
// helper and does not match the calls below).
2351 // Not possible to express a shuffle mask for a scalable vector for this
2352 // case.
// NOTE(review): guard expression on listing line 2353 dropped; presumably
// the scalable-vector test.
2354 return false;
2355 if (!isSingleSourceMask(ShuffleMask, VF))
2356 return false;
2357
2358 return isOneUseSingleSourceMask(ShuffleMask, VF);
2359}
2360
// Returns true when this shuffle interleaves its two operands with the
// given Factor. Total input width is twice the (fixed) operand width.
2361bool ShuffleVectorInst::isInterleave(unsigned Factor) {
// NOTE(review): listing line 2362 dropped by the renderer; presumably the
// declaration of OpTy as a dyn_cast to FixedVectorType of the operand type —
// confirm (the null-check below requires a pointer-valued OpTy).
2363 // shuffle_vector can only interleave fixed length vectors - for scalable
2364 // vectors, see the @llvm.vector.interleave2 intrinsic
2365 if (!OpTy)
2366 return false;
2367 unsigned OpNumElts = OpTy->getNumElements();
2368
2369 return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2370}
2371
// NOTE(review): listing dropped the signature start (2372); presumably
// bool ShuffleVectorInst::isInterleaveMask(...) — confirm.
// Checks whether Mask interleaves Factor lanes of length NumElts/Factor,
// tolerating undef elements as long as defined elements stay consecutive
// within each lane; on success StartIndexes[I] holds lane I's first source
// index.
2373 ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
2374 SmallVectorImpl<unsigned> &StartIndexes) {
2375 unsigned NumElts = Mask.size();
2376 if (NumElts % Factor)
2377 return false;
2378
2379 unsigned LaneLen = NumElts / Factor;
2380 if (!isPowerOf2_32(LaneLen))
2381 return false;
2382
2383 StartIndexes.resize(Factor);
2384
2385 // Check whether each element matches the general interleaved rule.
2386 // Ignore undef elements, as long as the defined elements match the rule.
2387 // Outer loop processes all factors (x, y, z in the above example)
2388 unsigned I = 0, J;
2389 for (; I < Factor; I++) {
2390 unsigned SavedLaneValue;
2391 unsigned SavedNoUndefs = 0;
2392
2393 // Inner loop processes consecutive accesses (x, x+1... in the example)
2394 for (J = 0; J < LaneLen - 1; J++) {
2395 // Lane computes x's position in the Mask
2396 unsigned Lane = J * Factor + I;
2397 unsigned NextLane = Lane + Factor;
2398 int LaneValue = Mask[Lane];
2399 int NextLaneValue = Mask[NextLane];
2400
2401 // If both are defined, values must be sequential
2402 if (LaneValue >= 0 && NextLaneValue >= 0 &&
2403 LaneValue + 1 != NextLaneValue)
2404 break;
2405
2406 // If the next value is undef, save the current one as reference
2407 if (LaneValue >= 0 && NextLaneValue < 0) {
2408 SavedLaneValue = LaneValue;
2409 SavedNoUndefs = 1;
2410 }
2411
2412 // Undefs are allowed, but defined elements must still be consecutive:
2413 // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
2414 // Verify this by storing the last non-undef followed by an undef
2415 // Check that following non-undef masks are incremented with the
2416 // corresponding distance.
2417 if (SavedNoUndefs > 0 && LaneValue < 0) {
2418 SavedNoUndefs++;
2419 if (NextLaneValue >= 0 &&
2420 SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
2421 break;
2422 }
2423 }
2424
// Early break above means the lane violated the interleave rule.
2425 if (J < LaneLen - 1)
2426 return false;
2427
2428 int StartMask = 0;
2429 if (Mask[I] >= 0) {
2430 // Check that the start of the I range (J=0) is greater than 0
2431 StartMask = Mask[I];
2432 } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
2433 // StartMask defined by the last value in lane
2434 StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
2435 } else if (SavedNoUndefs > 0) {
2436 // StartMask defined by some non-zero value in the j loop
2437 StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
2438 }
2439 // else StartMask remains set to 0, i.e. all elements are undefs
2440
2441 if (StartMask < 0)
2442 return false;
2443 // We must stay within the vectors; This case can happen with undefs.
2444 if (StartMask + LaneLen > NumInputElts)
2445 return false;
2446
2447 StartIndexes[I] = StartMask;
2448 }
2449
2450 return true;
2451}
2452
2453/// Check if the mask is a DE-interleave mask of the given factor
2454/// \p Factor like:
2455/// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
// NOTE(review): listing dropped the signature start (2456); presumably
// bool ShuffleVectorInst::isDeInterleaveMaskOfFactor(ArrayRef<int> Mask,
// ...) — confirm. On success Index reports which of the Factor phases the
// mask extracts; undef elements are ignored.
2457 unsigned Factor,
2458 unsigned &Index) {
2459 // Check all potential start indices from 0 to (Factor - 1).
2460 for (unsigned Idx = 0; Idx < Factor; Idx++) {
2461 unsigned I = 0;
2462
2463 // Check that elements are in ascending order by Factor. Ignore undef
2464 // elements.
2465 for (; I < Mask.size(); I++)
2466 if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
2467 break;
2468
// The whole mask matched phase Idx.
2469 if (I == Mask.size()) {
2470 Index = Idx;
2471 return true;
2472 }
2473 }
2474
2475 return false;
2476}
2477
2478/// Try to lower a vector shuffle as a bit rotation.
2479///
2480/// Look for a repeated rotation pattern in each sub group.
2481/// Returns an element-wise left bit rotation amount or -1 if failed.
2482static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
2483 int NumElts = Mask.size();
2484 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
2485
2486 int RotateAmt = -1;
2487 for (int i = 0; i != NumElts; i += NumSubElts) {
2488 for (int j = 0; j != NumSubElts; ++j) {
2489 int M = Mask[i + j];
2490 if (M < 0)
2491 continue;
2492 if (M < i || M >= i + NumSubElts)
2493 return -1;
2494 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
2495 if (0 <= RotateAmt && Offset != RotateAmt)
2496 return -1;
2497 RotateAmt = Offset;
2498 }
2499 }
2500 return RotateAmt;
2501}
2502
// NOTE(review): listing dropped the signature start (2503); presumably
// bool ShuffleVectorInst::isBitRotateMask(...) — confirm.
// Searches power-of-two subgroup sizes in [MinSubElts, MaxSubElts] for one
// where the mask is an element rotation; reports the subgroup size and the
// rotation amount converted to bits.
2504 ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
2505 unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
2506 for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
2507 int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
2508 if (EltRotateAmt < 0)
2509 continue;
// Scale the element rotation into a bit rotation.
2510 RotateAmt = EltRotateAmt * EltSizeInBits;
2511 return true;
2512 }
2513
2514 return false;
2515}
2516
2517//===----------------------------------------------------------------------===//
2518// InsertValueInst Class
2519//===----------------------------------------------------------------------===//
2520
// Initializes an insertvalue: stores the aggregate and inserted value as
// operands, copies the index list, and names the instruction.
2521void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2522 const Twine &Name) {
2523 assert(getNumOperands() == 2 && "NumOperands not initialized?");
2524
2525 // There's no fundamental reason why we require at least one index
2526 // (other than weirdness with &*IdxBegin being invalid; see
2527 // getelementptr's init routine for example). But there's no
2528 // present need to support it.
2529 assert(!Idxs.empty() && "InsertValueInst must have at least one index");
2530
// NOTE(review): listing line 2531 dropped by the renderer — presumably the
// first half of this assert, comparing the indexed type of Agg against
// Val->getType(); confirm against the real source.
2532 Val->getType() && "Inserted value must match indexed type!");
2533 Op<0>() = Agg;
2534 Op<1>() = Val;
2535
2536 Indices.append(Idxs.begin(), Idxs.end());
2537 setName(Name);
2538}
2539
// Copy constructor: duplicates operands and the index list of IVI.
2540InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
2541 : Instruction(IVI.getType(), InsertValue, AllocMarker),
2542 Indices(IVI.Indices) {
2543 Op<0>() = IVI.getOperand(0);
2544 Op<1>() = IVI.getOperand(1);
// NOTE(review): listing line 2545 dropped by the renderer — presumably a
// copy of subclass flags/metadata from IVI; confirm against the real source.
2546}
2547
2548//===----------------------------------------------------------------------===//
2549// ExtractValueInst Class
2550//===----------------------------------------------------------------------===//
2551
// Initializes an extractvalue: copies the index list and names the
// instruction (the aggregate operand is set by the constructor).
2552void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2553 assert(getNumOperands() == 1 && "NumOperands not initialized?");
2554
2555 // There's no fundamental reason why we require at least one index.
2556 // But there's no present need to support it.
2557 assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2558
2559 Indices.append(Idxs.begin(), Idxs.end());
2560 setName(Name);
2561}
2562
// Copy constructor: duplicates the aggregate operand and index list of EVI.
2563ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
2564 : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0),
2565 (BasicBlock *)nullptr),
2566 Indices(EVI.Indices) {
// NOTE(review): listing line 2567 dropped by the renderer — presumably a
// copy of subclass flags/metadata from EVI; confirm against the real source.
2568}
2569
2570// getIndexedType - Returns the type of the element that would be extracted
2571// with an extractvalue instruction with the specified parameters.
2572//
2573// A null type is returned if the indices are invalid for the specified
2574// pointer type.
2575//
// NOTE(review): listing dropped the signature start (2576); presumably
// Type *ExtractValueInst::getIndexedType(Type *Agg, ...) — confirm.
2577 ArrayRef<unsigned> Idxs) {
// Walk one index at a time, descending into arrays and structs; any other
// aggregate kind or out-of-range index invalidates the whole path.
2578 for (unsigned Index : Idxs) {
2579 // We can't use CompositeType::indexValid(Index) here.
2580 // indexValid() always returns true for arrays because getelementptr allows
2581 // out-of-bounds indices. Since we don't allow those for extractvalue and
2582 // insertvalue we need to check array indexing manually.
2583 // Since the only other types we can index into are struct types it's just
2584 // as easy to check those manually as well.
2585 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2586 if (Index >= AT->getNumElements())
2587 return nullptr;
2588 Agg = AT->getElementType();
2589 } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
2590 if (Index >= ST->getNumElements())
2591 return nullptr;
2592 Agg = ST->getElementType(Index);
2593 } else {
2594 // Not a valid type to index into.
2595 return nullptr;
2596 }
2597 }
// Agg is now the type reached after applying all indices.
2598 return Agg;
2599}
2600
2601//===----------------------------------------------------------------------===//
2602// UnaryOperator Class
2603//===----------------------------------------------------------------------===//
2604
// NOTE(review): listing dropped the signature start (2605); presumably
// UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, Type *Ty, ...) —
// confirm. Sets the single operand, names the instruction, and validates
// the opcode/type pairing in debug builds.
2606 const Twine &Name, InsertPosition InsertBefore)
2607 : UnaryInstruction(Ty, iType, S, InsertBefore) {
2608 Op<0>() = S;
2609 setName(Name);
2610 AssertOK();
2611}
2612
// NOTE(review): listing dropped the signature start (2613); presumably
// UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, ...) —
// confirm. Factory: result type equals the operand's type.
2614 InsertPosition InsertBefore) {
2615 return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
2616}
2617
// Debug-build sanity checks for unary operators: currently only FNeg,
// which must be a floating-point (or FP vector) operation returning the
// operand's type. Compiles to nothing in release builds.
2618void UnaryOperator::AssertOK() {
2619 Value *LHS = getOperand(0);
2620 (void)LHS; // Silence warnings.
2621#ifndef NDEBUG
2622 switch (getOpcode()) {
2623 case FNeg:
2624 assert(getType() == LHS->getType() &&
2625 "Unary operation should return same type as operand!");
2626 assert(getType()->isFPOrFPVectorTy() &&
2627 "Tried to create a floating-point operation on a "
2628 "non-floating-point type!");
2629 break;
2630 default: llvm_unreachable("Invalid opcode provided");
2631 }
2632#endif
2633}
2634
2635//===----------------------------------------------------------------------===//
2636// BinaryOperator Class
2637//===----------------------------------------------------------------------===//
2638
// NOTE(review): listing dropped the signature start (2639); presumably
// BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
// Type *Ty, ...) — confirm. Sets both operands, names the instruction,
// and validates opcode/type pairing in debug builds.
2640 const Twine &Name, InsertPosition InsertBefore)
2641 : Instruction(Ty, iType, AllocMarker, InsertBefore) {
2642 Op<0>() = S1;
2643 Op<1>() = S2;
2644 setName(Name);
2645 AssertOK();
2646}
2647
// Debug-build sanity checks for binary operators: operand types must match
// each other and the result type, and the value category (integer vs
// floating point) must agree with the opcode. Compiles to nothing in
// release builds beyond the first assert.
2648void BinaryOperator::AssertOK() {
2649 Value *LHS = getOperand(0), *RHS = getOperand(1);
2650 (void)LHS; (void)RHS; // Silence warnings.
2651 assert(LHS->getType() == RHS->getType() &&
2652 "Binary operator operand types must match!");
2653#ifndef NDEBUG
2654 switch (getOpcode()) {
2655 case Add: case Sub:
2656 case Mul:
2657 assert(getType() == LHS->getType() &&
2658 "Arithmetic operation should return same type as operands!");
2659 assert(getType()->isIntOrIntVectorTy() &&
2660 "Tried to create an integer operation on a non-integer type!");
2661 break;
2662 case FAdd: case FSub:
2663 case FMul:
2664 assert(getType() == LHS->getType() &&
2665 "Arithmetic operation should return same type as operands!");
2666 assert(getType()->isFPOrFPVectorTy() &&
2667 "Tried to create a floating-point operation on a "
2668 "non-floating-point type!");
2669 break;
2670 case UDiv:
2671 case SDiv:
2672 assert(getType() == LHS->getType() &&
2673 "Arithmetic operation should return same type as operands!");
2674 assert(getType()->isIntOrIntVectorTy() &&
2675 "Incorrect operand type (not integer) for S/UDIV");
2676 break;
2677 case FDiv:
2678 assert(getType() == LHS->getType() &&
2679 "Arithmetic operation should return same type as operands!");
2680 assert(getType()->isFPOrFPVectorTy() &&
2681 "Incorrect operand type (not floating point) for FDIV");
2682 break;
2683 case URem:
2684 case SRem:
2685 assert(getType() == LHS->getType() &&
2686 "Arithmetic operation should return same type as operands!");
2687 assert(getType()->isIntOrIntVectorTy() &&
2688 "Incorrect operand type (not integer) for S/UREM");
2689 break;
2690 case FRem:
2691 assert(getType() == LHS->getType() &&
2692 "Arithmetic operation should return same type as operands!");
2693 assert(getType()->isFPOrFPVectorTy() &&
2694 "Incorrect operand type (not floating point) for FREM");
2695 break;
2696 case Shl:
2697 case LShr:
2698 case AShr:
2699 assert(getType() == LHS->getType() &&
2700 "Shift operation should return same type as operands!");
2701 assert(getType()->isIntOrIntVectorTy() &&
2702 "Tried to create a shift operation on a non-integral type!");
2703 break;
2704 case And: case Or:
2705 case Xor:
2706 assert(getType() == LHS->getType() &&
2707 "Logical operation should return same type as operands!");
2708 assert(getType()->isIntOrIntVectorTy() &&
2709 "Tried to create a logical operation on a non-integral type!");
2710 break;
2711 default: llvm_unreachable("Invalid opcode provided");
2712 }
2713#endif
2714}
2715
// NOTE(review): listing dropped the signature start (2716); presumably
// BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1,
// Value *S2, ...) — confirm. Factory: result type equals the operands'
// (matching) type.
2717 const Twine &Name,
2718 InsertPosition InsertBefore) {
2719 assert(S1->getType() == S2->getType() &&
2720 "Cannot create binary operator with two operands of differing type!");
2721 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
2722}
2723
// NOTE(review): listing dropped the signature start (2724); presumably
// BinaryOperator *BinaryOperator::CreateNeg(Value *Op, ...) — confirm.
// Builds integer negation as (0 - Op).
2725 InsertPosition InsertBefore) {
2726 Value *Zero = ConstantInt::get(Op->getType(), 0);
2727 return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
2728 InsertBefore);
2729}
2730
// NOTE(review): listing dropped the signature start (2731); presumably
// BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, ...) — confirm.
// Builds negation as (0 - Op) with the no-signed-wrap flag.
2732 InsertPosition InsertBefore) {
2733 Value *Zero = ConstantInt::get(Op->getType(), 0);
2734 return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
2735}
2736
// NOTE(review): listing dropped the signature start (2737); presumably
// BinaryOperator *BinaryOperator::CreateNot(Value *Op, ...) — confirm.
// Builds bitwise complement as (Op xor all-ones).
2738 InsertPosition InsertBefore) {
2739 Constant *C = Constant::getAllOnesValue(Op->getType());
2740 return new BinaryOperator(Instruction::Xor, Op, C,
2741 Op->getType(), Name, InsertBefore);
2742}
2743
2744// Exchange the two operands to this instruction. This instruction is safe to
2745// use on any binary instruction and does not modify the semantics of the
2746// instruction.
// NOTE(review): listing dropped the signature line (2747); presumably
// bool BinaryOperator::swapOperands() — confirm. Note the inverted return
// convention: true means the swap FAILED (opcode not commutative).
2748 if (!isCommutative())
2749 return true; // Can't commute operands
2750 Op<0>().swap(Op<1>());
2751 return false;
2752}
2753
2754//===----------------------------------------------------------------------===//
2755// FPMathOperator Class
2756//===----------------------------------------------------------------------===//
2757
// NOTE(review): listing dropped the signature line (2758); presumably
// float FPMathOperator::getFPAccuracy() const — confirm.
// Reads the requested accuracy (in ULPs) from !fpmath metadata; 0.0 means
// no accuracy requirement was attached.
2759 const MDNode *MD =
2760 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
2761 if (!MD)
2762 return 0.0;
// NOTE(review): listing line 2763 dropped by the renderer — presumably the
// extraction of the ConstantFP accuracy operand from MD; confirm.
2764 return Accuracy->getValueAPF().convertToFloat();
2765}
2766
2767//===----------------------------------------------------------------------===//
2768// CastInst Class
2769//===----------------------------------------------------------------------===//
2770
2771// Just determine if this cast only deals with integral->integral conversion.
// NOTE(review): listing dropped the signature line (2772); presumably
// bool CastInst::isIntegerCast() const — confirm.
2773 switch (getOpcode()) {
2774 default: return false;
2775 case Instruction::ZExt:
2776 case Instruction::SExt:
2777 case Instruction::Trunc:
2778 return true;
// BitCast counts only when both sides are integers.
2779 case Instruction::BitCast:
2780 return getOperand(0)->getType()->isIntegerTy() &&
2781 getType()->isIntegerTy();
2782 }
2783}
2784
2785/// This function determines if the CastInst does not require any bits to be
2786/// changed in order to effect the cast. Essentially, it identifies cases where
2787/// no code gen is necessary for the cast, hence the name no-op cast. For
2788/// example, the following are all no-op casts:
2789/// # bitcast i32* %x to i8*
2790/// # bitcast <2 x i32> %x to <4 x i16>
2791/// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only
2792/// Determine if the described cast is a no-op.
// NOTE(review): listing dropped the signature start (2793); presumably the
// static overload bool CastInst::isNoopCast(Instruction::CastOps Opcode,
// Type *SrcTy, Type *DestTy, const DataLayout &DL) — confirm.
2794 Type *SrcTy,
2795 Type *DestTy,
2796 const DataLayout &DL) {
2797 assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
2798 switch (Opcode) {
2799 default: llvm_unreachable("Invalid CastOp");
2800 case Instruction::Trunc:
2801 case Instruction::ZExt:
2802 case Instruction::SExt:
2803 case Instruction::FPTrunc:
2804 case Instruction::FPExt:
2805 case Instruction::UIToFP:
2806 case Instruction::SIToFP:
2807 case Instruction::FPToUI:
2808 case Instruction::FPToSI:
2809 case Instruction::AddrSpaceCast:
2810 // TODO: Target informations may give a more accurate answer here.
2811 return false;
2812 case Instruction::BitCast:
2813 return true; // BitCast never modifies bits.
// Pointer<->integer casts are no-ops only when the integer is exactly
// pointer-sized for the data layout.
2814 case Instruction::PtrToAddr:
2815 case Instruction::PtrToInt:
2816 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
2817 DestTy->getScalarSizeInBits();
2818 case Instruction::IntToPtr:
2819 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
2820 SrcTy->getScalarSizeInBits();
2821 }
2822}
2823
// NOTE(review): listing dropped the signature line (2824); presumably
// bool CastInst::isNoopCast(const DataLayout &DL) const — confirm.
// Member convenience wrapper over the static overload above.
2825 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
2826}
2827
2828/// This function determines if a pair of casts can be eliminated and what
2829/// opcode should be used in the elimination. This assumes that there are two
2830/// instructions like this:
2831/// * %F = firstOpcode SrcTy %x to MidTy
2832/// * %S = secondOpcode MidTy %F to DstTy
2833/// The function returns a resultOpcode so these two casts can be replaced with:
2834/// * %Replacement = resultOpcode %SrcTy %x to DstTy
2835/// If no such cast is permitted, the function returns 0.
// NOTE(review): listing dropped the signature start (2836); presumably
// unsigned CastInst::isEliminableCastPair(Instruction::CastOps firstOp,
// ...) — confirm against the header.
2837 Instruction::CastOps secondOp,
2838 Type *SrcTy, Type *MidTy, Type *DstTy,
2839 const DataLayout *DL) {
2840 // Define the 144 possibilities for these two cast instructions. The values
2841 // in this matrix determine what to do in a given situation and select the
2842 // case in the switch below. The rows correspond to firstOp, the columns
2843 // correspond to secondOp. In looking at the table below, keep in mind
2844 // the following cast properties:
2845 //
2846 // Size Compare Source Destination
2847 // Operator Src ? Size Type Sign Type Sign
2848 // -------- ------------ ------------------- ---------------------
2849 // TRUNC > Integer Any Integral Any
2850 // ZEXT < Integral Unsigned Integer Any
2851 // SEXT < Integral Signed Integer Any
2852 // FPTOUI n/a FloatPt n/a Integral Unsigned
2853 // FPTOSI n/a FloatPt n/a Integral Signed
2854 // UITOFP n/a Integral Unsigned FloatPt n/a
2855 // SITOFP n/a Integral Signed FloatPt n/a
2856 // FPTRUNC > FloatPt n/a FloatPt n/a
2857 // FPEXT < FloatPt n/a FloatPt n/a
2858 // PTRTOINT n/a Pointer n/a Integral Unsigned
2859 // PTRTOADDR n/a Pointer n/a Integral Unsigned
2860 // INTTOPTR n/a Integral Unsigned Pointer n/a
2861 // BITCAST = FirstClass n/a FirstClass n/a
2862 // ADDRSPCST n/a Pointer n/a Pointer n/a
2863 //
2864 // NOTE: some transforms are safe, but we consider them to be non-profitable.
2865 // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
2866 // into "fptoui double to i64", but this loses information about the range
2867 // of the produced value (we no longer know the top-part is all zeros).
2868 // Further this conversion is often much more expensive for typical hardware,
2869 // and causes issues when building libgcc. We disallow fptosi+sext for the
2870 // same reason.
2871 const unsigned numCastOps =
2872 Instruction::CastOpsEnd - Instruction::CastOpsBegin;
2873 // clang-format off
2874 static const uint8_t CastResults[numCastOps][numCastOps] = {
2875 // T F F U S F F P P I B A -+
2876 // R Z S P P I I T P 2 2 N T S |
2877 // U E E 2 2 2 2 R E I A T C C +- secondOp
2878 // N X X U S F F N X N D 2 V V |
2879 // C T T I I P P C T T R P T T -+
2880 { 1, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // Trunc -+
2881 { 8, 1, 9,99,99, 2,17,99,99,99,99, 2, 3, 0}, // ZExt |
2882 { 8, 0, 1,99,99, 0, 2,99,99,99,99, 0, 3, 0}, // SExt |
2883 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToUI |
2884 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToSI |
2885 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // UIToFP +- firstOp
2886 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // SIToFP |
2887 { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // FPTrunc |
2888 { 99,99,99, 2, 2,99,99, 8, 2,99,99,99, 4, 0}, // FPExt |
2889 { 1, 0, 0,99,99, 0, 0,99,99,99,99, 7, 3, 0}, // PtrToInt |
2890 { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // PtrToAddr |
2891 { 99,99,99,99,99,99,99,99,99,11,11,99,15, 0}, // IntToPtr |
2892 { 5, 5, 5, 0, 0, 5, 5, 0, 0,16,16, 5, 1,14}, // BitCast |
2893 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
2894 };
2895 // clang-format on
2896
2897 // TODO: This logic could be encoded into the table above and handled in the
2898 // switch below.
2899 // If either of the casts are a bitcast from scalar to vector, disallow the
2900 // merging. However, any pair of bitcasts are allowed.
2901 bool IsFirstBitcast = (firstOp == Instruction::BitCast);
2902 bool IsSecondBitcast = (secondOp == Instruction::BitCast);
2903 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
2904
2905 // Check if any of the casts convert scalars <-> vectors.
2906 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
2907 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
2908 if (!AreBothBitcasts)
2909 return 0;
2910
// Table lookup selects which elimination rule (if any) applies.
2911 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
2912 [secondOp-Instruction::CastOpsBegin];
2913 switch (ElimCase) {
2914 case 0:
2915 // Categorically disallowed.
2916 return 0;
2917 case 1:
2918 // Allowed, use first cast's opcode.
2919 return firstOp;
2920 case 2:
2921 // Allowed, use second cast's opcode.
2922 return secondOp;
2923 case 3:
2924 // No-op cast in second op implies firstOp as long as the DestTy
2925 // is integer and we are not converting between a vector and a
2926 // non-vector type.
2927 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
2928 return firstOp;
2929 return 0;
2930 case 4:
2931 // No-op cast in second op implies firstOp as long as the DestTy
2932 // matches MidTy.
2933 if (DstTy == MidTy)
2934 return firstOp;
2935 return 0;
2936 case 5:
2937 // No-op cast in first op implies secondOp as long as the SrcTy
2938 // is an integer.
2939 if (SrcTy->isIntegerTy())
2940 return secondOp;
2941 return 0;
2942 case 7: {
2943 // Disable inttoptr/ptrtoint optimization if enabled.
2944 if (DisableI2pP2iOpt)
2945 return 0;
2946
2947 // Cannot simplify if address spaces are different!
2948 if (SrcTy != DstTy)
2949 return 0;
2950
2951 // Cannot simplify if the intermediate integer size is smaller than the
2952 // pointer size.
2953 unsigned MidSize = MidTy->getScalarSizeInBits();
2954 if (!DL || MidSize < DL->getPointerTypeSizeInBits(SrcTy))
2955 return 0;
2956
2957 return Instruction::BitCast;
2958 }
2959 case 8: {
2960 // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
2961 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
2962 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
2963 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2964 unsigned DstSize = DstTy->getScalarSizeInBits();
2965 if (SrcTy == DstTy)
2966 return Instruction::BitCast;
2967 if (SrcSize < DstSize)
2968 return firstOp;
2969 if (SrcSize > DstSize)
2970 return secondOp;
2971 return 0;
2972 }
2973 case 9:
2974 // zext, sext -> zext, because sext can't sign extend after zext
2975 return Instruction::ZExt;
2976 case 11: {
2977 // inttoptr, ptrtoint/ptrtoaddr -> integer cast
2978 if (!DL)
2979 return 0;
2980 unsigned MidSize = secondOp == Instruction::PtrToAddr
2981 ? DL->getAddressSizeInBits(MidTy)
2982 : DL->getPointerTypeSizeInBits(MidTy);
2983 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2984 unsigned DstSize = DstTy->getScalarSizeInBits();
2985 // If the middle size is smaller than both source and destination,
2986 // an additional masking operation would be required.
2987 if (MidSize < SrcSize && MidSize < DstSize)
2988 return 0;
2989 if (DstSize < SrcSize)
2990 return Instruction::Trunc;
2991 if (DstSize > SrcSize)
2992 return Instruction::ZExt;
2993 return Instruction::BitCast;
2994 }
2995 case 12:
2996 // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
2997 // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
2998 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2999 return Instruction::AddrSpaceCast;
3000 return Instruction::BitCast;
3001 case 13:
3002 // FIXME: this state can be merged with (1), but the following assert
3003 // is useful to check the correcteness of the sequence due to semantic
3004 // change of bitcast.
3005 assert(
3006 SrcTy->isPtrOrPtrVectorTy() &&
3007 MidTy->isPtrOrPtrVectorTy() &&
3008 DstTy->isPtrOrPtrVectorTy() &&
3009 SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
3010 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3011 "Illegal addrspacecast, bitcast sequence!");
3012 // Allowed, use first cast's opcode
3013 return firstOp;
3014 case 14:
3015 // bitcast, addrspacecast -> addrspacecast
3016 return Instruction::AddrSpaceCast;
3017 case 15:
3018 // FIXME: this state can be merged with (1), but the following assert
3019 // is useful to check the correcteness of the sequence due to semantic
3020 // change of bitcast.
3021 assert(
3022 SrcTy->isIntOrIntVectorTy() &&
3023 MidTy->isPtrOrPtrVectorTy() &&
3024 DstTy->isPtrOrPtrVectorTy() &&
3025 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
3026 "Illegal inttoptr, bitcast sequence!");
3027 // Allowed, use first cast's opcode
3028 return firstOp;
3029 case 16:
3030 // FIXME: this state can be merged with (2), but the following assert
3031 // is useful to check the correcteness of the sequence due to semantic
3032 // change of bitcast.
3033 assert(
3034 SrcTy->isPtrOrPtrVectorTy() &&
3035 MidTy->isPtrOrPtrVectorTy() &&
3036 DstTy->isIntOrIntVectorTy() &&
3037 SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
3038 "Illegal bitcast, ptrtoint sequence!");
3039 // Allowed, use second cast's opcode
3040 return secondOp;
3041 case 17:
3042 // (sitofp (zext x)) -> (uitofp x)
3043 return Instruction::UIToFP;
3044 case 99:
3045 // Cast combination can't happen (error in input). This is for all cases
3046 // where the MidTy is not the same for the two cast instructions.
3047 llvm_unreachable("Invalid Cast Combination");
3048 default:
3049 llvm_unreachable("Error in CastResults table!!!");
3050 }
3051}
3052
// NOTE(review): listing dropped the signature start (3053); presumably
// CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
// ...) — confirm. Dispatches to the concrete CastInst subclass for the
// requested opcode.
3054 const Twine &Name, InsertPosition InsertBefore) {
3055 assert(castIsValid(op, S, Ty) && "Invalid cast!");
3056 // Construct and return the appropriate CastInst subclass
3057 switch (op) {
3058 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
3059 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
3060 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
3061 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
3062 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
3063 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
3064 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
3065 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
3066 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
3067 case PtrToAddr: return new PtrToAddrInst (S, Ty, Name, InsertBefore);
3068 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
3069 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
3070 case BitCast:
3071 return new BitCastInst(S, Ty, Name, InsertBefore);
3072 case AddrSpaceCast:
3073 return new AddrSpaceCastInst(S, Ty, Name, InsertBefore);
3074 default:
3075 llvm_unreachable("Invalid opcode provided");
3076 }
3077}
3078
3080 InsertPosition InsertBefore) {
3081 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3082 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3083 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3084}
3085
3087 InsertPosition InsertBefore) {
3088 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3089 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3090 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3091}
3092
3094 InsertPosition InsertBefore) {
3095 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3096 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3097 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3098}
3099
3100/// Create a BitCast or a PtrToInt cast instruction
3102 InsertPosition InsertBefore) {
3103 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3104 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3105 "Invalid cast");
3106 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3107 assert((!Ty->isVectorTy() ||
3108 cast<VectorType>(Ty)->getElementCount() ==
3109 cast<VectorType>(S->getType())->getElementCount()) &&
3110 "Invalid cast");
3111
3112 if (Ty->isIntOrIntVectorTy())
3113 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3114
3115 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3116}
3117
3119 Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore) {
3120 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3121 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3122
3123 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3124 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3125
3126 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3127}
3128
3130 const Twine &Name,
3131 InsertPosition InsertBefore) {
3132 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3133 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3134 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3135 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3136
3137 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3138}
3139
3141 const Twine &Name,
3142 InsertPosition InsertBefore) {
3143 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3144 "Invalid integer cast");
3145 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3146 unsigned DstBits = Ty->getScalarSizeInBits();
3147 Instruction::CastOps opcode =
3148 (SrcBits == DstBits ? Instruction::BitCast :
3149 (SrcBits > DstBits ? Instruction::Trunc :
3150 (isSigned ? Instruction::SExt : Instruction::ZExt)));
3151 return Create(opcode, C, Ty, Name, InsertBefore);
3152}
3153
3155 InsertPosition InsertBefore) {
3156 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3157 "Invalid cast");
3158 unsigned SrcBits = C->getType()->getScalarSizeInBits();
3159 unsigned DstBits = Ty->getScalarSizeInBits();
3160 assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
3161 Instruction::CastOps opcode =
3162 (SrcBits == DstBits ? Instruction::BitCast :
3163 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3164 return Create(opcode, C, Ty, Name, InsertBefore);
3165}
3166
3167bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3168 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3169 return false;
3170
3171 if (SrcTy == DestTy)
3172 return true;
3173
3174 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3175 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3176 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3177 // An element by element cast. Valid if casting the elements is valid.
3178 SrcTy = SrcVecTy->getElementType();
3179 DestTy = DestVecTy->getElementType();
3180 }
3181 }
3182 }
3183
3184 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3185 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3186 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3187 }
3188 }
3189
3190 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3191 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3192
3193 // Could still have vectors of pointers if the number of elements doesn't
3194 // match
3195 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3196 return false;
3197
3198 if (SrcBits != DestBits)
3199 return false;
3200
3201 return true;
3202}
3203
3205 const DataLayout &DL) {
3206 // ptrtoint and inttoptr are not allowed on non-integral pointers
3207 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3208 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3209 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3210 !DL.isNonIntegralPointerType(PtrTy));
3211 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3212 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3213 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3214 !DL.isNonIntegralPointerType(PtrTy));
3215
3216 return isBitCastable(SrcTy, DestTy);
3217}
3218
3219// Provide a way to get a "cast" where the cast opcode is inferred from the
3220// types and size of the operand. This, basically, is a parallel of the
3221// logic in the castIsValid function below. This axiom should hold:
3222// castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3223// should not assert in castIsValid. In other words, this produces a "correct"
3224// casting opcode for the arguments passed to it.
3227 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3228 Type *SrcTy = Src->getType();
3229
3230 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3231 "Only first class types are castable!");
3232
3233 if (SrcTy == DestTy)
3234 return BitCast;
3235
3236 // FIXME: Check address space sizes here
3237 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3238 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3239 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3240 // An element by element cast. Find the appropriate opcode based on the
3241 // element types.
3242 SrcTy = SrcVecTy->getElementType();
3243 DestTy = DestVecTy->getElementType();
3244 }
3245
3246 // Get the bit sizes, we'll need these
3247 // FIXME: This doesn't work for scalable vector types with different element
3248 // counts that don't call getElementType above.
3249 unsigned SrcBits =
3250 SrcTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3251 unsigned DestBits =
3252 DestTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3253
3254 // Run through the possibilities ...
3255 if (DestTy->isByteTy()) { // Casting to byte
3256 if (SrcTy->isIntegerTy()) { // Casting from integral
3257 assert(DestBits == SrcBits && "Illegal cast from integer to byte type");
3258 return BitCast;
3259 } else if (SrcTy->isPointerTy()) { // Casting from pointer
3260 assert(DestBits == SrcBits && "Illegal cast from pointer to byte type");
3261 return BitCast;
3262 }
3263 llvm_unreachable("Illegal cast to byte type");
3264 } else if (DestTy->isIntegerTy()) { // Casting to integral
3265 if (SrcTy->isIntegerTy()) { // Casting from integral
3266 if (DestBits < SrcBits)
3267 return Trunc; // int -> smaller int
3268 else if (DestBits > SrcBits) { // its an extension
3269 if (SrcIsSigned)
3270 return SExt; // signed -> SEXT
3271 else
3272 return ZExt; // unsigned -> ZEXT
3273 } else {
3274 return BitCast; // Same size, No-op cast
3275 }
3276 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3277 if (DestIsSigned)
3278 return FPToSI; // FP -> sint
3279 else
3280 return FPToUI; // FP -> uint
3281 } else if (SrcTy->isVectorTy()) {
3282 assert(DestBits == SrcBits &&
3283 "Casting vector to integer of different width");
3284 return BitCast; // Same size, no-op cast
3285 } else {
3286 assert(SrcTy->isPointerTy() &&
3287 "Casting from a value that is not first-class type");
3288 return PtrToInt; // ptr -> int
3289 }
3290 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3291 if (SrcTy->isIntegerTy()) { // Casting from integral
3292 if (SrcIsSigned)
3293 return SIToFP; // sint -> FP
3294 else
3295 return UIToFP; // uint -> FP
3296 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3297 if (DestBits < SrcBits) {
3298 return FPTrunc; // FP -> smaller FP
3299 } else if (DestBits > SrcBits) {
3300 return FPExt; // FP -> larger FP
3301 } else {
3302 return BitCast; // same size, no-op cast
3303 }
3304 } else if (SrcTy->isVectorTy()) {
3305 assert(DestBits == SrcBits &&
3306 "Casting vector to floating point of different width");
3307 return BitCast; // same size, no-op cast
3308 }
3309 llvm_unreachable("Casting pointer or non-first class to float");
3310 } else if (DestTy->isVectorTy()) {
3311 assert(DestBits == SrcBits &&
3312 "Illegal cast to vector (wrong type or size)");
3313 return BitCast;
3314 } else if (DestTy->isPointerTy()) {
3315 if (SrcTy->isPointerTy()) {
3316 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3317 return AddrSpaceCast;
3318 return BitCast; // ptr -> ptr
3319 } else if (SrcTy->isIntegerTy()) {
3320 return IntToPtr; // int -> ptr
3321 }
3322 llvm_unreachable("Casting pointer to other than pointer or int");
3323 }
3324 llvm_unreachable("Casting to type that is not first-class");
3325}
3326
3327//===----------------------------------------------------------------------===//
3328// CastInst SubClass Constructors
3329//===----------------------------------------------------------------------===//
3330
3331/// Check that the construction parameters for a CastInst are correct. This
3332/// could be broken out into the separate constructors but it is useful to have
3333/// it in one place and to eliminate the redundant code for getting the sizes
3334/// of the types involved.
3335bool
3337 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3338 SrcTy->isAggregateType() || DstTy->isAggregateType())
3339 return false;
3340
3341 // Get the size of the types in bits, and whether we are dealing
3342 // with vector types, we'll need this later.
3343 bool SrcIsVec = isa<VectorType>(SrcTy);
3344 bool DstIsVec = isa<VectorType>(DstTy);
3345 unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3346 unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3347
3348 // If these are vector types, get the lengths of the vectors (using zero for
3349 // scalar types means that checking that vector lengths match also checks that
3350 // scalars are not being converted to vectors or vectors to scalars).
3351 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3353 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3355
3356 // Switch on the opcode provided
3357 switch (op) {
3358 default: return false; // This is an input error
3359 case Instruction::Trunc:
3360 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3361 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3362 case Instruction::ZExt:
3363 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3364 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3365 case Instruction::SExt:
3366 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3367 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3368 case Instruction::FPTrunc:
3369 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3370 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3371 case Instruction::FPExt:
3372 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3373 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3374 case Instruction::UIToFP:
3375 case Instruction::SIToFP:
3376 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3377 SrcEC == DstEC;
3378 case Instruction::FPToUI:
3379 case Instruction::FPToSI:
3380 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3381 SrcEC == DstEC;
3382 case Instruction::PtrToAddr:
3383 case Instruction::PtrToInt:
3384 if (SrcEC != DstEC)
3385 return false;
3386 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3387 case Instruction::IntToPtr:
3388 if (SrcEC != DstEC)
3389 return false;
3390 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3391 case Instruction::BitCast: {
3392 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3393 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3394
3395 // BitCast implies a no-op cast of type only. No bits change.
3396 // However, you can't cast pointers to anything but pointers/bytes.
3397 if ((SrcPtrTy && DstTy->isByteOrByteVectorTy()) ||
3398 (SrcTy->isByteOrByteVectorTy() && DstPtrTy))
3399 return true;
3400 if (!SrcPtrTy != !DstPtrTy)
3401 return false;
3402
3403 // For non-pointer cases, the cast is okay if the source and destination bit
3404 // widths are identical.
3405 if (!SrcPtrTy)
3406 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3407
3408 // If both are pointers then the address spaces must match.
3409 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3410 return false;
3411
3412 // A vector of pointers must have the same number of elements.
3413 if (SrcIsVec && DstIsVec)
3414 return SrcEC == DstEC;
3415 if (SrcIsVec)
3416 return SrcEC == ElementCount::getFixed(1);
3417 if (DstIsVec)
3418 return DstEC == ElementCount::getFixed(1);
3419
3420 return true;
3421 }
3422 case Instruction::AddrSpaceCast: {
3423 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3424 if (!SrcPtrTy)
3425 return false;
3426
3427 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3428 if (!DstPtrTy)
3429 return false;
3430
3431 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3432 return false;
3433
3434 return SrcEC == DstEC;
3435 }
3436 }
3437}
3438
3440 InsertPosition InsertBefore)
3441 : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3442 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3443}
3444
3445ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name,
3446 InsertPosition InsertBefore)
3447 : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3448 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3449}
3450
3451SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name,
3452 InsertPosition InsertBefore)
3453 : CastInst(Ty, SExt, S, Name, InsertBefore) {
3454 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3455}
3456
3458 InsertPosition InsertBefore)
3459 : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3460 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3461}
3462
3464 InsertPosition InsertBefore)
3465 : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3466 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3467}
3468
3470 InsertPosition InsertBefore)
3471 : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3472 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3473}
3474
3476 InsertPosition InsertBefore)
3477 : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3478 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3479}
3480
3482 InsertPosition InsertBefore)
3483 : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3484 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3485}
3486
3488 InsertPosition InsertBefore)
3489 : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3490 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3491}
3492
3494 InsertPosition InsertBefore)
3495 : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3496 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3497}
3498
3500 InsertPosition InsertBefore)
3501 : CastInst(Ty, PtrToAddr, S, Name, InsertBefore) {
3502 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToAddr");
3503}
3504
3506 InsertPosition InsertBefore)
3507 : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3508 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3509}
3510
3512 InsertPosition InsertBefore)
3513 : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3514 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3515}
3516
3518 InsertPosition InsertBefore)
3519 : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3520 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3521}
3522
3523//===----------------------------------------------------------------------===//
3524// CmpInst Classes
3525//===----------------------------------------------------------------------===//
3526
3528 Value *RHS, const Twine &Name, InsertPosition InsertBefore,
3529 Instruction *FlagsSource)
3530 : Instruction(ty, op, AllocMarker, InsertBefore) {
3531 Op<0>() = LHS;
3532 Op<1>() = RHS;
3533 setPredicate(predicate);
3534 setName(Name);
3535 if (FlagsSource)
3536 copyIRFlags(FlagsSource);
3537}
3538
3540 const Twine &Name, InsertPosition InsertBefore) {
3541 if (Op == Instruction::ICmp) {
3542 if (InsertBefore.isValid())
3543 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3544 S1, S2, Name);
3545 else
3546 return new ICmpInst(CmpInst::Predicate(predicate),
3547 S1, S2, Name);
3548 }
3549
3550 if (InsertBefore.isValid())
3551 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3552 S1, S2, Name);
3553 else
3554 return new FCmpInst(CmpInst::Predicate(predicate),
3555 S1, S2, Name);
3556}
3557
3559 Value *S2,
3560 const Instruction *FlagsSource,
3561 const Twine &Name,
3562 InsertPosition InsertBefore) {
3563 CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);
3564 Inst->copyIRFlags(FlagsSource);
3565 return Inst;
3566}
3567
3569 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3570 IC->swapOperands();
3571 else
3572 cast<FCmpInst>(this)->swapOperands();
3573}
3574
3576 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3577 return IC->isCommutative();
3578 return cast<FCmpInst>(this)->isCommutative();
3579}
3580
3583 return ICmpInst::isEquality(P);
3585 return FCmpInst::isEquality(P);
3586 llvm_unreachable("Unsupported predicate kind");
3587}
3588
3589// Returns true if either operand of CmpInst is a provably non-zero
3590// floating-point constant.
3591static bool hasNonZeroFPOperands(const CmpInst *Cmp) {
3592 auto *LHS = dyn_cast<Constant>(Cmp->getOperand(0));
3593 auto *RHS = dyn_cast<Constant>(Cmp->getOperand(1));
3594 if (auto *Const = LHS ? LHS : RHS) {
3595 using namespace llvm::PatternMatch;
3596 return match(Const, m_NonZeroNotDenormalFP());
3597 }
3598 return false;
3599}
3600
3601// Floating-point equality is not an equivalence when comparing +0.0 with
3602// -0.0, when comparing NaN with another value, or when flushing
3603// denormals-to-zero.
3604bool CmpInst::isEquivalence(bool Invert) const {
3605 switch (Invert ? getInversePredicate() : getPredicate()) {
3607 return true;
3609 if (!hasNoNaNs())
3610 return false;
3611 [[fallthrough]];
3613 return hasNonZeroFPOperands(this);
3614 default:
3615 return false;
3616 }
3617}
3618
3620 switch (pred) {
3621 default: llvm_unreachable("Unknown cmp predicate!");
3622 case ICMP_EQ: return ICMP_NE;
3623 case ICMP_NE: return ICMP_EQ;
3624 case ICMP_UGT: return ICMP_ULE;
3625 case ICMP_ULT: return ICMP_UGE;
3626 case ICMP_UGE: return ICMP_ULT;
3627 case ICMP_ULE: return ICMP_UGT;
3628 case ICMP_SGT: return ICMP_SLE;
3629 case ICMP_SLT: return ICMP_SGE;
3630 case ICMP_SGE: return ICMP_SLT;
3631 case ICMP_SLE: return ICMP_SGT;
3632
3633 case FCMP_OEQ: return FCMP_UNE;
3634 case FCMP_ONE: return FCMP_UEQ;
3635 case FCMP_OGT: return FCMP_ULE;
3636 case FCMP_OLT: return FCMP_UGE;
3637 case FCMP_OGE: return FCMP_ULT;
3638 case FCMP_OLE: return FCMP_UGT;
3639 case FCMP_UEQ: return FCMP_ONE;
3640 case FCMP_UNE: return FCMP_OEQ;
3641 case FCMP_UGT: return FCMP_OLE;
3642 case FCMP_ULT: return FCMP_OGE;
3643 case FCMP_UGE: return FCMP_OLT;
3644 case FCMP_ULE: return FCMP_OGT;
3645 case FCMP_ORD: return FCMP_UNO;
3646 case FCMP_UNO: return FCMP_ORD;
3647 case FCMP_TRUE: return FCMP_FALSE;
3648 case FCMP_FALSE: return FCMP_TRUE;
3649 }
3650}
3651
3653 switch (Pred) {
3654 default: return "unknown";
3655 case FCmpInst::FCMP_FALSE: return "false";
3656 case FCmpInst::FCMP_OEQ: return "oeq";
3657 case FCmpInst::FCMP_OGT: return "ogt";
3658 case FCmpInst::FCMP_OGE: return "oge";
3659 case FCmpInst::FCMP_OLT: return "olt";
3660 case FCmpInst::FCMP_OLE: return "ole";
3661 case FCmpInst::FCMP_ONE: return "one";
3662 case FCmpInst::FCMP_ORD: return "ord";
3663 case FCmpInst::FCMP_UNO: return "uno";
3664 case FCmpInst::FCMP_UEQ: return "ueq";
3665 case FCmpInst::FCMP_UGT: return "ugt";
3666 case FCmpInst::FCMP_UGE: return "uge";
3667 case FCmpInst::FCMP_ULT: return "ult";
3668 case FCmpInst::FCMP_ULE: return "ule";
3669 case FCmpInst::FCMP_UNE: return "une";
3670 case FCmpInst::FCMP_TRUE: return "true";
3671 case ICmpInst::ICMP_EQ: return "eq";
3672 case ICmpInst::ICMP_NE: return "ne";
3673 case ICmpInst::ICMP_SGT: return "sgt";
3674 case ICmpInst::ICMP_SGE: return "sge";
3675 case ICmpInst::ICMP_SLT: return "slt";
3676 case ICmpInst::ICMP_SLE: return "sle";
3677 case ICmpInst::ICMP_UGT: return "ugt";
3678 case ICmpInst::ICMP_UGE: return "uge";
3679 case ICmpInst::ICMP_ULT: return "ult";
3680 case ICmpInst::ICMP_ULE: return "ule";
3681 }
3682}
3683
3685 OS << CmpInst::getPredicateName(Pred);
3686 return OS;
3687}
3688
3690 switch (pred) {
3691 default: llvm_unreachable("Unknown icmp predicate!");
3692 case ICMP_EQ: case ICMP_NE:
3693 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3694 return pred;
3695 case ICMP_UGT: return ICMP_SGT;
3696 case ICMP_ULT: return ICMP_SLT;
3697 case ICMP_UGE: return ICMP_SGE;
3698 case ICMP_ULE: return ICMP_SLE;
3699 }
3700}
3701
3703 switch (pred) {
3704 default: llvm_unreachable("Unknown icmp predicate!");
3705 case ICMP_EQ: case ICMP_NE:
3706 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3707 return pred;
3708 case ICMP_SGT: return ICMP_UGT;
3709 case ICMP_SLT: return ICMP_ULT;
3710 case ICMP_SGE: return ICMP_UGE;
3711 case ICMP_SLE: return ICMP_ULE;
3712 }
3713}
3714
3716 switch (pred) {
3717 default: llvm_unreachable("Unknown cmp predicate!");
3718 case ICMP_EQ: case ICMP_NE:
3719 return pred;
3720 case ICMP_SGT: return ICMP_SLT;
3721 case ICMP_SLT: return ICMP_SGT;
3722 case ICMP_SGE: return ICMP_SLE;
3723 case ICMP_SLE: return ICMP_SGE;
3724 case ICMP_UGT: return ICMP_ULT;
3725 case ICMP_ULT: return ICMP_UGT;
3726 case ICMP_UGE: return ICMP_ULE;
3727 case ICMP_ULE: return ICMP_UGE;
3728
3729 case FCMP_FALSE: case FCMP_TRUE:
3730 case FCMP_OEQ: case FCMP_ONE:
3731 case FCMP_UEQ: case FCMP_UNE:
3732 case FCMP_ORD: case FCMP_UNO:
3733 return pred;
3734 case FCMP_OGT: return FCMP_OLT;
3735 case FCMP_OLT: return FCMP_OGT;
3736 case FCMP_OGE: return FCMP_OLE;
3737 case FCMP_OLE: return FCMP_OGE;
3738 case FCMP_UGT: return FCMP_ULT;
3739 case FCMP_ULT: return FCMP_UGT;
3740 case FCMP_UGE: return FCMP_ULE;
3741 case FCMP_ULE: return FCMP_UGE;
3742 }
3743}
3744
3746 switch (pred) {
3747 case ICMP_SGE:
3748 case ICMP_SLE:
3749 case ICMP_UGE:
3750 case ICMP_ULE:
3751 case FCMP_OGE:
3752 case FCMP_OLE:
3753 case FCMP_UGE:
3754 case FCMP_ULE:
3755 return true;
3756 default:
3757 return false;
3758 }
3759}
3760
3762 switch (pred) {
3763 case ICMP_SGT:
3764 case ICMP_SLT:
3765 case ICMP_UGT:
3766 case ICMP_ULT:
3767 case FCMP_OGT:
3768 case FCMP_OLT:
3769 case FCMP_UGT:
3770 case FCMP_ULT:
3771 return true;
3772 default:
3773 return false;
3774 }
3775}
3776
3778 switch (pred) {
3779 case ICMP_SGE:
3780 return ICMP_SGT;
3781 case ICMP_SLE:
3782 return ICMP_SLT;
3783 case ICMP_UGE:
3784 return ICMP_UGT;
3785 case ICMP_ULE:
3786 return ICMP_ULT;
3787 case FCMP_OGE:
3788 return FCMP_OGT;
3789 case FCMP_OLE:
3790 return FCMP_OLT;
3791 case FCMP_UGE:
3792 return FCMP_UGT;
3793 case FCMP_ULE:
3794 return FCMP_ULT;
3795 default:
3796 return pred;
3797 }
3798}
3799
3801 switch (pred) {
3802 case ICMP_SGT:
3803 return ICMP_SGE;
3804 case ICMP_SLT:
3805 return ICMP_SLE;
3806 case ICMP_UGT:
3807 return ICMP_UGE;
3808 case ICMP_ULT:
3809 return ICMP_ULE;
3810 case FCMP_OGT:
3811 return FCMP_OGE;
3812 case FCMP_OLT:
3813 return FCMP_OLE;
3814 case FCMP_UGT:
3815 return FCMP_UGE;
3816 case FCMP_ULT:
3817 return FCMP_ULE;
3818 default:
3819 return pred;
3820 }
3821}
3822
3824 assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
3825
3826 if (isStrictPredicate(pred))
3827 return getNonStrictPredicate(pred);
3828 if (isNonStrictPredicate(pred))
3829 return getStrictPredicate(pred);
3830
3831 llvm_unreachable("Unknown predicate!");
3832}
3833
3834bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
3835 ICmpInst::Predicate Pred) {
3836 assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
3837 switch (Pred) {
3839 return LHS.eq(RHS);
3841 return LHS.ne(RHS);
3843 return LHS.ugt(RHS);
3845 return LHS.uge(RHS);
3847 return LHS.ult(RHS);
3849 return LHS.ule(RHS);
3851 return LHS.sgt(RHS);
3853 return LHS.sge(RHS);
3855 return LHS.slt(RHS);
3857 return LHS.sle(RHS);
3858 default:
3859 llvm_unreachable("Unexpected non-integer predicate.");
3860 };
3861}
3862
3863bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
3864 FCmpInst::Predicate Pred) {
3865 APFloat::cmpResult R = LHS.compare(RHS);
3866 switch (Pred) {
3867 default:
3868 llvm_unreachable("Invalid FCmp Predicate");
3870 return false;
3872 return true;
3873 case FCmpInst::FCMP_UNO:
3874 return R == APFloat::cmpUnordered;
3875 case FCmpInst::FCMP_ORD:
3876 return R != APFloat::cmpUnordered;
3877 case FCmpInst::FCMP_UEQ:
3878 return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
3879 case FCmpInst::FCMP_OEQ:
3880 return R == APFloat::cmpEqual;
3881 case FCmpInst::FCMP_UNE:
3882 return R != APFloat::cmpEqual;
3883 case FCmpInst::FCMP_ONE:
3885 case FCmpInst::FCMP_ULT:
3886 return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
3887 case FCmpInst::FCMP_OLT:
3888 return R == APFloat::cmpLessThan;
3889 case FCmpInst::FCMP_UGT:
3891 case FCmpInst::FCMP_OGT:
3892 return R == APFloat::cmpGreaterThan;
3893 case FCmpInst::FCMP_ULE:
3894 return R != APFloat::cmpGreaterThan;
3895 case FCmpInst::FCMP_OLE:
3896 return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
3897 case FCmpInst::FCMP_UGE:
3898 return R != APFloat::cmpLessThan;
3899 case FCmpInst::FCMP_OGE:
3900 return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
3901 }
3902}
3903
3904std::optional<bool> ICmpInst::compare(const KnownBits &LHS,
3905 const KnownBits &RHS,
3906 ICmpInst::Predicate Pred) {
3907 switch (Pred) {
3908 case ICmpInst::ICMP_EQ:
3909 return KnownBits::eq(LHS, RHS);
3910 case ICmpInst::ICMP_NE:
3911 return KnownBits::ne(LHS, RHS);
3912 case ICmpInst::ICMP_UGE:
3913 return KnownBits::uge(LHS, RHS);
3914 case ICmpInst::ICMP_UGT:
3915 return KnownBits::ugt(LHS, RHS);
3916 case ICmpInst::ICMP_ULE:
3917 return KnownBits::ule(LHS, RHS);
3918 case ICmpInst::ICMP_ULT:
3919 return KnownBits::ult(LHS, RHS);
3920 case ICmpInst::ICMP_SGE:
3921 return KnownBits::sge(LHS, RHS);
3922 case ICmpInst::ICMP_SGT:
3923 return KnownBits::sgt(LHS, RHS);
3924 case ICmpInst::ICMP_SLE:
3925 return KnownBits::sle(LHS, RHS);
3926 case ICmpInst::ICMP_SLT:
3927 return KnownBits::slt(LHS, RHS);
3928 default:
3929 llvm_unreachable("Unexpected non-integer predicate.");
3930 }
3931}
3932
3934 if (CmpInst::isEquality(pred))
3935 return pred;
3936 if (isSigned(pred))
3937 return getUnsignedPredicate(pred);
3938 if (isUnsigned(pred))
3939 return getSignedPredicate(pred);
3940
3941 llvm_unreachable("Unknown predicate!");
3942}
3943
3945 switch (predicate) {
3946 default: return false;
3949 case FCmpInst::FCMP_ORD: return true;
3950 }
3951}
3952
3954 switch (predicate) {
3955 default: return false;
3958 case FCmpInst::FCMP_UNO: return true;
3959 }
3960}
3961
3963 switch(predicate) {
3964 default: return false;
3965 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3966 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3967 }
3968}
3969
3971 switch(predicate) {
3972 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3973 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3974 default: return false;
3975 }
3976}
3977
3979 // If the predicates match, then we know the first condition implies the
3980 // second is true.
3981 if (CmpPredicate::getMatching(Pred1, Pred2))
3982 return true;
3983
3984 if (Pred1.hasSameSign() && CmpInst::isSigned(Pred2))
3986 else if (Pred2.hasSameSign() && CmpInst::isSigned(Pred1))
3988
3989 switch (Pred1) {
3990 default:
3991 break;
3992 case CmpInst::ICMP_EQ:
3993 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
3994 return Pred2 == CmpInst::ICMP_UGE || Pred2 == CmpInst::ICMP_ULE ||
3995 Pred2 == CmpInst::ICMP_SGE || Pred2 == CmpInst::ICMP_SLE;
3996 case CmpInst::ICMP_UGT: // A >u B implies A != B and A >=u B are true.
3997 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_UGE;
3998 case CmpInst::ICMP_ULT: // A <u B implies A != B and A <=u B are true.
3999 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_ULE;
4000 case CmpInst::ICMP_SGT: // A >s B implies A != B and A >=s B are true.
4001 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SGE;
4002 case CmpInst::ICMP_SLT: // A <s B implies A != B and A <=s B are true.
4003 return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SLE;
4004 }
4005 return false;
4006}
4007
4009 CmpPredicate Pred2) {
4010 return isImpliedTrueByMatchingCmp(Pred1,
4012}
4013
4015 CmpPredicate Pred2) {
4016 if (isImpliedTrueByMatchingCmp(Pred1, Pred2))
4017 return true;
4018 if (isImpliedFalseByMatchingCmp(Pred1, Pred2))
4019 return false;
4020 return std::nullopt;
4021}
4022
4023//===----------------------------------------------------------------------===//
4024// CmpPredicate Implementation
4025//===----------------------------------------------------------------------===//
4026
4027std::optional<CmpPredicate> CmpPredicate::getMatching(CmpPredicate A,
4028 CmpPredicate B) {
4029 if (A.Pred == B.Pred)
4030 return A.HasSameSign == B.HasSameSign ? A : CmpPredicate(A.Pred);
4032 return {};
4033 if (A.HasSameSign &&
4035 return B.Pred;
4036 if (B.HasSameSign &&
4038 return A.Pred;
4039 return {};
4040}
4041
4045
4047 if (auto *ICI = dyn_cast<ICmpInst>(Cmp))
4048 return ICI->getCmpPredicate();
4049 return Cmp->getPredicate();
4050}
4051
4055
4057 return getSwapped(get(Cmp));
4058}
4059
4060//===----------------------------------------------------------------------===//
4061// SwitchInst Implementation
4062//===----------------------------------------------------------------------===//
4063
4064void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
4065 assert(Value && Default && NumReserved);
4066 ReservedSpace = NumReserved;
4068 allocHungoffUses(ReservedSpace);
4069
4070 Op<0>() = Value;
4071 Op<1>() = Default;
4072}
4073
/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination. The number of additional cases can
/// be specified here to make memory allocation more efficient. This
/// constructor can also autoinsert before another instruction.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  AllocMarker, InsertBefore) {
  // Reserve 2 operands for the condition and the default destination in
  // addition to the requested number of cases.
  init(Value, Default, 2 + NumCases);
}
4084
4085SwitchInst::SwitchInst(const SwitchInst &SI)
4086 : Instruction(SI.getType(), Instruction::Switch, AllocMarker) {
4087 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
4088 setNumHungOffUseOperands(SI.getNumOperands());
4089 Use *OL = getOperandList();
4090 ConstantInt **VL = case_values();
4091 const Use *InOL = SI.getOperandList();
4092 ConstantInt *const *InVL = SI.case_values();
4093 for (unsigned i = 2, E = SI.getNumOperands(); i != E; ++i) {
4094 OL[i] = InOL[i];
4095 VL[i - 2] = InVL[i - 2];
4096 }
4097 SubclassOptionalData = SI.SubclassOptionalData;
4098}
4099
4100/// addCase - Add an entry to the switch instruction...
4101///
4103 unsigned NewCaseIdx = getNumCases();
4104 unsigned OpNo = getNumOperands();
4105 if (OpNo + 1 > ReservedSpace)
4106 growOperands(); // Get more space!
4107 // Initialize some new operands.
4108 assert(OpNo < ReservedSpace && "Growing didn't work!");
4109 setNumHungOffUseOperands(OpNo + 1);
4110 CaseHandle Case(this, NewCaseIdx);
4111 Case.setValue(OnVal);
4112 Case.setSuccessor(Dest);
4113}
4114
4115/// removeCase - This method removes the specified case and its successor
4116/// from the switch instruction.
4118 unsigned idx = I->getCaseIndex();
4119
4120 assert(2 + idx < getNumOperands() && "Case index out of range!!!");
4121
4122 unsigned NumOps = getNumOperands();
4123 Use *OL = getOperandList();
4124 ConstantInt **VL = case_values();
4125
4126 // Overwrite this case with the end of the list.
4127 if (2 + idx + 1 != NumOps) {
4128 OL[2 + idx] = OL[NumOps - 1];
4129 VL[idx] = VL[NumOps - 2 - 1];
4130 }
4131
4132 // Nuke the last value.
4133 OL[NumOps - 1].set(nullptr);
4134 VL[NumOps - 2 - 1] = nullptr;
4136
4137 return CaseIt(this, idx);
4138}
4139
4140/// growOperands - grow operands - This grows the operand list in response
4141/// to a push_back style of operation. This grows the number of ops by 3 times.
4142///
4143void SwitchInst::growOperands() {
4144 unsigned e = getNumOperands();
4145 unsigned NumOps = e*3;
4146
4147 ReservedSpace = NumOps;
4148 growHungoffUses(ReservedSpace, /*WithExtraValues=*/true);
4149}
4150
4152 MDNode *ProfileData = getBranchWeightMDNode(SI);
4153 if (!ProfileData)
4154 return;
4155
4156 if (getNumBranchWeights(*ProfileData) != SI.getNumSuccessors()) {
4157 llvm_unreachable("number of prof branch_weights metadata operands does "
4158 "not correspond to number of succesors");
4159 }
4160
4162 if (!extractBranchWeights(ProfileData, Weights))
4163 return;
4164 this->Weights = std::move(Weights);
4165}
4166
4169 if (Weights) {
4170 assert(SI.getNumSuccessors() == Weights->size() &&
4171 "num of prof branch_weights must accord with num of successors");
4172 Changed = true;
4173 // Copy the last case to the place of the removed one and shrink.
4174 // This is tightly coupled with the way SwitchInst::removeCase() removes
4175 // the cases in SwitchInst::removeCase(CaseIt).
4176 (*Weights)[I->getCaseIndex() + 1] = Weights->back();
4177 Weights->pop_back();
4178 }
4179 return SI.removeCase(I);
4180}
4181
4183 auto *DestBlock = I->getCaseSuccessor();
4184 if (Weights) {
4185 auto Weight = getSuccessorWeight(I->getCaseIndex() + 1);
4186 (*Weights)[0] = Weight.value();
4187 }
4188
4189 SI.setDefaultDest(DestBlock);
4190}
4191
4193 ConstantInt *OnVal, BasicBlock *Dest,
4195 SI.addCase(OnVal, Dest);
4196
4197 if (!Weights && W && *W) {
4198 Changed = true;
4199 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4200 (*Weights)[SI.getNumSuccessors() - 1] = *W;
4201 } else if (Weights) {
4202 Changed = true;
4203 Weights->push_back(W.value_or(0));
4204 }
4205 if (Weights)
4206 assert(SI.getNumSuccessors() == Weights->size() &&
4207 "num of prof branch_weights must accord with num of successors");
4208}
4209
4212 // Instruction is erased. Mark as unchanged to not touch it in the destructor.
4213 Changed = false;
4214 if (Weights)
4215 Weights->resize(0);
4216 return SI.eraseFromParent();
4217}
4218
4221 if (!Weights)
4222 return std::nullopt;
4223 return (*Weights)[idx];
4224}
4225
4228 if (!W)
4229 return;
4230
4231 if (!Weights && *W)
4232 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4233
4234 if (Weights) {
4235 auto &OldW = (*Weights)[idx];
4236 if (*W != OldW) {
4237 Changed = true;
4238 OldW = *W;
4239 }
4240 }
4241}
4242
4245 unsigned idx) {
4246 if (MDNode *ProfileData = getBranchWeightMDNode(SI))
4247 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4248 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
4249 ->getValue()
4250 .getZExtValue();
4251
4252 return std::nullopt;
4253}
4254
4255//===----------------------------------------------------------------------===//
4256// IndirectBrInst Implementation
4257//===----------------------------------------------------------------------===//
4258
4259void IndirectBrInst::init(Value *Address, unsigned NumDests) {
4260 assert(Address && Address->getType()->isPointerTy() &&
4261 "Address of indirectbr must be a pointer");
4262 ReservedSpace = 1+NumDests;
4264 allocHungoffUses(ReservedSpace);
4265
4266 Op<0>() = Address;
4267}
4268
4269
4270/// growOperands - grow operands - This grows the operand list in response
4271/// to a push_back style of operation. This grows the number of ops by 2 times.
4272///
4273void IndirectBrInst::growOperands() {
4274 unsigned e = getNumOperands();
4275 unsigned NumOps = e*2;
4276
4277 ReservedSpace = NumOps;
4278 growHungoffUses(ReservedSpace);
4279}
4280
// Create an indirectbr on \p Address, reserving room for \p NumCases
// destinations; void-typed like all terminators.
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, AllocMarker, InsertBefore) {
  init(Address, NumCases);
}
4287
4288IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
4289 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
4290 AllocMarker) {
4291 NumUserOperands = IBI.NumUserOperands;
4292 allocHungoffUses(IBI.getNumOperands());
4293 Use *OL = getOperandList();
4294 const Use *InOL = IBI.getOperandList();
4295 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
4296 OL[i] = InOL[i];
4297 SubclassOptionalData = IBI.SubclassOptionalData;
4298}
4299
4300/// addDestination - Add a destination.
4301///
4303 unsigned OpNo = getNumOperands();
4304 if (OpNo+1 > ReservedSpace)
4305 growOperands(); // Get more space!
4306 // Initialize some new operands.
4307 assert(OpNo < ReservedSpace && "Growing didn't work!");
4309 getOperandList()[OpNo] = DestBB;
4310}
4311
4312/// removeDestination - This method removes the specified successor from the
4313/// indirectbr instruction.
4315 assert(idx < getNumOperands()-1 && "Successor index out of range!");
4316
4317 unsigned NumOps = getNumOperands();
4318 Use *OL = getOperandList();
4319
4320 // Replace this value with the last one.
4321 OL[idx+1] = OL[NumOps-1];
4322
4323 // Nuke the last value.
4324 OL[NumOps-1].set(nullptr);
4326}
4327
4328//===----------------------------------------------------------------------===//
4329// FreezeInst Implementation
4330//===----------------------------------------------------------------------===//
4331
// Freeze produces a value of the same type as its single operand.
FreezeInst::FreezeInst(Value *S, const Twine &Name, InsertPosition InsertBefore)
    : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
  setName(Name);
}
4336
4337//===----------------------------------------------------------------------===//
4338// cloneImpl() implementations
4339//===----------------------------------------------------------------------===//
4340
4341// Define these methods here so vtables don't get emitted into every translation
4342// unit that uses these classes.
4343
4344GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
4346 return new (AllocMarker) GetElementPtrInst(*this, AllocMarker);
4347}
4348
4352
4356
4358 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
4359}
4360
4362 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
4363}
4364
// Clone via the copy constructor (which copies the index list as well).
ExtractValueInst *ExtractValueInst::cloneImpl() const {
  return new ExtractValueInst(*this);
}
4368
// Clone via the copy constructor (which copies the index list as well).
InsertValueInst *InsertValueInst::cloneImpl() const {
  return new InsertValueInst(*this);
}
4372
4375 getOperand(0), getAlign());
4376 Result->setUsedWithInAlloca(isUsedWithInAlloca());
4377 Result->setSwiftError(isSwiftError());
4378 return Result;
4379}
4380
4382 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
4384}
4385
4390
4395 Result->setVolatile(isVolatile());
4396 Result->setWeak(isWeak());
4397 return Result;
4398}
4399
4401 AtomicRMWInst *Result =
4404 Result->setVolatile(isVolatile());
4405 return Result;
4406}
4407
4411
4413 return new TruncInst(getOperand(0), getType());
4414}
4415
4417 return new ZExtInst(getOperand(0), getType());
4418}
4419
4421 return new SExtInst(getOperand(0), getType());
4422}
4423
4425 return new FPTruncInst(getOperand(0), getType());
4426}
4427
4429 return new FPExtInst(getOperand(0), getType());
4430}
4431
4433 return new UIToFPInst(getOperand(0), getType());
4434}
4435
4437 return new SIToFPInst(getOperand(0), getType());
4438}
4439
4441 return new FPToUIInst(getOperand(0), getType());
4442}
4443
4445 return new FPToSIInst(getOperand(0), getType());
4446}
4447
4449 return new PtrToIntInst(getOperand(0), getType());
4450}
4451
4455
4457 return new IntToPtrInst(getOperand(0), getType());
4458}
4459
4461 return new BitCastInst(getOperand(0), getType());
4462}
4463
4467
4468CallInst *CallInst::cloneImpl() const {
4469 if (hasOperandBundles()) {
4473 return new (AllocMarker) CallInst(*this, AllocMarker);
4474 }
4476 return new (AllocMarker) CallInst(*this, AllocMarker);
4477}
4478
4479SelectInst *SelectInst::cloneImpl() const {
4481}
4482
4484 return new VAArgInst(getOperand(0), getType());
4485}
4486
4487ExtractElementInst *ExtractElementInst::cloneImpl() const {
4489}
4490
4491InsertElementInst *InsertElementInst::cloneImpl() const {
4493}
4494
4498
4499PHINode *PHINode::cloneImpl() const { return new (AllocMarker) PHINode(*this); }
4500
// Clone via the copy constructor.
LandingPadInst *LandingPadInst::cloneImpl() const {
  return new LandingPadInst(*this);
}
4504
4505ReturnInst *ReturnInst::cloneImpl() const {
4507 return new (AllocMarker) ReturnInst(*this, AllocMarker);
4508}
4509
// Clone via the copy constructor.
UncondBrInst *UncondBrInst::cloneImpl() const {
  return new (AllocMarker) UncondBrInst(*this);
}
4513
// Clone via the copy constructor.
CondBrInst *CondBrInst::cloneImpl() const {
  return new (AllocMarker) CondBrInst(*this);
}
4517
4518SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
4519
// Clone via the copy constructor (operands are hung off and copied there).
IndirectBrInst *IndirectBrInst::cloneImpl() const {
  return new IndirectBrInst(*this);
}
4523
4524InvokeInst *InvokeInst::cloneImpl() const {
4525 if (hasOperandBundles()) {
4529 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4530 }
4532 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4533}
4534
4535CallBrInst *CallBrInst::cloneImpl() const {
4536 if (hasOperandBundles()) {
4540 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4541 }
4543 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4544}
4545
// Clone via the copy constructor.
ResumeInst *ResumeInst::cloneImpl() const {
  return new (AllocMarker) ResumeInst(*this);
}
4549
4550CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
4552 return new (AllocMarker) CleanupReturnInst(*this, AllocMarker);
4553}
4554
// Clone via the copy constructor.
CatchReturnInst *CatchReturnInst::cloneImpl() const {
  return new (AllocMarker) CatchReturnInst(*this);
}
4558
// Clone via the copy constructor (operands are hung off and copied there).
CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
  return new CatchSwitchInst(*this);
}
4562
4563FuncletPadInst *FuncletPadInst::cloneImpl() const {
4565 return new (AllocMarker) FuncletPadInst(*this, AllocMarker);
4566}
4567
4569 LLVMContext &Context = getContext();
4570 return new UnreachableInst(Context);
4571}
4572
4573bool UnreachableInst::shouldLowerToTrap(bool TrapUnreachable,
4574 bool NoTrapAfterNoreturn) const {
4575 if (!TrapUnreachable)
4576 return false;
4577
4578 // We may be able to ignore unreachable behind a noreturn call.
4580 Call && Call->doesNotReturn()) {
4581 if (NoTrapAfterNoreturn)
4582 return false;
4583 // Do not emit an additional trap instruction.
4584 if (Call->isNonContinuableTrap())
4585 return false;
4586 }
4587
4588 if (getFunction()->hasFnAttribute(Attribute::Naked))
4589 return false;
4590
4591 return true;
4592}
4593
4595 return new FreezeInst(getOperand(0));
4596}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT S1
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_SUPPRESS_DEPRECATED_DECLARATIONS_PUSH
Definition Compiler.h:269
#define LLVM_SUPPRESS_DEPRECATED_DECLARATIONS_POP
Definition Compiler.h:270
#define LLVM_ABI
Definition Compiler.h:213
This file contains the declarations for the subclasses of Constant, which represent the different fla...
@ Default
#define op(i)
Module.h This file contains the declarations for the Module class.
static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos)
static bool isImpliedFalseByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
static Value * createPlaceholderForShuffleVector(Value *V)
static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos)
static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))
static bool hasNonZeroFPOperands(const CmpInst *Cmp)
static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)
Try to lower a vector shuffle as a bit rotation.
static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)
static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)
static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isImpliedTrueByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
static LLVM_SUPPRESS_DEPRECATED_DECLARATIONS_POP Value * getAISize(LLVMContext &Context, Value *Amt)
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file contains the declarations for metadata subclasses.
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
PowerPC Reduce CR logical Operation
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
This file implements the SmallBitVector class.
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
Value * RHS
Value * LHS
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
Definition APFloat.h:334
LLVM_ABI float convertToFloat() const
Converts this APFloat to host float value.
Definition APFloat.cpp:6066
Class for arbitrary precision integers.
Definition APInt.h:78
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1345
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1654
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition APInt.h:1613
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
This class represents a conversion between pointers from one address space to another.
LLVM_ABI AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
LLVM_ABI AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI AllocaInst * cloneImpl() const
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
LLVM_ABI AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, InsertPosition InsertBefore)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:131
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
iterator begin() const
Definition ArrayRef.h:130
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:186
Class to represent array types.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
LLVM_ABI AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
friend class Instruction
Iterator for Instructions in a `BasicBlock.
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
void setAlignment(Align Align)
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
LLVM_ABI AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI AtomicRMWInst * cloneImpl() const
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ Add
*p = old + v
@ FAdd
*p = old + v
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ Sub
*p = old - v
@ And
*p = old & v
@ Xor
*p = old ^ v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ FSub
*p = old - v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMaximumNum
*p = maximumnum(old, v) maximumnum matches the behavior of llvm.maximumnum.
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
@ FMinimumNum
*p = minimumnum(old, v) minimumnum matches the behavior of llvm.minimumnum.
@ Nand
*p = ~(old & v)
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
void setOperation(BinOp Operation)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
BinOp getOperation() const
LLVM_ABI AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
void setAlignment(Align Align)
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
LLVM_ABI CaptureInfo getCaptureInfo() const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI const ConstantRange & getRange() const
Returns the value of the range attribute.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:124
static LLVM_ABI Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
LLVM Basic Block Representation.
Definition BasicBlock.h:62
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
LLVM_ABI bool swapOperands()
Exchange the two operands to this instruction.
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition InstrTypes.h:181
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
LLVM_ABI BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
static LLVM_ABI BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
LLVM_ABI BinaryOperator * cloneImpl() const
This class represents a no-op cast from one type to another.
LLVM_ABI BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
LLVM_ABI BitCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
bool isInlineAsm() const
Check if this call is an inline asm statement.
LLVM_ABI BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)
Return the BundleOpInfo for the operand at index OpIdx.
void setCallingConv(CallingConv::ID CC)
LLVM_ABI FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
bundle_op_iterator bundle_op_info_begin()
Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.
LLVM_ABI bool paramHasNonNullAttr(unsigned ArgNo, bool AllowUndefOrPoison) const
Return true if this argument has the nonnull attribute on either the CallBase instruction or the call...
LLVM_ABI MemoryEffects getMemoryEffects() const
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
LLVM_ABI bool doesNotAccessMemory() const
Determine if the call does not access memory.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
LLVM_ABI void setOnlyAccessesArgMemory()
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
OperandBundleUse operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const
Simple helper function to map a BundleOpInfo to an OperandBundleUse.
LLVM_ABI void setOnlyAccessesInaccessibleMemOrArgMem()
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI void setDoesNotAccessMemory()
AttributeSet getParamAttributes(unsigned ArgNo) const
Return the param attributes for this call.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
LLVM_ABI bool onlyAccessesInaccessibleMemory() const
Determine if the function may only access memory that is inaccessible from the IR.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
bundle_op_iterator bundle_op_info_end()
Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.
LLVM_ABI unsigned getNumSubclassExtraOperandsDynamic() const
Get the number of extra operands for instructions that don't have a fixed number of extra operands.
BundleOpInfo * bundle_op_iterator
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
LLVM_ABI bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
LLVM_ABI void setOnlyReadsMemory()
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
LLVM_ABI bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
LLVM_ABI CaptureInfo getCaptureInfo(unsigned OpNo) const
Return which pointer components this operand may capture.
LLVM_ABI bool hasArgumentWithAdditionalReturnCaptureComponents() const
Returns whether the call has an argument that has an attribute like captures(ret: address,...
CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
Value * getCalledOperand() const
LLVM_ABI void setOnlyWritesMemory()
LLVM_ABI op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)
Populate the BundleOpInfo instances and the Use& vector from Bundles.
AttributeList Attrs
parameter attributes for callable
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
LLVM_ABI std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
LLVM_ABI bool isReturnNonNull() const
Return true if the return value is known to be not null.
Value * getArgOperand(unsigned i) const
FunctionType * FTy
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
LLVM_ABI Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
LLVM_ABI void setOnlyAccessesInaccessibleMemory()
static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
LLVM_ABI bool onlyWritesMemory() const
Determine if the call does not access or only writes memory.
LLVM_ABI bool hasClobberingOperandBundles() const
Return true if this operand bundle user has operand bundles that may write to the heap.
void setCalledOperand(Value *V)
static LLVM_ABI CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
LLVM_ABI bool hasReadingOperandBundles() const
Return true if this operand bundle user has operand bundles that may read from the heap.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI void setMemoryEffects(MemoryEffects ME)
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
SmallVector< BasicBlock *, 16 > getIndirectDests() const
void setDefaultDest(BasicBlock *B)
void setIndirectDest(unsigned i, BasicBlock *B)
BasicBlock * getDefaultDest() const
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
LLVM_ABI CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
TailCallKind getTailCallKind() const
LLVM_ABI CallInst * cloneImpl() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Represents which components of the pointer may be captured in which location.
Definition ModRef.h:414
CaptureComponents getOtherComponents() const
Get components potentially captured through locations other than the return value.
Definition ModRef.h:446
static CaptureInfo none()
Create CaptureInfo that does not capture any components of the pointer.
Definition ModRef.h:427
static CaptureInfo all()
Create CaptureInfo that may capture all components of the pointer.
Definition ModRef.h:430
CaptureComponents getRetComponents() const
Get components potentially captured by the return value.
Definition ModRef.h:442
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static LLVM_ABI CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast or an AddrSpaceCast cast instruction.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition InstrTypes.h:610
static LLVM_ABI unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, const DataLayout *DL)
Determine how a pair of casts can be eliminated, if they can be at all.
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static LLVM_ABI CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
CastInst(Type *Ty, unsigned iType, Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics for subclasses.
Definition InstrTypes.h:451
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static LLVM_ABI bool isBitCastable(Type *SrcTy, Type *DestTy)
Check whether a bitcast between these types is valid.
static LLVM_ABI CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
static LLVM_ABI CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
static LLVM_ABI bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
static LLVM_ABI CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
LLVM_ABI bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source a...
static LLVM_ABI CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a SExt or BitCast cast instruction.
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
LLVM_ABI CatchReturnInst * cloneImpl() const
void setUnwindDest(BasicBlock *UnwindDest)
LLVM_ABI void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction... Note: This action invalidates handler_end().
LLVM_ABI CatchSwitchInst * cloneImpl() const
mapped_iterator< op_iterator, DerefFnTy > handler_iterator
Value * getParentPad() const
void setParentPad(Value *ParentPad)
BasicBlock * getUnwindDest() const
LLVM_ABI void removeHandler(handler_iterator HI)
LLVM_ABI CleanupReturnInst * cloneImpl() const
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition InstrTypes.h:858
bool isEquality() const
Determine if this is an equals/not equals predicate.
Definition InstrTypes.h:915
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition InstrTypes.h:768
bool isFalseWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:948
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition InstrTypes.h:693
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition InstrTypes.h:691
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition InstrTypes.h:688
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition InstrTypes.h:689
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition InstrTypes.h:678
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
LLVM_ABI bool isEquivalence(bool Invert=false) const
Determine if one operand of this compare can always be replaced by the other operand,...
bool isSigned() const
Definition InstrTypes.h:930
static LLVM_ABI bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
bool isTrueWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:942
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
static bool isFPPredicate(Predicate P)
Definition InstrTypes.h:770
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition InstrTypes.h:871
static LLVM_ABI CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate, the two operands and the instructio...
bool isNonStrictPredicate() const
Definition InstrTypes.h:852
LLVM_ABI void swapOperands()
This is just a convenience that dispatches to the subclasses.
static bool isRelational(Predicate P)
Return true if the predicate is relational (not EQ or NE).
Definition InstrTypes.h:923
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
static LLVM_ABI StringRef getPredicateName(Predicate P)
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
bool isStrictPredicate() const
Definition InstrTypes.h:843
static LLVM_ABI bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
Definition InstrTypes.h:893
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
static LLVM_ABI bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
LLVM_ABI CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name="", InsertPosition InsertBefore=nullptr, Instruction *FlagsSource=nullptr)
bool isUnsigned() const
Definition InstrTypes.h:936
LLVM_ABI bool isCommutative() const
This is just a convenience that dispatches to the subclasses.
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
CmpPredicate()
Default constructor.
static LLVM_ABI CmpPredicate get(const CmpInst *Cmp)
Do a ICmpInst::getCmpPredicate() or CmpInst::getPredicate(), as appropriate.
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
bool hasSameSign() const
Query samesign information, for optimizations.
static LLVM_ABI CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
Conditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
LLVM_ABI CondBrInst * cloneImpl() const
Value * getCondition() const
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:420
const APFloat & getValueAPF() const
Definition Constants.h:463
This is the shared class of boolean and integer constants.
Definition Constants.h:87
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
LLVM_ABI ExtractElementInst * cloneImpl() const
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
LLVM_ABI ExtractValueInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
bool isEquality() const
static LLVM_ABI bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
FCmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
This class represents an extension of floating point types.
LLVM_ABI FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
LLVM_ABI FPExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
This class represents a cast from floating point to signed integer.
LLVM_ABI FPToSIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
This class represents a cast from floating point to unsigned integer.
LLVM_ABI FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
LLVM_ABI FPToUIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a truncation of floating point types.
LLVM_ABI FPTruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
LLVM_ABI FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
LLVM_ABI FenceInst * cloneImpl() const
friend class Instruction
Iterator for Instructions in a BasicBlock.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
LLVM_ABI FreezeInst(Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
void setParentPad(Value *ParentPad)
Value * getParentPad() const
Convenience accessors.
LLVM_ABI FuncletPadInst * cloneImpl() const
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
bool isVarArg() const
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutInBounds() const
unsigned getRaw() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
LLVM_ABI bool isInBounds() const
Determine whether the GEP has the inbounds flag.
LLVM_ABI bool hasNoUnsignedSignedWrap() const
Determine whether the GEP has the nusw flag.
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
LLVM_ABI bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
LLVM_ABI bool hasNoUnsignedWrap() const
Determine whether the GEP has the nuw flag.
LLVM_ABI bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
LLVM_ABI void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
LLVM_ABI bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
LLVM_ABI GetElementPtrInst * cloneImpl() const
LLVM_ABI bool collectOffset(const DataLayout &DL, unsigned BitWidth, SmallMapVector< Value *, APInt, 4 > &VariableOffsets, APInt &ConstantOffset) const
LLVM_ABI void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
LLVM_ABI GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
ICmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI ICmpInst * cloneImpl() const
Clone an identical ICmpInst.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
bool isEquality() const
Return true if this predicate is either EQ or NE.
static LLVM_ABI Predicate getFlippedSignednessPredicate(Predicate Pred)
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static LLVM_ABI std::optional< bool > isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
Determine if Pred1 implies Pred2 is true, false, or if nothing can be inferred about the implication,...
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Indirect Branch Instruction.
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
LLVM_ABI void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
LLVM_ABI IndirectBrInst * cloneImpl() const
LLVM_ABI InsertElementInst * cloneImpl() const
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
bool isValid() const
Definition Instruction.h:62
BasicBlock * getBasicBlock()
Definition Instruction.h:63
This instruction inserts a struct field or array element value into an aggregate value.
LLVM_ABI InsertValueInst * cloneImpl() const
BitfieldElement::Type getSubclassData() const
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI bool isVolatile() const LLVM_READONLY
Return true if this instruction has a volatile memory access.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Bitfield::Element< uint16_t, 0, 15 > OpaqueField
Instruction(const Instruction &)=delete
friend class BasicBlock
Various leaf nodes.
void setSubclassData(typename BitfieldElement::Type Value)
This class represents a cast from an integer to a pointer.
LLVM_ABI IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
Invoke instruction.
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
LLVM_ABI InvokeInst * cloneImpl() const
LLVM_ABI LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
void setUnwindDest(BasicBlock *B)
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVMContextImpl *const pImpl
Definition LLVMContext.h:70
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
LLVM_ABI LandingPadInst * cloneImpl() const
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
void setAlignment(Align Align)
bool isVolatile() const
Return true if this is a load from a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
LLVM_ABI LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
void setVolatile(bool V)
Specify whether this is a volatile load or not.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
LLVM_ABI LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, InsertPosition InsertBefore)
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
static MemoryEffectsBase readOnly()
Definition ModRef.h:133
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition ModRef.h:252
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition ModRef.h:246
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:143
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:149
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition ModRef.h:265
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition ModRef.h:255
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition ModRef.h:249
static MemoryEffectsBase writeOnly()
Definition ModRef.h:138
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:166
static MemoryEffectsBase none()
Definition ModRef.h:128
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
Definition ModRef.h:305
StringRef getTag() const
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
LLVM_ABI void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)
Remove all incoming values for which the predicate returns true.
void setIncomingBlock(unsigned i, BasicBlock *BB)
LLVM_ABI Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
LLVM_ABI bool hasConstantOrUndefValue() const
Whether the specified PHI node always merges together the same value, assuming undefs are equal to a ...
void setIncomingValue(unsigned i, Value *V)
const_block_iterator block_end() const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
LLVM_ABI Value * hasConstantValue() const
If the specified PHI node always merges together the same value, return the value,...
LLVM_ABI PHINode * cloneImpl() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Class to represent pointers.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
PtrToAddrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
PtrToAddrInst * cloneImpl() const
Clone an identical PtrToAddrInst.
This class represents a cast from a pointer to an integer.
LLVM_ABI PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
LLVM_ABI ResumeInst * cloneImpl() const
Return a value (possibly void), from a function.
LLVM_ABI ReturnInst * cloneImpl() const
This class represents a sign extension of integer types.
LLVM_ABI SExtInst * cloneImpl() const
Clone an identical SExtInst.
LLVM_ABI SExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a cast from signed integer to floating point.
LLVM_ABI SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
LLVM_ABI SIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Class to represent scalable SIMD vectors.
LLVM_ABI SelectInst * cloneImpl() const
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
static LLVM_ABI bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
ArrayRef< int > getShuffleMask() const
static LLVM_ABI bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
LLVM_ABI ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static LLVM_ABI bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
LLVM_ABI bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
static LLVM_ABI bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)
Return true if this shuffle mask represents "clustered" mask of size VF, i.e.
LLVM_ABI bool isIdentityWithPadding() const
Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.
static LLVM_ABI bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector.
LLVM_ABI bool isConcat() const
Return true if this shuffle concatenates its 2 source vectors.
static LLVM_ABI bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor, unsigned &Index)
Check if the mask is a DE-interleave mask of the given factor Factor like: <Index,...
LLVM_ABI ShuffleVectorInst * cloneImpl() const
static LLVM_ABI bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
LLVM_ABI void setShuffleMask(ArrayRef< int > Mask)
friend class Instruction
Iterator for Instructions in a BasicBlock.
LLVM_ABI bool isInterleave(unsigned Factor)
Return if this shuffle interleaves its two input vectors together.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static LLVM_ABI bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
LLVM_ABI void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
static LLVM_ABI bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static LLVM_ABI Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)
static LLVM_ABI bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)
Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor tim...
static LLVM_ABI bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Align getAlign() const
void setVolatile(bool V)
Specify whether this is a volatile store or not.
void setAlignment(Align Align)
friend class Instruction
Iterator for Instructions in a BasicBlock.
LLVM_ABI StoreInst * cloneImpl() const
LLVM_ABI StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
bool isVolatile() const
Return true if this is a store to a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Class to represent struct types.
LLVM_ABI void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
LLVM_ABI Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
LLVM_ABI CaseWeightOpt getSuccessorWeight(unsigned idx)
LLVM_ABI void replaceDefaultDest(SwitchInst::CaseIt I)
Replace the default destination by given case.
std::optional< uint32_t > CaseWeightOpt
LLVM_ABI SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
Multiway switch.
void allocHungoffUses(unsigned N)
LLVM_ABI SwitchInst * cloneImpl() const
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
ConstantInt *const * case_values() const
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
LLVM_ABI CaseIt removeCase(CaseIt I)
This method removes the specified case and its successor from the switch instruction.
This class represents a truncation of integer types.
LLVM_ABI TruncInst * cloneImpl() const
Clone an identical TruncInst.
LLVM_ABI TruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
static constexpr TypeSize get(ScalarTy Quantity, bool Scalable)
Definition TypeSize.h:340
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isByteTy() const
True if this is an instance of ByteType.
Definition Type.h:242
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:313
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:263
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM_ABI bool isFirstClassType() const
Return true if the type is "first class", meaning it is a valid type for a Value.
Definition Type.cpp:255
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:201
bool isByteOrByteVectorTy() const
Return true if this is a byte type or a vector of byte types.
Definition Type.h:248
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:321
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:130
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:310
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:186
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:287
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
bool isTokenTy() const
Return true if this is 'token'.
Definition Type.h:236
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:227
This class represents a cast unsigned integer to floating point.
LLVM_ABI UIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
UnaryInstruction(Type *Ty, unsigned iType, Value *V, InsertPosition InsertBefore=nullptr)
Definition InstrTypes.h:62
static LLVM_ABI UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a unary instruction, given the opcode and an operand.
LLVM_ABI UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
LLVM_ABI UnaryOperator * cloneImpl() const
UnaryOps getOpcode() const
Definition InstrTypes.h:154
Unconditional Branch instruction.
LLVM_ABI UncondBrInst * cloneImpl() const
LLVM_ABI UnreachableInst(LLVMContext &C, InsertPosition InsertBefore=nullptr)
LLVM_ABI bool shouldLowerToTrap(bool TrapUnreachable, bool NoTrapAfterNoreturn) const
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
LLVM_ABI UnreachableInst * cloneImpl() const
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM_ABI void set(Value *Val)
Definition Value.h:907
Use * op_iterator
Definition User.h:254
const Use * getOperandList() const
Definition User.h:200
op_iterator op_begin()
Definition User.h:259
LLVM_ABI void allocHungoffUses(unsigned N, bool WithExtraValues=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition User.cpp:54
const Use & getOperandUse(unsigned i) const
Definition User.h:220
void setNumHungOffUseOperands(unsigned NumOps)
Subclasses with hung off uses need to manage the operand count themselves.
Definition User.h:240
Use & Op()
Definition User.h:171
LLVM_ABI void growHungoffUses(unsigned N, bool WithExtraValues=false)
Grow the number of hung off uses.
Definition User.cpp:71
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
op_iterator op_end()
Definition User.h:261
VAArgInst(Value *List, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI VAArgInst * cloneImpl() const
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI Value(Type *Ty, unsigned scid)
Definition Value.cpp:53
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition Value.h:85
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:397
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
unsigned NumUserOperands
Definition Value.h:109
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
LLVM_ABI ZExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
typename base_list_type::iterator iterator
Definition ilist.h:121
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
bool match(Val *V, const Pattern &P)
cstfp_pred_ty< is_non_zero_not_denormal_fp > m_NonZeroNotDenormalFP()
Match a floating-point non-zero that is not a denormal.
initializer< Ty > init(const Ty &Val)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
Definition CoroShape.h:31
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
Context & getContext() const
Definition BasicBlock.h:99
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
auto seq_inclusive(T Begin, T End)
Iterate over an integral type from Begin to End inclusive.
Definition Sequence.h:325
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1669
unsigned getPointerAddressSpace(const Type *T)
Definition SPIRVUtils.h:374
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
Definition ModRef.h:356
constexpr auto equal_to(T &&Arg)
Functor variant of std::equal_to that can be used as a UnaryPredicate in functional algorithms like a...
Definition STLExtras.h:2173
std::enable_if_t< std::is_unsigned_v< T >, std::optional< T > > checkedMulUnsigned(T LHS, T RHS)
Multiply two unsigned integers LHS and RHS.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:368
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
constexpr int PoisonMaskElem
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
OperandBundleDefT< Value * > OperandBundleDef
Definition AutoUpgrade.h:34
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMul
Product of floats.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1885
constexpr unsigned BitWidth
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
bool capturesAnything(CaptureComponents CC)
Definition ModRef.h:379
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition STLExtras.h:2166
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
LLVM_ABI void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Summary of memprof metadata on allocations.
Used to keep track of an operand bundle.
uint32_t End
The index in the Use& vector where operands for this operand bundle ends.
uint32_t Begin
The index in the Use& vector where operands for this operand bundle starts.
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
static LLVM_ABI std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
static LLVM_ABI std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
static LLVM_ABI std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
static LLVM_ABI std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static LLVM_ABI std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static LLVM_ABI std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static LLVM_ABI std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
static LLVM_ABI std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static LLVM_ABI std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
static LLVM_ABI std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
Matching combinators.
A MapVector that performs no allocations if smaller than a certain size.
Definition MapVector.h:276
Indicates this User has operands co-allocated.
Definition User.h:60
Indicates this User has operands and a descriptor co-allocated.
Definition User.h:66