LLVM 22.0.0git
Instructions.cpp
Go to the documentation of this file.
1//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements all of the non-inline methods for the LLVM instruction
10// classes.
11//
12//===----------------------------------------------------------------------===//
13
15#include "LLVMContextImpl.h"
18#include "llvm/ADT/Twine.h"
19#include "llvm/IR/Attributes.h"
20#include "llvm/IR/BasicBlock.h"
21#include "llvm/IR/Constant.h"
23#include "llvm/IR/Constants.h"
24#include "llvm/IR/DataLayout.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/InstrTypes.h"
28#include "llvm/IR/Instruction.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/MDBuilder.h"
32#include "llvm/IR/Metadata.h"
33#include "llvm/IR/Module.h"
34#include "llvm/IR/Operator.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Value.h"
46#include "llvm/Support/ModRef.h"
48#include <algorithm>
49#include <cassert>
50#include <cstdint>
51#include <optional>
52#include <vector>
53
54using namespace llvm;
55
57 "disable-i2p-p2i-opt", cl::init(false),
58 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
59
60//===----------------------------------------------------------------------===//
61// AllocaInst Class
62//===----------------------------------------------------------------------===//
63
// Compute the total size in bytes of this alloca, or std::nullopt when it
// cannot be determined (non-constant array count or multiplication overflow).
// NOTE(review): extraction dropped the signature line (presumably
// `AllocaInst::getAllocationSize(const DataLayout &DL) const {`) and the
// definition of `C` (presumably `auto *C = dyn_cast<ConstantInt>(...)` on the
// array size operand) — restore from upstream before building.
std::optional<TypeSize>
  TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
  if (isArrayAllocation()) {
    // A null `C` means the array element count is not a compile-time constant.
    if (!C)
      return std::nullopt;
    assert(!Size.isScalable() && "Array elements cannot have a scalable size");
    // Overflow-checked multiply: element size * constant element count.
    auto CheckedProd =
        checkedMulUnsigned(Size.getKnownMinValue(), C->getZExtValue());
    if (!CheckedProd)
      return std::nullopt;
    return TypeSize::getFixed(*CheckedProd);
  }
  // Non-array allocation: the allocated type's alloc size is the answer.
  return Size;
}
80
// Same as getAllocationSize, but scaled to bits; std::nullopt when the byte
// size is unknown or the *8 multiplication overflows.
// NOTE(review): extraction dropped the signature line (presumably
// `AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {`) —
// restore from upstream.
std::optional<TypeSize>
  std::optional<TypeSize> Size = getAllocationSize(DL);
  if (!Size)
    return std::nullopt;
  // Overflow-checked conversion from bytes to bits on the known-min value.
  auto CheckedProd = checkedMulUnsigned(Size->getKnownMinValue(),
                                        static_cast<TypeSize::ScalarTy>(8));
  if (!CheckedProd)
    return std::nullopt;
  // Preserve the scalable flag of the byte-size result.
  return TypeSize::get(*CheckedProd, Size->isScalable());
}
92
93//===----------------------------------------------------------------------===//
94// SelectInst Class
95//===----------------------------------------------------------------------===//
96
/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  // The two selected values must agree in type.
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    // NOTE(review): the definition of `ET` (presumably
    // `VectorType *ET = dyn_cast<VectorType>(Op1->getType());`) was lost in
    // extraction — restore from upstream.
    if (!ET)
      return "selected values for vector select must be vectors";
    // Element counts (fixed or scalable) of condition and values must match.
    if (ET->getElementCount() != VT->getElementCount())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}
121
122//===----------------------------------------------------------------------===//
123// PHINode Class
124//===----------------------------------------------------------------------===//
125
// Copy-construct a PHI node: duplicates PN's incoming values and their
// associated incoming blocks.
// NOTE(review): extraction dropped lines between the initializer list and
// the operand copy (likely the hung-off use allocation) and before the
// closing brace — verify against upstream.
PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, AllocMarker),
      ReservedSpace(PN.getNumOperands()) {
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
}
135
// removeIncomingValue - Remove an incoming value. This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase. However,
  // clients might not expect this to happen. The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  copyIncomingBlocks(drop_begin(blocks(), Idx + 1), Idx);

  // Nuke the last value.
  Op<-1>().set(nullptr);
  // NOTE(review): a statement shrinking the operand count (presumably
  // `setNumHungOffUseOperands(getNumOperands() - 1);`) was lost in
  // extraction — restore from upstream.

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    // NOTE(review): the replace-all-uses + erase statements that belong here
    // were lost in extraction — restore from upstream.
  }
  return Removed;
}
161
// Remove every incoming entry whose index satisfies Predicate, compacting
// the surviving (value, block) pairs to the front of the operand list.
void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
                                    bool DeletePHIIfEmpty) {
  unsigned NumOps = getNumIncomingValues();
  unsigned NewNumOps = 0;
  for (unsigned Idx = 0; Idx < NumOps; ++Idx) {
    if (Predicate(Idx))
      continue;

    // Keep this entry: slide it down into the next free slot.
    if (Idx != NewNumOps) {
      setIncomingValue(NewNumOps, getIncomingValue(Idx));
      setIncomingBlock(NewNumOps, getIncomingBlock(Idx));
    }
    ++NewNumOps;
  }

  // Nothing matched the predicate; the PHI is untouched.
  if (NewNumOps == NumOps)
    return;

  // Remove operands.
  for (unsigned Idx = NewNumOps; Idx < NumOps; ++Idx)
    getOperandUse(Idx).set(nullptr);

  setNumHungOffUseOperands(NewNumOps);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    // NOTE(review): the replace-all-uses + erase statements that belong here
    // were lost in extraction — restore from upstream.
  }
}
193
194/// growOperands - grow operands - This grows the operand list in response
195/// to a push_back style of operation. This grows the number of ops by 1.5
196/// times.
197///
198void PHINode::growOperands() {
199 unsigned e = getNumOperands();
200 unsigned NumOps = e + e / 2;
201 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
202
203 ReservedSpace = NumOps;
204 growHungoffUses(ReservedSpace, /*WithExtraValues=*/true);
205}
206
/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
// NOTE(review): the signature line (presumably
// `Value *PHINode::hasConstantValue() const {`) was lost in extraction —
// restore from upstream.
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  // Every incoming value is the PHI itself: no meaningful value flows in.
  if (ConstantValue == this)
    return PoisonValue::get(getType());
  return ConstantValue;
}
223
/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
// NOTE(review): the signature line (presumably
// `bool PHINode::hasConstantOrUndefValue() const {`) and the definition of
// `Incoming` (presumably `Value *Incoming = getIncomingValue(i);`) were lost
// in extraction — restore from upstream.
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    // Self-references and undefs are ignored; all other incomings must agree.
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}
241
242//===----------------------------------------------------------------------===//
243// LandingPadInst Implementation
244//===----------------------------------------------------------------------===//
245
// Construct a landingpad of result type RetTy with space reserved for
// NumReservedValues clauses; clause storage is hung off and grown on demand.
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, AllocMarker, InsertBefore) {
  init(NumReservedValues, NameStr);
}
252
253LandingPadInst::LandingPadInst(const LandingPadInst &LP)
254 : Instruction(LP.getType(), Instruction::LandingPad, AllocMarker),
255 ReservedSpace(LP.getNumOperands()) {
258 Use *OL = getOperandList();
259 const Use *InOL = LP.getOperandList();
260 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
261 OL[I] = InOL[I];
262
263 setCleanup(LP.isCleanup());
264}
265
// Factory wrapper around the constructor above; callers own the result.
LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       InsertPosition InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}
271
272void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
273 ReservedSpace = NumReservedValues;
275 allocHungoffUses(ReservedSpace);
276 setName(NameStr);
277 setCleanup(false);
278}
279
280/// growOperands - grow operands - This grows the operand list in response to a
281/// push_back style of operation. This grows the number of ops by 2 times.
282void LandingPadInst::growOperands(unsigned Size) {
283 unsigned e = getNumOperands();
284 if (ReservedSpace >= e + Size) return;
285 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
286 growHungoffUses(ReservedSpace);
287}
288
290 unsigned OpNo = getNumOperands();
291 growOperands(1);
292 assert(OpNo < ReservedSpace && "Growing didn't work!");
294 getOperandList()[OpNo] = Val;
295}
296
297//===----------------------------------------------------------------------===//
298// CallBase Implementation
299//===----------------------------------------------------------------------===//
300
302 InsertPosition InsertPt) {
303 switch (CB->getOpcode()) {
304 case Instruction::Call:
305 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
306 case Instruction::Invoke:
307 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
308 case Instruction::CallBr:
309 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
310 default:
311 llvm_unreachable("Unknown CallBase sub-class!");
312 }
313}
314
316 InsertPosition InsertPt) {
318 for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
319 auto ChildOB = CI->getOperandBundleAt(i);
320 if (ChildOB.getTagName() != OpB.getTag())
321 OpDefs.emplace_back(ChildOB);
322 }
323 OpDefs.emplace_back(OpB);
324 return CallBase::Create(CI, OpDefs, InsertPt);
325}
326
328
330 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
331 return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
332}
333
335 const Value *V = getCalledOperand();
336 if (isa<Function>(V) || isa<Constant>(V))
337 return false;
338 return !isInlineAsm();
339}
340
341/// Tests if this call site must be tail call optimized. Only a CallInst can
342/// be tail call optimized.
344 if (auto *CI = dyn_cast<CallInst>(this))
345 return CI->isMustTailCall();
346 return false;
347}
348
349/// Tests if this call site is marked as a tail call.
351 if (auto *CI = dyn_cast<CallInst>(this))
352 return CI->isTailCall();
353 return false;
354}
355
358 return F->getIntrinsicID();
360}
361
363 FPClassTest Mask = Attrs.getRetNoFPClass();
364
365 if (const Function *F = getCalledFunction())
366 Mask |= F->getAttributes().getRetNoFPClass();
367 return Mask;
368}
369
371 FPClassTest Mask = Attrs.getParamNoFPClass(i);
372
373 if (const Function *F = getCalledFunction())
374 Mask |= F->getAttributes().getParamNoFPClass(i);
375 return Mask;
376}
377
// Return the value range implied by `range` return attributes, if any. When
// both the call site and the callee carry one, the result is their
// intersection.
std::optional<ConstantRange> CallBase::getRange() const {
  Attribute CallAttr = Attrs.getRetAttr(Attribute::Range);
  // NOTE(review): the declaration of FnAttr (presumably `Attribute FnAttr;`)
  // was lost in extraction — restore from upstream.
  if (const Function *F = getCalledFunction())
    FnAttr = F->getRetAttribute(Attribute::Range);

  if (CallAttr.isValid() && FnAttr.isValid())
    return CallAttr.getRange().intersectWith(FnAttr.getRange());
  if (CallAttr.isValid())
    return CallAttr.getRange();
  if (FnAttr.isValid())
    return FnAttr.getRange();
  return std::nullopt;
}
392
394 if (hasRetAttr(Attribute::NonNull))
395 return true;
396
397 if (getRetDereferenceableBytes() > 0 &&
399 return true;
400
401 return false;
402}
403
405 unsigned Index;
406
407 if (Attrs.hasAttrSomewhere(Kind, &Index))
408 return getArgOperand(Index - AttributeList::FirstArgIndex);
409 if (const Function *F = getCalledFunction())
410 if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
411 return getArgOperand(Index - AttributeList::FirstArgIndex);
412
413 return nullptr;
414}
415
/// Determine whether the argument or parameter has the given attribute.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < arg_size() && "Param index out of bounds!");

  // An attribute on the call site itself wins immediately.
  if (Attrs.hasParamAttr(ArgNo, Kind))
    return true;

  // Otherwise consult the callee's declaration, if it is statically known.
  const Function *F = getCalledFunction();
  if (!F)
    return false;

  if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
    return false;

  // Take into account mod/ref by operand bundles.
  // NOTE(review): this switch appears to have lost lines in extraction —
  // upstream has distinct `return` statements for the ReadNone and ReadOnly
  // cases (involving hasClobberingOperandBundles()); as written here all
  // three cases fall through to the WriteOnly result. Verify against
  // upstream before relying on these semantics.
  switch (Kind) {
  case Attribute::ReadNone:
  case Attribute::ReadOnly:
  case Attribute::WriteOnly:
    return !hasReadingOperandBundles();
  default:
    return true;
  }
}
442
444 bool AllowUndefOrPoison) const {
446 "Argument must be a pointer");
447 if (paramHasAttr(ArgNo, Attribute::NonNull) &&
448 (AllowUndefOrPoison || paramHasAttr(ArgNo, Attribute::NoUndef)))
449 return true;
450
451 if (paramHasAttr(ArgNo, Attribute::Dereferenceable) &&
453 getCaller(),
455 return true;
456
457 return false;
458}
459
460bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
462 return F->getAttributes().hasFnAttr(Kind);
463
464 return false;
465}
466
467bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
469 return F->getAttributes().hasFnAttr(Kind);
470
471 return false;
472}
473
474template <typename AK>
475Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
476 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
477 // getMemoryEffects() correctly combines memory effects from the call-site,
478 // operand bundles and function.
479 assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
480 }
481
483 return F->getAttributes().getFnAttr(Kind);
484
485 return Attribute();
486}
487
488template LLVM_ABI Attribute
489CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
490template LLVM_ABI Attribute
491CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
492
493template <typename AK>
494Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
495 AK Kind) const {
497
498 if (auto *F = dyn_cast<Function>(V))
499 return F->getAttributes().getParamAttr(ArgNo, Kind);
500
501 return Attribute();
502}
503template LLVM_ABI Attribute CallBase::getParamAttrOnCalledFunction(
504 unsigned ArgNo, Attribute::AttrKind Kind) const;
505template LLVM_ABI Attribute
506CallBase::getParamAttrOnCalledFunction(unsigned ArgNo, StringRef Kind) const;
507
510 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
512}
513
516 const unsigned BeginIndex) {
517 auto It = op_begin() + BeginIndex;
518 for (auto &B : Bundles)
519 It = std::copy(B.input_begin(), B.input_end(), It);
520
521 auto *ContextImpl = getContext().pImpl;
522 auto BI = Bundles.begin();
523 unsigned CurrentIndex = BeginIndex;
524
525 for (auto &BOI : bundle_op_infos()) {
526 assert(BI != Bundles.end() && "Incorrect allocation?");
527
528 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
529 BOI.Begin = CurrentIndex;
530 BOI.End = CurrentIndex + BI->input_size();
531 CurrentIndex = BOI.End;
532 BI++;
533 }
534
535 assert(BI == Bundles.end() && "Incorrect allocation?");
536
537 return It;
538}
539
541 /// When there isn't many bundles, we do a simple linear search.
542 /// Else fallback to a binary-search that use the fact that bundles usually
543 /// have similar number of argument to get faster convergence.
545 for (auto &BOI : bundle_op_infos())
546 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
547 return BOI;
548
549 llvm_unreachable("Did not find operand bundle for operand!");
550 }
551
552 assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
554 OpIdx < std::prev(bundle_op_info_end())->End &&
555 "The Idx isn't in the operand bundle");
556
557 /// We need a decimal number below and to prevent using floating point numbers
558 /// we use an intergal value multiplied by this constant.
559 constexpr unsigned NumberScaling = 1024;
560
563 bundle_op_iterator Current = Begin;
564
565 while (Begin != End) {
566 unsigned ScaledOperandPerBundle =
567 NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
568 Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
569 ScaledOperandPerBundle);
570 if (Current >= End)
571 Current = std::prev(End);
572 assert(Current < End && Current >= Begin &&
573 "the operand bundle doesn't cover every value in the range");
574 if (OpIdx >= Current->Begin && OpIdx < Current->End)
575 break;
576 if (OpIdx >= Current->End)
577 Begin = Current + 1;
578 else
579 End = Current;
580 }
581
582 assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
583 "the operand bundle doesn't cover every value in the range");
584 return *Current;
585}
586
589 InsertPosition InsertPt) {
590 if (CB->getOperandBundle(ID))
591 return CB;
592
594 CB->getOperandBundlesAsDefs(Bundles);
595 Bundles.push_back(OB);
596 return Create(CB, Bundles, InsertPt);
597}
598
600 InsertPosition InsertPt) {
602 bool CreateNew = false;
603
604 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
605 auto Bundle = CB->getOperandBundleAt(I);
606 if (Bundle.getTagID() == ID) {
607 CreateNew = true;
608 continue;
609 }
610 Bundles.emplace_back(Bundle);
611 }
612
613 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
614}
615
617 // Implementation note: this is a conservative implementation of operand
618 // bundle semantics, where *any* non-assume operand bundle (other than
619 // ptrauth) forces a callsite to be at least readonly.
624 getIntrinsicID() != Intrinsic::assume;
625}
626
635
637 MemoryEffects ME = getAttributes().getMemoryEffects();
638 if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
639 MemoryEffects FnME = Fn->getMemoryEffects();
640 if (hasOperandBundles()) {
641 // TODO: Add a method to get memory effects for operand bundles instead.
643 FnME |= MemoryEffects::readOnly();
645 FnME |= MemoryEffects::writeOnly();
646 }
647 if (isVolatile()) {
648 // Volatile operations also access inaccessible memory.
650 }
651 ME &= FnME;
652 }
653 return ME;
654}
658
659/// Determine if the function does not access memory.
666
667/// Determine if the function does not access or only reads memory.
674
675/// Determine if the function does not access or only writes memory.
682
683/// Determine if the call can access memmory only using pointers based
684/// on its arguments.
691
692/// Determine if the function may only access memory that is
693/// inaccessible from the IR.
700
701/// Determine if the function may only access memory that is
702/// either inaccessible from the IR or pointed to by its arguments.
710
712 if (OpNo < arg_size()) {
713 // If the argument is passed byval, the callee does not have access to the
714 // original pointer and thus cannot capture it.
715 if (isByValArgument(OpNo))
716 return CaptureInfo::none();
717
719 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
720 CI &= Fn->getAttributes().getParamAttrs(OpNo).getCaptureInfo();
721 return CI;
722 }
723
724 // Bundles on assumes are captures(none).
725 if (getIntrinsicID() == Intrinsic::assume)
726 return CaptureInfo::none();
727
728 // deopt operand bundles are captures(none)
729 auto &BOI = getBundleOpInfoForOperand(OpNo);
730 auto OBU = operandBundleFromBundleOpInfo(BOI);
731 return OBU.isDeoptOperandBundle() ? CaptureInfo::none() : CaptureInfo::all();
732}
733
735 for (unsigned I = 0, E = arg_size(); I < E; ++I) {
737 continue;
738
740 if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
741 CI &= Fn->getAttributes().getParamAttrs(I).getCaptureInfo();
743 return true;
744 }
745 return false;
746}
747
748//===----------------------------------------------------------------------===//
749// CallInst Implementation
750//===----------------------------------------------------------------------===//
751
// Initialize a CallInst whose operand storage was already allocated by the
// caller: record the callee's function type, wire up argument, bundle, and
// callee operands, and set the instruction name.
void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  // Operand layout: [args...][bundle operands...][callee] — hence the +1.
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");

#ifndef NDEBUG
  // Argument count must match the prototype (vararg callees may take more).
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  // Each fixed argument must match the corresponding parameter type.
  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setCalledOperand(Func);

  // Populate bundle bookkeeping in the slots right after the arguments;
  // the iterator returned must land exactly on the callee slot.
  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}
780
781void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
782 this->FTy = FTy;
783 assert(getNumOperands() == 1 && "NumOperands not set up?");
784 setCalledOperand(Func);
785
786 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
787
788 setName(NameStr);
789}
790
// Construct a zero-argument call to Func; the result type comes from the
// callee's function type.
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   AllocInfo AllocInfo, InsertPosition InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call, AllocInfo,
               InsertBefore) {
  init(Ty, Func, Name);
}
797
798CallInst::CallInst(const CallInst &CI, AllocInfo AllocInfo)
799 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call, AllocInfo) {
801 "Wrong number of operands allocated");
802 setTailCallKind(CI.getTailCallKind());
804
805 std::copy(CI.op_begin(), CI.op_end(), op_begin());
806 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
809}
810
812 InsertPosition InsertPt) {
813 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
814
815 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
816 Args, OpB, CI->getName(), InsertPt);
817 NewCI->setTailCallKind(CI->getTailCallKind());
818 NewCI->setCallingConv(CI->getCallingConv());
819 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
820 NewCI->setAttributes(CI->getAttributes());
821 NewCI->setDebugLoc(CI->getDebugLoc());
822 return NewCI;
823}
824
825// Update profile weight for call instruction by scaling it using the ratio
826// of S/T. The meaning of "branch_weights" meta data for call instruction is
827// transfered to represent call count.
829 if (T == 0) {
830 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
831 "div by 0. Ignoring. Likely the function "
832 << getParent()->getParent()->getName()
833 << " has 0 entry count, and contains call instructions "
834 "with non-zero prof info.");
835 return;
836 }
837 scaleProfData(*this, S, T);
838}
839
840//===----------------------------------------------------------------------===//
841// InvokeInst Implementation
842//===----------------------------------------------------------------------===//
843
844void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
845 BasicBlock *IfException, ArrayRef<Value *> Args,
847 const Twine &NameStr) {
848 this->FTy = FTy;
849
851 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
852 "NumOperands not set up?");
853
854#ifndef NDEBUG
855 assert(((Args.size() == FTy->getNumParams()) ||
856 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
857 "Invoking a function with bad signature");
858
859 for (unsigned i = 0, e = Args.size(); i != e; i++)
860 assert((i >= FTy->getNumParams() ||
861 FTy->getParamType(i) == Args[i]->getType()) &&
862 "Invoking a function with a bad signature!");
863#endif
864
865 // Set operands in order of their index to match use-list-order
866 // prediction.
867 llvm::copy(Args, op_begin());
868 setNormalDest(IfNormal);
869 setUnwindDest(IfException);
871
872 auto It = populateBundleOperandInfos(Bundles, Args.size());
873 (void)It;
874 assert(It + 3 == op_end() && "Should add up!");
875
876 setName(NameStr);
877}
878
879InvokeInst::InvokeInst(const InvokeInst &II, AllocInfo AllocInfo)
880 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke, AllocInfo) {
881 assert(getNumOperands() == II.getNumOperands() &&
882 "Wrong number of operands allocated");
883 setCallingConv(II.getCallingConv());
884 std::copy(II.op_begin(), II.op_end(), op_begin());
885 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
887 SubclassOptionalData = II.SubclassOptionalData;
888}
889
891 InsertPosition InsertPt) {
892 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
893
894 auto *NewII = InvokeInst::Create(
895 II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
896 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
897 NewII->setCallingConv(II->getCallingConv());
898 NewII->SubclassOptionalData = II->SubclassOptionalData;
899 NewII->setAttributes(II->getAttributes());
900 NewII->setDebugLoc(II->getDebugLoc());
901 return NewII;
902}
903
905 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHIIt());
906}
907
909 if (T == 0) {
910 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
911 "div by 0. Ignoring. Likely the function "
912 << getParent()->getParent()->getName()
913 << " has 0 entry count, and contains call instructions "
914 "with non-zero prof info.");
915 return;
916 }
917 scaleProfData(*this, S, T);
918}
919
920//===----------------------------------------------------------------------===//
921// CallBrInst Implementation
922//===----------------------------------------------------------------------===//
923
924void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
925 ArrayRef<BasicBlock *> IndirectDests,
928 const Twine &NameStr) {
929 this->FTy = FTy;
930
931 assert(getNumOperands() == ComputeNumOperands(Args.size(),
932 IndirectDests.size(),
933 CountBundleInputs(Bundles)) &&
934 "NumOperands not set up?");
935
936#ifndef NDEBUG
937 assert(((Args.size() == FTy->getNumParams()) ||
938 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
939 "Calling a function with bad signature");
940
941 for (unsigned i = 0, e = Args.size(); i != e; i++)
942 assert((i >= FTy->getNumParams() ||
943 FTy->getParamType(i) == Args[i]->getType()) &&
944 "Calling a function with a bad signature!");
945#endif
946
947 // Set operands in order of their index to match use-list-order
948 // prediction.
949 llvm::copy(Args, op_begin());
950 NumIndirectDests = IndirectDests.size();
951 setDefaultDest(Fallthrough);
952 for (unsigned i = 0; i != NumIndirectDests; ++i)
953 setIndirectDest(i, IndirectDests[i]);
955
956 auto It = populateBundleOperandInfos(Bundles, Args.size());
957 (void)It;
958 assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
959
960 setName(NameStr);
961}
962
963CallBrInst::CallBrInst(const CallBrInst &CBI, AllocInfo AllocInfo)
964 : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
965 AllocInfo) {
967 "Wrong number of operands allocated");
969 std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
970 std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
973 NumIndirectDests = CBI.NumIndirectDests;
974}
975
976CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
977 InsertPosition InsertPt) {
978 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
979
980 auto *NewCBI = CallBrInst::Create(
981 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
982 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
983 NewCBI->setCallingConv(CBI->getCallingConv());
984 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
985 NewCBI->setAttributes(CBI->getAttributes());
986 NewCBI->setDebugLoc(CBI->getDebugLoc());
987 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
988 return NewCBI;
989}
990
991//===----------------------------------------------------------------------===//
992// ReturnInst Implementation
993//===----------------------------------------------------------------------===//
994
995ReturnInst::ReturnInst(const ReturnInst &RI, AllocInfo AllocInfo)
996 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
997 AllocInfo) {
999 "Wrong number of operands allocated");
1000 if (RI.getNumOperands())
1001 Op<0>() = RI.Op<0>();
1003}
1004
// Construct a `ret` instruction; retVal may be null for `ret void`, in which
// case no operand slot is filled.
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, AllocInfo AllocInfo,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret, AllocInfo,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}
1012
1013//===----------------------------------------------------------------------===//
1014// ResumeInst Implementation
1015//===----------------------------------------------------------------------===//
1016
// Copy-construct a `resume`, duplicating the single exception operand.
ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  AllocMarker) {
  Op<0>() = RI.Op<0>();
}
1022
// Construct a `resume` that re-raises the exception value Exn.
ResumeInst::ResumeInst(Value *Exn, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  AllocMarker, InsertBefore) {
  Op<0>() = Exn;
}
1028
1029//===----------------------------------------------------------------------===//
1030// CleanupReturnInst Implementation
1031//===----------------------------------------------------------------------===//
1032
1033CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI,
1035 : Instruction(CRI.getType(), Instruction::CleanupRet, AllocInfo) {
1037 "Wrong number of operands allocated");
1038 setSubclassData<Instruction::OpaqueField>(
1040 Op<0>() = CRI.Op<0>();
1041 if (CRI.hasUnwindDest())
1042 Op<1>() = CRI.Op<1>();
1043}
1044
1045void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
1046 if (UnwindBB)
1047 setSubclassData<UnwindDestField>(true);
1048
1049 Op<0>() = CleanupPad;
1050 if (UnwindBB)
1051 Op<1>() = UnwindBB;
1052}
1053
1054CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
1056 InsertPosition InsertBefore)
1057 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
1058 Instruction::CleanupRet, AllocInfo, InsertBefore) {
1059 init(CleanupPad, UnwindBB);
1060}
1061
1062//===----------------------------------------------------------------------===//
1063// CatchReturnInst Implementation
1064//===----------------------------------------------------------------------===//
// Wire up a catchret's two operands: the catchpad token and the successor
// block to branch to.
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}
1069
// Copy-construct a catchret, duplicating both operands (pad and successor).
CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  AllocMarker) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}
1076
// Construct a catchret from CatchPad that transfers control to BB.
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  AllocMarker, InsertBefore) {
  init(CatchPad, BB);
}
1083
1084//===----------------------------------------------------------------------===//
1085// CatchSwitchInst Implementation
1086//===----------------------------------------------------------------------===//
1087
// Construct a catchswitch on ParentPad reserving NumReservedValues handler
// slots. One extra slot is added when an unwind destination is present, and
// init() receives one more still for the parent-pad operand itself.
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, AllocMarker,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}
1099
1100CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
1101 : Instruction(CSI.getType(), Instruction::CatchSwitch, AllocMarker) {
1103 init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
1104 setNumHungOffUseOperands(ReservedSpace);
1105 Use *OL = getOperandList();
1106 const Use *InOL = CSI.getOperandList();
1107 for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
1108 OL[I] = InOL[I];
1109}
1110
1111void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
1112 unsigned NumReservedValues) {
1113 assert(ParentPad && NumReservedValues);
1114
1115 ReservedSpace = NumReservedValues;
1116 setNumHungOffUseOperands(UnwindDest ? 2 : 1);
1117 allocHungoffUses(ReservedSpace);
1118
1119 Op<0>() = ParentPad;
1120 if (UnwindDest) {
1122 setUnwindDest(UnwindDest);
1123 }
1124}
1125
1126/// growOperands - grow operands - This grows the operand list in response to a
1127/// push_back style of operation. This grows the number of ops by 2 times.
1128void CatchSwitchInst::growOperands(unsigned Size) {
1129 unsigned NumOperands = getNumOperands();
1130 assert(NumOperands >= 1);
1131 if (ReservedSpace >= NumOperands + Size)
1132 return;
1133 ReservedSpace = (NumOperands + Size / 2) * 2;
1134 growHungoffUses(ReservedSpace);
1135}
1136
1138 unsigned OpNo = getNumOperands();
1139 growOperands(1);
1140 assert(OpNo < ReservedSpace && "Growing didn't work!");
1142 getOperandList()[OpNo] = Handler;
1143}
1144
1146 // Move all subsequent handlers up one.
1147 Use *EndDst = op_end() - 1;
1148 for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
1149 *CurDst = *(CurDst + 1);
1150 // Null out the last handler use.
1151 *EndDst = nullptr;
1152
1154}
1155
1156//===----------------------------------------------------------------------===//
1157// FuncletPadInst Implementation
1158//===----------------------------------------------------------------------===//
1159void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
1160 const Twine &NameStr) {
1161 assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
1162 llvm::copy(Args, op_begin());
1163 setParentPad(ParentPad);
1164 setName(NameStr);
1165}
1166
1167FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI, AllocInfo AllocInfo)
1168 : Instruction(FPI.getType(), FPI.getOpcode(), AllocInfo) {
1170 "Wrong number of operands allocated");
1171 std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
1173}
1174
1175FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1177 const Twine &NameStr,
1178 InsertPosition InsertBefore)
1179 : Instruction(ParentPad->getType(), Op, AllocInfo, InsertBefore) {
1180 init(ParentPad, Args, NameStr);
1181}
1182
1183//===----------------------------------------------------------------------===//
1184// UnreachableInst Implementation
1185//===----------------------------------------------------------------------===//
1186
1188 InsertPosition InsertBefore)
1189 : Instruction(Type::getVoidTy(Context), Instruction::Unreachable,
1190 AllocMarker, InsertBefore) {}
1191
1192//===----------------------------------------------------------------------===//
1193// BranchInst Implementation
1194//===----------------------------------------------------------------------===//
1195
1196void BranchInst::AssertOK() {
1197 if (isConditional())
1198 assert(getCondition()->getType()->isIntegerTy(1) &&
1199 "May only branch on boolean predicates!");
1200}
1201
// Construct an unconditional branch to IfTrue.
1202 BranchInst::BranchInst(BasicBlock *IfTrue, AllocInfo AllocInfo,
1203 InsertPosition InsertBefore)
1204 : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1205 AllocInfo, InsertBefore) {
1206 assert(IfTrue && "Branch destination may not be null!");
1207 Op<-1>() = IfTrue; // the single successor lives in the last operand slot
1208}
1209
// Construct a conditional branch. Operands are addressed from the end of the
// operand list: Cond at <-3>, IfFalse at <-2>, IfTrue at <-1>.
1210 BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
1211 AllocInfo AllocInfo, InsertPosition InsertBefore)
1212 : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1213 AllocInfo, InsertBefore) {
1214 // Assign in order of operand index to make use-list order predictable.
1215 Op<-3>() = Cond;
1216 Op<-2>() = IfFalse;
1217 Op<-1>() = IfTrue;
1218#ifndef NDEBUG
1219 AssertOK();
1220#endif
1221}
1222
1223BranchInst::BranchInst(const BranchInst &BI, AllocInfo AllocInfo)
1224 : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
1225 AllocInfo) {
1227 "Wrong number of operands allocated");
1228 // Assign in order of operand index to make use-list order predictable.
1229 if (BI.getNumOperands() != 1) {
1230 assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
1231 Op<-3>() = BI.Op<-3>();
1232 Op<-2>() = BI.Op<-2>();
1233 }
1234 Op<-1>() = BI.Op<-1>();
1236}
1237
1240 "Cannot swap successors of an unconditional branch");
1241 Op<-1>().swap(Op<-2>());
1242
1243 // Update profile metadata if present and it matches our structural
1244 // expectations.
1246}
1247
1248//===----------------------------------------------------------------------===//
1249// AllocaInst Implementation
1250//===----------------------------------------------------------------------===//
1251
1252static Value *getAISize(LLVMContext &Context, Value *Amt) {
1253 if (!Amt)
1254 Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1255 else {
1256 assert(!isa<BasicBlock>(Amt) &&
1257 "Passed basic block into allocation size parameter! Use other ctor");
1258 assert(Amt->getType()->isIntegerTy() &&
1259 "Allocation array size is not an integer!");
1260 }
1261 return Amt;
1262}
1263
1265 assert(Pos.isValid() &&
1266 "Insertion position cannot be null when alignment not provided!");
1267 BasicBlock *BB = Pos.getBasicBlock();
1268 assert(BB->getParent() &&
1269 "BB must be in a Function when alignment not provided!");
1270 const DataLayout &DL = BB->getDataLayout();
1271 return DL.getPrefTypeAlign(Ty);
1272}
1273
// Convenience constructor: a single-element alloca; delegates with a null
// array size (normalized to i32 1 by getAISize).
1274 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
1275 InsertPosition InsertBefore)
1276 : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1277
// Delegates to the aligned form, computing the default (preferred) alignment
// for Ty from the data layout at the insertion point.
1278 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1279 const Twine &Name, InsertPosition InsertBefore)
1280 : AllocaInst(Ty, AddrSpace, ArraySize,
1281 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
1282 InsertBefore) {}
1283
1284AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1285 Align Align, const Twine &Name,
1286 InsertPosition InsertBefore)
1287 : UnaryInstruction(PointerType::get(Ty->getContext(), AddrSpace), Alloca,
1288 getAISize(Ty->getContext(), ArraySize), InsertBefore),
1289 AllocatedType(Ty) {
1291 assert(!Ty->isVoidTy() && "Cannot allocate void!");
1292 setName(Name);
1293}
1294
1297 return !CI->isOne();
1298 return true;
1299}
1300
1301/// isStaticAlloca - Return true if this alloca is in the entry block of the
1302/// function and is a constant size. If so, the code generator will fold it
1303/// into the prolog/epilog code, so it is basically free.
1305 // Must be constant size.
1306 if (!isa<ConstantInt>(getArraySize())) return false;
1307
1308 // Must be in the entry block.
1309 const BasicBlock *Parent = getParent();
1310 return Parent->isEntryBlock() && !isUsedWithInAlloca();
1311}
1312
1313//===----------------------------------------------------------------------===//
1314// LoadInst Implementation
1315//===----------------------------------------------------------------------===//
1316
1317void LoadInst::AssertOK() {
1319 "Ptr must have pointer type.");
1320}
1321
1323 assert(Pos.isValid() &&
1324 "Insertion position cannot be null when alignment not provided!");
1325 BasicBlock *BB = Pos.getBasicBlock();
1326 assert(BB->getParent() &&
1327 "BB must be in a Function when alignment not provided!");
1328 const DataLayout &DL = BB->getDataLayout();
1329 return DL.getABITypeAlign(Ty);
1330}
1331
// Convenience constructor: a non-volatile load; delegates to the volatile
// flag overload.
1332 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
1333 InsertPosition InsertBef)
1334 : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1335
// Delegates to the aligned form, using the ABI alignment for Ty derived from
// the data layout at the insertion point.
1336 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1337 InsertPosition InsertBef)
1338 : LoadInst(Ty, Ptr, Name, isVolatile,
1339 computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1340
// Delegates to the full form as a non-atomic load (NotAtomic ordering,
// system sync scope).
1341 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1342 Align Align, InsertPosition InsertBef)
1343 : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1344 SyncScope::System, InsertBef) {}
1345
1346LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1348 InsertPosition InsertBef)
1349 : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1352 setAtomic(Order, SSID);
1353 AssertOK();
1354 setName(Name);
1355}
1356
1357//===----------------------------------------------------------------------===//
1358// StoreInst Implementation
1359//===----------------------------------------------------------------------===//
1360
1361void StoreInst::AssertOK() {
1362 assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1364 "Ptr must have pointer type!");
1365}
1366
1368 : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1369
1371 InsertPosition InsertBefore)
1372 : StoreInst(val, addr, isVolatile,
1373 computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
1374 InsertBefore) {}
1375
1377 InsertPosition InsertBefore)
1379 SyncScope::System, InsertBefore) {}
1380
1382 AtomicOrdering Order, SyncScope::ID SSID,
1383 InsertPosition InsertBefore)
1384 : Instruction(Type::getVoidTy(val->getContext()), Store, AllocMarker,
1385 InsertBefore) {
1386 Op<0>() = val;
1387 Op<1>() = addr;
1390 setAtomic(Order, SSID);
1391 AssertOK();
1392}
1393
1394//===----------------------------------------------------------------------===//
1395// AtomicCmpXchgInst Implementation
1396//===----------------------------------------------------------------------===//
1397
1398void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1399 Align Alignment, AtomicOrdering SuccessOrdering,
1400 AtomicOrdering FailureOrdering,
1401 SyncScope::ID SSID) {
1402 Op<0>() = Ptr;
1403 Op<1>() = Cmp;
1404 Op<2>() = NewVal;
1405 setSuccessOrdering(SuccessOrdering);
1406 setFailureOrdering(FailureOrdering);
1407 setSyncScopeID(SSID);
1408 setAlignment(Alignment);
1409
1410 assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1411 "All operands must be non-null!");
1413 "Ptr must have pointer type!");
1414 assert(getOperand(1)->getType() == getOperand(2)->getType() &&
1415 "Cmp type and NewVal type must be same!");
1416}
1417
1419 Align Alignment,
1420 AtomicOrdering SuccessOrdering,
1421 AtomicOrdering FailureOrdering,
1422 SyncScope::ID SSID,
1423 InsertPosition InsertBefore)
1424 : Instruction(
1425 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1426 AtomicCmpXchg, AllocMarker, InsertBefore) {
1427 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1428}
1429
1430//===----------------------------------------------------------------------===//
1431// AtomicRMWInst Implementation
1432//===----------------------------------------------------------------------===//
1433
1434void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1435 Align Alignment, AtomicOrdering Ordering,
1436 SyncScope::ID SSID) {
1437 assert(Ordering != AtomicOrdering::NotAtomic &&
1438 "atomicrmw instructions can only be atomic.");
1439 assert(Ordering != AtomicOrdering::Unordered &&
1440 "atomicrmw instructions cannot be unordered.");
1441 Op<0>() = Ptr;
1442 Op<1>() = Val;
1444 setOrdering(Ordering);
1445 setSyncScopeID(SSID);
1446 setAlignment(Alignment);
1447
1448 assert(getOperand(0) && getOperand(1) && "All operands must be non-null!");
1450 "Ptr must have pointer type!");
1451 assert(Ordering != AtomicOrdering::NotAtomic &&
1452 "AtomicRMW instructions must be atomic!");
1453}
1454
1456 Align Alignment, AtomicOrdering Ordering,
1457 SyncScope::ID SSID, InsertPosition InsertBefore)
1458 : Instruction(Val->getType(), AtomicRMW, AllocMarker, InsertBefore) {
1459 Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1460}
1461
1463 switch (Op) {
1465 return "xchg";
1466 case AtomicRMWInst::Add:
1467 return "add";
1468 case AtomicRMWInst::Sub:
1469 return "sub";
1470 case AtomicRMWInst::And:
1471 return "and";
1473 return "nand";
1474 case AtomicRMWInst::Or:
1475 return "or";
1476 case AtomicRMWInst::Xor:
1477 return "xor";
1478 case AtomicRMWInst::Max:
1479 return "max";
1480 case AtomicRMWInst::Min:
1481 return "min";
1483 return "umax";
1485 return "umin";
1487 return "fadd";
1489 return "fsub";
1491 return "fmax";
1493 return "fmin";
1495 return "fmaximum";
1497 return "fminimum";
1499 return "uinc_wrap";
1501 return "udec_wrap";
1503 return "usub_cond";
1505 return "usub_sat";
1507 return "<invalid operation>";
1508 }
1509
1510 llvm_unreachable("invalid atomicrmw operation");
1511}
1512
1513//===----------------------------------------------------------------------===//
1514// FenceInst Implementation
1515//===----------------------------------------------------------------------===//
1516
1518 SyncScope::ID SSID, InsertPosition InsertBefore)
1519 : Instruction(Type::getVoidTy(C), Fence, AllocMarker, InsertBefore) {
1520 setOrdering(Ordering);
1521 setSyncScopeID(SSID);
1522}
1523
1524//===----------------------------------------------------------------------===//
1525// GetElementPtrInst Implementation
1526//===----------------------------------------------------------------------===//
1527
1528void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1529 const Twine &Name) {
1530 assert(getNumOperands() == 1 + IdxList.size() &&
1531 "NumOperands not initialized?");
1532 Op<0>() = Ptr;
1533 llvm::copy(IdxList, op_begin() + 1);
1534 setName(Name);
1535}
1536
1537GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI,
1539 : Instruction(GEPI.getType(), GetElementPtr, AllocInfo),
1540 SourceElementType(GEPI.SourceElementType),
1541 ResultElementType(GEPI.ResultElementType) {
1542 assert(getNumOperands() == GEPI.getNumOperands() &&
1543 "Wrong number of operands allocated");
1544 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
1546}
1547
1549 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1550 if (!Struct->indexValid(Idx))
1551 return nullptr;
1552 return Struct->getTypeAtIndex(Idx);
1553 }
1554 if (!Idx->getType()->isIntOrIntVectorTy())
1555 return nullptr;
1556 if (auto *Array = dyn_cast<ArrayType>(Ty))
1557 return Array->getElementType();
1558 if (auto *Vector = dyn_cast<VectorType>(Ty))
1559 return Vector->getElementType();
1560 return nullptr;
1561}
1562
1564 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1565 if (Idx >= Struct->getNumElements())
1566 return nullptr;
1567 return Struct->getElementType(Idx);
1568 }
1569 if (auto *Array = dyn_cast<ArrayType>(Ty))
1570 return Array->getElementType();
1571 if (auto *Vector = dyn_cast<VectorType>(Ty))
1572 return Vector->getElementType();
1573 return nullptr;
1574}
1575
1576template <typename IndexTy>
1578 if (IdxList.empty())
1579 return Ty;
1580 for (IndexTy V : IdxList.slice(1)) {
1582 if (!Ty)
1583 return Ty;
1584 }
1585 return Ty;
1586}
1587
1591
1593 ArrayRef<Constant *> IdxList) {
1594 return getIndexedTypeInternal(Ty, IdxList);
1595}
1596
1600
1601/// hasAllZeroIndices - Return true if all of the indices of this GEP are
1602/// zeros. If so, the result pointer and the first operand have the same
1603/// value, just potentially different types.
1605 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1607 if (!CI->isZero()) return false;
1608 } else {
1609 return false;
1610 }
1611 }
1612 return true;
1613}
1614
1615/// hasAllConstantIndices - Return true if all of the indices of this GEP are
1616/// constant integers. If so, the result pointer and the first operand have
1617/// a constant offset between them.
1619 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1621 return false;
1622 }
1623 return true;
1624}
1625
1629
1631 GEPNoWrapFlags NW = cast<GEPOperator>(this)->getNoWrapFlags();
1632 if (B)
1634 else
1635 NW = NW.withoutInBounds();
1636 setNoWrapFlags(NW);
1637}
1638
1640 return cast<GEPOperator>(this)->getNoWrapFlags();
1641}
1642
1644 return cast<GEPOperator>(this)->isInBounds();
1645}
1646
1648 return cast<GEPOperator>(this)->hasNoUnsignedSignedWrap();
1649}
1650
1652 return cast<GEPOperator>(this)->hasNoUnsignedWrap();
1653}
1654
1656 APInt &Offset) const {
1657 // Delegate to the generic GEPOperator implementation.
1658 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
1659}
1660
1662 const DataLayout &DL, unsigned BitWidth,
1663 SmallMapVector<Value *, APInt, 4> &VariableOffsets,
1664 APInt &ConstantOffset) const {
1665 // Delegate to the generic GEPOperator implementation.
1666 return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
1667 ConstantOffset);
1668}
1669
1670//===----------------------------------------------------------------------===//
1671// ExtractElementInst Implementation
1672//===----------------------------------------------------------------------===//
1673
// Construct an extractelement; the result type is the element type of the
// source vector. Operand 0 is the vector, operand 1 the index.
1674 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1675 const Twine &Name,
1676 InsertPosition InsertBef)
1677 : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1678 ExtractElement, AllocMarker, InsertBef) {
1679 assert(isValidOperands(Val, Index) &&
1680 "Invalid extractelement instruction operands!");
1681 Op<0>() = Val;
1682 Op<1>() = Index;
1683 setName(Name);
1684}
1685
1686bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1687 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1688 return false;
1689 return true;
1690}
1691
1692//===----------------------------------------------------------------------===//
1693// InsertElementInst Implementation
1694//===----------------------------------------------------------------------===//
1695
// Construct an insertelement; the result type matches the input vector.
// Operand 0 is the vector, operand 1 the scalar, operand 2 the index.
1696 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1697 const Twine &Name,
1698 InsertPosition InsertBef)
1699 : Instruction(Vec->getType(), InsertElement, AllocMarker, InsertBef) {
1700 assert(isValidOperands(Vec, Elt, Index) &&
1701 "Invalid insertelement instruction operands!");
1702 Op<0>() = Vec;
1703 Op<1>() = Elt;
1704 Op<2>() = Index;
1705 setName(Name);
1706}
1707
1709 const Value *Index) {
1710 if (!Vec->getType()->isVectorTy())
1711 return false; // First operand of insertelement must be vector type.
1712
1713 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
1714 return false;// Second operand of insertelement must be vector element type.
1715
1716 if (!Index->getType()->isIntegerTy())
1717 return false; // Third operand of insertelement must be i32.
1718 return true;
1719}
1720
1721//===----------------------------------------------------------------------===//
1722// ShuffleVectorInst Implementation
1723//===----------------------------------------------------------------------===//
1724
1726 assert(V && "Cannot create placeholder of nullptr V");
1727 return PoisonValue::get(V->getType());
1728}
1729
1731 InsertPosition InsertBefore)
1733 InsertBefore) {}
1734
1736 const Twine &Name,
1737 InsertPosition InsertBefore)
1739 InsertBefore) {}
1740
1742 const Twine &Name,
1743 InsertPosition InsertBefore)
1744 : Instruction(
1745 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1746 cast<VectorType>(Mask->getType())->getElementCount()),
1747 ShuffleVector, AllocMarker, InsertBefore) {
1748 assert(isValidOperands(V1, V2, Mask) &&
1749 "Invalid shuffle vector instruction operands!");
1750
1751 Op<0>() = V1;
1752 Op<1>() = V2;
1753 SmallVector<int, 16> MaskArr;
1754 getShuffleMask(cast<Constant>(Mask), MaskArr);
1755 setShuffleMask(MaskArr);
1756 setName(Name);
1757}
1758
1760 const Twine &Name,
1761 InsertPosition InsertBefore)
1762 : Instruction(
1763 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1764 Mask.size(), isa<ScalableVectorType>(V1->getType())),
1765 ShuffleVector, AllocMarker, InsertBefore) {
1766 assert(isValidOperands(V1, V2, Mask) &&
1767 "Invalid shuffle vector instruction operands!");
1768 Op<0>() = V1;
1769 Op<1>() = V2;
1770 setShuffleMask(Mask);
1771 setName(Name);
1772}
1773
1775 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
1776 int NumMaskElts = ShuffleMask.size();
1777 SmallVector<int, 16> NewMask(NumMaskElts);
1778 for (int i = 0; i != NumMaskElts; ++i) {
1779 int MaskElt = getMaskValue(i);
1780 if (MaskElt == PoisonMaskElem) {
1781 NewMask[i] = PoisonMaskElem;
1782 continue;
1783 }
1784 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
1785 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
1786 NewMask[i] = MaskElt;
1787 }
1788 setShuffleMask(NewMask);
1789 Op<0>().swap(Op<1>());
1790}
1791
1793 ArrayRef<int> Mask) {
1794 // V1 and V2 must be vectors of the same type.
1795 if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
1796 return false;
1797
1798 // Make sure the mask elements make sense.
1799 int V1Size =
1800 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
1801 for (int Elem : Mask)
1802 if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
1803 return false;
1804
1806 if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
1807 return false;
1808
1809 return true;
1810}
1811
1813 const Value *Mask) {
1814 // V1 and V2 must be vectors of the same type.
1815 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
1816 return false;
1817
1818 // Mask must be vector of i32, and must be the same kind of vector as the
1819 // input vectors
1820 auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
1821 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
1823 return false;
1824
1825 // Check to see if Mask is valid.
1827 return true;
1828
1829 // NOTE: Through vector ConstantInt we have the potential to support more
1830 // than just zero splat masks but that requires a LangRef change.
1831 if (isa<ScalableVectorType>(MaskTy))
1832 return false;
1833
1834 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
1835
1836 if (const auto *CI = dyn_cast<ConstantInt>(Mask))
1837 return !CI->uge(V1Size * 2);
1838
1839 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
1840 for (Value *Op : MV->operands()) {
1841 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1842 if (CI->uge(V1Size*2))
1843 return false;
1844 } else if (!isa<UndefValue>(Op)) {
1845 return false;
1846 }
1847 }
1848 return true;
1849 }
1850
1851 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1852 for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
1853 i != e; ++i)
1854 if (CDS->getElementAsInteger(i) >= V1Size*2)
1855 return false;
1856 return true;
1857 }
1858
1859 return false;
1860}
1861
1863 SmallVectorImpl<int> &Result) {
1864 ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
1865
1866 if (isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) {
1867 int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
1868 Result.append(EC.getKnownMinValue(), MaskVal);
1869 return;
1870 }
1871
1872 assert(!EC.isScalable() &&
1873 "Scalable vector shuffle mask must be undef or zeroinitializer");
1874
1875 unsigned NumElts = EC.getFixedValue();
1876
1877 Result.reserve(NumElts);
1878
1879 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1880 for (unsigned i = 0; i != NumElts; ++i)
1881 Result.push_back(CDS->getElementAsInteger(i));
1882 return;
1883 }
1884 for (unsigned i = 0; i != NumElts; ++i) {
1885 Constant *C = Mask->getAggregateElement(i);
1886 Result.push_back(isa<UndefValue>(C) ? -1 :
1887 cast<ConstantInt>(C)->getZExtValue());
1888 }
1889}
1890
1892 ShuffleMask.assign(Mask.begin(), Mask.end());
1893 ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
1894}
1895
1897 Type *ResultTy) {
1898 Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
1899 if (isa<ScalableVectorType>(ResultTy)) {
1900 assert(all_equal(Mask) && "Unexpected shuffle");
1901 Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
1902 if (Mask[0] == 0)
1903 return Constant::getNullValue(VecTy);
1904 return PoisonValue::get(VecTy);
1905 }
1907 for (int Elem : Mask) {
1908 if (Elem == PoisonMaskElem)
1910 else
1911 MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
1912 }
1913 return ConstantVector::get(MaskConst);
1914}
1915
1916static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1917 assert(!Mask.empty() && "Shuffle mask must contain elements");
1918 bool UsesLHS = false;
1919 bool UsesRHS = false;
1920 for (int I : Mask) {
1921 if (I == -1)
1922 continue;
1923 assert(I >= 0 && I < (NumOpElts * 2) &&
1924 "Out-of-bounds shuffle mask element");
1925 UsesLHS |= (I < NumOpElts);
1926 UsesRHS |= (I >= NumOpElts);
1927 if (UsesLHS && UsesRHS)
1928 return false;
1929 }
1930 // Allow for degenerate case: completely undef mask means neither source is used.
1931 return UsesLHS || UsesRHS;
1932}
1933
1935 // We don't have vector operand size information, so assume operands are the
1936 // same size as the mask.
1937 return isSingleSourceMaskImpl(Mask, NumSrcElts);
1938}
1939
1940static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1941 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
1942 return false;
1943 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1944 if (Mask[i] == -1)
1945 continue;
1946 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1947 return false;
1948 }
1949 return true;
1950}
1951
1953 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1954 return false;
1955 // We don't have vector operand size information, so assume operands are the
1956 // same size as the mask.
1957 return isIdentityMaskImpl(Mask, NumSrcElts);
1958}
1959
1961 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1962 return false;
1963 if (!isSingleSourceMask(Mask, NumSrcElts))
1964 return false;
1965
1966 // The number of elements in the mask must be at least 2.
1967 if (NumSrcElts < 2)
1968 return false;
1969
1970 for (int I = 0, E = Mask.size(); I < E; ++I) {
1971 if (Mask[I] == -1)
1972 continue;
1973 if (Mask[I] != (NumSrcElts - 1 - I) &&
1974 Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
1975 return false;
1976 }
1977 return true;
1978}
1979
1981 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1982 return false;
1983 if (!isSingleSourceMask(Mask, NumSrcElts))
1984 return false;
1985 for (int I = 0, E = Mask.size(); I < E; ++I) {
1986 if (Mask[I] == -1)
1987 continue;
1988 if (Mask[I] != 0 && Mask[I] != NumSrcElts)
1989 return false;
1990 }
1991 return true;
1992}
1993
1995 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1996 return false;
1997 // Select is differentiated from identity. It requires using both sources.
1998 if (isSingleSourceMask(Mask, NumSrcElts))
1999 return false;
2000 for (int I = 0, E = Mask.size(); I < E; ++I) {
2001 if (Mask[I] == -1)
2002 continue;
2003 if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
2004 return false;
2005 }
2006 return true;
2007}
2008
2010 // Example masks that will return true:
2011 // v1 = <a, b, c, d>
2012 // v2 = <e, f, g, h>
2013 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
2014 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
2015
2016 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2017 return false;
2018 // 1. The number of elements in the mask must be a power-of-2 and at least 2.
2019 int Sz = Mask.size();
2020 if (Sz < 2 || !isPowerOf2_32(Sz))
2021 return false;
2022
2023 // 2. The first element of the mask must be either a 0 or a 1.
2024 if (Mask[0] != 0 && Mask[0] != 1)
2025 return false;
2026
2027 // 3. The difference between the first 2 elements must be equal to the
2028 // number of elements in the mask.
2029 if ((Mask[1] - Mask[0]) != NumSrcElts)
2030 return false;
2031
2032 // 4. The difference between consecutive even-numbered and odd-numbered
2033 // elements must be equal to 2.
2034 for (int I = 2; I < Sz; ++I) {
2035 int MaskEltVal = Mask[I];
2036 if (MaskEltVal == -1)
2037 return false;
2038 int MaskEltPrevVal = Mask[I - 2];
2039 if (MaskEltVal - MaskEltPrevVal != 2)
2040 return false;
2041 }
2042 return true;
2043}
2044
2046 int &Index) {
2047 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2048 return false;
2049 // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2050 int StartIndex = -1;
2051 for (int I = 0, E = Mask.size(); I != E; ++I) {
2052 int MaskEltVal = Mask[I];
2053 if (MaskEltVal == -1)
2054 continue;
2055
2056 if (StartIndex == -1) {
2057 // Don't support a StartIndex that begins in the second input, or if the
2058 // first non-undef index would access below the StartIndex.
2059 if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
2060 return false;
2061
2062 StartIndex = MaskEltVal - I;
2063 continue;
2064 }
2065
2066 // Splice is sequential starting from StartIndex.
2067 if (MaskEltVal != (StartIndex + I))
2068 return false;
2069 }
2070
2071 if (StartIndex == -1)
2072 return false;
2073
2074 // NOTE: This accepts StartIndex == 0 (COPY).
2075 Index = StartIndex;
2076 return true;
2077}
2078
2080 int NumSrcElts, int &Index) {
2081 // Must extract from a single source.
2082 if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
2083 return false;
2084
2085 // Must be smaller (else this is an Identity shuffle).
2086 if (NumSrcElts <= (int)Mask.size())
2087 return false;
2088
2089 // Find start of extraction, accounting that we may start with an UNDEF.
2090 int SubIndex = -1;
2091 for (int i = 0, e = Mask.size(); i != e; ++i) {
2092 int M = Mask[i];
2093 if (M < 0)
2094 continue;
2095 int Offset = (M % NumSrcElts) - i;
2096 if (0 <= SubIndex && SubIndex != Offset)
2097 return false;
2098 SubIndex = Offset;
2099 }
2100
2101 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2102 Index = SubIndex;
2103 return true;
2104 }
2105 return false;
2106}
2107
                                             int NumSrcElts, int &NumSubElts,
                                             int &Index) {
  // Match a mask that leaves one source in place and inserts a contiguous
  // identity run from the other source; on success NumSubElts/Index describe
  // the inserted span.  (The opening signature line of this static helper was
  // lost in extraction.)
  int NumMaskElts = Mask.size();

  // Don't try to match if we're shuffling to a smaller size.
  if (NumMaskElts < NumSrcElts)
    return false;

  // TODO: We don't recognize self-insertion/widening.
  if (isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Determine which mask elements are attributed to which source.
  APInt UndefElts = APInt::getZero(NumMaskElts);
  APInt Src0Elts = APInt::getZero(NumMaskElts);
  APInt Src1Elts = APInt::getZero(NumMaskElts);
  bool Src0Identity = true;
  bool Src1Identity = true;

  for (int i = 0; i != NumMaskElts; ++i) {
    int M = Mask[i];
    // Negative mask values are undef/poison lanes.
    if (M < 0) {
      UndefElts.setBit(i);
      continue;
    }
    // Indices [0, NumSrcElts) select from source 0, the rest from source 1.
    if (M < NumSrcElts) {
      Src0Elts.setBit(i);
      Src0Identity &= (M == i);
      continue;
    }
    Src1Elts.setBit(i);
    Src1Identity &= (M == (i + NumSrcElts));
  }
  assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
         "unknown shuffle elements");
  assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
         "2-source shuffle not found");

  // Determine lo/hi span ranges.
  // TODO: How should we handle undefs at the start of subvector insertions?
  int Src0Lo = Src0Elts.countr_zero();
  int Src1Lo = Src1Elts.countr_zero();
  int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
  int Src1Hi = NumMaskElts - Src1Elts.countl_zero();

  // If src0 is in place, see if the src1 elements is inplace within its own
  // span.
  if (Src0Identity) {
    int NumSub1Elts = Src1Hi - Src1Lo;
    ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
    if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
      NumSubElts = NumSub1Elts;
      Index = Src1Lo;
      return true;
    }
  }

  // If src1 is in place, see if the src0 elements is inplace within its own
  // span.
  if (Src1Identity) {
    int NumSub0Elts = Src0Hi - Src0Lo;
    ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
    if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
      NumSubElts = NumSub0Elts;
      Index = Src0Lo;
      return true;
    }
  }

  return false;
}
2180
  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  // NOTE(review): the condition line guarding this 'return false;' (a
  // scalable-vector check, judging by the comment above) was lost in
  // extraction, as were this method's signature line and the declaration of
  // 'Mask' used below.
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  // Padding means the result is strictly wider than each source operand.
  if (NumMaskElts <= NumOpElts)
    return false;

  // The first part of the mask must choose elements from exactly 1 source op.
  if (!isIdentityMaskImpl(Mask, NumOpElts))
    return false;

  // All extending must be with undef elements.
  for (int i = NumOpElts; i < NumMaskElts; ++i)
    if (Mask[i] != -1)
      return false;

  return true;
}
2204
  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  // NOTE(review): the signature line and the condition guarding this
  // 'return false;' were lost in extraction.
    return false;

  // Extract means the result is a strictly narrower identity slice of the
  // first source operand.
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts >= NumOpElts)
    return false;

  return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
}
2218
  // Vector concatenation is differentiated from identity with padding.
  // NOTE(review): the signature line and the two guard-condition lines for
  // the early returns below were lost in extraction.
    return false;

  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
    return false;

  // A concatenation result is exactly twice the (common) operand width.
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts != NumOpElts * 2)
    return false;

  // Use the mask length rather than the operands' vector lengths here. We
  // already know that the shuffle returns a vector twice as long as the inputs,
  // and neither of the inputs are undef vectors. If the mask picks consecutive
  // elements from both inputs, then this is a concatenation of the inputs.
  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
}
2240
                                                  int ReplicationFactor, int VF) {
  // Verify Mask is <0,...,0, 1,...,1, ..., VF-1,...,VF-1> with each element
  // repeated ReplicationFactor times; poison lanes act as wildcards.
  // (The opening signature line was lost in extraction.)
  assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
         "Unexpected mask size.");

  for (int CurrElt : seq(VF)) {
    ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
    assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
           "Run out of mask?");
    // Consume one ReplicationFactor-wide chunk per source element.
    Mask = Mask.drop_front(ReplicationFactor);
    if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
          return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
        }))
      return false;
  }
  assert(Mask.empty() && "Did not consume the whole mask?");

  return true;
}
2260
                                        int &ReplicationFactor, int &VF) {
  // Infer (ReplicationFactor, VF) from an arbitrary mask.  (The opening
  // signature line of this static overload was lost in extraction.)
  // undef-less case is trivial.
  if (!llvm::is_contained(Mask, PoisonMaskElem)) {
    // The length of the leading run of zeros fixes the replication factor.
    ReplicationFactor =
        Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
    if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
      return false;
    VF = Mask.size() / ReplicationFactor;
    return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
  }

  // However, if the mask contains undef's, we have to enumerate possible tuples
  // and pick one. There are bounds on replication factor: [1, mask size]
  // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
  // Additionally, mask size is a replication factor multiplied by vector size,
  // which further significantly reduces the search space.

  // Before doing that, let's perform basic correctness checking first.
  int Largest = -1;
  for (int MaskElt : Mask) {
    if (MaskElt == PoisonMaskElem)
      continue;
    // Elements must be in non-decreasing order.
    if (MaskElt < Largest)
      return false;
    Largest = std::max(Largest, MaskElt);
  }

  // Prefer larger replication factor if all else equal.
  for (int PossibleReplicationFactor :
       reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
    if (Mask.size() % PossibleReplicationFactor != 0)
      continue;
    int PossibleVF = Mask.size() / PossibleReplicationFactor;
    if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
                                     PossibleVF))
      continue;
    ReplicationFactor = PossibleReplicationFactor;
    VF = PossibleVF;
    return true;
  }

  return false;
}
2306
bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
                                          int &VF) const {
  // Not possible to express a shuffle mask for a scalable vector for this
  // case.
  // NOTE(review): the condition line guarding this 'return false;' was lost
  // in extraction.
    return false;

  // The operand type fixes VF, so only the replication factor is derived.
  VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  if (ShuffleMask.size() % VF != 0)
    return false;
  ReplicationFactor = ShuffleMask.size() / VF;

  return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
}
2321
  // (The opening signature line of this static overload was lost in
  // extraction.)  Checks that, for each VF-wide chunk of the mask, either the
  // whole chunk is poison or every in-bounds source lane [0, VF) is selected
  // at least once.
  if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
      Mask.size() % VF != 0)
    return false;
  for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
    ArrayRef<int> SubMask = Mask.slice(K, VF);
    // An all-poison chunk trivially satisfies the property.
    if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
      continue;
    SmallBitVector Used(VF, false);
    for (int Idx : SubMask) {
      if (Idx != PoisonMaskElem && Idx < VF)
        Used.set(Idx);
    }
    // Every lane of the source must be consumed by this chunk.
    if (!Used.all())
      return false;
  }
  return true;
}
2340
/// Return true if this shuffle reads from a single source and each VF-wide
/// chunk of its mask uses every lane of that source (a one-use single-source
/// mask).  NOTE(review): the previous comment here said "replication mask",
/// which does not match the isOneUseSingleSourceMask logic below.
  // Not possible to express a shuffle mask for a scalable vector for this
  // case.
  // NOTE(review): the signature line and the condition guarding this
  // 'return false;' were lost in extraction.
    return false;
  if (!isSingleSourceMask(ShuffleMask, VF))
    return false;

  return isOneUseSingleSourceMask(ShuffleMask, VF);
}
2352
bool ShuffleVectorInst::isInterleave(unsigned Factor) {
  // shuffle_vector can only interleave fixed length vectors - for scalable
  // vectors, see the @llvm.vector.interleave2 intrinsic
  // NOTE(review): OpTy's declaration line (apparently a dyn_cast of operand
  // 0's type to FixedVectorType, given the uses below) was lost in
  // extraction.
  if (!OpTy)
    return false;
  unsigned OpNumElts = OpTy->getNumElements();

  // Both sources feed the interleave, so the combined input width is doubled.
  return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
}
2363
    ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
    SmallVectorImpl<unsigned> &StartIndexes) {
  // Match Mask as an interleaving of Factor sequences of LaneLen consecutive
  // source elements; on success StartIndexes[I] holds the first source index
  // of sequence I.  (The opening signature line was lost in extraction.)
  unsigned NumElts = Mask.size();
  if (NumElts % Factor)
    return false;

  unsigned LaneLen = NumElts / Factor;
  if (!isPowerOf2_32(LaneLen))
    return false;

  StartIndexes.resize(Factor);

  // Check whether each element matches the general interleaved rule.
  // Ignore undef elements, as long as the defined elements match the rule.
  // Outer loop processes all factors (x, y, z in the above example)
  unsigned I = 0, J;
  for (; I < Factor; I++) {
    unsigned SavedLaneValue;
    unsigned SavedNoUndefs = 0;

    // Inner loop processes consecutive accesses (x, x+1... in the example)
    for (J = 0; J < LaneLen - 1; J++) {
      // Lane computes x's position in the Mask
      unsigned Lane = J * Factor + I;
      unsigned NextLane = Lane + Factor;
      int LaneValue = Mask[Lane];
      int NextLaneValue = Mask[NextLane];

      // If both are defined, values must be sequential
      if (LaneValue >= 0 && NextLaneValue >= 0 &&
          LaneValue + 1 != NextLaneValue)
        break;

      // If the next value is undef, save the current one as reference
      if (LaneValue >= 0 && NextLaneValue < 0) {
        SavedLaneValue = LaneValue;
        SavedNoUndefs = 1;
      }

      // Undefs are allowed, but defined elements must still be consecutive:
      // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
      // Verify this by storing the last non-undef followed by an undef
      // Check that following non-undef masks are incremented with the
      // corresponding distance.
      if (SavedNoUndefs > 0 && LaneValue < 0) {
        SavedNoUndefs++;
        if (NextLaneValue >= 0 &&
            SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
          break;
      }
    }

    // Leaving the inner loop early means an adjacent pair broke the rule.
    if (J < LaneLen - 1)
      return false;

    int StartMask = 0;
    if (Mask[I] >= 0) {
      // Check that the start of the I range (J=0) is greater than 0
      StartMask = Mask[I];
    } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
      // StartMask defined by the last value in lane
      StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
    } else if (SavedNoUndefs > 0) {
      // StartMask defined by some non-zero value in the j loop
      StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
    }
    // else StartMask remains set to 0, i.e. all elements are undefs

    if (StartMask < 0)
      return false;
    // We must stay within the vectors; This case can happen with undefs.
    if (StartMask + LaneLen > NumInputElts)
      return false;

    StartIndexes[I] = StartMask;
  }

  return true;
}
2444
/// Check if the mask is a DE-interleave mask of the given factor
/// \p Factor like:
///     <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
                                               unsigned Factor,
                                               unsigned &Index) {
  // (The opening signature line was lost in extraction.)
  // Check all potential start indices from 0 to (Factor - 1).
  for (unsigned Idx = 0; Idx < Factor; Idx++) {
    unsigned I = 0;

    // Check that elements are in ascending order by Factor. Ignore undef
    // elements.
    for (; I < Mask.size(); I++)
      if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
        break;

    // Every defined element matched this start index.
    if (I == Mask.size()) {
      Index = Idx;
      return true;
    }
  }

  return false;
}
2469
2470/// Try to lower a vector shuffle as a bit rotation.
2471///
2472/// Look for a repeated rotation pattern in each sub group.
2473/// Returns an element-wise left bit rotation amount or -1 if failed.
2474static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
2475 int NumElts = Mask.size();
2476 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
2477
2478 int RotateAmt = -1;
2479 for (int i = 0; i != NumElts; i += NumSubElts) {
2480 for (int j = 0; j != NumSubElts; ++j) {
2481 int M = Mask[i + j];
2482 if (M < 0)
2483 continue;
2484 if (M < i || M >= i + NumSubElts)
2485 return -1;
2486 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
2487 if (0 <= RotateAmt && Offset != RotateAmt)
2488 return -1;
2489 RotateAmt = Offset;
2490 }
2491 }
2492 return RotateAmt;
2493}
2494
    ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
    unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
  // Try power-of-two subgroup widths from MinSubElts up to MaxSubElts; the
  // first width whose mask matches an element rotation wins, and RotateAmt is
  // reported in bits.  (The opening signature line was lost in extraction.)
  for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
    int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
    if (EltRotateAmt < 0)
      continue;
    RotateAmt = EltRotateAmt * EltSizeInBits;
    return true;
  }

  return false;
}
2508
2509//===----------------------------------------------------------------------===//
2510// InsertValueInst Class
2511//===----------------------------------------------------------------------===//
2512
void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

  // NOTE(review): the first line of this assert (comparing the indexed type
  // of Agg against Val's type, per the message) was lost in extraction.
         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}
2531
// Copy constructor: duplicates the aggregate/value operands and index list.
InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
    : Instruction(IVI.getType(), InsertValue, AllocMarker),
      Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
  // NOTE(review): a trailing statement before this '}' was lost in
  // extraction.
}
2539
2540//===----------------------------------------------------------------------===//
2541// ExtractValueInst Class
2542//===----------------------------------------------------------------------===//
2543
2544void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2545 assert(getNumOperands() == 1 && "NumOperands not initialized?");
2546
2547 // There's no fundamental reason why we require at least one index.
2548 // But there's no present need to support it.
2549 assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2550
2551 Indices.append(Idxs.begin(), Idxs.end());
2552 setName(Name);
2553}
2554
// Copy constructor: clones the aggregate operand and index list of EVI.
ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
    : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0),
                       (BasicBlock *)nullptr),
      Indices(EVI.Indices) {
  // NOTE(review): a statement inside this body was lost in extraction.
}
2561
// getIndexedType - Returns the type of the element that would be extracted
// with an extractvalue instruction with the specified parameters.
//
// A null type is returned if the indices are invalid for the specified
// pointer type.
//
                                        ArrayRef<unsigned> Idxs) {
  // (The opening signature line was lost in extraction.)  Walks Agg inward
  // one index at a time, rejecting out-of-range or non-aggregate steps.
  for (unsigned Index : Idxs) {
    // We can't use CompositeType::indexValid(Index) here.
    // indexValid() always returns true for arrays because getelementptr allows
    // out-of-bounds indices. Since we don't allow those for extractvalue and
    // insertvalue we need to check array indexing manually.
    // Since the only other types we can index into are struct types it's just
    // as easy to check those manually as well.
    if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
      if (Index >= AT->getNumElements())
        return nullptr;
      Agg = AT->getElementType();
    } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
      if (Index >= ST->getNumElements())
        return nullptr;
      Agg = ST->getElementType(Index);
    } else {
      // Not a valid type to index into.
      return nullptr;
    }
  }
  // Agg now names the innermost type reached by applying every index.
  return Agg;
}
2592
2593//===----------------------------------------------------------------------===//
2594// UnaryOperator Class
2595//===----------------------------------------------------------------------===//
2596
                             const Twine &Name, InsertPosition InsertBefore)
    : UnaryInstruction(Ty, iType, S, InsertBefore) {
  // Wire up the single operand, apply the requested name, then run the
  // debug-only opcode/type validation.  (The constructor's first signature
  // line was lost in extraction.)
  Op<0>() = S;
  setName(Name);
  AssertOK();
}
2604
                                     InsertPosition InsertBefore) {
  // A unary operator's result type always matches its operand's type.
  // (The factory's first signature line was lost in extraction.)
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}
2609
2610void UnaryOperator::AssertOK() {
2611 Value *LHS = getOperand(0);
2612 (void)LHS; // Silence warnings.
2613#ifndef NDEBUG
2614 switch (getOpcode()) {
2615 case FNeg:
2616 assert(getType() == LHS->getType() &&
2617 "Unary operation should return same type as operand!");
2618 assert(getType()->isFPOrFPVectorTy() &&
2619 "Tried to create a floating-point operation on a "
2620 "non-floating-point type!");
2621 break;
2622 default: llvm_unreachable("Invalid opcode provided");
2623 }
2624#endif
2625}
2626
2627//===----------------------------------------------------------------------===//
2628// BinaryOperator Class
2629//===----------------------------------------------------------------------===//
2630
2632 const Twine &Name, InsertPosition InsertBefore)
2633 : Instruction(Ty, iType, AllocMarker, InsertBefore) {
2634 Op<0>() = S1;
2635 Op<1>() = S2;
2636 setName(Name);
2637 AssertOK();
2638}
2639
2640void BinaryOperator::AssertOK() {
2641 Value *LHS = getOperand(0), *RHS = getOperand(1);
2642 (void)LHS; (void)RHS; // Silence warnings.
2643 assert(LHS->getType() == RHS->getType() &&
2644 "Binary operator operand types must match!");
2645#ifndef NDEBUG
2646 switch (getOpcode()) {
2647 case Add: case Sub:
2648 case Mul:
2649 assert(getType() == LHS->getType() &&
2650 "Arithmetic operation should return same type as operands!");
2651 assert(getType()->isIntOrIntVectorTy() &&
2652 "Tried to create an integer operation on a non-integer type!");
2653 break;
2654 case FAdd: case FSub:
2655 case FMul:
2656 assert(getType() == LHS->getType() &&
2657 "Arithmetic operation should return same type as operands!");
2658 assert(getType()->isFPOrFPVectorTy() &&
2659 "Tried to create a floating-point operation on a "
2660 "non-floating-point type!");
2661 break;
2662 case UDiv:
2663 case SDiv:
2664 assert(getType() == LHS->getType() &&
2665 "Arithmetic operation should return same type as operands!");
2666 assert(getType()->isIntOrIntVectorTy() &&
2667 "Incorrect operand type (not integer) for S/UDIV");
2668 break;
2669 case FDiv:
2670 assert(getType() == LHS->getType() &&
2671 "Arithmetic operation should return same type as operands!");
2672 assert(getType()->isFPOrFPVectorTy() &&
2673 "Incorrect operand type (not floating point) for FDIV");
2674 break;
2675 case URem:
2676 case SRem:
2677 assert(getType() == LHS->getType() &&
2678 "Arithmetic operation should return same type as operands!");
2679 assert(getType()->isIntOrIntVectorTy() &&
2680 "Incorrect operand type (not integer) for S/UREM");
2681 break;
2682 case FRem:
2683 assert(getType() == LHS->getType() &&
2684 "Arithmetic operation should return same type as operands!");
2685 assert(getType()->isFPOrFPVectorTy() &&
2686 "Incorrect operand type (not floating point) for FREM");
2687 break;
2688 case Shl:
2689 case LShr:
2690 case AShr:
2691 assert(getType() == LHS->getType() &&
2692 "Shift operation should return same type as operands!");
2693 assert(getType()->isIntOrIntVectorTy() &&
2694 "Tried to create a shift operation on a non-integral type!");
2695 break;
2696 case And: case Or:
2697 case Xor:
2698 assert(getType() == LHS->getType() &&
2699 "Logical operation should return same type as operands!");
2700 assert(getType()->isIntOrIntVectorTy() &&
2701 "Tried to create a logical operation on a non-integral type!");
2702 break;
2703 default: llvm_unreachable("Invalid opcode provided");
2704 }
2705#endif
2706}
2707
                                       const Twine &Name,
                                       InsertPosition InsertBefore) {
  // (The factory's first signature line was lost in extraction.)
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  // The result type always matches the common operand type.
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}
2715
                                          InsertPosition InsertBefore) {
  // Integer negation is materialized as (0 - Op).  (The factory's first
  // signature line was lost in extraction.)
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
                            InsertBefore);
}
2722
                                             InsertPosition InsertBefore) {
  // NSW negation is materialized as (0 -nsw Op).  (The factory's first
  // signature line was lost in extraction.)
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
}
2728
                                          InsertPosition InsertBefore) {
  // Bitwise NOT is materialized as (Op ^ all-ones).  (The factory's first
  // signature line was lost in extraction.)
  Constant *C = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, C,
                            Op->getType(), Name, InsertBefore);
}
2735
// Exchange the two operands to this instruction. This instruction is safe to
// use on any binary instruction and does not modify the semantics of the
// instruction.
// NOTE: returns true on FAILURE (non-commutative opcode), false on success.
// (The signature line was lost in extraction.)
  if (!isCommutative())
    return true; // Can't commute operands
  Op<0>().swap(Op<1>());
  return false;
}
2745
2746//===----------------------------------------------------------------------===//
2747// FPMathOperator Class
2748//===----------------------------------------------------------------------===//
2749
  // Read the required accuracy from the instruction's !fpmath metadata;
  // 0.0 means no accuracy requirement is recorded.  (The signature line was
  // lost in extraction.)
  const MDNode *MD =
      cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
  if (!MD)
    return 0.0;
  // NOTE(review): the line extracting 'Accuracy' (a ConstantFP, per the use
  // below) from MD was lost in extraction.
  return Accuracy->getValueAPF().convertToFloat();
}
2758
2759//===----------------------------------------------------------------------===//
2760// CastInst Class
2761//===----------------------------------------------------------------------===//
2762
// Just determine if this cast only deals with integral->integral conversion.
// (The signature line was lost in extraction.)
  switch (getOpcode()) {
  default: return false;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::Trunc:
    return true;
  case Instruction::BitCast:
    // A bitcast is integral only when both source and result are integers.
    return getOperand(0)->getType()->isIntegerTy() &&
           getType()->isIntegerTy();
  }
}
2776
/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast. For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only
/// Determine if the described cast is a no-op.
                          Type *SrcTy,
                          Type *DestTy,
                          const DataLayout &DL) {
  // (The first signature line of this static overload was lost in
  // extraction.)
  assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
  switch (Opcode) {
  default: llvm_unreachable("Invalid CastOp");
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    // TODO: Target informations may give a more accurate answer here.
    return false;
  case Instruction::BitCast:
    return true; // BitCast never modifies bits.
  case Instruction::PtrToAddr:
  case Instruction::PtrToInt:
    // A pointer<->integer cast is free only when the integer is exactly
    // pointer-sized per the data layout.
    return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
           DestTy->getScalarSizeInBits();
  case Instruction::IntToPtr:
    return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
           SrcTy->getScalarSizeInBits();
  }
}
2815
  // Member form: delegate to the static overload using this cast's opcode
  // and types.  (The signature line was lost in extraction.)
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}
2819
/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// * %F = firstOpcode SrcTy %x to MidTy
/// * %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// * %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
                                          Instruction::CastOps secondOp,
                                          Type *SrcTy, Type *MidTy, Type *DstTy,
                                          const DataLayout *DL) {
  // (The first signature line — declaring the function name and the firstOp
  // parameter — was lost in extraction.)
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below. The rows correspond to firstOp, the columns
  // correspond to secondOp. In looking at the table below, keep in mind
  // the following cast properties:
  //
  //           Size Compare     Source               Destination
  // Operator  Src ? Size       Type       Sign      Type       Sign
  // --------  ------------     -------------------  ---------------------
  // TRUNC          >           Integer    Any       Integral   Any
  // ZEXT           <           Integral   Unsigned  Integer    Any
  // SEXT           <           Integral   Signed    Integer    Any
  // FPTOUI        n/a          FloatPt    n/a       Integral   Unsigned
  // FPTOSI        n/a          FloatPt    n/a       Integral   Signed
  // UITOFP        n/a          Integral   Unsigned  FloatPt    n/a
  // SITOFP        n/a          Integral   Signed    FloatPt    n/a
  // FPTRUNC        >           FloatPt    n/a       FloatPt    n/a
  // FPEXT          <           FloatPt    n/a       FloatPt    n/a
  // PTRTOINT      n/a          Pointer    n/a       Integral   Unsigned
  // PTRTOADDR     n/a          Pointer    n/a       Integral   Unsigned
  // INTTOPTR      n/a          Integral   Unsigned  Pointer    n/a
  // BITCAST        =           FirstClass n/a       FirstClass n/a
  // ADDRSPCST     n/a          Pointer    n/a       Pointer    n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc. We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  // clang-format off
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    // T        F  F  U  S  F  F  P  P  I  B  A  -+
    // R  Z  S  P  P  I  I  T  P  2  2  N  T  S   |
    // U  E  E  2  2  2  2  R  E  I  A  T  C  C  +- secondOp
    // N  X  X  U  S  F  F  N  X  N  D  2  V  V   |
    // C  T  T  I  I  P  P  C  T  T  R  P  T  T  -+
    {  1, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // Trunc         -+
    {  8, 1, 9,99,99, 2,17,99,99,99,99, 2, 3, 0}, // ZExt           |
    {  8, 0, 1,99,99, 0, 2,99,99,99,99, 0, 3, 0}, // SExt           |
    {  0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToUI         |
    {  0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // FPToSI         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // UIToFP        +- firstOp
    { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // SIToFP         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // FPTrunc        |
    { 99,99,99, 2, 2,99,99, 8, 2,99,99,99, 4, 0}, // FPExt          |
    {  1, 0, 0,99,99, 0, 0,99,99,99,99, 7, 3, 0}, // PtrToInt       |
    {  0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // PtrToAddr      |
    { 99,99,99,99,99,99,99,99,99,11,11,99,15, 0}, // IntToPtr       |
    {  5, 5, 5, 0, 0, 5, 5, 0, 0,16,16, 5, 1,14}, // BitCast        |
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
  };
  // clang-format on

  // TODO: This logic could be encoded into the table above and handled in the
  // switch below.
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)
      return 0;

  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
  switch (ElimCase) {
    case 0:
      // Categorically disallowed.
      return 0;
    case 1:
      // Allowed, use first cast's opcode.
      return firstOp;
    case 2:
      // Allowed, use second cast's opcode.
      return secondOp;
    case 3:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // non-vector type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
        return firstOp;
      return 0;
    case 4:
      // No-op cast in second op implies firstOp as long as the DestTy
      // matches MidTy.
      if (DstTy == MidTy)
        return firstOp;
      return 0;
    case 5:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
        return secondOp;
      return 0;
    case 7: {
      // Disable inttoptr/ptrtoint optimization if enabled.
      if (DisableI2pP2iOpt)
        return 0;

      // Cannot simplify if address spaces are different!
      if (SrcTy != DstTy)
        return 0;

      // Cannot simplify if the intermediate integer size is smaller than the
      // pointer size.
      unsigned MidSize = MidTy->getScalarSizeInBits();
      if (!DL || MidSize < DL->getPointerTypeSizeInBits(SrcTy))
        return 0;

      return Instruction::BitCast;
    }
    case 8: {
      // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
      // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcTy == DstTy)
        return Instruction::BitCast;
      if (SrcSize < DstSize)
        return firstOp;
      if (SrcSize > DstSize)
        return secondOp;
      return 0;
    }
    case 9:
      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;
    case 11: {
      // inttoptr, ptrtoint/ptrtoaddr -> integer cast
      if (!DL)
        return 0;
      unsigned MidSize = secondOp == Instruction::PtrToAddr
                             ? DL->getAddressSizeInBits(MidTy)
                             : DL->getPointerTypeSizeInBits(MidTy);
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      // If the middle size is smaller than both source and destination,
      // an additional masking operation would be required.
      if (MidSize < SrcSize && MidSize < DstSize)
        return 0;
      if (DstSize < SrcSize)
        return Instruction::Trunc;
      if (DstSize > SrcSize)
        return Instruction::ZExt;
      return Instruction::BitCast;
    }
    case 12:
      // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correcteness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 14:
      // bitcast, addrspacecast -> addrspacecast
      return Instruction::AddrSpaceCast;
    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correcteness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correcteness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}
3044
                           const Twine &Name, InsertPosition InsertBefore) {
  // Dispatch on the cast opcode and build the matching CastInst subclass.
  // (The factory's first signature line was lost in extraction.)
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
  case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
  case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
  case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
  case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
  case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
  case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
  case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
  case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
  case PtrToAddr: return new PtrToAddrInst (S, Ty, Name, InsertBefore);
  case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
  case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
  case BitCast:
    return new BitCastInst(S, Ty, Name, InsertBefore);
  case AddrSpaceCast:
    return new AddrSpaceCastInst(S, Ty, Name, InsertBefore);
  default:
    llvm_unreachable("Invalid opcode provided");
  }
}
3070
// Create a ZExt, or a BitCast when source and destination scalar widths
// already match (a zext to the same width is not a legal cast).
// NOTE(review): signature line dropped by extraction (upstream:
// `CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name,`).
3072                                        InsertPosition InsertBefore) {
3073  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3074    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3075  return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3076}
3077
// Create a SExt, or a BitCast when source and destination scalar widths
// already match.
// NOTE(review): signature line dropped by extraction (upstream:
// `CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name,`).
3079                                        InsertPosition InsertBefore) {
3080  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3081    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3082  return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3083}
3084
// Create a Trunc, or a BitCast when source and destination scalar widths
// already match.
// NOTE(review): signature line dropped by extraction (upstream:
// `CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name,`).
3086                                         InsertPosition InsertBefore) {
3087  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3088    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3089  return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3090}
3091
3092/// Create a BitCast or a PtrToInt cast instruction
// Requires a pointer (or pointer-vector) source; emits PtrToInt when the
// destination is integral, otherwise defers to the bitcast/addrspacecast
// helper. Vector-ness and element counts of Src/Dst must agree.
// NOTE(review): signature line dropped by extraction (upstream:
// `CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, const Twine &Name,`).
3094                                      InsertPosition InsertBefore) {
3095  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3096  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3097         "Invalid cast");
3098  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3099  assert((!Ty->isVectorTy() ||
3100          cast<VectorType>(Ty)->getElementCount() ==
3101              cast<VectorType>(S->getType())->getElementCount()) &&
3102         "Invalid cast");
3103
3104  if (Ty->isIntOrIntVectorTy())
3105    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3106
3107  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3108}
3109
// Pointer-to-pointer cast: AddrSpaceCast when the address spaces differ,
// plain BitCast otherwise.
// NOTE(review): signature line dropped by extraction (upstream:
// `CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(`).
3111    Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore) {
3112  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3113  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3114
3115  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3116    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3117
3118  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3119}
3120
// Emits PtrToInt / IntToPtr for scalar pointer<->integer conversions and a
// BitCast for everything else.
// NOTE(review): signature line dropped by extraction (upstream:
// `CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,`).
3122                                           const Twine &Name,
3123                                           InsertPosition InsertBefore) {
3124  if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3125    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3126  if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3127    return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3128
3129  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3130}
3131
// Integer-to-integer cast: Trunc if narrowing, SExt/ZExt (per isSigned) if
// widening, BitCast if the scalar widths already match.
// NOTE(review): signature line dropped by extraction (upstream:
// `CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, bool isSigned,`).
3133                                      const Twine &Name,
3134                                      InsertPosition InsertBefore) {
3135  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3136         "Invalid integer cast");
3137  unsigned SrcBits = C->getType()->getScalarSizeInBits();
3138  unsigned DstBits = Ty->getScalarSizeInBits();
3139  Instruction::CastOps opcode =
3140    (SrcBits == DstBits ? Instruction::BitCast :
3141     (SrcBits > DstBits ? Instruction::Trunc :
3142      (isSigned ? Instruction::SExt : Instruction::ZExt)));
3143  return Create(opcode, C, Ty, Name, InsertBefore);
3144}
3145
// Floating-point cast: FPTrunc if narrowing, FPExt if widening, BitCast if
// the scalar widths already match.
// NOTE(review): signature line dropped by extraction (upstream:
// `CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, const Twine &Name,`).
3147                                InsertPosition InsertBefore) {
3148  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3149         "Invalid cast");
3150  unsigned SrcBits = C->getType()->getScalarSizeInBits();
3151  unsigned DstBits = Ty->getScalarSizeInBits();
3152  assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
3153  Instruction::CastOps opcode =
3154    (SrcBits == DstBits ? Instruction::BitCast :
3155     (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3156  return Create(opcode, C, Ty, Name, InsertBefore);
3157}
3158
3159bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3160 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3161 return false;
3162
3163 if (SrcTy == DestTy)
3164 return true;
3165
3166 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3167 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3168 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3169 // An element by element cast. Valid if casting the elements is valid.
3170 SrcTy = SrcVecTy->getElementType();
3171 DestTy = DestVecTy->getElementType();
3172 }
3173 }
3174 }
3175
3176 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3177 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3178 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3179 }
3180 }
3181
3182 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3183 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3184
3185 // Could still have vectors of pointers if the number of elements doesn't
3186 // match
3187 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3188 return false;
3189
3190 if (SrcBits != DestBits)
3191 return false;
3192
3193 return true;
3194}
3195
// Like isBitCastable, but additionally allows full-width ptr<->int round
// trips on integral (non-"non-integral") pointers, per the DataLayout.
// NOTE(review): signature line dropped by extraction (upstream:
// `bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,`).
3197                                          const DataLayout &DL) {
3198  // ptrtoint and inttoptr are not allowed on non-integral pointers
3199  if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3200    if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3201      return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3202              !DL.isNonIntegralPointerType(PtrTy));
3203  if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3204    if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3205      return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3206              !DL.isNonIntegralPointerType(PtrTy));
3207
3208  return isBitCastable(SrcTy, DestTy);
3209}
3210
3211// Provide a way to get a "cast" where the cast opcode is inferred from the
3212// types and size of the operand. This, basically, is a parallel of the
3213// logic in the castIsValid function below. This axiom should hold:
3214// castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3215// should not assert in castIsValid. In other words, this produces a "correct"
3216// casting opcode for the arguments passed to it.
// NOTE(review): signature line dropped by extraction (upstream:
// `Instruction::CastOps CastInst::getCastOpcode(`).
3219    const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3220  Type *SrcTy = Src->getType();
3221
3222  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3223         "Only first class types are castable!");
3224
3225  if (SrcTy == DestTy)
3226    return BitCast;
3227
3228  // FIXME: Check address space sizes here
3229  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3230    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3231      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3232        // An element by element cast. Find the appropriate opcode based on the
3233        // element types.
3234        SrcTy = SrcVecTy->getElementType();
3235        DestTy = DestVecTy->getElementType();
3236      }
3237
3238  // Get the bit sizes, we'll need these
3239  // FIXME: This doesn't work for scalable vector types with different element
3240  // counts that don't call getElementType above.
3241  unsigned SrcBits =
3242      SrcTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3243  unsigned DestBits =
3244      DestTy->getPrimitiveSizeInBits().getFixedValue(); // 0 for ptr
3245
3246  // Run through the possibilities ...
3247  if (DestTy->isIntegerTy()) {     // Casting to integral
3248    if (SrcTy->isIntegerTy()) {    // Casting from integral
3249      if (DestBits < SrcBits)
3250        return Trunc;              // int -> smaller int
3251      else if (DestBits > SrcBits) { // its an extension
3252        if (SrcIsSigned)
3253          return SExt;             // signed -> SEXT
3254        else
3255          return ZExt;             // unsigned -> ZEXT
3256      } else {
3257        return BitCast;            // Same size, No-op cast
3258      }
3259    } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3260      if (DestIsSigned)
3261        return FPToSI;             // FP -> sint
3262      else
3263        return FPToUI;             // FP -> uint
3264    } else if (SrcTy->isVectorTy()) {
3265      assert(DestBits == SrcBits &&
3266             "Casting vector to integer of different width");
3267      return BitCast;              // Same size, no-op cast
3268    } else {
3269      assert(SrcTy->isPointerTy() &&
3270             "Casting from a value that is not first-class type");
3271      return PtrToInt;             // ptr -> int
3272    }
3273  } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3274    if (SrcTy->isIntegerTy()) {    // Casting from integral
3275      if (SrcIsSigned)
3276        return SIToFP;             // sint -> FP
3277      else
3278        return UIToFP;             // uint -> FP
3279    } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3280      if (DestBits < SrcBits) {
3281        return FPTrunc;            // FP -> smaller FP
3282      } else if (DestBits > SrcBits) {
3283        return FPExt;              // FP -> larger FP
3284      } else {
3285        return BitCast;            // same size, no-op cast
3286      }
3287    } else if (SrcTy->isVectorTy()) {
3288      assert(DestBits == SrcBits &&
3289             "Casting vector to floating point of different width");
3290      return BitCast;              // same size, no-op cast
3291    }
3292    llvm_unreachable("Casting pointer or non-first class to float");
3293  } else if (DestTy->isVectorTy()) {
3294    assert(DestBits == SrcBits &&
3295           "Illegal cast to vector (wrong type or size)");
3296    return BitCast;
3297  } else if (DestTy->isPointerTy()) {
3298    if (SrcTy->isPointerTy()) {
3299      if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3300        return AddrSpaceCast;
3301      return BitCast;              // ptr -> ptr
3302    } else if (SrcTy->isIntegerTy()) {
3303      return IntToPtr;             // int -> ptr
3304    }
3305    llvm_unreachable("Casting pointer to other than pointer or int");
3306  }
3307  llvm_unreachable("Casting to type that is not first-class");
3308}
3309
3310//===----------------------------------------------------------------------===//
3311// CastInst SubClass Constructors
3312//===----------------------------------------------------------------------===//
3313
3314/// Check that the construction parameters for a CastInst are correct. This
3315/// could be broken out into the separate constructors but it is useful to have
3316/// it in one place and to eliminate the redundant code for getting the sizes
3317/// of the types involved.
3318bool
// NOTE(review): the line completing the signature was dropped by the
// extraction (upstream: `CastInst::castIsValid(Instruction::CastOps op,
// Type *SrcTy, Type *DstTy) {`).
3320  if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3321      SrcTy->isAggregateType() || DstTy->isAggregateType())
3322    return false;
3323
3324  // Get the size of the types in bits, and whether we are dealing
3325  // with vector types, we'll need this later.
3326  bool SrcIsVec = isa<VectorType>(SrcTy);
3327  bool DstIsVec = isa<VectorType>(DstTy);
3328  unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3329  unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3330
3331  // If these are vector types, get the lengths of the vectors (using zero for
3332  // scalar types means that checking that vector lengths match also checks that
3333  // scalars are not being converted to vectors or vectors to scalars).
// NOTE(review): the extraction dropped the false-arm lines of the next two
// conditional expressions (upstream: `: ElementCount::getFixed(0);` after
// each) — restore from upstream.
3334  ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3336  ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3338
3339  // Switch on the opcode provided
3340  switch (op) {
3341  default: return false; // This is an input error
3342  case Instruction::Trunc:
3343    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3344           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3345  case Instruction::ZExt:
3346    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3347           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3348  case Instruction::SExt:
3349    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3350           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3351  case Instruction::FPTrunc:
3352    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3353           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3354  case Instruction::FPExt:
3355    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3356           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3357  case Instruction::UIToFP:
3358  case Instruction::SIToFP:
3359    return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3360           SrcEC == DstEC;
3361  case Instruction::FPToUI:
3362  case Instruction::FPToSI:
3363    return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3364           SrcEC == DstEC;
3365  case Instruction::PtrToAddr:
3366  case Instruction::PtrToInt:
3367    if (SrcEC != DstEC)
3368      return false;
3369    return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3370  case Instruction::IntToPtr:
3371    if (SrcEC != DstEC)
3372      return false;
3373    return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3374  case Instruction::BitCast: {
3375    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3376    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3377
3378    // BitCast implies a no-op cast of type only. No bits change.
3379    // However, you can't cast pointers to anything but pointers.
3380    if (!SrcPtrTy != !DstPtrTy)
3381      return false;
3382
3383    // For non-pointer cases, the cast is okay if the source and destination bit
3384    // widths are identical.
3385    if (!SrcPtrTy)
3386      return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3387
3388    // If both are pointers then the address spaces must match.
3389    if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3390      return false;
3391
3392    // A vector of pointers must have the same number of elements.
3393    if (SrcIsVec && DstIsVec)
3394      return SrcEC == DstEC;
3395    if (SrcIsVec)
3396      return SrcEC == ElementCount::getFixed(1);
3397    if (DstIsVec)
3398      return DstEC == ElementCount::getFixed(1);
3399
3400    return true;
3401  }
3402  case Instruction::AddrSpaceCast: {
3403    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3404    if (!SrcPtrTy)
3405      return false;
3406
3407    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3408    if (!DstPtrTy)
3409      return false;
3410
3411    if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3412      return false;
3413
3414    return SrcEC == DstEC;
3415  }
3416  }
3417}
3418
// CastInst subclass constructors. Each forwards to the CastInst base with
// its fixed opcode, then asserts the cast's legality via castIsValid.
// NOTE(review): the extraction dropped several constructor signature lines
// in this run (TruncInst, FPTruncInst, FPExtInst, UIToFPInst, SIToFPInst,
// FPToUIInst, FPToSIInst, PtrToIntInst, PtrToAddrInst, IntToPtrInst,
// BitCastInst, AddrSpaceCastInst) — restore from upstream.
3420                     InsertPosition InsertBefore)
3421    : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3422  assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3423}
3424
3425ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name,
3426                   InsertPosition InsertBefore)
3427    : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3428  assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3429}
3430
3431SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name,
3432                   InsertPosition InsertBefore)
3433    : CastInst(Ty, SExt, S, Name, InsertBefore) {
3434  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3435}
3436
3438                         InsertPosition InsertBefore)
3439    : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3440  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3441}
3442
3444                     InsertPosition InsertBefore)
3445    : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3446  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3447}
3448
3450                       InsertPosition InsertBefore)
3451    : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3452  assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3453}
3454
3456                       InsertPosition InsertBefore)
3457    : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3458  assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3459}
3460
3462                       InsertPosition InsertBefore)
3463    : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3464  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3465}
3466
3468                       InsertPosition InsertBefore)
3469    : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3470  assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3471}
3472
3474                           InsertPosition InsertBefore)
3475    : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3476  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3477}
3478
3480                             InsertPosition InsertBefore)
3481    : CastInst(Ty, PtrToAddr, S, Name, InsertBefore) {
3482  assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToAddr");
3483}
3484
3486                           InsertPosition InsertBefore)
3487    : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3488  assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3489}
3490
3492                         InsertPosition InsertBefore)
3493    : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3494  assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3495}
3496
3498                                     InsertPosition InsertBefore)
3499    : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3500  assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3501}
3502
3503//===----------------------------------------------------------------------===//
3504// CmpInst Classes
3505//===----------------------------------------------------------------------===//
3506
// CmpInst base constructor plus factory helpers (Create,
// CreateWithCopiedFlags) and small forwarders (swapOperands, isCommutative,
// isEquality) that dispatch to the ICmpInst/FCmpInst subclass.
// NOTE(review): the extraction dropped the signature lines of the ctor,
// Create, CreateWithCopiedFlags, swapOperands, isCommutative and the
// first lines of isEquality — restore from upstream.
3508                 Value *RHS, const Twine &Name, InsertPosition InsertBefore,
3509                 Instruction *FlagsSource)
3510    : Instruction(ty, op, AllocMarker, InsertBefore) {
3511  Op<0>() = LHS;
3512  Op<1>() = RHS;
3513  setPredicate(predicate);
3514  setName(Name);
3515  if (FlagsSource)
3516    copyIRFlags(FlagsSource);
3517}
3518
3520                        const Twine &Name, InsertPosition InsertBefore) {
3521  if (Op == Instruction::ICmp) {
3522    if (InsertBefore.isValid())
3523      return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3524                          S1, S2, Name);
3525    else
3526      return new ICmpInst(CmpInst::Predicate(predicate),
3527                          S1, S2, Name);
3528  }
3529
3530  if (InsertBefore.isValid())
3531    return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3532                        S1, S2, Name);
3533  else
3534    return new FCmpInst(CmpInst::Predicate(predicate),
3535                        S1, S2, Name);
3536}
3537
3539                                          Value *S2,
3540                                          const Instruction *FlagsSource,
3541                                          const Twine &Name,
3542                                          InsertPosition InsertBefore) {
3543  CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);
3544  Inst->copyIRFlags(FlagsSource);
3545  return Inst;
3546}
3547
3549  if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3550    IC->swapOperands();
3551  else
3552    cast<FCmpInst>(this)->swapOperands();
3553}
3554
3556  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3557    return IC->isCommutative();
3558  return cast<FCmpInst>(this)->isCommutative();
3559}
3560
3563    return ICmpInst::isEquality(P);
3565    return FCmpInst::isEquality(P);
3566  llvm_unreachable("Unsupported predicate kind");
3567}
3568
3569// Returns true if either operand of CmpInst is a provably non-zero
3570// floating-point constant.
3571static bool hasNonZeroFPOperands(const CmpInst *Cmp) {
3572 auto *LHS = dyn_cast<Constant>(Cmp->getOperand(0));
3573 auto *RHS = dyn_cast<Constant>(Cmp->getOperand(1));
3574 if (auto *Const = LHS ? LHS : RHS) {
3575 using namespace llvm::PatternMatch;
3576 return match(Const, m_NonZeroNotDenormalFP());
3577 }
3578 return false;
3579}
3580
3581// Floating-point equality is not an equivalence when comparing +0.0 with
3582// -0.0, when comparing NaN with another value, or when flushing
3583// denormals-to-zero.
// NOTE(review): the extraction dropped the case-label lines of this switch
// (upstream has ICMP_EQ, FCMP_OEQ/FCMP_UEQ labels between the statements
// below) — restore from upstream before relying on the control flow.
3584bool CmpInst::isEquivalence(bool Invert) const {
3585  switch (Invert ? getInversePredicate() : getPredicate()) {
3587    return true;
3589    if (!hasNoNaNs())
3590      return false;
3591    [[fallthrough]];
3593    return hasNonZeroFPOperands(this);
3594  default:
3595    return false;
3596  }
3597}
3598
// Maps each predicate to its logical negation (EQ<->NE, signed/unsigned
// order flips, ordered<->unordered FP pairs, TRUE<->FALSE).
// NOTE(review): signature line dropped by extraction (upstream:
// `CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {`).
3600  switch (pred) {
3601    default: llvm_unreachable("Unknown cmp predicate!");
3602    case ICMP_EQ: return ICMP_NE;
3603    case ICMP_NE: return ICMP_EQ;
3604    case ICMP_UGT: return ICMP_ULE;
3605    case ICMP_ULT: return ICMP_UGE;
3606    case ICMP_UGE: return ICMP_ULT;
3607    case ICMP_ULE: return ICMP_UGT;
3608    case ICMP_SGT: return ICMP_SLE;
3609    case ICMP_SLT: return ICMP_SGE;
3610    case ICMP_SGE: return ICMP_SLT;
3611    case ICMP_SLE: return ICMP_SGT;
3612
3613    case FCMP_OEQ: return FCMP_UNE;
3614    case FCMP_ONE: return FCMP_UEQ;
3615    case FCMP_OGT: return FCMP_ULE;
3616    case FCMP_OLT: return FCMP_UGE;
3617    case FCMP_OGE: return FCMP_ULT;
3618    case FCMP_OLE: return FCMP_UGT;
3619    case FCMP_UEQ: return FCMP_ONE;
3620    case FCMP_UNE: return FCMP_OEQ;
3621    case FCMP_UGT: return FCMP_OLE;
3622    case FCMP_ULT: return FCMP_OGE;
3623    case FCMP_UGE: return FCMP_OLT;
3624    case FCMP_ULE: return FCMP_OGT;
3625    case FCMP_ORD: return FCMP_UNO;
3626    case FCMP_UNO: return FCMP_ORD;
3627    case FCMP_TRUE: return FCMP_FALSE;
3628    case FCMP_FALSE: return FCMP_TRUE;
3629  }
3630}
3631
// Returns the textual IR mnemonic for a predicate ("eq", "oge", ...), or
// "unknown" for an out-of-range value.
// NOTE(review): signature line dropped by extraction (upstream:
// `StringRef CmpInst::getPredicateName(Predicate Pred) {`).
3633  switch (Pred) {
3634  default:                   return "unknown";
3635  case FCmpInst::FCMP_FALSE: return "false";
3636  case FCmpInst::FCMP_OEQ:   return "oeq";
3637  case FCmpInst::FCMP_OGT:   return "ogt";
3638  case FCmpInst::FCMP_OGE:   return "oge";
3639  case FCmpInst::FCMP_OLT:   return "olt";
3640  case FCmpInst::FCMP_OLE:   return "ole";
3641  case FCmpInst::FCMP_ONE:   return "one";
3642  case FCmpInst::FCMP_ORD:   return "ord";
3643  case FCmpInst::FCMP_UNO:   return "uno";
3644  case FCmpInst::FCMP_UEQ:   return "ueq";
3645  case FCmpInst::FCMP_UGT:   return "ugt";
3646  case FCmpInst::FCMP_UGE:   return "uge";
3647  case FCmpInst::FCMP_ULT:   return "ult";
3648  case FCmpInst::FCMP_ULE:   return "ule";
3649  case FCmpInst::FCMP_UNE:   return "une";
3650  case FCmpInst::FCMP_TRUE:  return "true";
3651  case ICmpInst::ICMP_EQ:    return "eq";
3652  case ICmpInst::ICMP_NE:    return "ne";
3653  case ICmpInst::ICMP_SGT:   return "sgt";
3654  case ICmpInst::ICMP_SGE:   return "sge";
3655  case ICmpInst::ICMP_SLT:   return "slt";
3656  case ICmpInst::ICMP_SLE:   return "sle";
3657  case ICmpInst::ICMP_UGT:   return "ugt";
3658  case ICmpInst::ICMP_UGE:   return "uge";
3659  case ICmpInst::ICMP_ULT:   return "ult";
3660  case ICmpInst::ICMP_ULE:   return "ule";
3661  }
3662}
3663
// Stream-insertion helper: prints the predicate's mnemonic.
// NOTE(review): signature line dropped by extraction (upstream:
// `raw_ostream &llvm::operator<<(raw_ostream &OS, CmpInst::Predicate Pred) {`).
3665  OS << CmpInst::getPredicateName(Pred);
3666  return OS;
3667}
3668
// Converts an unsigned icmp predicate to its signed counterpart; equality
// and already-signed predicates pass through unchanged.
// NOTE(review): signature line dropped by extraction (upstream:
// `ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {`).
3670  switch (pred) {
3671    default: llvm_unreachable("Unknown icmp predicate!");
3672    case ICMP_EQ: case ICMP_NE:
3673    case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3674       return pred;
3675    case ICMP_UGT: return ICMP_SGT;
3676    case ICMP_ULT: return ICMP_SLT;
3677    case ICMP_UGE: return ICMP_SGE;
3678    case ICMP_ULE: return ICMP_SLE;
3679  }
3680}
3681
// Converts a signed icmp predicate to its unsigned counterpart; equality
// and already-unsigned predicates pass through unchanged.
// NOTE(review): signature line dropped by extraction (upstream:
// `ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {`).
3684  switch (pred) {
3685    default: llvm_unreachable("Unknown icmp predicate!");
3686    case ICMP_EQ: case ICMP_NE:
3687    case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3688       return pred;
3689    case ICMP_SGT: return ICMP_UGT;
3690    case ICMP_SLT: return ICMP_ULT;
3691    case ICMP_SGE: return ICMP_UGE;
3692    case ICMP_SLE: return ICMP_ULE;
3693  }
3694}
3694
// Returns the predicate that gives the same result with the operands
// swapped (GT<->LT, GE<->LE); symmetric predicates pass through.
// NOTE(review): signature line dropped by extraction (upstream:
// `CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {`).
3697  switch (pred) {
3698    default: llvm_unreachable("Unknown cmp predicate!");
3699    case ICMP_EQ: case ICMP_NE:
3700      return pred;
3701    case ICMP_SGT: return ICMP_SLT;
3702    case ICMP_SLT: return ICMP_SGT;
3703    case ICMP_SGE: return ICMP_SLE;
3704    case ICMP_SLE: return ICMP_SGE;
3705    case ICMP_UGT: return ICMP_ULT;
3706    case ICMP_ULT: return ICMP_UGT;
3707    case ICMP_UGE: return ICMP_ULE;
3708    case ICMP_ULE: return ICMP_UGE;
3709
3710    case FCMP_FALSE: case FCMP_TRUE:
3711    case FCMP_OEQ: case FCMP_ONE:
3712    case FCMP_UEQ: case FCMP_UNE:
3713    case FCMP_ORD: case FCMP_UNO:
3714      return pred;
3715    case FCMP_OGT: return FCMP_OLT;
3716    case FCMP_OLT: return FCMP_OGT;
3717    case FCMP_OGE: return FCMP_OLE;
3718    case FCMP_OLE: return FCMP_OGE;
3719    case FCMP_UGT: return FCMP_ULT;
3720    case FCMP_ULT: return FCMP_UGT;
3721    case FCMP_UGE: return FCMP_ULE;
3722    case FCMP_ULE: return FCMP_UGE;
3723  }
3724}
3724
// isNonStrictPredicate / isStrictPredicate: classify ordering predicates as
// inclusive (GE/LE variants) vs. strict (GT/LT variants).
// NOTE(review): both signature lines dropped by extraction (upstream:
// `bool CmpInst::isNonStrictPredicate(Predicate pred) {` and
// `bool CmpInst::isStrictPredicate(Predicate pred) {`).
3726  switch (pred) {
3727  case ICMP_SGE:
3728  case ICMP_SLE:
3729  case ICMP_UGE:
3730  case ICMP_ULE:
3731  case FCMP_OGE:
3732  case FCMP_OLE:
3733  case FCMP_UGE:
3734  case FCMP_ULE:
3735    return true;
3736  default:
3737    return false;
3738  }
3739}
3740
3742  switch (pred) {
3743  case ICMP_SGT:
3744  case ICMP_SLT:
3745  case ICMP_UGT:
3746  case ICMP_ULT:
3747  case FCMP_OGT:
3748  case FCMP_OLT:
3749  case FCMP_UGT:
3750  case FCMP_ULT:
3751    return true;
3752  default:
3753    return false;
3754  }
3755}
3756
// getStrictPredicate / getNonStrictPredicate: convert between inclusive and
// strict ordering forms (GE->GT, GT->GE, ...); other predicates pass through.
// NOTE(review): both signature lines dropped by extraction (upstream:
// `CmpInst::Predicate CmpInst::getStrictPredicate(Predicate pred) {` and
// `CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {`).
3758  switch (pred) {
3759  case ICMP_SGE:
3760    return ICMP_SGT;
3761  case ICMP_SLE:
3762    return ICMP_SLT;
3763  case ICMP_UGE:
3764    return ICMP_UGT;
3765  case ICMP_ULE:
3766    return ICMP_ULT;
3767  case FCMP_OGE:
3768    return FCMP_OGT;
3769  case FCMP_OLE:
3770    return FCMP_OLT;
3771  case FCMP_UGE:
3772    return FCMP_UGT;
3773  case FCMP_ULE:
3774    return FCMP_ULT;
3775  default:
3776    return pred;
3777  }
3778}
3779
3781  switch (pred) {
3782  case ICMP_SGT:
3783    return ICMP_SGE;
3784  case ICMP_SLT:
3785    return ICMP_SLE;
3786  case ICMP_UGT:
3787    return ICMP_UGE;
3788  case ICMP_ULT:
3789    return ICMP_ULE;
3790  case FCMP_OGT:
3791    return FCMP_OGE;
3792  case FCMP_OLT:
3793    return FCMP_OLE;
3794  case FCMP_UGT:
3795    return FCMP_UGE;
3796  case FCMP_ULT:
3797    return FCMP_ULE;
3798  default:
3799    return pred;
3800  }
3801}
3802
// Toggles a relational predicate between its strict and non-strict form;
// only valid for relational predicates (asserted).
// NOTE(review): signature line dropped by extraction (upstream:
// `CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {`).
3804  assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
3805
3806  if (isStrictPredicate(pred))
3807    return getNonStrictPredicate(pred);
3808  if (isNonStrictPredicate(pred))
3809    return getStrictPredicate(pred);
3810
3811  llvm_unreachable("Unknown predicate!");
3812}
3813
// isUnsigned / isSigned: membership tests for the unsigned resp. signed
// icmp predicate groups.
// NOTE(review): the extraction dropped the signature lines and the first
// case-label line of each switch (upstream lists ICMP_ULT/UGT/ULE and
// ICMP_SLT/SGT/SLE alongside the labels below) — restore from upstream.
3815  switch (predicate) {
3816    default: return false;
3818    case ICmpInst::ICMP_UGE: return true;
3819  }
3820}
3821
3823  switch (predicate) {
3824    default: return false;
3826    case ICmpInst::ICMP_SGE: return true;
3827  }
3828}
3829
// Evaluates (LHS Pred RHS) on concrete APInt values via the matching APInt
// comparison; asserts on non-integer predicates.
// NOTE(review): the extraction dropped every `case ICmpInst::ICMP_*:` label
// line of this switch — each return below belongs to the predicate its
// APInt method names (eq->ICMP_EQ, ugt->ICMP_UGT, ...); restore from
// upstream.
3830bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
3831                       ICmpInst::Predicate Pred) {
3832  assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
3833  switch (Pred) {
3835    return LHS.eq(RHS);
3837    return LHS.ne(RHS);
3839    return LHS.ugt(RHS);
3841    return LHS.uge(RHS);
3843    return LHS.ult(RHS);
3845    return LHS.ule(RHS);
3847    return LHS.sgt(RHS);
3849    return LHS.sge(RHS);
3851    return LHS.slt(RHS);
3853    return LHS.sle(RHS);
3854  default:
3855    llvm_unreachable("Unexpected non-integer predicate.");
3856  };
3857}
3858
// Evaluates (LHS Pred RHS) on concrete APFloat values using a single
// APFloat::compare result; ordered predicates exclude cmpUnordered,
// unordered predicates include it.
// NOTE(review): the extraction dropped the FCMP_FALSE/FCMP_TRUE case labels
// and the multi-way return lines of FCMP_ONE and FCMP_UGT (upstream:
// `return R == APFloat::cmpLessThan || R == APFloat::cmpGreaterThan;` and
// `return R == APFloat::cmpUnordered || R == APFloat::cmpGreaterThan;`) —
// restore from upstream.
3859bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
3860                       FCmpInst::Predicate Pred) {
3861  APFloat::cmpResult R = LHS.compare(RHS);
3862  switch (Pred) {
3863  default:
3864    llvm_unreachable("Invalid FCmp Predicate");
3866    return false;
3868    return true;
3869  case FCmpInst::FCMP_UNO:
3870    return R == APFloat::cmpUnordered;
3871  case FCmpInst::FCMP_ORD:
3872    return R != APFloat::cmpUnordered;
3873  case FCmpInst::FCMP_UEQ:
3874    return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
3875  case FCmpInst::FCMP_OEQ:
3876    return R == APFloat::cmpEqual;
3877  case FCmpInst::FCMP_UNE:
3878    return R != APFloat::cmpEqual;
3879  case FCmpInst::FCMP_ONE:
3881  case FCmpInst::FCMP_ULT:
3882    return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
3883  case FCmpInst::FCMP_OLT:
3884    return R == APFloat::cmpLessThan;
3885  case FCmpInst::FCMP_UGT:
3887  case FCmpInst::FCMP_OGT:
3888    return R == APFloat::cmpGreaterThan;
3889  case FCmpInst::FCMP_ULE:
3890    return R != APFloat::cmpGreaterThan;
3891  case FCmpInst::FCMP_OLE:
3892    return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
3893  case FCmpInst::FCMP_UGE:
3894    return R != APFloat::cmpLessThan;
3895  case FCmpInst::FCMP_OGE:
3896    return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
3897  }
3898}
3899
3900std::optional<bool> ICmpInst::compare(const KnownBits &LHS,
3901 const KnownBits &RHS,
3902 ICmpInst::Predicate Pred) {
3903 switch (Pred) {
3904 case ICmpInst::ICMP_EQ:
3905 return KnownBits::eq(LHS, RHS);
3906 case ICmpInst::ICMP_NE:
3907 return KnownBits::ne(LHS, RHS);
3908 case ICmpInst::ICMP_UGE:
3909 return KnownBits::uge(LHS, RHS);
3910 case ICmpInst::ICMP_UGT:
3911 return KnownBits::ugt(LHS, RHS);
3912 case ICmpInst::ICMP_ULE:
3913 return KnownBits::ule(LHS, RHS);
3914 case ICmpInst::ICMP_ULT:
3915 return KnownBits::ult(LHS, RHS);
3916 case ICmpInst::ICMP_SGE:
3917 return KnownBits::sge(LHS, RHS);
3918 case ICmpInst::ICMP_SGT:
3919 return KnownBits::sgt(LHS, RHS);
3920 case ICmpInst::ICMP_SLE:
3921 return KnownBits::sle(LHS, RHS);
3922 case ICmpInst::ICMP_SLT:
3923 return KnownBits::slt(LHS, RHS);
3924 default:
3925 llvm_unreachable("Unexpected non-integer predicate.");
3926 }
3927}
3928
// Swaps an icmp predicate between signed and unsigned interpretation;
// equality predicates are unaffected.
// NOTE(review): signature line dropped by extraction — presumably
// `ICmpInst::Predicate ICmpInst::getFlippedSignednessPredicate(Predicate pred) {`;
// confirm against upstream.
3930  if (CmpInst::isEquality(pred))
3931    return pred;
3932  if (isSigned(pred))
3933    return getUnsignedPredicate(pred);
3934  if (isUnsigned(pred))
3935    return getSignedPredicate(pred);
3936
3937  llvm_unreachable("Unknown predicate!");
3938}
3939
// isOrdered / isUnordered: classify fcmp predicates by whether they yield
// false resp. true when either operand is NaN.
// NOTE(review): the extraction dropped the signature lines and the first
// case-label lines of both switches (upstream lists the remaining
// FCMP_O* resp. FCMP_U* labels) — restore from upstream.
3941  switch (predicate) {
3942    default: return false;
3945    case FCmpInst::FCMP_ORD: return true;
3946  }
3947}
3948
3950  switch (predicate) {
3951    default: return false;
3954    case FCmpInst::FCMP_UNO: return true;
3955  }
3956}
3957
// isTrueWhenEqual / isFalseWhenEqual: whether the predicate evaluates to
// true resp. false when both operands are equal (and, for FP, ordered).
// NOTE(review): both signature lines dropped by extraction (upstream:
// `bool CmpInst::isTrueWhenEqual(Predicate predicate) {` and
// `bool CmpInst::isFalseWhenEqual(Predicate predicate) {`).
3959  switch(predicate) {
3960    default: return false;
3961    case ICMP_EQ:   case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3962    case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3963  }
3964}
3965
3967  switch(predicate) {
3968  case ICMP_NE:    case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3969  case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3970  default: return false;
3971  }
3972}
3973
// Implication helpers: given that (X Pred1 Y) holds, decide whether
// (X Pred2 Y) must be true (isImpliedTrueByMatchingCmp), must be false
// (isImpliedFalseByMatchingCmp), or report both via std::optional
// (isImpliedByMatchingCmp).
// NOTE(review): the extraction dropped the three signature lines and the
// two samesign-normalization statements after the hasSameSign checks
// (upstream flips the signedness of the other predicate) — restore from
// upstream.
3975  // If the predicates match, then we know the first condition implies the
3976  // second is true.
3977  if (CmpPredicate::getMatching(Pred1, Pred2))
3978    return true;
3979
3980  if (Pred1.hasSameSign() && CmpInst::isSigned(Pred2))
3982  else if (Pred2.hasSameSign() && CmpInst::isSigned(Pred1))
3984
3985  switch (Pred1) {
3986  default:
3987    break;
3988  case CmpInst::ICMP_EQ:
3989    // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
3990    return Pred2 == CmpInst::ICMP_UGE || Pred2 == CmpInst::ICMP_ULE ||
3991           Pred2 == CmpInst::ICMP_SGE || Pred2 == CmpInst::ICMP_SLE;
3992  case CmpInst::ICMP_UGT: // A >u B implies A != B and A >=u B are true.
3993    return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_UGE;
3994  case CmpInst::ICMP_ULT: // A <u B implies A != B and A <=u B are true.
3995    return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_ULE;
3996  case CmpInst::ICMP_SGT: // A >s B implies A != B and A >=s B are true.
3997    return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SGE;
3998  case CmpInst::ICMP_SLT: // A <s B implies A != B and A <=s B are true.
3999    return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SLE;
4000  }
4001  return false;
4002}
4003
4005                                             CmpPredicate Pred2) {
// A false implication is a true implication of the inverse of Pred2.
4006  return isImpliedTrueByMatchingCmp(Pred1,
4008}
4009
4011                                                     CmpPredicate Pred2) {
4012  if (isImpliedTrueByMatchingCmp(Pred1, Pred2))
4013    return true;
4014  if (isImpliedFalseByMatchingCmp(Pred1, Pred2))
4015    return false;
4016  return std::nullopt;
4017}
4018
4019//===----------------------------------------------------------------------===//
4020// CmpPredicate Implementation
4021//===----------------------------------------------------------------------===//
4022
// CmpPredicate implementation: getMatching reconciles two predicates that
// may differ only in the samesign flag; get/getSwapped extract a
// CmpPredicate (with samesign, when the instruction is an ICmpInst) from a
// CmpInst.
// NOTE(review): the extraction dropped several lines in this region —
// the signedness checks inside getMatching, and the signature lines of
// get and getSwapped — restore from upstream before relying on it.
4023std::optional<CmpPredicate> CmpPredicate::getMatching(CmpPredicate A,
4024                                                      CmpPredicate B) {
4025  if (A.Pred == B.Pred)
4026    return A.HasSameSign == B.HasSameSign ? A : CmpPredicate(A.Pred);
4028    return {};
4029  if (A.HasSameSign &&
4031    return B.Pred;
4032  if (B.HasSameSign &&
4034    return A.Pred;
4035  return {};
4036}
4037
4041
4043  if (auto *ICI = dyn_cast<ICmpInst>(Cmp))
4044    return ICI->getCmpPredicate();
4045  return Cmp->getPredicate();
4046}
4047
4051
4053  return getSwapped(get(Cmp));
4054}
4055
4056//===----------------------------------------------------------------------===//
4057// SwitchInst Implementation
4058//===----------------------------------------------------------------------===//
4059
// Shared initializer: reserves hung-off operand storage (NumReserved slots,
// of which two hold the condition and the default destination) and installs
// those first two operands.
// NOTE(review): the extraction dropped one line between the assignments to
// ReservedSpace and the allocHungoffUses call — restore from upstream.
4060void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
4061  assert(Value && Default && NumReserved);
4062  ReservedSpace = NumReserved;
4064  allocHungoffUses(ReservedSpace);
4065
4066  Op<0>() = Value;
4067  Op<1>() = Default;
4068}
4069
4070/// SwitchInst ctor - Create a new switch instruction, specifying a value to
4071/// switch on and a default destination. The number of additional cases can
4072/// be specified here to make memory allocation more efficient. This
4073/// constructor can also autoinsert before another instruction.
4074SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
4075                       InsertPosition InsertBefore)
4076    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
4077                  AllocMarker, InsertBefore) {
  // Reserve 2 slots for the condition + default destination plus one per
  // anticipated case.
4078  init(Value, Default, 2 + NumCases);
4079}
4080
4081SwitchInst::SwitchInst(const SwitchInst &SI)
4082 : Instruction(SI.getType(), Instruction::Switch, AllocMarker) {
4083 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
4084 setNumHungOffUseOperands(SI.getNumOperands());
4085 Use *OL = getOperandList();
4086 ConstantInt **VL = case_values();
4087 const Use *InOL = SI.getOperandList();
4088 ConstantInt *const *InVL = SI.case_values();
4089 for (unsigned i = 2, E = SI.getNumOperands(); i != E; ++i) {
4090 OL[i] = InOL[i];
4091 VL[i - 2] = InVL[i - 2];
4092 }
4093 SubclassOptionalData = SI.SubclassOptionalData;
4094}
4095
4096/// addCase - Add an entry to the switch instruction...
4097///
4099 unsigned NewCaseIdx = getNumCases();
4100 unsigned OpNo = getNumOperands();
4101 if (OpNo + 1 > ReservedSpace)
4102 growOperands(); // Get more space!
4103 // Initialize some new operands.
4104 assert(OpNo < ReservedSpace && "Growing didn't work!");
4105 setNumHungOffUseOperands(OpNo + 1);
4106 CaseHandle Case(this, NewCaseIdx);
4107 Case.setValue(OnVal);
4108 Case.setSuccessor(Dest);
4109}
4110
4111/// removeCase - This method removes the specified case and its successor
4112/// from the switch instruction.
4114 unsigned idx = I->getCaseIndex();
4115
4116 assert(2 + idx < getNumOperands() && "Case index out of range!!!");
4117
4118 unsigned NumOps = getNumOperands();
4119 Use *OL = getOperandList();
4120 ConstantInt **VL = case_values();
4121
4122 // Overwrite this case with the end of the list.
4123 if (2 + idx + 1 != NumOps) {
4124 OL[2 + idx] = OL[NumOps - 1];
4125 VL[idx] = VL[NumOps - 2 - 1];
4126 }
4127
4128 // Nuke the last value.
4129 OL[NumOps - 1].set(nullptr);
4130 VL[NumOps - 2 - 1] = nullptr;
4132
4133 return CaseIt(this, idx);
4134}
4135
4136/// growOperands - grow operands - This grows the operand list in response
4137/// to a push_back style of operation. This grows the number of ops by 3 times.
4138///
4139void SwitchInst::growOperands() {
4140 unsigned e = getNumOperands();
4141 unsigned NumOps = e*3;
4142
4143 ReservedSpace = NumOps;
4144 growHungoffUses(ReservedSpace, /*WithExtraValues=*/true);
4145}
4146
4148 MDNode *ProfileData = getBranchWeightMDNode(SI);
4149 if (!ProfileData)
4150 return;
4151
4152 if (getNumBranchWeights(*ProfileData) != SI.getNumSuccessors()) {
4153 llvm_unreachable("number of prof branch_weights metadata operands does "
4154 "not correspond to number of succesors");
4155 }
4156
4158 if (!extractBranchWeights(ProfileData, Weights))
4159 return;
4160 this->Weights = std::move(Weights);
4161}
4162
4165 if (Weights) {
4166 assert(SI.getNumSuccessors() == Weights->size() &&
4167 "num of prof branch_weights must accord with num of successors");
4168 Changed = true;
4169 // Copy the last case to the place of the removed one and shrink.
4170 // This is tightly coupled with the way SwitchInst::removeCase() removes
4171 // the cases in SwitchInst::removeCase(CaseIt).
4172 (*Weights)[I->getCaseIndex() + 1] = Weights->back();
4173 Weights->pop_back();
4174 }
4175 return SI.removeCase(I);
4176}
4177
4179 auto *DestBlock = I->getCaseSuccessor();
4180 if (Weights) {
4181 auto Weight = getSuccessorWeight(I->getCaseIndex() + 1);
4182 (*Weights)[0] = Weight.value();
4183 }
4184
4185 SI.setDefaultDest(DestBlock);
4186}
4187
4189 ConstantInt *OnVal, BasicBlock *Dest,
4191 SI.addCase(OnVal, Dest);
4192
4193 if (!Weights && W && *W) {
4194 Changed = true;
4195 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4196 (*Weights)[SI.getNumSuccessors() - 1] = *W;
4197 } else if (Weights) {
4198 Changed = true;
4199 Weights->push_back(W.value_or(0));
4200 }
4201 if (Weights)
4202 assert(SI.getNumSuccessors() == Weights->size() &&
4203 "num of prof branch_weights must accord with num of successors");
4204}
4205
4208 // Instruction is erased. Mark as unchanged to not touch it in the destructor.
4209 Changed = false;
4210 if (Weights)
4211 Weights->resize(0);
4212 return SI.eraseFromParent();
4213}
4214
4217 if (!Weights)
4218 return std::nullopt;
4219 return (*Weights)[idx];
4220}
4221
4224 if (!W)
4225 return;
4226
4227 if (!Weights && *W)
4228 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4229
4230 if (Weights) {
4231 auto &OldW = (*Weights)[idx];
4232 if (*W != OldW) {
4233 Changed = true;
4234 OldW = *W;
4235 }
4236 }
4237}
4238
4241 unsigned idx) {
4242 if (MDNode *ProfileData = getBranchWeightMDNode(SI))
4243 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4244 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
4245 ->getValue()
4246 .getZExtValue();
4247
4248 return std::nullopt;
4249}
4250
4251//===----------------------------------------------------------------------===//
4252// IndirectBrInst Implementation
4253//===----------------------------------------------------------------------===//
4254
4255void IndirectBrInst::init(Value *Address, unsigned NumDests) {
4256 assert(Address && Address->getType()->isPointerTy() &&
4257 "Address of indirectbr must be a pointer");
4258 ReservedSpace = 1+NumDests;
4260 allocHungoffUses(ReservedSpace);
4261
4262 Op<0>() = Address;
4263}
4264
4265
4266/// growOperands - grow operands - This grows the operand list in response
4267/// to a push_back style of operation. This grows the number of ops by 2 times.
4268///
4269void IndirectBrInst::growOperands() {
4270 unsigned e = getNumOperands();
4271 unsigned NumOps = e*2;
4272
4273 ReservedSpace = NumOps;
4274 growHungoffUses(ReservedSpace);
4275}
4276
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, AllocMarker, InsertBefore) {
  // An indirectbr produces no value, hence the void result type; init()
  // reserves one operand slot for the address plus NumCases destinations.
  init(Address, NumCases);
}
4283
4284IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
4285 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
4286 AllocMarker) {
4287 NumUserOperands = IBI.NumUserOperands;
4288 allocHungoffUses(IBI.getNumOperands());
4289 Use *OL = getOperandList();
4290 const Use *InOL = IBI.getOperandList();
4291 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
4292 OL[i] = InOL[i];
4293 SubclassOptionalData = IBI.SubclassOptionalData;
4294}
4295
4296/// addDestination - Add a destination.
4297///
4299 unsigned OpNo = getNumOperands();
4300 if (OpNo+1 > ReservedSpace)
4301 growOperands(); // Get more space!
4302 // Initialize some new operands.
4303 assert(OpNo < ReservedSpace && "Growing didn't work!");
4305 getOperandList()[OpNo] = DestBB;
4306}
4307
4308/// removeDestination - This method removes the specified successor from the
4309/// indirectbr instruction.
4311 assert(idx < getNumOperands()-1 && "Successor index out of range!");
4312
4313 unsigned NumOps = getNumOperands();
4314 Use *OL = getOperandList();
4315
4316 // Replace this value with the last one.
4317 OL[idx+1] = OL[NumOps-1];
4318
4319 // Nuke the last value.
4320 OL[NumOps-1].set(nullptr);
4322}
4323
4324//===----------------------------------------------------------------------===//
4325// FreezeInst Implementation
4326//===----------------------------------------------------------------------===//
4327
FreezeInst::FreezeInst(Value *S, const Twine &Name, InsertPosition InsertBefore)
    : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
  // freeze has the same type as its single operand.
  setName(Name);
}
4332
4333//===----------------------------------------------------------------------===//
4334// cloneImpl() implementations
4335//===----------------------------------------------------------------------===//
4336
4337// Define these methods here so vtables don't get emitted into every translation
4338// unit that uses these classes.
4339
4340GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
4342 return new (AllocMarker) GetElementPtrInst(*this, AllocMarker);
4343}
4344
4348
4352
4354 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
4355}
4356
4358 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
4359}
4360
4361ExtractValueInst *ExtractValueInst::cloneImpl() const {
4362 return new ExtractValueInst(*this);
4363}
4364
4365InsertValueInst *InsertValueInst::cloneImpl() const {
4366 return new InsertValueInst(*this);
4367}
4368
4371 getOperand(0), getAlign());
4372 Result->setUsedWithInAlloca(isUsedWithInAlloca());
4373 Result->setSwiftError(isSwiftError());
4374 return Result;
4375}
4376
4378 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
4380}
4381
4386
4391 Result->setVolatile(isVolatile());
4392 Result->setWeak(isWeak());
4393 return Result;
4394}
4395
4397 AtomicRMWInst *Result =
4400 Result->setVolatile(isVolatile());
4401 return Result;
4402}
4403
4407
4409 return new TruncInst(getOperand(0), getType());
4410}
4411
4413 return new ZExtInst(getOperand(0), getType());
4414}
4415
4417 return new SExtInst(getOperand(0), getType());
4418}
4419
4421 return new FPTruncInst(getOperand(0), getType());
4422}
4423
4425 return new FPExtInst(getOperand(0), getType());
4426}
4427
4429 return new UIToFPInst(getOperand(0), getType());
4430}
4431
4433 return new SIToFPInst(getOperand(0), getType());
4434}
4435
4437 return new FPToUIInst(getOperand(0), getType());
4438}
4439
4441 return new FPToSIInst(getOperand(0), getType());
4442}
4443
4445 return new PtrToIntInst(getOperand(0), getType());
4446}
4447
4451
4453 return new IntToPtrInst(getOperand(0), getType());
4454}
4455
4457 return new BitCastInst(getOperand(0), getType());
4458}
4459
4463
4464CallInst *CallInst::cloneImpl() const {
4465 if (hasOperandBundles()) {
4469 return new (AllocMarker) CallInst(*this, AllocMarker);
4470 }
4472 return new (AllocMarker) CallInst(*this, AllocMarker);
4473}
4474
4475SelectInst *SelectInst::cloneImpl() const {
4477}
4478
4480 return new VAArgInst(getOperand(0), getType());
4481}
4482
4483ExtractElementInst *ExtractElementInst::cloneImpl() const {
4485}
4486
4487InsertElementInst *InsertElementInst::cloneImpl() const {
4489}
4490
4494
4495PHINode *PHINode::cloneImpl() const { return new (AllocMarker) PHINode(*this); }
4496
4497LandingPadInst *LandingPadInst::cloneImpl() const {
4498 return new LandingPadInst(*this);
4499}
4500
4501ReturnInst *ReturnInst::cloneImpl() const {
4503 return new (AllocMarker) ReturnInst(*this, AllocMarker);
4504}
4505
4506BranchInst *BranchInst::cloneImpl() const {
4508 return new (AllocMarker) BranchInst(*this, AllocMarker);
4509}
4510
4511SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
4512
4513IndirectBrInst *IndirectBrInst::cloneImpl() const {
4514 return new IndirectBrInst(*this);
4515}
4516
4517InvokeInst *InvokeInst::cloneImpl() const {
4518 if (hasOperandBundles()) {
4522 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4523 }
4525 return new (AllocMarker) InvokeInst(*this, AllocMarker);
4526}
4527
4528CallBrInst *CallBrInst::cloneImpl() const {
4529 if (hasOperandBundles()) {
4533 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4534 }
4536 return new (AllocMarker) CallBrInst(*this, AllocMarker);
4537}
4538
4539ResumeInst *ResumeInst::cloneImpl() const {
4540 return new (AllocMarker) ResumeInst(*this);
4541}
4542
4543CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
4545 return new (AllocMarker) CleanupReturnInst(*this, AllocMarker);
4546}
4547
4548CatchReturnInst *CatchReturnInst::cloneImpl() const {
4549 return new (AllocMarker) CatchReturnInst(*this);
4550}
4551
4552CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
4553 return new CatchSwitchInst(*this);
4554}
4555
4556FuncletPadInst *FuncletPadInst::cloneImpl() const {
4558 return new (AllocMarker) FuncletPadInst(*this, AllocMarker);
4559}
4560
4562 LLVMContext &Context = getContext();
4563 return new UnreachableInst(Context);
4564}
4565
4566bool UnreachableInst::shouldLowerToTrap(bool TrapUnreachable,
4567 bool NoTrapAfterNoreturn) const {
4568 if (!TrapUnreachable)
4569 return false;
4570
4571 // We may be able to ignore unreachable behind a noreturn call.
4573 Call && Call->doesNotReturn()) {
4574 if (NoTrapAfterNoreturn)
4575 return false;
4576 // Do not emit an additional trap instruction.
4577 if (Call->isNonContinuableTrap())
4578 return false;
4579 }
4580
4581 if (getFunction()->hasFnAttribute(Attribute::Naked))
4582 return false;
4583
4584 return true;
4585}
4586
4588 return new FreezeInst(getOperand(0));
4589}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT S1
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
@ FnAttr
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_ABI
Definition Compiler.h:213
This file contains the declarations for the subclasses of Constant, which represent the different fla...
@ Default
static bool isSigned(unsigned int Opcode)
#define op(i)
Module.h This file contains the declarations for the Module class.
static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos)
static bool isImpliedFalseByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
static Value * createPlaceholderForShuffleVector(Value *V)
static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos)
static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))
static bool hasNonZeroFPOperands(const CmpInst *Cmp)
static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)
Try to lower a vector shuffle as a bit rotation.
static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)
static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)
static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static Value * getAISize(LLVMContext &Context, Value *Amt)
static bool isImpliedTrueByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file contains the declarations for metadata subclasses.
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
PowerPC Reduce CR logical Operation
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
This file implements the SmallBitVector class.
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
Value * RHS
Value * LHS
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
Definition APFloat.h:334
LLVM_ABI float convertToFloat() const
Converts this APFloat to host float value.
Definition APFloat.cpp:6143
Class for arbitrary precision integers.
Definition APInt.h:78
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1331
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1640
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition APInt.h:1599
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
This class represents a conversion between pointers from one address space to another.
LLVM_ABI AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
LLVM_ABI AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI AllocaInst * cloneImpl() const
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
unsigned getAddressSpace() const
Return the address space for the allocation.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
LLVM_ABI AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, InsertPosition InsertBefore)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
iterator end() const
Definition ArrayRef.h:131
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
iterator begin() const
Definition ArrayRef.h:130
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:186
Class to represent array types.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
LLVM_ABI AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
friend class Instruction
Iterator for Instructions in a `BasicBlock.
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
void setAlignment(Align Align)
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
LLVM_ABI AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI AtomicRMWInst * cloneImpl() const
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ Add
*p = old + v
@ FAdd
*p = old + v
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ Sub
*p = old - v
@ And
*p = old & v
@ Xor
*p = old ^ v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ FSub
*p = old - v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
@ Nand
*p = ~(old & v)
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
void setOperation(BinOp Operation)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
BinOp getOperation() const
LLVM_ABI AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
void setAlignment(Align Align)
static LLVM_ABI StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
LLVM_ABI CaptureInfo getCaptureInfo() const
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI const ConstantRange & getRange() const
Returns the value of the range attribute.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:88
static LLVM_ABI Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
LLVM Basic Block Representation.
Definition BasicBlock.h:62
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
LLVM_ABI bool swapOperands()
Exchange the two operands to this instruction.
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition InstrTypes.h:181
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
LLVM_ABI BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
static LLVM_ABI BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
LLVM_ABI BinaryOperator * cloneImpl() const
This class represents a no-op cast from one type to another.
LLVM_ABI BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
LLVM_ABI BitCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
LLVM_ABI BranchInst * cloneImpl() const
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
bool isInlineAsm() const
Check if this call is an inline asm statement.
LLVM_ABI BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)
Return the BundleOpInfo for the operand at index OpIdx.
void setCallingConv(CallingConv::ID CC)
LLVM_ABI FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
bundle_op_iterator bundle_op_info_begin()
Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.
LLVM_ABI bool paramHasNonNullAttr(unsigned ArgNo, bool AllowUndefOrPoison) const
Return true if this argument has the nonnull attribute on either the CallBase instruction or the call...
LLVM_ABI MemoryEffects getMemoryEffects() const
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
LLVM_ABI bool doesNotAccessMemory() const
Determine if the call does not access memory.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
LLVM_ABI void setOnlyAccessesArgMemory()
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
OperandBundleUse operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const
Simple helper function to map a BundleOpInfo to an OperandBundleUse.
LLVM_ABI void setOnlyAccessesInaccessibleMemOrArgMem()
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI void setDoesNotAccessMemory()
AttributeSet getParamAttributes(unsigned ArgNo) const
Return the param attributes for this call.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
LLVM_ABI bool onlyAccessesInaccessibleMemory() const
Determine if the function may only access memory that is inaccessible from the IR.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
bundle_op_iterator bundle_op_info_end()
Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.
LLVM_ABI unsigned getNumSubclassExtraOperandsDynamic() const
Get the number of extra operands for instructions that don't have a fixed number of extra operands.
BundleOpInfo * bundle_op_iterator
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
LLVM_ABI bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
LLVM_ABI void setOnlyReadsMemory()
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
LLVM_ABI bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
LLVM_ABI CaptureInfo getCaptureInfo(unsigned OpNo) const
Return which pointer components this operand may capture.
LLVM_ABI bool hasArgumentWithAdditionalReturnCaptureComponents() const
Returns whether the call has an argument that has an attribute like captures(ret: address,...
CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
Value * getCalledOperand() const
LLVM_ABI void setOnlyWritesMemory()
LLVM_ABI op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)
Populate the BundleOpInfo instances and the Use& vector from Bundles.
AttributeList Attrs
parameter attributes for callable
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
LLVM_ABI std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
LLVM_ABI bool isReturnNonNull() const
Return true if the return value is known to be not null.
Value * getArgOperand(unsigned i) const
FunctionType * FTy
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
LLVM_ABI Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
LLVM_ABI void setOnlyAccessesInaccessibleMemory()
static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
LLVM_ABI bool onlyWritesMemory() const
Determine if the call does not access or only writes memory.
LLVM_ABI bool hasClobberingOperandBundles() const
Return true if this operand bundle user has operand bundles that may write to the heap.
void setCalledOperand(Value *V)
static LLVM_ABI CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
LLVM_ABI bool hasReadingOperandBundles() const
Return true if this operand bundle user has operand bundles that may read from the heap.
LLVM_ABI bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI void setMemoryEffects(MemoryEffects ME)
bool hasOperandBundles() const
Return true if this User has any operand bundles.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
SmallVector< BasicBlock *, 16 > getIndirectDests() const
void setDefaultDest(BasicBlock *B)
void setIndirectDest(unsigned i, BasicBlock *B)
BasicBlock * getDefaultDest() const
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
LLVM_ABI CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
TailCallKind getTailCallKind() const
LLVM_ABI CallInst * cloneImpl() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Represents which components of the pointer may be captured in which location.
Definition ModRef.h:359
CaptureComponents getOtherComponents() const
Get components potentially captured through locations other than the return value.
Definition ModRef.h:391
static CaptureInfo none()
Create CaptureInfo that does not capture any components of the pointer.
Definition ModRef.h:372
static CaptureInfo all()
Create CaptureInfo that may capture all components of the pointer.
Definition ModRef.h:375
CaptureComponents getRetComponents() const
Get components potentially captured by the return value.
Definition ModRef.h:387
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static LLVM_ABI CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast or an AddrSpaceCast cast instruction.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition InstrTypes.h:610
static LLVM_ABI unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, const DataLayout *DL)
Determine how a pair of casts can be eliminated, if they can be at all.
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static LLVM_ABI CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
CastInst(Type *Ty, unsigned iType, Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics for subclasses.
Definition InstrTypes.h:451
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static LLVM_ABI bool isBitCastable(Type *SrcTy, Type *DestTy)
Check whether a bitcast between these types is valid.
static LLVM_ABI CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
static LLVM_ABI CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
static LLVM_ABI bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
static LLVM_ABI CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
LLVM_ABI bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source a...
static LLVM_ABI CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a SExt or BitCast cast instruction.
static LLVM_ABI bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
LLVM_ABI CatchReturnInst * cloneImpl() const
void setUnwindDest(BasicBlock *UnwindDest)
LLVM_ABI void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction... Note: This action invalidates handler_end().
LLVM_ABI CatchSwitchInst * cloneImpl() const
mapped_iterator< op_iterator, DerefFnTy > handler_iterator
Value * getParentPad() const
void setParentPad(Value *ParentPad)
BasicBlock * getUnwindDest() const
LLVM_ABI void removeHandler(handler_iterator HI)
LLVM_ABI CleanupReturnInst * cloneImpl() const
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition InstrTypes.h:858
bool isEquality() const
Determine if this is an equals/not equals predicate.
Definition InstrTypes.h:915
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition InstrTypes.h:768
bool isFalseWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:948
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition InstrTypes.h:693
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition InstrTypes.h:691
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition InstrTypes.h:688
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition InstrTypes.h:689
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition InstrTypes.h:678
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
LLVM_ABI bool isEquivalence(bool Invert=false) const
Determine if one operand of this compare can always be replaced by the other operand,...
bool isSigned() const
Definition InstrTypes.h:930
static LLVM_ABI bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
bool isTrueWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:942
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
static bool isFPPredicate(Predicate P)
Definition InstrTypes.h:770
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition InstrTypes.h:871
static LLVM_ABI CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate, the two operands and the instructio...
bool isNonStrictPredicate() const
Definition InstrTypes.h:852
LLVM_ABI void swapOperands()
This is just a convenience that dispatches to the subclasses.
static bool isRelational(Predicate P)
Return true if the predicate is relational (not EQ or NE).
Definition InstrTypes.h:923
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
static LLVM_ABI StringRef getPredicateName(Predicate P)
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
bool isStrictPredicate() const
Definition InstrTypes.h:843
static LLVM_ABI bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
Definition InstrTypes.h:893
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
static LLVM_ABI bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
LLVM_ABI CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name="", InsertPosition InsertBefore=nullptr, Instruction *FlagsSource=nullptr)
bool isUnsigned() const
Definition InstrTypes.h:936
LLVM_ABI bool isCommutative() const
This is just a convenience that dispatches to the subclasses.
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
CmpPredicate()
Default constructor.
static LLVM_ABI CmpPredicate get(const CmpInst *Cmp)
Do a ICmpInst::getCmpPredicate() or CmpInst::getPredicate(), as appropriate.
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
bool hasSameSign() const
Query samesign information, for optimizations.
static LLVM_ABI CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:282
const APFloat & getValueAPF() const
Definition Constants.h:325
This is the shared class of boolean and integer constants.
Definition Constants.h:87
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
LLVM_ABI ExtractElementInst * cloneImpl() const
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
static LLVM_ABI Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
LLVM_ABI ExtractValueInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
bool isEquality() const
static LLVM_ABI bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
FCmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
This class represents an extension of floating point types.
LLVM_ABI FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
LLVM_ABI FPExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
This class represents a cast from floating point to signed integer.
LLVM_ABI FPToSIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
This class represents a cast from floating point to unsigned integer.
LLVM_ABI FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
LLVM_ABI FPToUIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a truncation of floating point types.
LLVM_ABI FPTruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
LLVM_ABI FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
LLVM_ABI FenceInst * cloneImpl() const
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
LLVM_ABI FreezeInst(Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
void setParentPad(Value *ParentPad)
Value * getParentPad() const
Convenience accessors.
LLVM_ABI FuncletPadInst * cloneImpl() const
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
bool isVarArg() const
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutInBounds() const
unsigned getRaw() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
LLVM_ABI bool isInBounds() const
Determine whether the GEP has the inbounds flag.
LLVM_ABI bool hasNoUnsignedSignedWrap() const
Determine whether the GEP has the nusw flag.
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
LLVM_ABI bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
LLVM_ABI bool hasNoUnsignedWrap() const
Determine whether the GEP has the nuw flag.
LLVM_ABI bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
LLVM_ABI void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
LLVM_ABI bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
LLVM_ABI GetElementPtrInst * cloneImpl() const
LLVM_ABI bool collectOffset(const DataLayout &DL, unsigned BitWidth, SmallMapVector< Value *, APInt, 4 > &VariableOffsets, APInt &ConstantOffset) const
LLVM_ABI void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
LLVM_ABI GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
ICmpInst(InsertPosition InsertBefore, Predicate pred, Value *LHS, Value *RHS, const Twine &NameStr="")
Constructor with insertion semantics.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
LLVM_ABI ICmpInst * cloneImpl() const
Clone an identical ICmpInst.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
bool isEquality() const
Return true if this predicate is either EQ or NE.
static LLVM_ABI Predicate getFlippedSignednessPredicate(Predicate Pred)
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static LLVM_ABI std::optional< bool > isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
Determine if Pred1 implies Pred2 is true, false, or if nothing can be inferred about the implication,...
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Indirect Branch Instruction.
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
LLVM_ABI void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
LLVM_ABI IndirectBrInst * cloneImpl() const
LLVM_ABI InsertElementInst * cloneImpl() const
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
bool isValid() const
Definition Instruction.h:62
BasicBlock * getBasicBlock()
Definition Instruction.h:63
This instruction inserts a struct field or array element value into an aggregate value.
LLVM_ABI InsertValueInst * cloneImpl() const
BitfieldElement::Type getSubclassData() const
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void swapProfMetadata()
If the instruction has "branch_weights" MD_prof metadata and the MDNode has three operands (including...
LLVM_ABI bool isVolatile() const LLVM_READONLY
Return true if this instruction has a volatile memory access.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Bitfield::Element< uint16_t, 0, 15 > OpaqueField
Instruction(const Instruction &)=delete
friend class BasicBlock
Various leaf nodes.
void setSubclassData(typename BitfieldElement::Type Value)
This class represents a cast from an integer to a pointer.
LLVM_ABI IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
Invoke instruction.
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
LLVM_ABI InvokeInst * cloneImpl() const
LLVM_ABI LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
void setUnwindDest(BasicBlock *B)
LLVM_ABI void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVMContextImpl *const pImpl
Definition LLVMContext.h:70
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
LLVM_ABI LandingPadInst * cloneImpl() const
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
void setAlignment(Align Align)
bool isVolatile() const
Return true if this is a load from a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
LLVM_ABI LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
void setVolatile(bool V)
Specify whether this is a volatile load or not.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
LLVM_ABI LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, InsertPosition InsertBefore)
Align getAlign() const
Return the alignment of the access that is being performed.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
static MemoryEffectsBase readOnly()
Definition ModRef.h:130
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition ModRef.h:226
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition ModRef.h:220
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:140
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:146
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition ModRef.h:239
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition ModRef.h:229
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition ModRef.h:223
static MemoryEffectsBase writeOnly()
Definition ModRef.h:135
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:163
static MemoryEffectsBase none()
Definition ModRef.h:125
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
Definition ModRef.h:250
StringRef getTag() const
iterator_range< const_block_iterator > blocks() const
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
LLVM_ABI void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)
Remove all incoming values for which the predicate returns true.
void setIncomingBlock(unsigned i, BasicBlock *BB)
LLVM_ABI Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
LLVM_ABI bool hasConstantOrUndefValue() const
Whether the specified PHI node always merges together the same value, assuming undefs are equal to a ...
void setIncomingValue(unsigned i, Value *V)
void copyIncomingBlocks(iterator_range< const_block_iterator > BBRange, uint32_t ToIdx=0)
Copies the basic blocks from BBRange to the incoming basic block list of this PHINode,...
const_block_iterator block_end() const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
LLVM_ABI Value * hasConstantValue() const
If the specified PHI node always merges together the same value, return the value,...
LLVM_ABI PHINode * cloneImpl() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Class to represent pointers.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
PtrToAddrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
PtrToAddrInst * cloneImpl() const
Clone an identical PtrToAddrInst.
This class represents a cast from a pointer to an integer.
LLVM_ABI PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
LLVM_ABI ResumeInst * cloneImpl() const
Return a value (possibly void), from a function.
LLVM_ABI ReturnInst * cloneImpl() const
This class represents a sign extension of integer types.
LLVM_ABI SExtInst * cloneImpl() const
Clone an identical SExtInst.
LLVM_ABI SExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a cast from signed integer to floating point.
LLVM_ABI SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
LLVM_ABI SIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Class to represent scalable SIMD vectors.
LLVM_ABI SelectInst * cloneImpl() const
static LLVM_ABI const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
static LLVM_ABI bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
ArrayRef< int > getShuffleMask() const
static LLVM_ABI bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
LLVM_ABI ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static LLVM_ABI bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
LLVM_ABI bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
static LLVM_ABI bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)
Return true if this shuffle mask represents "clustered" mask of size VF, i.e.
LLVM_ABI bool isIdentityWithPadding() const
Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.
static LLVM_ABI bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector.
LLVM_ABI bool isConcat() const
Return true if this shuffle concatenates its 2 source vectors.
static LLVM_ABI bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor, unsigned &Index)
Check if the mask is a DE-interleave mask of the given factor Factor like: <Index,...
LLVM_ABI ShuffleVectorInst * cloneImpl() const
static LLVM_ABI bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
LLVM_ABI void setShuffleMask(ArrayRef< int > Mask)
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
LLVM_ABI bool isInterleave(unsigned Factor)
Return if this shuffle interleaves its two input vectors together.
static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static LLVM_ABI bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
LLVM_ABI void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
static LLVM_ABI bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static LLVM_ABI Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)
static LLVM_ABI bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)
Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor tim...
static LLVM_ABI bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Align getAlign() const
void setVolatile(bool V)
Specify whether this is a volatile store or not.
void setAlignment(Align Align)
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
LLVM_ABI StoreInst * cloneImpl() const
LLVM_ABI StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
bool isVolatile() const
Return true if this is a store to a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Class to represent struct types.
LLVM_ABI void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
LLVM_ABI Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
LLVM_ABI CaseWeightOpt getSuccessorWeight(unsigned idx)
LLVM_ABI void replaceDefaultDest(SwitchInst::CaseIt I)
Replace the default destination by given case.
std::optional< uint32_t > CaseWeightOpt
LLVM_ABI SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
Multiway switch.
void allocHungoffUses(unsigned N)
LLVM_ABI SwitchInst * cloneImpl() const
LLVM_ABI void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
ConstantInt *const * case_values() const
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
LLVM_ABI CaseIt removeCase(CaseIt I)
This method removes the specified case and its successor from the switch instruction.
This class represents a truncation of integer types.
LLVM_ABI TruncInst * cloneImpl() const
Clone an identical TruncInst.
LLVM_ABI TruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
static constexpr TypeSize get(ScalarTy Quantity, bool Scalable)
Definition TypeSize.h:340
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM_ABI bool isFirstClassType() const
Return true if the type is "first class", meaning it is a valid type for a Value.
Definition Type.cpp:249
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:304
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:293
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isTokenTy() const
Return true if this is 'token'.
Definition Type.h:234
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition Type.h:225
This class represents a cast unsigned integer to floating point.
LLVM_ABI UIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
UnaryInstruction(Type *Ty, unsigned iType, Value *V, InsertPosition InsertBefore=nullptr)
Definition InstrTypes.h:62
static LLVM_ABI UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a unary instruction, given the opcode and an operand.
LLVM_ABI UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
LLVM_ABI UnaryOperator * cloneImpl() const
UnaryOps getOpcode() const
Definition InstrTypes.h:154
LLVM_ABI UnreachableInst(LLVMContext &C, InsertPosition InsertBefore=nullptr)
LLVM_ABI bool shouldLowerToTrap(bool TrapUnreachable, bool NoTrapAfterNoreturn) const
friend class Instruction
Iterator for Instructions in a BasicBlock.
LLVM_ABI UnreachableInst * cloneImpl() const
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM_ABI void set(Value *Val)
Definition Value.h:905
Use * op_iterator
Definition User.h:279
const Use * getOperandList() const
Definition User.h:225
op_iterator op_begin()
Definition User.h:284
LLVM_ABI void allocHungoffUses(unsigned N, bool WithExtraValues=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition User.cpp:53
const Use & getOperandUse(unsigned i) const
Definition User.h:245
void setNumHungOffUseOperands(unsigned NumOps)
Subclasses with hung off uses need to manage the operand count themselves.
Definition User.h:265
Use & Op()
Definition User.h:196
LLVM_ABI void growHungoffUses(unsigned N, bool WithExtraValues=false)
Grow the number of hung off uses.
Definition User.cpp:70
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
op_iterator op_end()
Definition User.h:286
VAArgInst(Value *List, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI VAArgInst * cloneImpl() const
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI Value(Type *Ty, unsigned scid)
Definition Value.cpp:53
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition Value.h:85
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
Definition Value.cpp:390
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
unsigned NumUserOperands
Definition Value.h:109
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
LLVM_ABI ZExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
LLVM_ABI ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
An efficient, type-erasing, non-owning reference to a callable.
typename base_list_type::iterator iterator
Definition ilist.h:121
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
bool match(Val *V, const Pattern &P)
cstfp_pred_ty< is_non_zero_not_denormal_fp > m_NonZeroNotDenormalFP()
Match a floating-point non-zero that is not a denormal.
initializer< Ty > init(const Ty &Val)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
Definition CoroShape.h:31
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
Context & getContext() const
Definition BasicBlock.h:99
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
auto seq_inclusive(T Begin, T End)
Iterate over an integral type from Begin to End inclusive.
Definition Sequence.h:325
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
unsigned getPointerAddressSpace(const Type *T)
Definition SPIRVUtils.h:365
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
Definition ModRef.h:301
std::enable_if_t< std::is_unsigned_v< T >, std::optional< T > > checkedMulUnsigned(T LHS, T RHS)
Multiply two unsigned integers LHS and RHS.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool isPointerTy(const Type *T)
Definition SPIRVUtils.h:359
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
constexpr int PoisonMaskElem
LLVM_ABI unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
OperandBundleDefT< Value * > OperandBundleDef
Definition AutoUpgrade.h:34
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMul
Product of floats.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1847
constexpr unsigned BitWidth
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1909
bool capturesAnything(CaptureComponents CC)
Definition ModRef.h:324
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition STLExtras.h:2120
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
LLVM_ABI void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Summary of memprof metadata on allocations.
Used to keep track of an operand bundle.
uint32_t End
The index in the Use& vector where operands for this operand bundle ends.
uint32_t Begin
The index in the Use& vector where operands for this operand bundle starts.
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
static LLVM_ABI std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
static LLVM_ABI std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
static LLVM_ABI std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
static LLVM_ABI std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static LLVM_ABI std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static LLVM_ABI std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static LLVM_ABI std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
static LLVM_ABI std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static LLVM_ABI std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
static LLVM_ABI std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
Matching combinators.
A MapVector that performs no allocations if smaller than a certain size.
Definition MapVector.h:276
Indicates this User has operands co-allocated.
Definition User.h:60
Indicates this User has operands and a descriptor co-allocated .
Definition User.h:66